X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=kernel%2Ftime%2Ftick-broadcast.c;h=877dbedc3118275ac77f34c2b63a655f6c1a7617;hb=fb63a0ebe615fba9de8c75ea44ded999d1e24c65;hp=8314ecb32d3327b763ff95b8cf4f9bd1b63f253b;hpb=79bf2bb335b85db25d27421c798595a2fa2a0e82;p=safe%2Fjmp%2Flinux-2.6

diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 8314ecb..877dbed 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -14,7 +14,7 @@
 #include 
 #include 
 #include 
-#include 
+#include 
 #include 
 #include 
 #include 
@@ -27,16 +27,38 @@
  * timer stops in C3 state.
  */
 
-struct tick_device tick_broadcast_device;
-static cpumask_t tick_broadcast_mask;
+static struct tick_device tick_broadcast_device;
+/* FIXME: Use cpumask_var_t. */
+static DECLARE_BITMAP(tick_broadcast_mask, NR_CPUS);
+static DECLARE_BITMAP(tmpmask, NR_CPUS);
 static DEFINE_SPINLOCK(tick_broadcast_lock);
+static int tick_broadcast_force;
+
+#ifdef CONFIG_TICK_ONESHOT
+static void tick_broadcast_clear_oneshot(int cpu);
+#else
+static inline void tick_broadcast_clear_oneshot(int cpu) { }
+#endif
+
+/*
+ * Debugging: see timer_list.c
+ */
+struct tick_device *tick_get_broadcast_device(void)
+{
+	return &tick_broadcast_device;
+}
+
+struct cpumask *tick_get_broadcast_mask(void)
+{
+	return to_cpumask(tick_broadcast_mask);
+}
 
 /*
  * Start the device in periodic mode
  */
 static void tick_broadcast_start_periodic(struct clock_event_device *bc)
 {
-	if (bc && bc->mode == CLOCK_EVT_MODE_SHUTDOWN)
+	if (bc)
 		tick_setup_periodic(bc, 1);
 }
 
@@ -45,13 +67,14 @@ static void tick_broadcast_start_periodic(struct clock_event_device *bc)
  */
 int tick_check_broadcast_device(struct clock_event_device *dev)
 {
-	if (tick_broadcast_device.evtdev ||
-	    (dev->features & CLOCK_EVT_FEAT_C3STOP))
+	if ((tick_broadcast_device.evtdev &&
+	     tick_broadcast_device.evtdev->rating >= dev->rating) ||
+	    (dev->features & CLOCK_EVT_FEAT_C3STOP))
 		return 0;
 
 	clockevents_exchange_device(NULL, dev);
 	tick_broadcast_device.evtdev = dev;
-	if (!cpus_empty(tick_broadcast_mask))
+	if (!cpumask_empty(tick_get_broadcast_mask()))
 		tick_broadcast_start_periodic(dev);
 	return 1;
 }
@@ -83,46 +106,53 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
 	 */
 	if (!tick_device_is_functional(dev)) {
 		dev->event_handler = tick_handle_periodic;
-		cpu_set(cpu, tick_broadcast_mask);
+		cpumask_set_cpu(cpu, tick_get_broadcast_mask());
 		tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
 		ret = 1;
-	}
+	} else {
+		/*
+		 * When the new device is not affected by the stop
+		 * feature and the cpu is marked in the broadcast mask
+		 * then clear the broadcast bit.
+		 */
+		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
+			int cpu = smp_processor_id();
+			cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
+			tick_broadcast_clear_oneshot(cpu);
+		}
+	}
 	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 	return ret;
 }
 
 /*
- * Broadcast the event to the cpus, which are set in the mask
+ * Broadcast the event to the cpus, which are set in the mask (mangled).
*/ -int tick_do_broadcast(cpumask_t mask) +static void tick_do_broadcast(struct cpumask *mask) { - int ret = 0, cpu = smp_processor_id(); + int cpu = smp_processor_id(); struct tick_device *td; /* * Check, if the current cpu is in the mask */ - if (cpu_isset(cpu, mask)) { - cpu_clear(cpu, mask); + if (cpumask_test_cpu(cpu, mask)) { + cpumask_clear_cpu(cpu, mask); td = &per_cpu(tick_cpu_device, cpu); td->evtdev->event_handler(td->evtdev); - ret = 1; } - if (!cpus_empty(mask)) { + if (!cpumask_empty(mask)) { /* * It might be necessary to actually check whether the devices * have different broadcast functions. For now, just use the * one of the first device. This works as long as we have this * misfeature only on x86 (lapic) */ - cpu = first_cpu(mask); - td = &per_cpu(tick_cpu_device, cpu); + td = &per_cpu(tick_cpu_device, cpumask_first(mask)); td->evtdev->broadcast(mask); - ret = 1; } - return ret; } /* @@ -131,12 +161,11 @@ int tick_do_broadcast(cpumask_t mask) */ static void tick_do_periodic_broadcast(void) { - cpumask_t mask; - spin_lock(&tick_broadcast_lock); - cpus_and(mask, cpu_online_map, tick_broadcast_mask); - tick_do_broadcast(mask); + cpumask_and(to_cpumask(tmpmask), + cpu_online_mask, tick_get_broadcast_mask()); + tick_do_broadcast(to_cpumask(tmpmask)); spin_unlock(&tick_broadcast_lock); } @@ -146,7 +175,7 @@ static void tick_do_periodic_broadcast(void) */ static void tick_handle_periodic_broadcast(struct clock_event_device *dev) { - dev->next_event.tv64 = KTIME_MAX; + ktime_t next; tick_do_periodic_broadcast(); @@ -158,10 +187,13 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev) /* * Setup the next period for devices, which do not have - * periodic mode: + * periodic mode. We read dev->next_event first and add to it + * when the event alrady expired. clockevents_program_event() + * sets dev->next_event only when the event is really + * programmed to the device. */ - for (;;) { - ktime_t next = ktime_add(dev->next_event, tick_period); + for (next = dev->next_event; ;) { + next = ktime_add(next, tick_period); if (!clockevents_program_event(dev, next, ktime_get())) return; @@ -178,7 +210,7 @@ static void tick_do_broadcast_on_off(void *why) struct clock_event_device *bc, *dev; struct tick_device *td; unsigned long flags, *reason = why; - int cpu; + int cpu, bc_stopped; spin_lock_irqsave(&tick_broadcast_lock, flags); @@ -188,31 +220,43 @@ static void tick_do_broadcast_on_off(void *why) bc = tick_broadcast_device.evtdev; /* - * Is the device in broadcast mode forever or is it not - * affected by the powerstate ? + * Is the device not affected by the powerstate ? 
*/ - if (!dev || !tick_device_is_functional(dev) || - !(dev->features & CLOCK_EVT_FEAT_C3STOP)) + if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP)) goto out; - if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_ON) { - if (!cpu_isset(cpu, tick_broadcast_mask)) { - cpu_set(cpu, tick_broadcast_mask); - if (td->mode == TICKDEV_MODE_PERIODIC) - clockevents_set_mode(dev, - CLOCK_EVT_MODE_SHUTDOWN); + if (!tick_device_is_functional(dev)) + goto out; + + bc_stopped = cpumask_empty(tick_get_broadcast_mask()); + + switch (*reason) { + case CLOCK_EVT_NOTIFY_BROADCAST_ON: + case CLOCK_EVT_NOTIFY_BROADCAST_FORCE: + if (!cpumask_test_cpu(cpu, tick_get_broadcast_mask())) { + cpumask_set_cpu(cpu, tick_get_broadcast_mask()); + if (tick_broadcast_device.mode == + TICKDEV_MODE_PERIODIC) + clockevents_shutdown(dev); } - } else { - if (cpu_isset(cpu, tick_broadcast_mask)) { - cpu_clear(cpu, tick_broadcast_mask); - if (td->mode == TICKDEV_MODE_PERIODIC) + if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE) + tick_broadcast_force = 1; + break; + case CLOCK_EVT_NOTIFY_BROADCAST_OFF: + if (!tick_broadcast_force && + cpumask_test_cpu(cpu, tick_get_broadcast_mask())) { + cpumask_clear_cpu(cpu, tick_get_broadcast_mask()); + if (tick_broadcast_device.mode == + TICKDEV_MODE_PERIODIC) tick_setup_periodic(dev, 0); } + break; } - if (cpus_empty(tick_broadcast_mask)) - clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN); - else { + if (cpumask_empty(tick_get_broadcast_mask())) { + if (!bc_stopped) + clockevents_shutdown(bc); + } else if (bc_stopped) { if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) tick_broadcast_start_periodic(bc); else @@ -228,14 +272,12 @@ out: */ void tick_broadcast_on_off(unsigned long reason, int *oncpu) { - int cpu = get_cpu(); - - if (cpu == *oncpu) - tick_do_broadcast_on_off(&reason); + if (!cpumask_test_cpu(*oncpu, cpu_online_mask)) + printk(KERN_ERR "tick-broadcast: ignoring broadcast for " + "offline CPU #%d\n", *oncpu); else smp_call_function_single(*oncpu, tick_do_broadcast_on_off, - &reason, 1, 1); - put_cpu(); + &reason, 1); } /* @@ -261,60 +303,98 @@ void tick_shutdown_broadcast(unsigned int *cpup) spin_lock_irqsave(&tick_broadcast_lock, flags); bc = tick_broadcast_device.evtdev; - cpu_clear(cpu, tick_broadcast_mask); + cpumask_clear_cpu(cpu, tick_get_broadcast_mask()); if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) { - if (bc && cpus_empty(tick_broadcast_mask)) - clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN); + if (bc && cpumask_empty(tick_get_broadcast_mask())) + clockevents_shutdown(bc); } spin_unlock_irqrestore(&tick_broadcast_lock, flags); } +void tick_suspend_broadcast(void) +{ + struct clock_event_device *bc; + unsigned long flags; + + spin_lock_irqsave(&tick_broadcast_lock, flags); + + bc = tick_broadcast_device.evtdev; + if (bc) + clockevents_shutdown(bc); + + spin_unlock_irqrestore(&tick_broadcast_lock, flags); +} + +int tick_resume_broadcast(void) +{ + struct clock_event_device *bc; + unsigned long flags; + int broadcast = 0; + + spin_lock_irqsave(&tick_broadcast_lock, flags); + + bc = tick_broadcast_device.evtdev; + + if (bc) { + clockevents_set_mode(bc, CLOCK_EVT_MODE_RESUME); + + switch (tick_broadcast_device.mode) { + case TICKDEV_MODE_PERIODIC: + if (!cpumask_empty(tick_get_broadcast_mask())) + tick_broadcast_start_periodic(bc); + broadcast = cpumask_test_cpu(smp_processor_id(), + tick_get_broadcast_mask()); + break; + case TICKDEV_MODE_ONESHOT: + broadcast = tick_resume_broadcast_oneshot(bc); + break; + } + } + spin_unlock_irqrestore(&tick_broadcast_lock, 
flags); + + return broadcast; +} + + #ifdef CONFIG_TICK_ONESHOT -static cpumask_t tick_broadcast_oneshot_mask; +/* FIXME: use cpumask_var_t. */ +static DECLARE_BITMAP(tick_broadcast_oneshot_mask, NR_CPUS); + +/* + * Exposed for debugging: see timer_list.c + */ +struct cpumask *tick_get_broadcast_oneshot_mask(void) +{ + return to_cpumask(tick_broadcast_oneshot_mask); +} static int tick_broadcast_set_event(ktime_t expires, int force) { struct clock_event_device *bc = tick_broadcast_device.evtdev; - ktime_t now = ktime_get(); - int res; - - for(;;) { - res = clockevents_program_event(bc, expires, now); - if (!res || !force) - return res; - now = ktime_get(); - expires = ktime_add(now, ktime_set(0, bc->min_delta_ns)); - } + + return tick_dev_program_event(bc, expires, force); +} + +int tick_resume_broadcast_oneshot(struct clock_event_device *bc) +{ + clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT); + return 0; } /* - * Reprogram the broadcast device: - * - * Called with tick_broadcast_lock held and interrupts disabled. + * Called from irq_enter() when idle was interrupted to reenable the + * per cpu device. */ -static int tick_broadcast_reprogram(void) +void tick_check_oneshot_broadcast(int cpu) { - ktime_t expires = { .tv64 = KTIME_MAX }; - struct tick_device *td; - int cpu; + if (cpumask_test_cpu(cpu, to_cpumask(tick_broadcast_oneshot_mask))) { + struct tick_device *td = &per_cpu(tick_cpu_device, cpu); - /* - * Find the event which expires next: - */ - for (cpu = first_cpu(tick_broadcast_oneshot_mask); cpu != NR_CPUS; - cpu = next_cpu(cpu, tick_broadcast_oneshot_mask)) { - td = &per_cpu(tick_cpu_device, cpu); - if (td->evtdev->next_event.tv64 < expires.tv64) - expires = td->evtdev->next_event; + clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT); } - - if (expires.tv64 == KTIME_MAX) - return 0; - - return tick_broadcast_set_event(expires, 0); } /* @@ -323,34 +403,45 @@ static int tick_broadcast_reprogram(void) static void tick_handle_oneshot_broadcast(struct clock_event_device *dev) { struct tick_device *td; - cpumask_t mask; - ktime_t now; + ktime_t now, next_event; int cpu; spin_lock(&tick_broadcast_lock); again: dev->next_event.tv64 = KTIME_MAX; - mask = CPU_MASK_NONE; + next_event.tv64 = KTIME_MAX; + cpumask_clear(to_cpumask(tmpmask)); now = ktime_get(); /* Find all expired events */ - for (cpu = first_cpu(tick_broadcast_oneshot_mask); cpu != NR_CPUS; - cpu = next_cpu(cpu, tick_broadcast_oneshot_mask)) { + for_each_cpu(cpu, tick_get_broadcast_oneshot_mask()) { td = &per_cpu(tick_cpu_device, cpu); if (td->evtdev->next_event.tv64 <= now.tv64) - cpu_set(cpu, mask); + cpumask_set_cpu(cpu, to_cpumask(tmpmask)); + else if (td->evtdev->next_event.tv64 < next_event.tv64) + next_event.tv64 = td->evtdev->next_event.tv64; } /* - * Wakeup the cpus which have an expired event. The broadcast - * device is reprogrammed in the return from idle code. + * Wakeup the cpus which have an expired event. */ - if (!tick_do_broadcast(mask)) { + tick_do_broadcast(to_cpumask(tmpmask)); + + /* + * Two reasons for reprogram: + * + * - The global event did not expire any CPU local + * events. This happens in dyntick mode, as the maximum PIT + * delta is quite small. + * + * - There are pending events on sleeping CPUs which were not + * in the event mask + */ + if (next_event.tv64 != KTIME_MAX) { /* - * The global event did not expire any CPU local - * events. This happens in dyntick mode, as the - * maximum PIT delta is quite small. + * Rearm the broadcast device. 
If event expired, + * repeat the above */ - if (tick_broadcast_reprogram()) + if (tick_broadcast_set_event(next_event, 0)) goto again; } spin_unlock(&tick_broadcast_lock); @@ -385,15 +476,16 @@ void tick_broadcast_oneshot_control(unsigned long reason) goto out; if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) { - if (!cpu_isset(cpu, tick_broadcast_oneshot_mask)) { - cpu_set(cpu, tick_broadcast_oneshot_mask); + if (!cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) { + cpumask_set_cpu(cpu, tick_get_broadcast_oneshot_mask()); clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN); if (dev->next_event.tv64 < bc->next_event.tv64) tick_broadcast_set_event(dev->next_event, 1); } } else { - if (cpu_isset(cpu, tick_broadcast_oneshot_mask)) { - cpu_clear(cpu, tick_broadcast_oneshot_mask); + if (cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) { + cpumask_clear_cpu(cpu, + tick_get_broadcast_oneshot_mask()); clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT); if (dev->next_event.tv64 != KTIME_MAX) tick_program_event(dev->next_event, 1); @@ -404,15 +496,63 @@ out: spin_unlock_irqrestore(&tick_broadcast_lock, flags); } +/* + * Reset the one shot broadcast for a cpu + * + * Called with tick_broadcast_lock held + */ +static void tick_broadcast_clear_oneshot(int cpu) +{ + cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask()); +} + +static void tick_broadcast_init_next_event(struct cpumask *mask, + ktime_t expires) +{ + struct tick_device *td; + int cpu; + + for_each_cpu(cpu, mask) { + td = &per_cpu(tick_cpu_device, cpu); + if (td->evtdev) + td->evtdev->next_event = expires; + } +} + /** - * tick_broadcast_setup_highres - setup the broadcast device for highres + * tick_broadcast_setup_oneshot - setup the broadcast device */ void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { - if (bc->mode != CLOCK_EVT_MODE_ONESHOT) { + /* Set it up only once ! */ + if (bc->event_handler != tick_handle_oneshot_broadcast) { + int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC; + int cpu = smp_processor_id(); + bc->event_handler = tick_handle_oneshot_broadcast; clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT); - bc->next_event.tv64 = KTIME_MAX; + + /* Take the do_timer update */ + tick_do_timer_cpu = cpu; + + /* + * We must be careful here. There might be other CPUs + * waiting for periodic broadcast. We need to set the + * oneshot_mask bits for those and program the + * broadcast device to fire. + */ + cpumask_copy(to_cpumask(tmpmask), tick_get_broadcast_mask()); + cpumask_clear_cpu(cpu, to_cpumask(tmpmask)); + cpumask_or(tick_get_broadcast_oneshot_mask(), + tick_get_broadcast_oneshot_mask(), + to_cpumask(tmpmask)); + + if (was_periodic && !cpumask_empty(to_cpumask(tmpmask))) { + tick_broadcast_init_next_event(to_cpumask(tmpmask), + tick_next_period); + tick_broadcast_set_event(tick_next_period, 1); + } else + bc->next_event.tv64 = KTIME_MAX; } } @@ -439,21 +579,26 @@ void tick_broadcast_switch_to_oneshot(void) */ void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { - struct clock_event_device *bc; unsigned long flags; unsigned int cpu = *cpup; spin_lock_irqsave(&tick_broadcast_lock, flags); - bc = tick_broadcast_device.evtdev; - cpu_clear(cpu, tick_broadcast_oneshot_mask); - - if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT) { - if (bc && cpus_empty(tick_broadcast_oneshot_mask)) - clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN); - } + /* + * Clear the broadcast mask flag for the dead cpu, but do not + * stop the broadcast device! 
+ */ + cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask()); spin_unlock_irqrestore(&tick_broadcast_lock, flags); } +/* + * Check, whether the broadcast device is in one shot mode + */ +int tick_broadcast_oneshot_active(void) +{ + return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT; +} + #endif