/* BKL stats */
unsigned int bkl_count;
#endif
- struct lock_class_key rq_lock_key;
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
/*
* ratelimit for updating the group shares.
- * default: 0.5ms
+ * default: 0.25ms
*/
-const_debug unsigned int sysctl_sched_shares_ratelimit = 500000;
+unsigned int sysctl_sched_shares_ratelimit = 250000;
/*
* period over which we measure -rt task cpu usage in us.
static inline u64 global_rt_runtime(void)
{
- if (sysctl_sched_rt_period < 0)
+ if (sysctl_sched_rt_runtime < 0)
return RUNTIME_INF;
return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
{
if (!sched_feat(HRTICK))
return 0;
- if (!cpu_online(cpu_of(rq)))
+ if (!cpu_active(cpu_of(rq)))
return 0;
return hrtimer_is_hres_active(&rq->hrtick_timer);
}
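/*
 * Editorial note: cpu_active() is a stricter test than cpu_online(); it
 * is cleared early in CPU hot-unplug, so checking it here (and in the
 * migration paths below) keeps the scheduler from queuing new work onto
 * a CPU that is already on its way down.
 */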
/*
* wait_task_inactive - wait for a thread to unschedule.
*
+ * If @match_state is nonzero, it's the @p->state value just checked and
+ * not expected to change. If it changes, i.e. @p might have woken up,
+ * then return zero. When we succeed in waiting for @p to be off its CPU,
+ * we return a positive number (its total switch count). If a second call
+ * a short while later returns the same number, the caller can be sure that
+ * @p has remained unscheduled the whole time.
+ *
* The caller must ensure that the task *will* unschedule sometime soon,
* else this function might spin for a *long* time. This function can't
* be called with interrupts off, or it may introduce deadlock with
* smp_call_function() if an IPI is sent by the same process we are
* waiting to become inactive.
*/
-void wait_task_inactive(struct task_struct *p)
+unsigned long wait_task_inactive(struct task_struct *p, long match_state)
{
unsigned long flags;
int running, on_rq;
+ unsigned long ncsw;
struct rq *rq;
for (;;) {
* return false if the runqueue has changed and p
* is actually now running somewhere else!
*/
- while (task_running(rq, p))
+ while (task_running(rq, p)) {
+ if (match_state && unlikely(p->state != match_state))
+ return 0;
cpu_relax();
+ }
/*
* Ok, time to look more closely! We need the rq
rq = task_rq_lock(p, &flags);
running = task_running(rq, p);
on_rq = p->se.on_rq;
+ ncsw = 0;
+ if (!match_state || p->state == match_state) {
+ ncsw = p->nivcsw + p->nvcsw;
+ if (unlikely(!ncsw))
+ ncsw = 1;
+ }
task_rq_unlock(rq, &flags);
/*
+ * If it changed from the expected state, bail out now.
+ */
+ if (unlikely(!ncsw))
+ break;
+
+ /*
* Was it really running after all now that we
* checked with the proper locks actually held?
*
*/
break;
}
+
+ return ncsw;
}
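/*
 * Illustrative sketch (editorial, not part of this patch): a caller can
 * use the switch-count cookie to verify that a task stayed unscheduled
 * between two probes. The helper name and the use of TASK_TRACED are
 * hypothetical; ptrace-style usage is the intended shape.
 */
static int example_ensure_stopped(struct task_struct *child)
{
	unsigned long ncsw = wait_task_inactive(child, TASK_TRACED);

	if (!ncsw)
		return -ESRCH;	/* state changed: it may have woken up */

	/* ... briefly inspect the unscheduled task here ... */

	if (wait_task_inactive(child, TASK_TRACED) != ncsw)
		return -ESRCH;	/* it was scheduled in the meantime */
	return 0;
}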
/***
/* Tally up the load of all CPUs in the group */
avg_load = 0;
- for_each_cpu_mask(i, group->cpumask) {
+ for_each_cpu_mask_nr(i, group->cpumask) {
/* Bias balancing toward cpus of our domain */
if (local_group)
load = source_load(i, load_idx);
/* Traverse only the allowed CPUs */
cpus_and(*tmp, group->cpumask, p->cpus_allowed);
- for_each_cpu_mask(i, *tmp) {
+ for_each_cpu_mask_nr(i, *tmp) {
load = weighted_cpuload(i);
if (load < min_load || (load == min_load && i == this_cpu)) {
} else {
if (rq1 < rq2) {
spin_lock(&rq1->lock);
- spin_lock(&rq2->lock);
+ spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
} else {
spin_lock(&rq2->lock);
- spin_lock(&rq1->lock);
+ spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
}
}
update_rq_clock(rq1);
if (busiest < this_rq) {
spin_unlock(&this_rq->lock);
spin_lock(&busiest->lock);
- spin_lock(&this_rq->lock);
+ spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
ret = 1;
} else
- spin_lock(&busiest->lock);
+ spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
}
return ret;
}
+static void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
+ __releases(busiest->lock)
+{
+ spin_unlock(&busiest->lock);
+ lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
+}
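/*
 * Editorial note: double_lock_balance() (whose tail is shown above) may
 * drop this_rq->lock and re-take it nested via SINGLE_DEPTH_NESTING so
 * lockdep accepts holding two rq locks. double_unlock_balance() is its
 * required counterpart: it releases busiest->lock and resets the still
 * held this_rq->lock back to subclass 0. Expected pairing, sketched with
 * hypothetical context:
 *
 *	if (double_lock_balance(this_rq, busiest)) {
 *		... this_rq->lock was dropped and re-taken; re-check state ...
 *	}
 *	... pull tasks from busiest ...
 *	double_unlock_balance(this_rq, busiest);
 */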
+
/*
* If dest_cpu is allowed for this process, migrate the task to it.
* This is accomplished by forcing the cpu_allowed mask to only
rq = task_rq_lock(p, &flags);
if (!cpu_isset(dest_cpu, p->cpus_allowed)
- || unlikely(cpu_is_offline(dest_cpu)))
+ || unlikely(!cpu_active(dest_cpu)))
goto out;
/* force the process onto the specified CPU */
max_cpu_load = 0;
min_cpu_load = ~0UL;
- for_each_cpu_mask(i, group->cpumask) {
+ for_each_cpu_mask_nr(i, group->cpumask) {
struct rq *rq;
if (!cpu_isset(i, *cpus))
unsigned long max_load = 0;
int i;
- for_each_cpu_mask(i, group->cpumask) {
+ for_each_cpu_mask_nr(i, group->cpumask) {
unsigned long wl;
if (!cpu_isset(i, *cpus))
ld_moved = move_tasks(this_rq, this_cpu, busiest,
imbalance, sd, CPU_NEWLY_IDLE,
&all_pinned);
- spin_unlock(&busiest->lock);
+ double_unlock_balance(this_rq, busiest);
if (unlikely(all_pinned)) {
cpu_clear(cpu_of(busiest), *cpus);
else
schedstat_inc(sd, alb_failed);
}
- spin_unlock(&target_rq->lock);
+ double_unlock_balance(busiest_rq, target_rq);
}
#ifdef CONFIG_NO_HZ
/*
* If we are going offline and still the leader, give up!
*/
- if (cpu_is_offline(cpu) &&
+ if (!cpu_active(cpu) &&
atomic_read(&nohz.load_balancer) == cpu) {
if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
BUG();
int balance_cpu;
cpu_clear(this_cpu, cpus);
- for_each_cpu_mask(balance_cpu, cpus) {
+ for_each_cpu_mask_nr(balance_cpu, cpus) {
/*
* If this cpu gets work to do, stop the load balancing
* work being done for other cpus. Next load
cpustat->nice = cputime64_add(cpustat->nice, tmp);
else
cpustat->user = cputime64_add(cpustat->user, tmp);
+ /* Account for user time used */
+ acct_update_integrals(p);
}
/*
}
/*
+ * Use precise platform statistics if available:
+ */
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+cputime_t task_utime(struct task_struct *p)
+{
+ return p->utime;
+}
+
+cputime_t task_stime(struct task_struct *p)
+{
+ return p->stime;
+}
+#else
+cputime_t task_utime(struct task_struct *p)
+{
+ clock_t utime = cputime_to_clock_t(p->utime),
+ total = utime + cputime_to_clock_t(p->stime);
+ u64 temp;
+
+ /*
+ * Use CFS's precise accounting:
+ */
+ temp = (u64)nsec_to_clock_t(p->se.sum_exec_runtime);
+
+ if (total) {
+ temp *= utime;
+ do_div(temp, total);
+ }
+ utime = (clock_t)temp;
+
+ p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime));
+ return p->prev_utime;
+}
+
+cputime_t task_stime(struct task_struct *p)
+{
+ clock_t stime;
+
+ /*
+ * Use CFS's precise accounting. (we subtract utime from
+ * the total, to make sure the total observed by userspace
+ * grows monotonically - apps rely on that):
+ */
+ stime = nsec_to_clock_t(p->se.sum_exec_runtime) -
+ cputime_to_clock_t(task_utime(p));
+
+ if (stime >= 0)
+ p->prev_stime = max(p->prev_stime, clock_t_to_cputime(stime));
+
+ return p->prev_stime;
+}
+#endif
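/*
 * Worked example (editorial): suppose tick sampling recorded utime = 30
 * and stime = 10 ticks (total = 40), while CFS measured sum_exec_runtime
 * worth 48 ticks. task_utime() scales 48 * 30 / 40 = 36 ticks, and
 * task_stime() reports 48 - 36 = 12 ticks, so utime + stime matches the
 * precise CFS total. The prev_* clamps keep both values monotonic across
 * successive reads (e.g. from /proc).
 */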
+
+inline cputime_t task_gtime(struct task_struct *p)
+{
+ return p->gtime;
+}
+
+/*
* This function gets called by the timer code, with HZ frequency.
* We call it with interrupts disabled.
*
}
EXPORT_SYMBOL(wait_for_completion_killable);
+/**
+ * try_wait_for_completion - try to decrement a completion without blocking
+ * @x: completion structure
+ *
+ * Returns: 0 if a decrement cannot be done without blocking
+ * 1 if a decrement succeeded.
+ *
+ * If a completion is being used as a counting completion,
+ * attempt to decrement the counter without blocking. This
+ * enables us to avoid waiting if the resource the completion
+ * is protecting is not available.
+ */
+bool try_wait_for_completion(struct completion *x)
+{
+ int ret = 1;
+
+ spin_lock_irq(&x->wait.lock);
+ if (!x->done)
+ ret = 0;
+ else
+ x->done--;
+ spin_unlock_irq(&x->wait.lock);
+ return ret;
+}
+EXPORT_SYMBOL(try_wait_for_completion);
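/*
 * Illustrative usage (editorial, not part of this patch): a completion
 * used as a counting resource, with a non-blocking fast path. The
 * structure and field names are hypothetical.
 */
struct example_pool {
	struct completion slot_available;	/* complete()d once per free slot */
};

static int example_get_slot(struct example_pool *pool)
{
	if (try_wait_for_completion(&pool->slot_available))
		return 0;		/* consumed one free slot */
	return -EBUSY;			/* caller may retry or sleep */
}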
+
+/**
+ * completion_done - Test to see if a completion has any waiters
+ * @x: completion structure
+ *
+ * Returns: 0 if there are waiters (wait_for_completion() in progress)
+ * 1 if there are no waiters.
+ *
+ */
+bool completion_done(struct completion *x)
+{
+ int ret = 1;
+
+ spin_lock_irq(&x->wait.lock);
+ if (!x->done)
+ ret = 0;
+ spin_unlock_irq(&x->wait.lock);
+ return ret;
+}
+EXPORT_SYMBOL(completion_done);
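/*
 * Illustrative usage (editorial): completion_done() lets a signaller
 * avoid re-completing a completion that has already been completed but
 * not yet consumed. The helper name is hypothetical.
 */
static void example_signal_once(struct completion *done)
{
	if (!completion_done(done))
		complete(done);
}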
+
static long __sched
sleep_on_common(wait_queue_head_t *q, int state, long timeout)
{
return -EPERM;
}
+ if (user) {
#ifdef CONFIG_RT_GROUP_SCHED
- /*
- * Do not allow realtime tasks into groups that have no runtime
- * assigned.
- */
- if (user
- && rt_policy(policy) && task_group(p)->rt_bandwidth.rt_runtime == 0)
- return -EPERM;
+ /*
+ * Do not allow realtime tasks into groups that have no runtime
+ * assigned.
+ */
+ if (rt_policy(policy) && task_group(p)->rt_bandwidth.rt_runtime == 0)
+ return -EPERM;
#endif
- retval = security_task_setscheduler(p, policy, param);
- if (retval)
- return retval;
+ retval = security_task_setscheduler(p, policy, param);
+ if (retval)
+ return retval;
+ }
+
/*
* make sure no PI-waiters arrive (or leave) while we are
* changing the priority of the task:
sysctl_sched_latency = limit;
sysctl_sched_wakeup_granularity *= factor;
+
+ sysctl_sched_shares_ratelimit *= factor;
}
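/*
 * Editorial note: assuming the usual factor = 1 + ilog2(num_online_cpus())
 * computed earlier in this function, an 8-CPU machine gets factor = 4, so
 * the new 0.25ms shares ratelimit scales to 1ms there, growing with CPU
 * count just like the latency and granularity tunables above it.
 */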
#ifdef CONFIG_SMP
struct rq *rq_dest, *rq_src;
int ret = 0, on_rq;
- if (unlikely(cpu_is_offline(dest_cpu)))
+ if (unlikely(!cpu_active(dest_cpu)))
return ret;
rq_src = cpu_rq(src_cpu);
.priority = 10
};
-void __init migration_init(void)
+static int __init migration_init(void)
{
void *cpu = (void *)(long)smp_processor_id();
int err;
BUG_ON(err == NOTIFY_BAD);
migration_call(&migration_notifier, CPU_ONLINE, cpu);
register_cpu_notifier(&migration_notifier);
+
+ return err;
}
+early_initcall(migration_init);
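/*
 * Editorial note: returning the notifier result and registering through
 * early_initcall() makes the per-cpu migration threads available before
 * ordinary initcalls run, rather than relying on an explicit call from
 * early init code.
 */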
#endif
#ifdef CONFIG_SMP
cpus_clear(*covered);
- for_each_cpu_mask(i, *span) {
+ for_each_cpu_mask_nr(i, *span) {
struct sched_group *sg;
int group = group_fn(i, cpu_map, &sg, tmpmask);
int j;
cpus_clear(sg->cpumask);
sg->__cpu_power = 0;
- for_each_cpu_mask(j, *span) {
+ for_each_cpu_mask_nr(j, *span) {
if (group_fn(j, cpu_map, NULL, tmpmask) != group)
continue;
if (!sg)
return;
do {
- for_each_cpu_mask(j, sg->cpumask) {
+ for_each_cpu_mask_nr(j, sg->cpumask) {
struct sched_domain *sd;
sd = &per_cpu(phys_domains, j);
{
int cpu, i;
- for_each_cpu_mask(cpu, *cpu_map) {
+ for_each_cpu_mask_nr(cpu, *cpu_map) {
struct sched_group **sched_group_nodes
= sched_group_nodes_bycpu[cpu];
/*
* Set up domains for cpus specified by the cpu_map.
*/
- for_each_cpu_mask(i, *cpu_map) {
+ for_each_cpu_mask_nr(i, *cpu_map) {
struct sched_domain *sd = NULL, *p;
SCHED_CPUMASK_VAR(nodemask, allmasks);
#ifdef CONFIG_SCHED_SMT
/* Set up CPU (sibling) groups */
- for_each_cpu_mask(i, *cpu_map) {
+ for_each_cpu_mask_nr(i, *cpu_map) {
SCHED_CPUMASK_VAR(this_sibling_map, allmasks);
SCHED_CPUMASK_VAR(send_covered, allmasks);
#ifdef CONFIG_SCHED_MC
/* Set up multi-core groups */
- for_each_cpu_mask(i, *cpu_map) {
+ for_each_cpu_mask_nr(i, *cpu_map) {
SCHED_CPUMASK_VAR(this_core_map, allmasks);
SCHED_CPUMASK_VAR(send_covered, allmasks);
goto error;
}
sched_group_nodes[i] = sg;
- for_each_cpu_mask(j, *nodemask) {
+ for_each_cpu_mask_nr(j, *nodemask) {
struct sched_domain *sd;
sd = &per_cpu(node_domains, j);
/* Calculate CPU power for physical packages and nodes */
#ifdef CONFIG_SCHED_SMT
- for_each_cpu_mask(i, *cpu_map) {
+ for_each_cpu_mask_nr(i, *cpu_map) {
struct sched_domain *sd = &per_cpu(cpu_domains, i);
init_sched_groups_power(i, sd);
}
#endif
#ifdef CONFIG_SCHED_MC
- for_each_cpu_mask(i, *cpu_map) {
+ for_each_cpu_mask_nr(i, *cpu_map) {
struct sched_domain *sd = &per_cpu(core_domains, i);
init_sched_groups_power(i, sd);
}
#endif
- for_each_cpu_mask(i, *cpu_map) {
+ for_each_cpu_mask_nr(i, *cpu_map) {
struct sched_domain *sd = &per_cpu(phys_domains, i);
init_sched_groups_power(i, sd);
#endif
/* Attach the domains */
- for_each_cpu_mask(i, *cpu_map) {
+ for_each_cpu_mask_nr(i, *cpu_map) {
struct sched_domain *sd;
#ifdef CONFIG_SCHED_SMT
sd = &per_cpu(cpu_domains, i);
}
/*
- * Free current domain masks.
- * Called after all cpus are attached to NULL domain.
- */
-static void free_sched_domains(void)
-{
- ndoms_cur = 0;
- if (doms_cur != &fallback_doms)
- kfree(doms_cur);
- doms_cur = &fallback_doms;
-}
-
-/*
* Set up scheduler domains and groups. Callers must hold the hotplug lock.
* For now this just excludes isolated cpus, but could be used to
* exclude other special cases in the future.
unregister_sched_domain_sysctl();
- for_each_cpu_mask(i, *cpu_map)
+ for_each_cpu_mask_nr(i, *cpu_map)
cpu_attach_domain(NULL, &def_root_domain, i);
synchronize_sched();
arch_destroy_sched_domains(cpu_map, &tmpmask);
* ownership of it and will kfree it when done with it. If the caller
* failed the kmalloc call, then it can pass in doms_new == NULL,
 * and partition_sched_domains() will fall back to the single partition
- * 'fallback_doms'.
+ * 'fallback_doms'; this also forces the domains to be rebuilt.
*
* Call with hotplug lock held
*/
/* always unregister in case we don't destroy any domains */
unregister_sched_domain_sysctl();
- if (doms_new == NULL) {
- ndoms_new = 1;
- doms_new = &fallback_doms;
- cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map);
- dattr_new = NULL;
- }
+ if (doms_new == NULL)
+ ndoms_new = 0;
/* Destroy deleted domains */
for (i = 0; i < ndoms_cur; i++) {
;
}
+ if (doms_new == NULL) {
+ ndoms_cur = 0;
+ ndoms_new = 1;
+ doms_new = &fallback_doms;
+ cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map);
+ dattr_new = NULL;
+ }
+
/* Build new domains */
for (i = 0; i < ndoms_new; i++) {
for (j = 0; j < ndoms_cur; j++) {
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
int arch_reinit_sched_domains(void)
{
- int err;
-
get_online_cpus();
- mutex_lock(&sched_domains_mutex);
- detach_destroy_domains(&cpu_online_map);
- free_sched_domains();
- err = arch_init_sched_domains(&cpu_online_map);
- mutex_unlock(&sched_domains_mutex);
+ rebuild_sched_domains();
put_online_cpus();
-
- return err;
+ return 0;
}
static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
}
#ifdef CONFIG_SCHED_MC
-static ssize_t sched_mc_power_savings_show(struct sys_device *dev, char *page)
+static ssize_t sched_mc_power_savings_show(struct sysdev_class *class,
+ char *page)
{
return sprintf(page, "%u\n", sched_mc_power_savings);
}
-static ssize_t sched_mc_power_savings_store(struct sys_device *dev,
+static ssize_t sched_mc_power_savings_store(struct sysdev_class *class,
const char *buf, size_t count)
{
return sched_power_savings_store(buf, count, 0);
}
-static SYSDEV_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show,
- sched_mc_power_savings_store);
+static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644,
+ sched_mc_power_savings_show,
+ sched_mc_power_savings_store);
#endif
#ifdef CONFIG_SCHED_SMT
-static ssize_t sched_smt_power_savings_show(struct sys_device *dev, char *page)
+static ssize_t sched_smt_power_savings_show(struct sysdev_class *class,
+ char *page)
{
return sprintf(page, "%u\n", sched_smt_power_savings);
}
-static ssize_t sched_smt_power_savings_store(struct sys_device *dev,
+static ssize_t sched_smt_power_savings_store(struct sysdev_class *class,
const char *buf, size_t count)
{
return sched_power_savings_store(buf, count, 1);
}
-static SYSDEV_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show,
+static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644,
+ sched_smt_power_savings_show,
sched_smt_power_savings_store);
#endif
}
#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
+#ifndef CONFIG_CPUSETS
/*
- * Force a reinitialization of the sched domains hierarchy. The domains
- * and groups cannot be updated in place without racing with the balancing
- * code, so we temporarily attach all running cpus to the NULL domain
- * which will prevent rebalancing while the sched domains are recalculated.
+ * Add online and remove offline CPUs from the scheduler domains.
+ * When cpusets are enabled, they take over this function.
*/
static int update_sched_domains(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ partition_sched_domains(0, NULL, NULL);
+ return NOTIFY_OK;
+
+ default:
+ return NOTIFY_DONE;
+ }
+}
+#endif
+
+static int update_runtime(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
int cpu = (int)(long)hcpu;
switch (action) {
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
disable_runtime(cpu_rq(cpu));
- /* fall-through */
- case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
- detach_destroy_domains(&cpu_online_map);
- free_sched_domains();
return NOTIFY_OK;
-
case CPU_DOWN_FAILED:
case CPU_DOWN_FAILED_FROZEN:
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
enable_runtime(cpu_rq(cpu));
- /* fall-through */
- case CPU_UP_CANCELED:
- case CPU_UP_CANCELED_FROZEN:
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- /*
- * Fall through and re-initialise the domains.
- */
- break;
+ return NOTIFY_OK;
+
default:
return NOTIFY_DONE;
}
-
-#ifndef CONFIG_CPUSETS
- /*
- * Create default domain partitioning if cpusets are disabled.
- * Otherwise we let cpusets rebuild the domains based on the
- * current setup.
- */
-
- /* The hotplug lock is already held by cpu_up/cpu_down */
- arch_init_sched_domains(&cpu_online_map);
-#endif
-
- return NOTIFY_OK;
}
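/*
 * Editorial note: the old notifier both rebuilt domains and toggled RT
 * runtime. It is now split: update_sched_domains() (compiled out when
 * cpusets take over) rebuilds the default partition via
 * partition_sched_domains(0, NULL, NULL), while update_runtime() always
 * handles RT bandwidth enable/disable across hotplug.
 */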
void __init sched_init_smp(void)
cpu_set(smp_processor_id(), non_isolated_cpus);
mutex_unlock(&sched_domains_mutex);
put_online_cpus();
+
+#ifndef CONFIG_CPUSETS
/* XXX: Theoretical race here - CPU may be hotplugged now */
hotcpu_notifier(update_sched_domains, 0);
+#endif
+
+ /* RT runtime code needs to handle some hotplug events */
+ hotcpu_notifier(update_runtime, 0);
+
init_hrtick();
/* Move init over to a non-isolated CPU */
rq = cpu_rq(i);
spin_lock_init(&rq->lock);
- lockdep_set_class(&rq->lock, &rq->rq_lock_key);
rq->nr_running = 0;
init_cfs_rq(&rq->cfs, rq);
init_rt_rq(&rq->rt, rq);
WARN_ON(!parent); /* root should already exist */
tg->parent = parent;
- list_add_rcu(&tg->siblings, &parent->children);
INIT_LIST_HEAD(&tg->children);
+ list_add_rcu(&tg->siblings, &parent->children);
spin_unlock_irqrestore(&task_group_lock, flags);
return tg;