{
s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
	spread, rq0_min_vruntime, spread0;
- struct rq *rq = &per_cpu(runqueues, cpu);
+ struct rq *rq = cpu_rq(cpu);
struct sched_entity *last;
unsigned long flags;
if (last)
	max_vruntime = last->vruntime;
min_vruntime = cfs_rq->min_vruntime;
- rq0_min_vruntime = per_cpu(runqueues, 0).cfs.min_vruntime;
+ rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
spin_unlock_irqrestore(&rq->lock, flags);
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime",
SPLIT_NS(MIN_vruntime));
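The &per_cpu(runqueues, cpu) conversions above are cosmetic: in kernel/sched.c of this era, cpu_rq() is a thin macro over the very same per-CPU lookup. A sketch of the accessor, as defined near the top of sched.c:

	#define cpu_rq(cpu)	(&per_cpu(runqueues, (cpu)))

Going through the accessor keeps the debug code insulated from any future change to how runqueues are stored.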
static void print_cpu(struct seq_file *m, int cpu)
{
- struct rq *rq = &per_cpu(runqueues, cpu);
+ struct rq *rq = cpu_rq(cpu);
#ifdef CONFIG_X86
{
P(nr_switches);
P(nr_load_updates);
P(nr_uninterruptible);
- SEQ_printf(m, " .%-30s: %lu\n", "jiffies", jiffies);
PN(next_balance);
P(curr->pid);
PN(clock);
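Each P()/PN() line above becomes one " .name : value" row in the per-CPU section of /proc/sched_debug; for instance (values hypothetical):

	.nr_switches                   : 48773
	.nr_load_updates               : 12409
	.nr_uninterruptible            : 0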
#ifdef CONFIG_SCHEDSTATS
#define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, rq->n);
- P(yld_exp_empty);
- P(yld_act_empty);
- P(yld_both_empty);
P(yld_count);
P(sched_switch);
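For reference, the local P(n) defined just above stringizes its argument and reads the runqueue field of the same name, so P(yld_count) expands to:

	SEQ_printf(m, " .%-30s: %d\n", "yld_count", rq->yld_count);

which is why the three obsolete yld_* counters disappear from the output simply by deleting their P() lines.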
u64 now = ktime_to_ns(ktime_get());
int cpu;
- SEQ_printf(m, "Sched Debug Version: v0.08, %s %.*s\n",
+ SEQ_printf(m, "Sched Debug Version: v0.09, %s %.*s\n",
	init_utsname()->release,
	(int)strcspn(init_utsname()->version, " "),
	init_utsname()->version);
SEQ_printf(m, " .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
SEQ_printf(m, " .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
+ P(jiffies);
PN(sysctl_sched_latency);
PN(sysctl_sched_min_granularity);
PN(sysctl_sched_wakeup_granularity);
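The PN() helper just above prints through SPLIT_NS(); as I recall the supporting helpers near the top of sched_debug.c, they split a signed nanosecond value into a whole-millisecond part and a six-digit nanosecond remainder:

	#define SPLIT_NS(x) nsec_high(x), nsec_low(x)

	static unsigned long nsec_high(unsigned long long nsec)
	{
		if ((long long)nsec < 0) {
			nsec = -nsec;
			do_div(nsec, 1000000);
			return -nsec;
		}
		do_div(nsec, 1000000);
		return nsec;
	}

	static unsigned long nsec_low(unsigned long long nsec)
	{
		if ((long long)nsec < 0)
			nsec = -nsec;
		return do_div(nsec, 1000000);
	}

so PN(sysctl_sched_latency) comes out as milliseconds with six fractional digits rather than a raw nanosecond count.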
PN(se.vruntime);
PN(se.sum_exec_runtime);
PN(se.avg_overlap);
+ PN(se.avg_wakeup);
+ PN(se.avg_running);
nr_switches = p->nvcsw + p->nivcsw;
PN(se.wait_max);
PN(se.wait_sum);
P(se.wait_count);
+ PN(se.iowait_sum);
+ P(se.iowait_count);
P(sched_info.bkl_count);
P(se.nr_migrations);
P(se.nr_migrations_cold);
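The new se.iowait_sum / se.iowait_count pair is charged on the sleep-accounting path; the companion sched_fair.c hunk of this series does, roughly (in enqueue_sleeper()):

	if (tsk->in_iowait) {
		se->iowait_sum += delta;
		se->iowait_count++;
	}

where delta is the time the task just spent blocked, so dividing iowait_sum by iowait_count gives the average length of an iowait episode.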
p->se.wait_max = 0;
p->se.wait_sum = 0;
p->se.wait_count = 0;
+ p->se.iowait_sum = 0;
+ p->se.iowait_count = 0;
p->se.sleep_max = 0;
p->se.sum_sleep_runtime = 0;
p->se.block_max = 0;
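These assignments live in proc_sched_set_task(), which runs when something writes to /proc/<pid>/sched; zeroing the new iowait fields alongside the existing wait statistics means one write to that file resets every per-task counter shown above.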