[PATCH] for_each_possible_cpu: fixes for generic part
Author:     KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
AuthorDate: Tue, 28 Mar 2006 09:56:37 +0000 (01:56 -0800)
Commit:     Linus Torvalds <torvalds@g5.osdl.org>
CommitDate: Tue, 28 Mar 2006 17:16:05 +0000 (09:16 -0800)
Replace for_each_cpu() with for_each_possible_cpu() in the generic code.
The new name states explicitly that the iterator walks all possible CPUs,
not only the ones currently online.
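
For illustration only (not part of this patch), a minimal sketch of the
iterator's typical use; the per-cpu variable "example_counter" and the
helper function are hypothetical:

	#include <linux/percpu.h>
	#include <linux/cpumask.h>

	/* Hypothetical per-cpu counter, purely for illustration. */
	static DEFINE_PER_CPU(unsigned long, example_counter);

	/*
	 * Sum the counter over every CPU that may ever exist.
	 * for_each_possible_cpu() walks cpu_possible_map, so CPUs that
	 * are currently offline but hot-pluggable are included, which
	 * is what code touching per-cpu allocations needs.
	 */
	static unsigned long sum_example_counter(void)
	{
		unsigned long sum = 0;
		int cpu;

		for_each_possible_cpu(cpu)
			sum += per_cpu(example_counter, cpu);

		return sum;
	}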

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
block/ll_rw_blk.c
fs/file.c
fs/proc/proc_misc.c
include/asm-generic/percpu.h
include/linux/genhd.h
include/linux/kernel_stat.h
init/main.c
kernel/rcutorture.c
kernel/sched.c
mm/slab.c
mm/swap.c

index 82469db..5a19e2e 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -3514,7 +3514,7 @@ int __init blk_dev_init(void)
        iocontext_cachep = kmem_cache_create("blkdev_ioc",
                        sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
 
-       for_each_cpu(i)
+       for_each_possible_cpu(i)
                INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
 
        open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
index bbc7433..55f4e70 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -373,6 +373,6 @@ static void __devinit fdtable_defer_list_init(int cpu)
 void __init files_defer_init(void)
 {
        int i;
-       for_each_cpu(i)
+       for_each_possible_cpu(i)
                fdtable_defer_list_init(i);
 }
index 1e9ea37..1edce0c 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -534,7 +534,7 @@ static int show_stat(struct seq_file *p, void *v)
        if (wall_to_monotonic.tv_nsec)
                --jif;
 
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                int j;
 
                user = cputime64_add(user, kstat_cpu(i).cpustat.user);
index 78cf455..c0caf43 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -19,7 +19,7 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
 #define percpu_modcopy(pcpudst, src, size)                     \
 do {                                                           \
        unsigned int __i;                                       \
-       for_each_cpu(__i)                                       \
+       for_each_possible_cpu(__i)                              \
                memcpy((pcpudst)+__per_cpu_offset[__i],         \
                       (src), (size));                          \
 } while (0)
index 3c1b029..10a27f2 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -152,14 +152,14 @@ struct disk_attribute {
 ({                                                                     \
        typeof(gendiskp->dkstats->field) res = 0;                       \
        int i;                                                          \
-       for_each_cpu(i)                                                 \
+       for_each_possible_cpu(i)                                        \
                res += per_cpu_ptr(gendiskp->dkstats, i)->field;        \
        res;                                                            \
 })
 
 static inline void disk_stat_set_all(struct gendisk *gendiskp, int value)      {
        int i;
-       for_each_cpu(i)
+       for_each_possible_cpu(i)
                memset(per_cpu_ptr(gendiskp->dkstats, i), value,
                                sizeof (struct disk_stats));
 }              
index a484572..b462490 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -46,7 +46,7 @@ static inline int kstat_irqs(int irq)
 {
        int cpu, sum = 0;
 
-       for_each_cpu(cpu)
+       for_each_possible_cpu(cpu)
                sum += kstat_cpu(cpu).irqs[irq];
 
        return sum;
index 64466ea..4a2f089 100644
--- a/init/main.c
+++ b/init/main.c
@@ -341,7 +341,7 @@ static void __init setup_per_cpu_areas(void)
 #endif
        ptr = alloc_bootmem(size * nr_possible_cpus);
 
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                __per_cpu_offset[i] = ptr - __per_cpu_start;
                memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
                ptr += size;
index b4b362b..8154e75 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -301,7 +301,7 @@ rcu_torture_printk(char *page)
        long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
        long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
 
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
                        pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
                        batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
@@ -535,7 +535,7 @@ rcu_torture_init(void)
        atomic_set(&n_rcu_torture_error, 0);
        for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
                atomic_set(&rcu_torture_wcount[i], 0);
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
                        per_cpu(rcu_torture_count, cpu)[i] = 0;
                        per_cpu(rcu_torture_batch, cpu)[i] = 0;
index 7854ee5..a9ecac3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1625,7 +1625,7 @@ unsigned long nr_uninterruptible(void)
 {
        unsigned long i, sum = 0;
 
-       for_each_cpu(i)
+       for_each_possible_cpu(i)
                sum += cpu_rq(i)->nr_uninterruptible;
 
        /*
@@ -1642,7 +1642,7 @@ unsigned long long nr_context_switches(void)
 {
        unsigned long long i, sum = 0;
 
-       for_each_cpu(i)
+       for_each_possible_cpu(i)
                sum += cpu_rq(i)->nr_switches;
 
        return sum;
@@ -1652,7 +1652,7 @@ unsigned long nr_iowait(void)
 {
        unsigned long i, sum = 0;
 
-       for_each_cpu(i)
+       for_each_possible_cpu(i)
                sum += atomic_read(&cpu_rq(i)->nr_iowait);
 
        return sum;
@@ -6080,7 +6080,7 @@ void __init sched_init(void)
        runqueue_t *rq;
        int i, j, k;
 
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                prio_array_t *array;
 
                rq = cpu_rq(i);
index 6818374..4cbf8bb 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3311,7 +3311,7 @@ void *__alloc_percpu(size_t size)
         * and we have no way of figuring out how to fix the array
         * that we have allocated then....
         */
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                int node = cpu_to_node(i);
 
                if (node_online(node))
@@ -3398,7 +3398,7 @@ void free_percpu(const void *objp)
        /*
         * We allocate for all cpus so we cannot use for online cpu here.
         */
-       for_each_cpu(i)
+       for_each_possible_cpu(i)
            kfree(p->ptrs[i]);
        kfree(p);
 }
index 91b7e20..88895c2 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -512,7 +512,7 @@ long percpu_counter_sum(struct percpu_counter *fbc)
 
        spin_lock(&fbc->lock);
        ret = fbc->count;
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                long *pcount = per_cpu_ptr(fbc->counters, cpu);
                ret += *pcount;
        }