for_each_cpu() actually iterates across all possible CPUs. We've had mistakes
in the past where people were using for_each_cpu() where they should have been
iterating across only online or present CPUs. This is inefficient and
possibly buggy.
We're renaming for_each_cpu() to for_each_possible_cpu() to avoid this in the
future.
This patch replaces for_each_cpu() with for_each_possible_cpu() under net/.
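For reference, this is the distinction the new names make explicit. A minimal
sketch, not part of the patch; iterator_demo() is a hypothetical helper:

#include <linux/kernel.h>
#include <linux/cpumask.h>

/* Hypothetical helper, for illustration only. */
static void iterator_demo(void)
{
	int cpu;

	/* Every CPU that could ever be brought up during this boot. */
	for_each_possible_cpu(cpu)
		pr_debug("cpu%d is possible\n", cpu);

	/* CPUs that are physically present, though perhaps offline. */
	for_each_present_cpu(cpu)
		pr_debug("cpu%d is present\n", cpu);

	/* Only CPUs that are currently up and schedulable. */
	for_each_online_cpu(cpu)
		pr_debug("cpu%d is online\n", cpu);
}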
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
20 files changed:
 			   * sizeof(struct ebt_chainstack));
 		if (!newinfo->chainstack)
 			return -ENOMEM;
-		for_each_cpu(i) {
+		for_each_possible_cpu(i) {
 			newinfo->chainstack[i] =
 			   vmalloc(udc_cnt * sizeof(struct ebt_chainstack));
 			if (!newinfo->chainstack[i]) {
 	       sizeof(struct ebt_counter) * nentries);
 	/* add other counters to those of cpu 0 */
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		if (cpu == 0)
 			continue;
 		counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
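The shape converted above recurs throughout the netfilter hunks: a counter
array is seeded from CPU 0's slot, then every other possible CPU's slot is
folded in. A minimal sketch of that pattern, using a hypothetical per-CPU
counter rather than the ebtables structures:

#include <linux/cpumask.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, hit_count);	/* hypothetical */

static unsigned long fold_hit_counts(void)
{
	unsigned long total = per_cpu(hit_count, 0);	/* seed from cpu 0 */
	int cpu;

	for_each_possible_cpu(cpu) {
		if (cpu == 0)		/* already counted above */
			continue;
		total += per_cpu(hit_count, cpu);
	}
	return total;
}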
 	vfree(table->entries);
 	if (table->chainstack) {
-		for_each_cpu(i)
+		for_each_possible_cpu(i)
 			vfree(table->chainstack[i]);
 		vfree(table->chainstack);
 	}
 	vfree(counterstmp);
 	/* can be initialized in translate_table() */
 	if (newinfo->chainstack) {
-		for_each_cpu(i)
+		for_each_possible_cpu(i)
 			vfree(newinfo->chainstack[i]);
 		vfree(newinfo->chainstack);
 	}
 	mutex_unlock(&ebt_mutex);
 free_chainstack:
 	if (newinfo->chainstack) {
-		for_each_cpu(i)
+		for_each_possible_cpu(i)
 			vfree(newinfo->chainstack[i]);
 		vfree(newinfo->chainstack);
 	}
 	mutex_unlock(&ebt_mutex);
 	vfree(table->private->entries);
 	if (table->private->chainstack) {
-		for_each_cpu(i)
+		for_each_possible_cpu(i)
 			vfree(table->private->chainstack[i]);
 		vfree(table->private->chainstack);
 	}
 	 * Initialise the packet receive queues.
 	 */
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		struct softnet_data *queue;
 		queue = &per_cpu(softnet_data, i);
-	for_each_cpu(i)
+	for_each_possible_cpu(i)
 		flow_hash_rnd_recalc(i) = 1;
 	flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
 	add_timer(&flow_hash_rnd_timer);
-	for_each_cpu(i)
+	for_each_possible_cpu(i)
 		flow_cache_cpu_prepare(i);
 	hotcpu_notifier(flow_cache_cpu, 0);
 	memset(&ndst, 0, sizeof(ndst));
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		struct neigh_statistics *st;
 		st = per_cpu_ptr(tbl->stats, cpu);
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		struct nrnd_state *state = &per_cpu(net_rand_state,i);
 		__net_srandom(state, i+jiffies);
 	}
 	unsigned long seed[NR_CPUS];
 	get_random_bytes(seed, sizeof(seed));
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		struct nrnd_state *state = &per_cpu(net_rand_state,i);
 		__net_srandom(state, seed[i]);
 	}
 	struct inet_sock *inet;
 	int i;
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		int err;
 		err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_ICMP,
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		void *scratch = *per_cpu_ptr(scratches, i);
 		if (scratch)
 			vfree(scratch);
 	ipcomp_scratches = scratches;
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		void *scratch = vmalloc(IPCOMP_SCRATCH_SIZE);
 		if (!scratch)
 			return NULL;
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu);
 		crypto_free_tfm(tfm);
 	}
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		struct crypto_tfm *tfm = crypto_alloc_tfm(alg_name, 0);
 		if (!tfm)
 			goto error;
 	}
 	/* And one copy for every other CPU */
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
 			memcpy(newinfo->entries[i], entry0, newinfo->size);
 	}
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		if (cpu == curcpu)
 			continue;
 		i = 0;
 	struct ip_conntrack_ecache *ecache;
 	int cpu;
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		ecache = &per_cpu(ip_conntrack_ecache, cpu);
 		if (ecache->ct)
 			ip_conntrack_put(ecache->ct);
 	}
 	/* And one copy for every other CPU */
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
 			memcpy(newinfo->entries[i], entry0, newinfo->size);
 	}
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		if (cpu == curcpu)
 			continue;
 		i = 0;
-	for_each_cpu(cpu)
+	for_each_possible_cpu(cpu)
 		res += proto->stats[cpu].inuse;
 	return res;
 	unsigned long res = 0;
 	int i;
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		res += *(((unsigned long *) per_cpu_ptr(mib[0], i)) + offt);
 		res += *(((unsigned long *) per_cpu_ptr(mib[1], i)) + offt);
 	}
 	memcpy(dst, src, length);
 	/* Add the other cpus in, one int at a time */
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		unsigned int j;
 		src = ((u32 *) IP_RT_ACCT_CPU(i)) + offset;
 	struct sock *sk;
 	int err, i, j;
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6,
 				       &per_cpu(__icmpv6_socket, i));
 		if (err < 0) {
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		sock_release(per_cpu(__icmpv6_socket, i));
 	}
 	inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		void *scratch = *per_cpu_ptr(scratches, i);
 		vfree(scratch);
 	ipcomp6_scratches = scratches;
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		void *scratch = vmalloc(IPCOMP_SCRATCH_SIZE);
 		if (!scratch)
 			return NULL;
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu);
 		crypto_free_tfm(tfm);
 	}
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		struct crypto_tfm *tfm = crypto_alloc_tfm(alg_name, 0);
 		if (!tfm)
 			goto error;
 	}
 	/* And one copy for every other CPU */
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
 			memcpy(newinfo->entries[i], entry0, newinfo->size);
 	}
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		if (cpu == curcpu)
 			continue;
 		i = 0;
-	for_each_cpu(cpu)
+	for_each_possible_cpu(cpu)
 		res += proto->stats[cpu].inuse;
 	return res;
 	unsigned long res = 0;
 	int i;
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		res += *(((unsigned long *)per_cpu_ptr(mib[0], i)) + offt);
 		res += *(((unsigned long *)per_cpu_ptr(mib[1], i)) + offt);
 	}
 	struct nf_conntrack_ecache *ecache;
 	int cpu;
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		ecache = &per_cpu(nf_conntrack_ecache, cpu);
 		if (ecache->ct)
 			nf_ct_put(ecache->ct);
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		if (size <= PAGE_SIZE)
 			newinfo->entries[cpu] = kmalloc_node(size,
 							GFP_KERNEL,
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		if (info->size <= PAGE_SIZE)
 			kfree(info->entries[cpu]);
 		else
 	unsigned long res = 0;
 	int i;
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		res +=
 		    *((unsigned long *) (((void *) per_cpu_ptr(mib[0], i)) +
 					 sizeof (unsigned long) * nr));
 	int cpu;
 	int counter = 0;
-	for_each_cpu(cpu)
+	for_each_possible_cpu(cpu)
 		counter += per_cpu(sockets_in_use, cpu);
 	/* It can be negative, by the way. 8) */
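Many of the hunks above (the ipcomp scratch buffers, the crypto tfm arrays,
the proc fold_field helpers) manage storage created with alloc_percpu(),
which reserves a slot for every possible CPU. Their setup and teardown loops
therefore have to walk the same set, or buffers on CPUs that never came
online would leak. A hedged sketch of that pairing, with hypothetical names:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/vmalloc.h>

static void **scratch_bufs;	/* hypothetical, shaped like ipcomp_scratches */

static void free_scratches(void)
{
	int i;

	if (!scratch_bufs)
		return;
	/* Walk every possible CPU: the allocator below filled them all. */
	for_each_possible_cpu(i)
		vfree(*per_cpu_ptr(scratch_bufs, i));
	free_percpu(scratch_bufs);
	scratch_bufs = NULL;
}

static int alloc_scratches(size_t size)
{
	int i;

	scratch_bufs = alloc_percpu(void *);	/* zeroed for all possible CPUs */
	if (!scratch_bufs)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		void *p = vmalloc(size);

		if (!p) {
			free_scratches();	/* vfree(NULL) is a no-op */
			return -ENOMEM;
		}
		*per_cpu_ptr(scratch_bufs, i) = p;
	}
	return 0;
}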