struct sk_buff *completion_queue;
/* Elements below can be accessed between CPUs for RPS */
-#ifdef CONFIG_SMP
+#ifdef CONFIG_RPS
struct call_single_data csd ____cacheline_aligned_in_smp;
#endif
struct sk_buff_head input_pkt_queue;
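For context, a sketch of how the csd field gets used: it is what lets one CPU kick another CPU's backlog processing. Simplified from the RPS design, and assuming csd.func/csd.info were pointed at the helper and the softnet queue during init:

static void trigger_softirq(void *data)
{
	struct softnet_data *queue = data;

	/* Runs on the remote CPU in IPI context, interrupts disabled:
	 * schedule that CPU's backlog NAPI instance, which raises
	 * NET_RX_SOFTIRQ there. */
	__napi_schedule(&queue->backlog);
}

/* Sender side: after queueing an skb for a remote CPU, fire the IPI
 * through that CPU's per-queue call_single_data. */
__smp_call_function_single(cpu, &queue->csd, 0);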
/*
* get_rps_cpu is called from netif_receive_skb and returns the target
* CPU from the RPS map of the receiving queue for a given skb.
+ * rcu_read_lock must be held on entry.
*/
static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb)
{
u8 ip_proto;
u32 addr1, addr2, ports, ihl;
- rcu_read_lock();
-
if (skb_rx_queue_recorded(skb)) {
u16 index = skb_get_rx_queue(skb);
if (unlikely(index >= dev->num_rx_queues)) {
	/* Bogus queue index recorded on the skb: bail out with
	 * cpu still < 0 so the caller falls back to the local CPU. */
	goto done;
}
done:
- rcu_read_unlock();
return cpu;
}
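The body elided above boils down to hashing the flow identifiers gathered into addr1/addr2/ports and indexing the receive queue's RPS CPU map with the result. Roughly (a sketch of the patch's logic, not a verbatim quote; hashrnd is a boot-time jhash seed, rxqueue the queue resolved from the index checked above, and rps_map its RCU-protected CPU list):

	skb->rxhash = jhash_3words(addr1, addr2, ports, hashrnd);
	if (!skb->rxhash)
		skb->rxhash = 1;	/* 0 is reserved for "no hash" */

	map = rcu_dereference(rxqueue->rps_map);
	if (map) {
		/* Scale the 32-bit hash onto the map's len entries. */
		u16 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];

		if (cpu_online(tcpu)) {
			cpu = tcpu;
			goto done;
		}
	}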
int netif_rx(struct sk_buff *skb)
{
- int cpu;
+ int ret;
/* if netpoll wants it, pretend we never saw it */
if (netpoll_rx(skb))
	return NET_RX_DROP;

net_timestamp(skb);
#ifdef CONFIG_RPS
- cpu = get_rps_cpu(skb->dev, skb);
- if (cpu < 0)
- cpu = smp_processor_id();
+ {
+ int cpu;
+
+ rcu_read_lock();
+ cpu = get_rps_cpu(skb->dev, skb);
+ if (cpu < 0)
+ cpu = smp_processor_id();
+ ret = enqueue_to_backlog(skb, cpu);
+ rcu_read_unlock();
+ }
#else
- cpu = smp_processor_id();
+ ret = enqueue_to_backlog(skb, get_cpu());
+ put_cpu();
#endif
-
- return enqueue_to_backlog(skb, cpu);
+ return ret;
}
EXPORT_SYMBOL(netif_rx);
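Both branches of netif_rx() now funnel through enqueue_to_backlog(), which queues the skb on the chosen CPU's input_pkt_queue. A condensed sketch of that helper, with the NAPI scheduling and statistics reduced to comments:

static int enqueue_to_backlog(struct sk_buff *skb, int cpu)
{
	struct softnet_data *queue;
	unsigned long flags;

	queue = &per_cpu(softnet_data, cpu);

	local_irq_save(flags);
	spin_lock(&queue->input_pkt_queue.lock);

	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
		/* Room in the backlog: queue the skb.  If the queue was
		 * empty, the backlog NAPI must also be scheduled, directly
		 * when cpu == smp_processor_id(), via the csd IPI for a
		 * remote CPU. */
		__skb_queue_tail(&queue->input_pkt_queue, skb);
		spin_unlock(&queue->input_pkt_queue.lock);
		local_irq_restore(flags);
		return NET_RX_SUCCESS;
	}

	/* Backlog full: drop the packet. */
	spin_unlock(&queue->input_pkt_queue.lock);
	local_irq_restore(flags);
	kfree_skb(skb);
	return NET_RX_DROP;
}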
config IP_MROUTE_MULTIPLE_TABLES
bool "IP: multicast policy routing"
- depends on IP_ADVANCED_ROUTER
+ depends on IP_MROUTE && IP_ADVANCED_ROUTER
select FIB_RULES
help
Normally, a multicast router runs a userspace daemon and decides
struct mr_table {
struct list_head list;
+#ifdef CONFIG_NET_NS
+ struct net *net;
+#endif
u32 id;
struct sock *mroute_sk;
struct timer_list ipmr_expire_timer;
mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
if (mrt == NULL)
return NULL;
+ write_pnet(&mrt->net, net);
mrt->id = id;
/* Forwarding cache */
static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
{
- struct net *net = NULL; //mrt->net;
+ struct net *net = read_pnet(&mrt->net);
struct sk_buff *skb;
struct nlmsgerr *e;
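write_pnet()/read_pnet() exist so the namespace back-pointer costs nothing on !CONFIG_NET_NS kernels. Abridged from include/net/net_namespace.h of this era (the exact form varies between kernel versions):

#ifdef CONFIG_NET_NS
static inline void write_pnet(struct net **pnet, struct net *net)
{
	*pnet = net;
}

static inline struct net *read_pnet(struct net * const *pnet)
{
	return *pnet;
}
#else
#define write_pnet(pnet, net)	do { (void)(net); } while (0)
#define read_pnet(pnet)		(&init_net)
#endif

Because the !CONFIG_NET_NS variants are macros that never evaluate pnet, write_pnet(&mrt->net, net) compiles even though the net member only exists under CONFIG_NET_NS, and read_pnet() simply collapses to &init_net.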
/*
 * Check to see if we resolved a queued list. If so we
* need to send on the frames and tidy up.
*/
+ found = false;
spin_lock_bh(&mfc_unres_lock);
list_for_each_entry(uc, &mrt->mfc_unres_queue, list) {
if (uc->mfc_origin == c->mfc_origin &&
uc->mfc_mcastgrp == c->mfc_mcastgrp) {
list_del(&uc->list);
atomic_dec(&mrt->cache_resolve_queue_len);
+ found = true;
break;
}
}
if (list_empty(&mrt->mfc_unres_queue))
	del_timer(&mrt->ipmr_expire_timer);
spin_unlock_bh(&mfc_unres_lock);
- if (uc) {
+ if (found) {
ipmr_cache_resolve(net, mrt, uc, c);
ipmr_cache_free(uc);
}
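The found flag fixes a classic list_for_each_entry() pitfall: when the loop runs to completion without hitting the break, the cursor is not NULL; it is the bogus container_of() of the list head itself, so the old "if (uc)" test was always true. A minimal illustration (hypothetical, outside ipmr):

	LIST_HEAD(queue);
	struct mfc_cache *uc;

	list_for_each_entry(uc, &queue, list)
		;	/* empty list: the body never runs ... */

	/* ... yet uc is non-NULL here.  It equals
	 * container_of(&queue, struct mfc_cache, list),
	 * which is not a valid entry. */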