#include <linux/dmaengine.h>
#include <linux/workqueue.h>
-struct net;
+#include <net/net_namespace.h>
+
struct vlan_group;
struct ethtool_ops;
struct netpoll_info;
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
((((dev)->hard_header_len+extra)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
+struct header_ops {
+ int (*create) (struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, const void *daddr,
+ const void *saddr, unsigned len);
+ int (*parse)(const struct sk_buff *skb, unsigned char *haddr);
+ int (*rebuild)(struct sk_buff *skb);
+#define HAVE_HEADER_CACHE
+ int (*cache)(const struct neighbour *neigh, struct hh_cache *hh);
+ void (*cache_update)(struct hh_cache *hh,
+ const struct net_device *dev,
+ const unsigned char *haddr);
+};
+
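/*
 * Example, not part of the patch: with header_ops a driver publishes one
 * shared const table instead of filling four per-device function pointers.
 * A minimal sketch assuming the eth_header() family of helpers from
 * net/ethernet/eth.c (with the const-qualified signatures above):
 */
static const struct header_ops example_eth_header_ops = {
        .create       = eth_header,              /* build L2 header on xmit */
        .parse        = eth_header_parse,        /* extract source hw address */
        .rebuild      = eth_rebuild_header,      /* re-resolve after neigh change */
        .cache        = eth_header_cache,        /* prime an hh_cache entry */
        .cache_update = eth_header_cache_update, /* refresh a cached header */
};
/* A driver's setup routine would then do: dev->header_ops = &example_eth_header_ops; */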
/* These flag bits are private to the generic network queueing
* layer, they may not be explicitly referenced by any other
* code.
*/
enum
{
NAPI_STATE_SCHED, /* Poll is scheduled */
+ NAPI_STATE_DISABLE, /* Disable pending */
};
-extern void FASTCALL(__napi_schedule(struct napi_struct *n));
+extern void __napi_schedule(struct napi_struct *n);
+
+static inline int napi_disable_pending(struct napi_struct *n)
+{
+ return test_bit(NAPI_STATE_DISABLE, &n->state);
+}
/**
* napi_schedule_prep - check if napi can be scheduled
* @n: napi context
*
* Test if NAPI routine is already running, and if not mark
* it as running. This is used as a condition variable
- * insure only one NAPI poll instance runs
+ * to ensure only one NAPI poll instance runs. We also make
+ * sure there is no pending NAPI disable.
*/
static inline int napi_schedule_prep(struct napi_struct *n)
{
- return !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
+ return !napi_disable_pending(n) &&
+ !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}
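/*
 * Example, not part of the patch: the canonical interrupt-handler pattern.
 * A sketch assuming a hypothetical driver whose struct example_priv embeds
 * a napi_struct (initialized earlier with netif_napi_add()) and provides
 * an example_disable_rx_irq() helper:
 */
static irqreturn_t example_interrupt(int irq, void *dev_id)
{
        struct example_priv *priv = dev_id;

        if (napi_schedule_prep(&priv->napi)) {
                /* We now own NAPI_STATE_SCHED: mask RX interrupts and poll. */
                example_disable_rx_irq(priv);
                __napi_schedule(&priv->napi);
        }
        return IRQ_HANDLED;
}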
/**
__napi_schedule(n);
}
+/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
+static inline int napi_reschedule(struct napi_struct *napi)
+{
+ if (napi_schedule_prep(napi)) {
+ __napi_schedule(napi);
+ return 1;
+ }
+ return 0;
+}
+
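/*
 * Example, not part of the patch: a poll routine that has already called
 * napi_complete() but then notices late-arriving work can re-arm itself,
 * and napi_reschedule() fails safely if a disable is pending. Hypothetical
 * fragment:
 *
 *      napi_complete(napi);
 *      if (example_rx_pending(priv) && napi_reschedule(napi))
 *              goto poll_more;
 */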
/**
* napi_complete - NAPI processing complete
* @n: napi context
static inline void napi_complete(struct napi_struct *n)
{
- local_irq_disable();
+ unsigned long flags;
+
+ local_irq_save(flags);
__napi_complete(n);
- local_irq_enable();
+ local_irq_restore(flags);
}
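/*
 * Example, not part of the patch: the canonical poll() shape under this
 * API, sketched for a hypothetical driver; example_clean_rx() and
 * example_enable_rx_irq() are illustrative names only:
 */
static int example_poll(struct napi_struct *napi, int budget)
{
        struct example_priv *priv = container_of(napi, struct example_priv, napi);
        int work_done;

        work_done = example_clean_rx(priv, budget);     /* at most budget packets */
        if (work_done < budget) {
                /* Ring drained: leave polled mode, unmask RX interrupts. */
                napi_complete(napi);
                example_enable_rx_irq(priv);
        }
        return work_done;
}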
/**
*/
static inline void napi_disable(struct napi_struct *n)
{
+ set_bit(NAPI_STATE_DISABLE, &n->state);
while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
- msleep_interruptible(1);
+ msleep(1);
+ clear_bit(NAPI_STATE_DISABLE, &n->state);
}
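/*
 * Example, not part of the patch: napi_disable() spins until it wins
 * NAPI_STATE_SCHED, so any in-flight poll has finished before RX state
 * is torn down. Hypothetical stop path:
 */
static int example_stop(struct net_device *dev)
{
        struct example_priv *priv = netdev_priv(dev);

        netif_tx_disable(dev);
        napi_disable(&priv->napi);      /* no poll can run past this point */
        example_free_rx_ring(priv);     /* now safe to free RX buffers */
        return 0;
}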
/**
clear_bit(NAPI_STATE_SCHED, &n->state);
}
+#ifdef CONFIG_SMP
+/**
+ * napi_synchronize - wait until NAPI is not running
+ * @n: napi context
+ *
+ * Wait until NAPI is done being scheduled on this context.
+ * Waits till any outstanding processing completes but
+ * does not disable future activations.
+ */
+static inline void napi_synchronize(const struct napi_struct *n)
+{
+ while (test_bit(NAPI_STATE_SCHED, &n->state))
+ msleep(1);
+}
+#else
+# define napi_synchronize(n) barrier()
+#endif
+
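/*
 * Example, not part of the patch: unlike napi_disable(), this only waits
 * out the current activation; later interrupts may schedule NAPI again.
 * A sketch of a reset that keeps the interface up, with hypothetical
 * mask/unmask helpers:
 */
static void example_reset_rx(struct example_priv *priv)
{
        example_mask_rx_irq(priv);      /* stop new schedules */
        napi_synchronize(&priv->napi);  /* wait for a poll already running */
        example_reinit_rx_ring(priv);
        example_unmask_rx_irq(priv);
}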
/*
* The DEVICE structure.
* Actually, this whole structure is a big mistake. It mixes I/O
#define NETIF_F_HW_VLAN_FILTER 512 /* Receive filtering on VLAN */
#define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */
#define NETIF_F_GSO 2048 /* Enable software GSO. */
-#define NETIF_F_LLTX 4096 /* LockLess TX */
+#define NETIF_F_LLTX 4096 /* LockLess TX - deprecated. Please */
+ /* do not use LLTX in new drivers */
+#define NETIF_F_NETNS_LOCAL 8192 /* Does not change network namespaces */
#define NETIF_F_MULTI_QUEUE 16384 /* Has multiple TX/RX queues */
#define NETIF_F_LRO 32768 /* large receive offload */
#endif
const struct ethtool_ops *ethtool_ops;
+ /* Hardware header description */
+ const struct header_ops *header_ops;
+
/*
* This marks the end of the "visible" part of the structure. All
* fields hereafter are internal to the system, and may change at
unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */
+ /* ingress path synchronizer */
+ spinlock_t ingress_lock;
+ struct Qdisc *qdisc_ingress;
+
/*
* Cache line mostly used on queue transmit path (qdisc)
*/
/* Partially transmitted GSO packet. */
struct sk_buff *gso_skb;
- /* ingress path synchronizer */
- spinlock_t ingress_lock;
- struct Qdisc *qdisc_ingress;
-
/*
* One part is mostly used on xmit path (device)
*/
int (*open)(struct net_device *dev);
int (*stop)(struct net_device *dev);
#define HAVE_NETDEV_POLL
- int (*hard_header) (struct sk_buff *skb,
- struct net_device *dev,
- unsigned short type,
- void *daddr,
- void *saddr,
- unsigned len);
- int (*rebuild_header)(struct sk_buff *skb);
#define HAVE_CHANGE_RX_FLAGS
void (*change_rx_flags)(struct net_device *dev,
int flags);
#define HAVE_SET_MAC_ADDR
int (*set_mac_address)(struct net_device *dev,
void *addr);
+#define HAVE_VALIDATE_ADDR
+ int (*validate_addr)(struct net_device *dev);
#define HAVE_PRIVATE_IOCTL
int (*do_ioctl)(struct net_device *dev,
struct ifreq *ifr, int cmd);
#define HAVE_SET_CONFIG
int (*set_config)(struct net_device *dev,
struct ifmap *map);
-#define HAVE_HEADER_CACHE
- int (*hard_header_cache)(struct neighbour *neigh,
- struct hh_cache *hh);
- void (*header_cache_update)(struct hh_cache *hh,
- struct net_device *dev,
- unsigned char * haddr);
#define HAVE_CHANGE_MTU
int (*change_mtu)(struct net_device *dev, int new_mtu);
void (*vlan_rx_kill_vid)(struct net_device *dev,
unsigned short vid);
- int (*hard_header_parse)(struct sk_buff *skb,
- unsigned char *haddr);
int (*neigh_setup)(struct net_device *dev, struct neigh_parms *);
#ifdef CONFIG_NETPOLL
struct netpoll_info *npinfo;
void (*poll_controller)(struct net_device *dev);
#endif
+#ifdef CONFIG_NET_NS
/* Network namespace this network device is inside */
struct net *nd_net;
+#endif
/* bridge stuff */
struct net_bridge_port *br_port;
/* rtnetlink link ops */
const struct rtnl_link_ops *rtnl_link_ops;
+ /* for setting kernel sock attribute on TCP connection setup */
+#define GSO_MAX_SIZE 65536
+ unsigned int gso_max_size;
+
/* The TX queue control structures */
unsigned int egress_subqueue_count;
struct net_device_subqueue egress_subqueue[1];
#define NETDEV_ALIGN 32
#define NETDEV_ALIGN_CONST (NETDEV_ALIGN - 1)
+/*
+ * Net namespace inlines
+ */
+static inline
+struct net *dev_net(const struct net_device *dev)
+{
+#ifdef CONFIG_NET_NS
+ return dev->nd_net;
+#else
+ return &init_net;
+#endif
+}
+
+static inline
+void dev_net_set(struct net_device *dev, struct net *net)
+{
+#ifdef CONFIG_NET_NS
+ release_net(dev->nd_net);
+ dev->nd_net = hold_net(net);
+#endif
+}
+
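/*
 * Example, not part of the patch: callers go through the accessors rather
 * than touching nd_net, which no longer exists when CONFIG_NET_NS is off.
 * A protocol that is not namespace-aware might bail out early; sketch:
 */
static inline int example_in_init_net(const struct net_device *dev)
{
        return dev_net(dev) == &init_net;       /* only serve the initial ns */
}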
/**
* netdev_priv - access network device private data
* @dev: network device
return dev->priv;
}
-#define SET_MODULE_OWNER(dev) do { } while (0)
/* Set the sysfs physical device reference for the network logical device
* if set prior to registration will cause a symlink during initialization.
*/
#define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
+/**
+ * netif_napi_add - initialize a napi context
+ * @dev: network device
+ * @napi: napi context
+ * @poll: polling function
+ * @weight: default weight
+ *
+ * netif_napi_add() must be used to initialize a napi context prior to calling
+ * *any* of the other napi related functions.
+ */
static inline void netif_napi_add(struct net_device *dev,
struct napi_struct *napi,
int (*poll)(struct napi_struct *, int),
#include <linux/interrupt.h>
#include <linux/notifier.h>
-extern struct net_device loopback_dev; /* The loopback */
extern rwlock_t dev_base_lock; /* Device list lock */
list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
-#define next_net_device(d) \
-({ \
- struct net_device *dev = d; \
- struct list_head *lh; \
- struct net *net; \
- \
- net = dev->nd_net; \
- lh = dev->dev_list.next; \
- lh == &net->dev_base_head ? NULL : net_device_entry(lh); \
-})
-
-#define first_net_device(N) \
-({ \
- struct net *NET = (N); \
- list_empty(&NET->dev_base_head) ? NULL : \
- net_device_entry(NET->dev_base_head.next); \
-})
+static inline struct net_device *next_net_device(struct net_device *dev)
+{
+ struct list_head *lh;
+ struct net *net;
+
+ net = dev_net(dev);
+ lh = dev->dev_list.next;
+ return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
+}
+
+static inline struct net_device *first_net_device(struct net *net)
+{
+ return list_empty(&net->dev_base_head) ? NULL :
+ net_device_entry(net->dev_base_head.next);
+}
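/*
 * Example, not part of the patch: the inlines type-check their arguments,
 * unlike the old statement-expression macros. Walking every device in one
 * namespace (caller holds dev_base_lock or the RTNL):
 */
static void example_dump_devices(struct net *net)
{
        struct net_device *dev;

        for (dev = first_net_device(net); dev; dev = next_net_device(dev))
                printk(KERN_DEBUG "%s: ifindex %d\n", dev->name, dev->ifindex);
}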
extern int netdev_boot_setup_check(struct net_device *dev);
extern unsigned long netdev_boot_base(const char *prefix, int unit);
extern void synchronize_net(void);
extern int register_netdevice_notifier(struct notifier_block *nb);
extern int unregister_netdevice_notifier(struct notifier_block *nb);
-extern int call_netdevice_notifiers(unsigned long val, void *v);
+extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
extern int dev_restart(struct net_device *dev);
extern int netpoll_trap(void);
#endif
+static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type,
+ const void *daddr, const void *saddr,
+ unsigned len)
+{
+ if (!dev->header_ops || !dev->header_ops->create)
+ return 0;
+
+ return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
+}
+
+static inline int dev_parse_header(const struct sk_buff *skb,
+ unsigned char *haddr)
+{
+ const struct net_device *dev = skb->dev;
+
+ if (!dev->header_ops || !dev->header_ops->parse)
+ return 0;
+ return dev->header_ops->parse(skb, haddr);
+}
+
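/*
 * Example, not part of the patch: callers use the wrappers and never poke
 * header_ops directly; both return 0 harmlessly on header-less devices.
 * Sketch (ETH_P_IP from linux/if_ether.h; a NULL saddr means the device
 * address is used):
 */
static int example_build_header(struct sk_buff *skb, struct net_device *dev,
                                const void *dest)
{
        unsigned char src[MAX_ADDR_LEN];

        if (dev_hard_header(skb, dev, ETH_P_IP, dest, NULL, skb->len) < 0)
                return -EINVAL;         /* device could not build a header */

        if (dev_parse_header(skb, src))
                printk(KERN_DEBUG "%s: parsed source address\n", dev->name);
        return 0;
}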
typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
extern int register_gifconf(unsigned int family, gifconf_func_t * gifconf);
static inline int unregister_gifconf(unsigned int family)
*
* Check individual transmit queue of a device with multiple transmit queues.
*/
-static inline int netif_subqueue_stopped(const struct net_device *dev,
+static inline int __netif_subqueue_stopped(const struct net_device *dev,
u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
#endif
}
+static inline int netif_subqueue_stopped(const struct net_device *dev,
+ struct sk_buff *skb)
+{
+ return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
+}
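/*
 * Example, not part of the patch: with the skb-based variant callers no
 * longer extract the queue index themselves. A transmit path might guard
 * like this (fragment):
 *
 *      if (netif_subqueue_stopped(dev, skb))
 *              return NETDEV_TX_BUSY;
 */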
/**
* netif_wake_subqueue - allow sending packets on subqueue
}
/* Use this variant when it is known for sure that it
- * is executing from interrupt context.
+ * is executing from hardware interrupt context or with hardware interrupts
+ * disabled.
*/
extern void dev_kfree_skb_irq(struct sk_buff *skb);
/* Use this variant in places where it could be invoked
- * either from interrupt or non-interrupt context.
+ * from either hardware interrupt or other context, with hardware interrupts
+ * either disabled or enabled.
*/
extern void dev_kfree_skb_any(struct sk_buff *skb);
extern unsigned dev_get_flags(const struct net_device *);
extern int dev_change_flags(struct net_device *, unsigned);
extern int dev_change_name(struct net_device *, char *);
+extern int dev_change_net_namespace(struct net_device *,
+ struct net *, const char *);
extern int dev_set_mtu(struct net_device *, int);
extern int dev_set_mac_address(struct net_device *,
struct sockaddr *);
* dev_put - release reference to device
* @dev: network device
*
- * Hold reference to device to keep it from being freed.
+ * Release reference to device to allow it to be freed.
*/
static inline void dev_put(struct net_device *dev)
{
* dev_hold - get reference to device
* @dev: network device
*
- * Release reference to device to allow it to be freed.
+ * Hold reference to device to keep it from being freed.
*/
static inline void dev_hold(struct net_device *dev)
{
static inline int netif_rx_schedule_prep(struct net_device *dev,
struct napi_struct *napi)
{
- return netif_running(dev) && napi_schedule_prep(napi);
+ return napi_schedule_prep(napi);
}
/* Add interface to tail of rx poll list. This assumes that _prep has
static inline void __netif_rx_schedule(struct net_device *dev,
struct napi_struct *napi)
{
- dev_hold(dev);
__napi_schedule(napi);
}
struct napi_struct *napi)
{
__napi_complete(napi);
- dev_put(dev);
}
/* Remove interface from poll list: it must be in the poll list
/**
* netif_tx_lock - grab network device transmit lock
* @dev: network device
+ * @cpu: cpu number of lock owner
*
* Get network device transmit lock
*/
-static inline void netif_tx_lock(struct net_device *dev)
+static inline void __netif_tx_lock(struct net_device *dev, int cpu)
{
spin_lock(&dev->_xmit_lock);
- dev->xmit_lock_owner = smp_processor_id();
+ dev->xmit_lock_owner = cpu;
+}
+
+static inline void netif_tx_lock(struct net_device *dev)
+{
+ __netif_tx_lock(dev, smp_processor_id());
}
static inline void netif_tx_lock_bh(struct net_device *dev)
spin_unlock_bh(&dev->_xmit_lock);
}
+#define HARD_TX_LOCK(dev, cpu) { \
+ if ((dev->features & NETIF_F_LLTX) == 0) { \
+ __netif_tx_lock(dev, cpu); \
+ } \
+}
+
+#define HARD_TX_UNLOCK(dev) { \
+ if ((dev->features & NETIF_F_LLTX) == 0) { \
+ netif_tx_unlock(dev); \
+ } \
+}
+
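/*
 * Example, not part of the patch: how the core transmit path is expected
 * to use these. LLTX drivers serialize inside their own hard_start_xmit;
 * everyone else is serialized here. Simplified sketch:
 */
static int example_xmit_one(struct sk_buff *skb, struct net_device *dev)
{
        int cpu = smp_processor_id();
        int rc = NETDEV_TX_BUSY;

        HARD_TX_LOCK(dev, cpu);
        if (!netif_queue_stopped(dev) && !netif_subqueue_stopped(dev, skb))
                rc = dev->hard_start_xmit(skb, dev);
        HARD_TX_UNLOCK(dev);
        return rc;
}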
static inline void netif_tx_disable(struct net_device *dev)
{
netif_tx_lock_bh(dev);
extern void __dev_set_rx_mode(struct net_device *dev);
extern int dev_unicast_delete(struct net_device *dev, void *addr, int alen);
extern int dev_unicast_add(struct net_device *dev, void *addr, int alen);
+extern int dev_unicast_sync(struct net_device *to, struct net_device *from);
+extern void dev_unicast_unsync(struct net_device *to, struct net_device *from);
extern int dev_mc_delete(struct net_device *dev, void *addr, int alen, int all);
extern int dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
extern int dev_mc_sync(struct net_device *to, struct net_device *from);
extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
extern int __dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all);
extern int __dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly);
+extern int __dev_addr_sync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
+extern void __dev_addr_unsync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
extern void dev_set_promiscuity(struct net_device *dev, int inc);
extern void dev_set_allmulti(struct net_device *dev, int inc);
extern void netdev_state_change(struct net_device *dev);
unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
}
+static inline void netif_set_gso_max_size(struct net_device *dev,
+ unsigned int size)
+{
+ dev->gso_max_size = size;
+}
+
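/*
 * Example, not part of the patch: a driver whose DMA engine cannot handle
 * a full GSO_MAX_SIZE (64KB) frame lowers the advertised limit at probe
 * time, before register_netdev(). The 16KB figure is illustrative only:
 *
 *      netif_set_gso_max_size(dev, 16384);
 */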
/* On bonding slaves other than the currently active slave, suppress
* duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
* ARP on active-backup slaves with arp_validate enabled.