diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 6f85db3..9d77b1d 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -11,7 +11,7 @@
  *		Fred N. van Kempen,
  *		Corey Minyard
  *		Donald J. Becker,
- *		Alan Cox,
+ *		Alan Cox,
  *		Bjorn Ekwall.
  *		Pekka Riikonen
  *
@@ -42,6 +42,7 @@
 #include <linux/workqueue.h>
 
 #include <net/net_namespace.h>
+#include <net/dsa.h>
 
 struct vlan_group;
 struct ethtool_ops;
@@ -61,9 +62,7 @@ struct wireless_dev;
 #define NET_XMIT_DROP		1	/* skb dropped			*/
 #define NET_XMIT_CN		2	/* congestion notification	*/
 #define NET_XMIT_POLICED	3	/* skb is shot by police	*/
-#define NET_XMIT_BYPASS		4	/* packet does not leave via dequeue;
-					   (TC use only - dev_queue_xmit
-					   returns this as NET_XMIT_SUCCESS) */
+#define NET_XMIT_MASK		0xFFFF	/* qdisc flags in net/sch_generic.h */
 
 /* Backlog congestion levels */
 #define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
@@ -88,19 +87,23 @@ struct wireless_dev;
 #define NETDEV_TX_BUSY		1	/* driver tx path was busy*/
 #define NETDEV_TX_LOCKED	-1	/* driver tx lock was already taken */
 
+#ifdef __KERNEL__
+
 /*
  *	Compute the worst case header length according to the protocols
  *	used.
  */
-#if !defined(CONFIG_AX25) && !defined(CONFIG_AX25_MODULE) && !defined(CONFIG_TR)
-#define LL_MAX_HEADER	32
-#else
-#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
-#define LL_MAX_HEADER	96
+#if defined(CONFIG_WLAN_80211) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+# if defined(CONFIG_MAC80211_MESH)
+#  define LL_MAX_HEADER	128
+# else
+#  define LL_MAX_HEADER	96
+# endif
+#elif defined(CONFIG_TR)
+# define LL_MAX_HEADER	48
 #else
-#define LL_MAX_HEADER	48
-#endif
+# define LL_MAX_HEADER	32
 #endif
 
 #if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \
@@ -112,13 +115,7 @@ struct wireless_dev;
 #define MAX_HEADER (LL_MAX_HEADER + 48)
 #endif
 
-struct net_device_subqueue
-{
-	/* Give a control state for each queue.  This struct may contain
-	 * per-queue locks in the future.
-	 */
-	unsigned long	state;
-};
+#endif  /*  __KERNEL__  */
 
 /*
  *	Network device statistics. Akin to the 2.0 ether stats but
@@ -244,11 +241,16 @@ struct hh_cache
  *
  * We could use other alignment values, but we must maintain the
  * relationship HH alignment <= LL alignment.
+ *
+ * LL_ALLOCATED_SPACE also takes into account the tailroom the device
+ * may need.
  */
 #define LL_RESERVED_SPACE(dev) \
-	(((dev)->hard_header_len&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
+	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
 #define LL_RESERVED_SPACE_EXTRA(dev,extra) \
-	((((dev)->hard_header_len+extra)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
+	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
+#define LL_ALLOCATED_SPACE(dev) \
+	((((dev)->hard_header_len+(dev)->needed_headroom+(dev)->needed_tailroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
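These macros are meant to be used as a pair when building an outbound frame: size the allocation with LL_ALLOCATED_SPACE() so that both the extra headroom and the extra tailroom the device asked for are covered, then reserve LL_RESERVED_SPACE() of headroom before filling in the payload, which leaves the tailroom in place. A minimal sketch of that pattern (dev and payload_len are hypothetical):

	struct sk_buff *skb;

	/* one allocation covers hard header, payload and device tailroom */
	skb = alloc_skb(LL_ALLOCATED_SPACE(dev) + payload_len, GFP_ATOMIC);
	if (skb == NULL)
		return NULL;
	skb_reserve(skb, LL_RESERVED_SPACE(dev));	/* headroom for the hard header */
	skb_put(skb, payload_len);			/* tailroom beyond this stays free */
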
 struct header_ops {
 	int	(*create) (struct sk_buff *skb, struct net_device *dev,
@@ -270,14 +272,11 @@ struct header_ops {
 
 enum netdev_state_t
 {
-	__LINK_STATE_XOFF=0,
 	__LINK_STATE_START,
 	__LINK_STATE_PRESENT,
-	__LINK_STATE_SCHED,
 	__LINK_STATE_NOCARRIER,
 	__LINK_STATE_LINKWATCH_PENDING,
 	__LINK_STATE_DORMANT,
-	__LINK_STATE_QDISC_RUNNING,
 };
 
@@ -319,9 +318,15 @@ struct napi_struct {
 enum
 {
 	NAPI_STATE_SCHED,	/* Poll is scheduled */
+	NAPI_STATE_DISABLE,	/* Disable pending */
 };
 
-extern void FASTCALL(__napi_schedule(struct napi_struct *n));
+extern void __napi_schedule(struct napi_struct *n);
+
+static inline int napi_disable_pending(struct napi_struct *n)
+{
+	return test_bit(NAPI_STATE_DISABLE, &n->state);
+}
 
 /**
  *	napi_schedule_prep - check if napi can be scheduled
@@ -329,11 +334,13 @@ extern void FASTCALL(__napi_schedule(struct napi_struct *n));
  *
  * Test if NAPI routine is already running, and if not mark
  * it as running.  This is used as a condition variable
- * insure only one NAPI poll instance runs
+ * insure only one NAPI poll instance runs.  We also make
+ * sure there is no pending NAPI disable.
  */
 static inline int napi_schedule_prep(struct napi_struct *n)
 {
-	return !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
+	return !napi_disable_pending(n) &&
+		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
 }
 
@@ -375,9 +382,11 @@ static inline void __napi_complete(struct napi_struct *n)
 
 static inline void napi_complete(struct napi_struct *n)
 {
-	local_irq_disable();
+	unsigned long flags;
+
+	local_irq_save(flags);
 	__napi_complete(n);
-	local_irq_enable();
+	local_irq_restore(flags);
 }
 
 /**
@@ -389,8 +398,10 @@ static inline void napi_complete(struct napi_struct *n)
  */
 static inline void napi_disable(struct napi_struct *n)
 {
+	set_bit(NAPI_STATE_DISABLE, &n->state);
 	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
-		msleep_interruptible(1);
+		msleep(1);
+	clear_bit(NAPI_STATE_DISABLE, &n->state);
 }
 
 /**
@@ -425,6 +436,21 @@ static inline void napi_synchronize(const struct napi_struct *n)
 # define napi_synchronize(n)	barrier()
 #endif
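Together these helpers form the usual NAPI driver pattern: the interrupt handler masks device interrupts and schedules the poll routine via napi_schedule_prep()/__napi_schedule(), and the poll routine calls napi_complete() and re-enables interrupts only once it has done less work than its budget. A sketch of that pattern; the mydrv_* types and helpers are hypothetical:

static irqreturn_t mydrv_interrupt(int irq, void *dev_id)
{
	struct mydrv_priv *priv = dev_id;

	if (napi_schedule_prep(&priv->napi)) {
		mydrv_mask_irqs(priv);		/* hypothetical helper */
		__napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}

static int mydrv_poll(struct napi_struct *napi, int budget)
{
	struct mydrv_priv *priv = container_of(napi, struct mydrv_priv, napi);
	int work_done = mydrv_rx(priv, budget);	/* hypothetical helper */

	if (work_done < budget) {
		napi_complete(napi);
		mydrv_unmask_irqs(priv);	/* hypothetical helper */
	}
	return work_done;
}
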
+enum netdev_queue_state_t
+{
+	__QUEUE_STATE_XOFF,
+	__QUEUE_STATE_FROZEN,
+};
+
+struct netdev_queue {
+	struct net_device	*dev;
+	struct Qdisc		*qdisc;
+	unsigned long		state;
+	spinlock_t		_xmit_lock;
+	int			xmit_lock_owner;
+	struct Qdisc		*qdisc_sleeping;
+} ____cacheline_aligned_in_smp;
+
 /*
  *	The DEVICE structure.
  *	Actually, this whole structure is a big mistake.  It mixes I/O
@@ -446,6 +472,8 @@ struct net_device
 	char			name[IFNAMSIZ];
 	/* device name hash chain */
 	struct hlist_node	name_hlist;
+	/* snmp alias */
+	char			*ifalias;
 
 	/*
 	 *	I/O specific fields
@@ -493,7 +521,6 @@ struct net_device
 #define NETIF_F_LLTX		4096	/* LockLess TX - deprecated. Please */
 					/* do not use LLTX in new drivers */
 #define NETIF_F_NETNS_LOCAL	8192	/* Does not change network namespaces */
-#define NETIF_F_MULTI_QUEUE	16384	/* Has multiple TX/RX queues */
 #define NETIF_F_LRO		32768	/* large receive offload */
 
 /* Segmentation offload features */
@@ -514,7 +541,13 @@ struct net_device
 #define NETIF_F_V6_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
 #define NETIF_F_ALL_CSUM	(NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
 
-	struct net_device	*next_sched;
+	/*
+	 * If one device supports one of these features, then enable them
+	 * for all in netdev_increment_features.
+	 */
+#define NETIF_F_ONE_FOR_ALL	(NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
+				 NETIF_F_SG | NETIF_F_HIGHDMA | \
+				 NETIF_F_FRAGLIST)
 
 	/* Interface index. Unique device identifier	*/
 	int			ifindex;
@@ -555,6 +588,13 @@ struct net_device
 	unsigned short		type;	/* interface hardware type	*/
 	unsigned short		hard_header_len;	/* hardware hdr length	*/
 
+	/* extra head- and tailroom the hardware may need, but not in all cases
+	 * can this be guaranteed, especially tailroom. Some cases also use
+	 * LL_MAX_HEADER instead to allocate the skb.
+	 */
+	unsigned short		needed_headroom;
+	unsigned short		needed_tailroom;
+
 	struct net_device	*master; /* Pointer to master device of a group,
 					  * which this device is member of.
 					  */
@@ -564,17 +604,21 @@ struct net_device
 	unsigned char		addr_len;	/* hardware address length	*/
 	unsigned short          dev_id;		/* for shared network cards */
 
+	spinlock_t		addr_list_lock;
 	struct dev_addr_list	*uc_list;	/* Secondary unicast mac addresses */
 	int			uc_count;	/* Number of installed ucasts	*/
 	int			uc_promisc;
 	struct dev_addr_list	*mc_list;	/* Multicast mac addresses	*/
 	int			mc_count;	/* Number of installed mcasts	*/
-	int			promiscuity;
-	int			allmulti;
+	unsigned int		promiscuity;
+	unsigned int		allmulti;
 
 
 	/* Protocol specific pointers */
 
+#ifdef CONFIG_NET_DSA
+	void			*dsa_ptr;	/* dsa specific data */
+#endif
 	void 			*atalk_ptr;	/* AppleTalk link 	*/
 	void			*ip_ptr;	/* IPv4 specific data	*/
 	void                    *dn_ptr;        /* DECnet specific data */
@@ -594,32 +638,21 @@ struct net_device
 	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast add	*/
 
-/*
- * Cache line mostly used on queue transmit path (qdisc)
- */
-	/* device queue lock */
-	spinlock_t		queue_lock ____cacheline_aligned_in_smp;
-	struct Qdisc		*qdisc;
-	struct Qdisc		*qdisc_sleeping;
-	struct list_head	qdisc_list;
-	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
+	struct netdev_queue	rx_queue;
+
+	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;
 
-	/* Partially transmitted GSO packet. */
-	struct sk_buff		*gso_skb;
+	/* Number of TX queues allocated at alloc_netdev_mq() time  */
+	unsigned int		num_tx_queues;
 
-	/* ingress path synchronizer */
-	spinlock_t		ingress_lock;
-	struct Qdisc		*qdisc_ingress;
+	/* Number of TX queues currently active in device */
+	unsigned int		real_num_tx_queues;
+
+	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
+	spinlock_t		tx_global_lock;
 
 /*
  * One part is mostly used on xmit path (device)
  */
-	/* hard_start_xmit synchronizer */
-	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
-	/* cpu id of processor entered to hard_start_xmit or -1,
-	   if nobody entered there.
-	 */
-	int			xmit_lock_owner;
 	void			*priv;	/* pointer to private data	*/
 	int			(*hard_start_xmit) (struct sk_buff *skb,
 						    struct net_device *dev);
@@ -669,6 +702,8 @@ struct net_device
 #define HAVE_SET_MAC_ADDR
 	int			(*set_mac_address)(struct net_device *dev,
 						   void *addr);
+#define HAVE_VALIDATE_ADDR
+	int			(*validate_addr)(struct net_device *dev);
 #define HAVE_PRIVATE_IOCTL
 	int			(*do_ioctl)(struct net_device *dev,
 					    struct ifreq *ifr, int cmd);
@@ -696,13 +731,23 @@ struct net_device
 	void                    (*poll_controller)(struct net_device *dev);
 #endif
 
+	u16			(*select_queue)(struct net_device *dev,
+						struct sk_buff *skb);
+
+#ifdef CONFIG_NET_NS
 	/* Network namespace this network device is inside */
 	struct net		*nd_net;
+#endif
+
+	/* mid-layer private */
+	void			*ml_priv;
 
 	/* bridge stuff */
 	struct net_bridge_port	*br_port;
 	/* macvlan */
 	struct macvlan_port	*macvlan_port;
+	/* GARP */
+	struct garp_port	*garp_port;
 
 	/* class/net/name entry */
 	struct device		dev;
@@ -712,15 +757,79 @@ struct net_device
 	/* rtnetlink link ops */
 	const struct rtnl_link_ops *rtnl_link_ops;
 
-	/* The TX queue control structures */
-	unsigned int			egress_subqueue_count;
-	struct net_device_subqueue	egress_subqueue[1];
+	/* VLAN feature mask */
+	unsigned long vlan_features;
+
+	/* for setting kernel sock attribute on TCP connection setup */
+#define GSO_MAX_SIZE		65536
+	unsigned int		gso_max_size;
 };
 #define to_net_dev(d) container_of(d, struct net_device, dev)
 
 #define	NETDEV_ALIGN		32
 #define	NETDEV_ALIGN_CONST	(NETDEV_ALIGN - 1)
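num_tx_queues is fixed when the device is allocated with alloc_netdev_mq(), while real_num_tx_queues may be lowered to match what the hardware actually enables; needed_headroom and needed_tailroom feed the LL_* macros shown earlier. A sketch of a probe path using these fields (mydrv_priv and the specific sizes are hypothetical):

	struct net_device *dev;

	dev = alloc_netdev_mq(sizeof(struct mydrv_priv), "eth%d",
			      ether_setup, 4);	/* up to 4 TX rings */
	if (dev == NULL)
		return -ENOMEM;
	dev->real_num_tx_queues = 2;	/* only two rings wired up on this board */
	dev->needed_headroom = 16;	/* room for a device-specific TX header */
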
+static inline
+struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
+					 unsigned int index)
+{
+	return &dev->_tx[index];
+}
+
+static inline void netdev_for_each_tx_queue(struct net_device *dev,
+					    void (*f)(struct net_device *,
+						      struct netdev_queue *,
+						      void *),
+					    void *arg)
+{
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++)
+		f(dev, &dev->_tx[i], arg);
+}
+
+/*
+ * Net namespace inlines
+ */
+static inline
+struct net *dev_net(const struct net_device *dev)
+{
+#ifdef CONFIG_NET_NS
+	return dev->nd_net;
+#else
+	return &init_net;
+#endif
+}
+
+static inline
+void dev_net_set(struct net_device *dev, struct net *net)
+{
+#ifdef CONFIG_NET_NS
+	release_net(dev->nd_net);
+	dev->nd_net = hold_net(net);
+#endif
+}
+
+static inline bool netdev_uses_dsa_tags(struct net_device *dev)
+{
+#ifdef CONFIG_NET_DSA_TAG_DSA
+	if (dev->dsa_ptr != NULL)
+		return dsa_uses_dsa_tags(dev->dsa_ptr);
+#endif
+
+	return 0;
+}
+
+static inline bool netdev_uses_trailer_tags(struct net_device *dev)
+{
+#ifdef CONFIG_NET_DSA_TAG_TRAILER
+	if (dev->dsa_ptr != NULL)
+		return dsa_uses_trailer_tags(dev->dsa_ptr);
+#endif
+
+	return 0;
+}
+
 /**
  *	netdev_priv - access network device private data
  *	@dev: network device
  *
@@ -729,7 +838,9 @@ struct net_device
  * Get network device private data
  */
 static inline void *netdev_priv(const struct net_device *dev)
 {
-	return dev->priv;
+	return (char *)dev + ((sizeof(struct net_device)
+			       + NETDEV_ALIGN_CONST)
+			      & ~NETDEV_ALIGN_CONST);
 }
 
 /* Set the sysfs physical device reference for the network logical device
@@ -737,6 +848,16 @@ static inline void *netdev_priv(const struct net_device *dev)
  */
 #define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))
 
+/**
+ *	netif_napi_add - initialize a napi context
+ *	@dev:  network device
+ *	@napi: napi context
+ *	@poll: polling function
+ *	@weight: default weight
+ *
+ * netif_napi_add() must be used to initialize a napi context prior to calling
+ * *any* of the other napi related functions.
+ */
 static inline void netif_napi_add(struct net_device *dev,
 				  struct napi_struct *napi,
 				  int (*poll)(struct napi_struct *, int),
@@ -754,6 +875,19 @@ static inline void netif_napi_add(struct net_device *dev,
 	set_bit(NAPI_STATE_SCHED, &napi->state);
 }
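As the comment above says, netif_napi_add() comes first: a driver typically calls it once from its probe routine, before register_netdev(), passing its poll callback and a default weight (64 is the customary value). Continuing the hypothetical mydrv example from earlier:

	/* in the probe routine, before register_netdev() */
	netif_napi_add(dev, &priv->napi, mydrv_poll, 64);
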
+/**
+ *  netif_napi_del - remove a napi context
+ *  @napi: napi context
+ *
+ *  netif_napi_del() removes a napi context from the network device napi list
+ */
+static inline void netif_napi_del(struct napi_struct *napi)
+{
+#ifdef CONFIG_NETPOLL
+	list_del(&napi->dev_list);
+#endif
+}
+
 struct packet_type {
 	__be16			type;	/* This is really htons(ether_type). */
 	struct net_device	*dev;	/* NULL is wildcarded here	     */
@@ -787,7 +921,7 @@ static inline struct net_device *next_net_device(struct net_device *dev)
 	struct list_head *lh;
 	struct net *net;
 
-	net = dev->nd_net;
+	net = dev_net(dev);
 	lh = dev->dev_list.next;
 	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
 }
@@ -814,6 +948,7 @@
 extern struct net_device	*__dev_get_by_name(struct net *net, const char *name);
 extern int		dev_alloc_name(struct net_device *dev, const char *name);
 extern int		dev_open(struct net_device *dev);
 extern int		dev_close(struct net_device *dev);
+extern void		dev_disable_lro(struct net_device *dev);
 extern int		dev_queue_xmit(struct sk_buff *skb);
 extern int		register_netdevice(struct net_device *dev);
 extern void		unregister_netdevice(struct net_device *dev);
@@ -834,7 +969,7 @@ static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
 				  const void *daddr, const void *saddr,
 				  unsigned len)
 {
-	if (!dev->header_ops)
+	if (!dev->header_ops || !dev->header_ops->create)
 		return 0;
 
 	return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
@@ -863,7 +998,7 @@ static inline int unregister_gifconf(unsigned int family)
  */
 struct softnet_data
 {
-	struct net_device	*output_queue;
+	struct Qdisc		*output_queue;
 	struct sk_buff_head	input_pkt_queue;
 	struct list_head	poll_list;
 	struct sk_buff		*completion_queue;
@@ -878,12 +1013,25 @@ DECLARE_PER_CPU(struct softnet_data,softnet_data)
 
 #define HAVE_NETIF_QUEUE
 
-extern void __netif_schedule(struct net_device *dev);
+extern void __netif_schedule(struct Qdisc *q);
+
+static inline void netif_schedule_queue(struct netdev_queue *txq)
+{
+	if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
+		__netif_schedule(txq->qdisc);
+}
 
-static inline void netif_schedule(struct net_device *dev)
+static inline void netif_tx_schedule_all(struct net_device *dev)
 {
-	if (!test_bit(__LINK_STATE_XOFF, &dev->state))
-		__netif_schedule(dev);
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++)
+		netif_schedule_queue(netdev_get_tx_queue(dev, i));
+}
+
+static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
+{
+	clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
 }
 
 /**
@@ -894,7 +1042,29 @@ static inline void netif_schedule(struct net_device *dev)
  */
 static inline void netif_start_queue(struct net_device *dev)
 {
-	clear_bit(__LINK_STATE_XOFF, &dev->state);
+	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
+}
+
+static inline void netif_tx_start_all_queues(struct net_device *dev)
+{
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+		netif_tx_start_queue(txq);
+	}
+}
+
+static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
+{
+#ifdef CONFIG_NETPOLL_TRAP
+	if (netpoll_trap()) {
+		clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+		return;
+	}
+#endif
+	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
+		__netif_schedule(dev_queue->qdisc);
 }
 
 /**
@@ -906,14 +1076,22 @@ static inline void netif_start_queue(struct net_device *dev)
  */
 static inline void netif_wake_queue(struct net_device *dev)
 {
-#ifdef CONFIG_NETPOLL_TRAP
-	if (netpoll_trap()) {
-		clear_bit(__LINK_STATE_XOFF, &dev->state);
-		return;
+	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
+}
+
+static inline void netif_tx_wake_all_queues(struct net_device *dev)
+{
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+		netif_tx_wake_queue(txq);
 	}
-#endif
-	if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state))
-		__netif_schedule(dev);
+}
+
+static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
+{
+	set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
 }
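These start/stop/wake primitives are the TX flow-control interface: a driver stops the queue from its ->hard_start_xmit() when the ring cannot take another frame (netif_stop_queue(), defined just below) and wakes it from the TX-completion path. A sketch with hypothetical mydrv_* helpers:

static int mydrv_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mydrv_priv *priv = netdev_priv(dev);

	mydrv_post_to_ring(priv, skb);		/* hypothetical helper */
	if (!mydrv_ring_has_room(priv))		/* hypothetical helper */
		netif_stop_queue(dev);
	return NETDEV_TX_OK;
}

static void mydrv_tx_complete(struct net_device *dev)
{
	struct mydrv_priv *priv = netdev_priv(dev);

	mydrv_reclaim_ring(priv);		/* hypothetical helper */
	if (netif_queue_stopped(dev) && mydrv_ring_has_room(priv))
		netif_wake_queue(dev);
}
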
 /**
@@ -925,7 +1103,22 @@ static inline void netif_wake_queue(struct net_device *dev)
  */
 static inline void netif_stop_queue(struct net_device *dev)
 {
-	set_bit(__LINK_STATE_XOFF, &dev->state);
+	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
+}
+
+static inline void netif_tx_stop_all_queues(struct net_device *dev)
+{
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+		netif_tx_stop_queue(txq);
+	}
+}
+
+static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
+{
+	return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
 }
 
 /**
@@ -936,7 +1129,12 @@ static inline void netif_stop_queue(struct net_device *dev)
  */
 static inline int netif_queue_stopped(const struct net_device *dev)
 {
-	return test_bit(__LINK_STATE_XOFF, &dev->state);
+	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
+}
+
+static inline int netif_tx_queue_frozen(const struct netdev_queue *dev_queue)
+{
+	return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state);
 }
 
 /**
@@ -966,9 +1164,8 @@ static inline int netif_running(const struct net_device *dev)
  */
 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
 {
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
-	clear_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
-#endif
+	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
+	clear_bit(__QUEUE_STATE_XOFF, &txq->state);
 }
 
 /**
@@ -980,13 +1177,12 @@ static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
  */
 static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
 {
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
 #ifdef CONFIG_NETPOLL_TRAP
 	if (netpoll_trap())
 		return;
 #endif
-	set_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
-#endif
+	set_bit(__QUEUE_STATE_XOFF, &txq->state);
 }
 
 /**
@@ -996,17 +1192,18 @@ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
  *
  * Check individual transmit queue of a device with multiple transmit queues.
  */
-static inline int netif_subqueue_stopped(const struct net_device *dev,
+static inline int __netif_subqueue_stopped(const struct net_device *dev,
 					 u16 queue_index)
 {
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
-	return test_bit(__LINK_STATE_XOFF,
-			&dev->egress_subqueue[queue_index].state);
-#else
-	return 0;
-#endif
+	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
+	return test_bit(__QUEUE_STATE_XOFF, &txq->state);
 }
 
+static inline int netif_subqueue_stopped(const struct net_device *dev,
+					 struct sk_buff *skb)
+{
+	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
+}
 
 /**
  *	netif_wake_subqueue - allow sending packets on subqueue
  *	@dev: network device
  *	@queue_index: sub queue index
  *
  *	Resume individual transmit queue of a device with multiple transmit queues.
@@ -1017,15 +1214,13 @@ static inline int netif_subqueue_stopped(const struct net_device *dev,
  */
 static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
 {
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
 #ifdef CONFIG_NETPOLL_TRAP
 	if (netpoll_trap())
 		return;
 #endif
-	if (test_and_clear_bit(__LINK_STATE_XOFF,
-			       &dev->egress_subqueue[queue_index].state))
-		__netif_schedule(dev);
-#endif
+	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state))
+		__netif_schedule(txq->qdisc);
 }
 
 /**
@@ -1033,24 +1228,21 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
  *	@dev: network device
  *
  *	Check if device has multiple transmit queues
- *	Always falls if NETDEVICE_MULTIQUEUE is not configured
  */
 static inline int netif_is_multiqueue(const struct net_device *dev)
 {
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
-	return (!!(NETIF_F_MULTI_QUEUE & dev->features));
-#else
-	return 0;
-#endif
+	return (dev->num_tx_queues > 1);
 }
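For a multiqueue device the same flow-control pattern is applied per ring, keyed by the skb's queue mapping. A sketch, again with the hypothetical mydrv_* helpers:

	u16 ring = skb_get_queue_mapping(skb);

	/* in ->hard_start_xmit(), after posting the frame to that ring */
	if (!mydrv_ring_has_room(priv, ring))
		netif_stop_subqueue(dev, ring);

	/* in the TX-completion path for that ring */
	if (__netif_subqueue_stopped(dev, ring) && mydrv_ring_has_room(priv, ring))
		netif_wake_subqueue(dev, ring);
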
 /* Use this variant when it is known for sure that it
- * is executing from interrupt context.
+ * is executing from hardware interrupt context or with hardware interrupts
+ * disabled.
  */
 extern void dev_kfree_skb_irq(struct sk_buff *skb);
 
 /* Use this variant in places where it could be invoked
- * either from interrupt or non-interrupt context.
+ * from either hardware interrupt or other context, with hardware interrupts
+ * either disabled or enabled.
  */
 extern void dev_kfree_skb_any(struct sk_buff *skb);
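The practical rule: dev_kfree_skb_irq() defers the actual free to softirq context, so it is the one to use from a hard interrupt handler or any section running with hardware interrupts off, while dev_kfree_skb_any() checks the current context and picks the safe variant at a small runtime cost. For example:

	/* TX completion inside the device's hard IRQ handler */
	dev_kfree_skb_irq(skb);

	/* teardown path that may run in either context */
	dev_kfree_skb_any(skb);
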
@@ -1059,19 +1251,22 @@ extern int		netif_rx(struct sk_buff *skb);
 extern int		netif_rx_ni(struct sk_buff *skb);
 #define HAVE_NETIF_RECEIVE_SKB 1
 extern int		netif_receive_skb(struct sk_buff *skb);
+extern void		netif_nit_deliver(struct sk_buff *skb);
 extern int		dev_valid_name(const char *name);
 extern int		dev_ioctl(struct net *net, unsigned int cmd, void __user *);
 extern int		dev_ethtool(struct net *net, struct ifreq *);
 extern unsigned		dev_get_flags(const struct net_device *);
 extern int		dev_change_flags(struct net_device *, unsigned);
-extern int		dev_change_name(struct net_device *, char *);
+extern int		dev_change_name(struct net_device *, const char *);
+extern int		dev_set_alias(struct net_device *, const char *, size_t);
 extern int		dev_change_net_namespace(struct net_device *,
 						 struct net *, const char *);
 extern int		dev_set_mtu(struct net_device *, int);
 extern int		dev_set_mac_address(struct net_device *,
 					    struct sockaddr *);
 extern int		dev_hard_start_xmit(struct sk_buff *skb,
-					    struct net_device *dev);
+					    struct net_device *dev,
+					    struct netdev_queue *txq);
 
 extern int		netdev_budget;
@@ -1251,7 +1446,7 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
 static inline int netif_rx_schedule_prep(struct net_device *dev,
 					 struct napi_struct *napi)
 {
-	return netif_running(dev) && napi_schedule_prep(napi);
+	return napi_schedule_prep(napi);
 }
 
 /* Add interface to tail of rx poll list. This assumes that _prep has
@@ -1260,7 +1455,6 @@ static inline int netif_rx_schedule_prep(struct net_device *dev,
 static inline void __netif_rx_schedule(struct net_device *dev,
 				       struct napi_struct *napi)
 {
-	dev_hold(dev);
 	__napi_schedule(napi);
 }
 
@@ -1291,7 +1485,6 @@ static inline void __netif_rx_complete(struct net_device *dev,
 					struct napi_struct *napi)
 {
 	__napi_complete(napi);
-	dev_put(dev);
 }
 
 /* Remove interface from poll list: it must be in the poll list
@@ -1309,67 +1502,143 @@ static inline void netif_rx_complete(struct net_device *dev,
 	local_irq_restore(flags);
 }
 
+static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
+{
+	spin_lock(&txq->_xmit_lock);
+	txq->xmit_lock_owner = cpu;
+}
+
+static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
+{
+	spin_lock_bh(&txq->_xmit_lock);
+	txq->xmit_lock_owner = smp_processor_id();
+}
+
+static inline int __netif_tx_trylock(struct netdev_queue *txq)
+{
+	int ok = spin_trylock(&txq->_xmit_lock);
+	if (likely(ok))
+		txq->xmit_lock_owner = smp_processor_id();
+	return ok;
+}
+
+static inline void __netif_tx_unlock(struct netdev_queue *txq)
+{
+	txq->xmit_lock_owner = -1;
+	spin_unlock(&txq->_xmit_lock);
+}
+
+static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
+{
+	txq->xmit_lock_owner = -1;
+	spin_unlock_bh(&txq->_xmit_lock);
+}
+
 /**
  *	netif_tx_lock - grab network device transmit lock
  *	@dev: network device
- *	@cpu: cpu number of lock owner
  *
  *	Get network device transmit lock
  */
-static inline void __netif_tx_lock(struct net_device *dev, int cpu)
-{
-	spin_lock(&dev->_xmit_lock);
-	dev->xmit_lock_owner = cpu;
-}
-
 static inline void netif_tx_lock(struct net_device *dev)
 {
-	__netif_tx_lock(dev, smp_processor_id());
+	unsigned int i;
+	int cpu;
+
+	spin_lock(&dev->tx_global_lock);
+	cpu = smp_processor_id();
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+		/* We are the only thread of execution doing a
+		 * freeze, but we have to grab the _xmit_lock in
+		 * order to synchronize with threads which are in
+		 * the ->hard_start_xmit() handler and already
+		 * checked the frozen bit.
+		 */
+		__netif_tx_lock(txq, cpu);
+		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
+		__netif_tx_unlock(txq);
+	}
 }
 
 static inline void netif_tx_lock_bh(struct net_device *dev)
 {
-	spin_lock_bh(&dev->_xmit_lock);
-	dev->xmit_lock_owner = smp_processor_id();
-}
-
-static inline int netif_tx_trylock(struct net_device *dev)
-{
-	int ok = spin_trylock(&dev->_xmit_lock);
-	if (likely(ok))
-		dev->xmit_lock_owner = smp_processor_id();
-	return ok;
+	local_bh_disable();
+	netif_tx_lock(dev);
 }
 
 static inline void netif_tx_unlock(struct net_device *dev)
 {
-	dev->xmit_lock_owner = -1;
-	spin_unlock(&dev->_xmit_lock);
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+		/* No need to grab the _xmit_lock here.  If the
+		 * queue is not stopped for another reason, we
+		 * force a schedule.
+		 */
+		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
+		if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
+			__netif_schedule(txq->qdisc);
+	}
+	spin_unlock(&dev->tx_global_lock);
 }
 
 static inline void netif_tx_unlock_bh(struct net_device *dev)
 {
-	dev->xmit_lock_owner = -1;
-	spin_unlock_bh(&dev->_xmit_lock);
+	netif_tx_unlock(dev);
+	local_bh_enable();
 }
 
-#define HARD_TX_LOCK(dev, cpu) {			\
+#define HARD_TX_LOCK(dev, txq, cpu) {			\
 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
-		__netif_tx_lock(dev, cpu);		\
+		__netif_tx_lock(txq, cpu);		\
 	}						\
 }
 
-#define HARD_TX_UNLOCK(dev) {				\
+#define HARD_TX_UNLOCK(dev, txq) {			\
 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
-		netif_tx_unlock(dev);			\
+		__netif_tx_unlock(txq);			\
 	}						\
 }
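HARD_TX_LOCK()/HARD_TX_UNLOCK() are what the core transmit path wraps around a non-LLTX driver's ->hard_start_xmit(), now taken per queue rather than per device. A simplified sketch of the caller's side, loosely modelled on dev_queue_xmit() with the qdisc details elided:

	struct netdev_queue *txq;
	int rc = NETDEV_TX_BUSY;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
		rc = dev_hard_start_xmit(skb, dev, txq);
	HARD_TX_UNLOCK(dev, txq);
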
 static inline void netif_tx_disable(struct net_device *dev)
 {
-	netif_tx_lock_bh(dev);
-	netif_stop_queue(dev);
-	netif_tx_unlock_bh(dev);
+	unsigned int i;
+	int cpu;
+
+	local_bh_disable();
+	cpu = smp_processor_id();
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+		__netif_tx_lock(txq, cpu);
+		netif_tx_stop_queue(txq);
+		__netif_tx_unlock(txq);
+	}
+	local_bh_enable();
+}
+
+static inline void netif_addr_lock(struct net_device *dev)
+{
+	spin_lock(&dev->addr_list_lock);
+}
+
+static inline void netif_addr_lock_bh(struct net_device *dev)
+{
+	spin_lock_bh(&dev->addr_list_lock);
+}
+
+static inline void netif_addr_unlock(struct net_device *dev)
+{
+	spin_unlock(&dev->addr_list_lock);
+}
+
+static inline void netif_addr_unlock_bh(struct net_device *dev)
+{
+	spin_unlock_bh(&dev->addr_list_lock);
 }
 
 /* These functions live elsewhere (drivers/net/net_init.c, but related) */
@@ -1389,15 +1658,20 @@ extern void		dev_set_rx_mode(struct net_device *dev);
 extern void		__dev_set_rx_mode(struct net_device *dev);
 extern int		dev_unicast_delete(struct net_device *dev, void *addr, int alen);
 extern int		dev_unicast_add(struct net_device *dev, void *addr, int alen);
+extern int		dev_unicast_sync(struct net_device *to, struct net_device *from);
+extern void		dev_unicast_unsync(struct net_device *to, struct net_device *from);
 extern int		dev_mc_delete(struct net_device *dev, void *addr, int alen, int all);
 extern int		dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
 extern int		dev_mc_sync(struct net_device *to, struct net_device *from);
 extern void		dev_mc_unsync(struct net_device *to, struct net_device *from);
 extern int 		__dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all);
 extern int		__dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly);
-extern void		dev_set_promiscuity(struct net_device *dev, int inc);
-extern void		dev_set_allmulti(struct net_device *dev, int inc);
+extern int		__dev_addr_sync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
+extern void		__dev_addr_unsync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
+extern int		dev_set_promiscuity(struct net_device *dev, int inc);
+extern int		dev_set_allmulti(struct net_device *dev, int inc);
 extern void		netdev_state_change(struct net_device *dev);
+extern void		netdev_bonding_change(struct net_device *dev);
 extern void		netdev_features_change(struct net_device *dev);
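Note that dev_set_promiscuity() and dev_set_allmulti() now return an error code and maintain a reference count internally, so callers pass matched +1/-1 increments and should check the result. For example:

	int err;

	err = dev_set_promiscuity(dev, 1);	/* take a promiscuity reference */
	if (err < 0)
		return err;
	/* ... and when done with it: */
	dev_set_promiscuity(dev, -1);
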
 /* Load a device via the kmod */
 extern void		dev_load(struct net *net, const char *name);
@@ -1424,9 +1698,16 @@ extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
 extern void dev_seq_stop(struct seq_file *seq, void *v);
 #endif
 
+extern int netdev_class_create_file(struct class_attribute *class_attr);
+extern void netdev_class_remove_file(struct class_attribute *class_attr);
+
+extern char *netdev_drivername(const struct net_device *dev, char *buffer, int len);
+
 extern void linkwatch_run_queue(void);
 
-extern int netdev_compute_features(unsigned long all, unsigned long one);
+unsigned long netdev_increment_features(unsigned long all, unsigned long one,
+					unsigned long mask);
+unsigned long netdev_fix_features(unsigned long features, const char *name);
 
 static inline int net_gso_ok(int features, int gso_type)
 {
@@ -1446,6 +1727,12 @@ static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
 		unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
 }
 
+static inline void netif_set_gso_max_size(struct net_device *dev,
+					  unsigned int size)
+{
+	dev->gso_max_size = size;
+}
+
 /* On bonding slaves other than the currently active slave, suppress
  * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
  * ARP on active-backup slaves with arp_validate enabled.