/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>

#include "net-sysfs.h"
/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */
/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);

EXPORT_SYMBOL(dev_base_lock);

#define NETDEV_HASHBITS	8
#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
}
/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail(&dev->dev_list, &net->dev_base_head);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del(&dev->dev_list);
	hlist_del(&dev->name_hlist);
	hlist_del(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}
/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);
#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
	 ARPHRD_PHONET_PIPE, ARPHRD_VOID, ARPHRD_NONE};

static const char *netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
	 "_xmit_PHONET_PIPE", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif
/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if a protocol handler that mangles packets
 *	were first on the list, it could not tell that the packet is
 *	cloned and should be copied-on-write; it would change the packet
 *	in place and subsequent readers would get a broken packet.
 */
/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}
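/*
 * Usage sketch (illustrative only, not part of the original file): a
 * minimal tap that sees every received frame.  The handler and variable
 * names here are hypothetical; each tap gets its own reference to the
 * skb and must free it.
 *
 *	static int example_rcv(struct sk_buff *skb, struct net_device *dev,
 *			       struct packet_type *pt,
 *			       struct net_device *orig_dev)
 *	{
 *		printk(KERN_DEBUG "%s: saw %u byte packet\n",
 *		       dev->name, skb->len);
 *		kfree_skb(skb);
 *		return 0;
 *	}
 *
 *	static struct packet_type example_pt __read_mostly = {
 *		.type = __constant_htons(ETH_P_ALL),
 *		.func = example_rcv,
 *	};
 *
 *	dev_add_pack(&example_pt);
 *	...
 *	dev_remove_pack(&example_pt);	// sleeps, see below
 */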
/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}
/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine to
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}
/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}
/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
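/*
 * Example (illustrative, inferred from the parsing above; the specific
 * values are made up): booting with
 *
 *	netdev=5,0x300,0xd0000,0xd8000,eth0
 *
 * records irq 5, I/O base 0x300 and the given memory window for "eth0".
 * get_options() consumes the leading comma-separated integers and the
 * remaining token is taken as the device name.
 */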
/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/
/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_name_hash(net, name)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;
	}
	return NULL;
}
/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}
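/*
 * Usage sketch (illustrative, not from the original file): the caller
 * owns a reference on success and must drop it with dev_put().
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *
 *	if (dev) {
 *		// ... use dev ...
 *		dev_put(dev);
 *	}
 */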
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_index_hash(net, ifindex)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, index_hlist);
		if (dev->ifindex == ifindex)
			return dev;
	}
	return NULL;
}
/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(net, ifindex);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}
/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}

EXPORT_SYMBOL(dev_getbyhwaddr);
struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}

EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(net, type);
	if (dev)
		dev_hold(dev);
	rtnl_unlock();
	return dev;
}

EXPORT_SYMBOL(dev_getfirstbyhwtype);
/**
 *	dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	read_lock(&dev_base_lock);
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	read_unlock(&dev_base_lock);
	return ret;
}
/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}
/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
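/*
 * Usage sketch (illustrative, not from the original file): a driver
 * usually lets the core pick the unit number,
 *
 *	if (dev_alloc_name(dev, "eth%d") < 0)
 *		goto fail;	// hypothetical error path
 *
 * after which dev->name holds the first free "ethN" in the namespace.
 */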
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d".
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!dev_valid_name(newname))
		return -EINVAL;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	if (strchr(newname, '%')) {
		err = dev_alloc_name(dev, newname);
		if (err < 0)
			return err;
	}
	else if (__dev_get_by_name(net, newname))
		return -EEXIST;
	else
		strlcpy(dev->name, newname, IFNAMSIZ);

rollback:
	/* For now only devices in the initial network namespace
	 * are in sysfs.
	 */
	if (net == &init_net) {
		ret = device_rename(&dev->dev, dev->name);
		if (ret) {
			memcpy(dev->name, oldname, IFNAMSIZ);
			return ret;
		}
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		if (err) {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		} else {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		}
	}

	return err;
}
/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	dev->ifalias = krealloc(dev->ifalias, len+1, GFP_KERNEL);
	if (!dev->ifalias)
		return -ENOMEM;

	strlcpy(dev->ifalias, alias, len+1);
	return 0;
}
/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}

void netdev_bonding_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);
/**
 *	dev_load 	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	read_unlock(&dev_base_lock);

	if (!dev && capable(CAP_SYS_MODULE))
		request_module("%s", name);
}
/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret = 0;

	ASSERT_RTNL();

	/*
	 *	Is it already up?
	 */

	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	/*
	 *	If it went open OK then:
	 */

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Enable NET_DMA
		 */
		net_dmaengine_get();

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 *	... and announce new interface.
		 */
		call_netdevice_notifiers(NETDEV_UP, dev);
	}

	return ret;
}
/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	ASSERT_RTNL();

	might_sleep();

	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for the device's death while it is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (ops->ndo_stop)
		ops->ndo_stop(dev);

	/*
	 *	Device is now down.
	 */

	dev->flags &= ~IFF_UP;

	/*
	 * Tell people we are down
	 */
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	/*
	 *	Shutdown NET_DMA
	 */
	net_dmaengine_put();

	return 0;
}
/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);
static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */
/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered all registration and up events are replayed
 *	to the new notifier to allow device to have a race free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
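/*
 * Usage sketch (illustrative only): a subsystem watching for interfaces
 * coming and going.  The callback and block names are hypothetical.
 *
 *	static int example_netdev_event(struct notifier_block *nb,
 *					unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			printk(KERN_INFO "%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&example_nb);
 *	...
 *	unregister_netdevice_notifier(&example_nb);
 */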
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}
/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}
/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}

static inline void net_timestamp(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}
/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;

#ifdef CONFIG_NET_CLS_ACT
	if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
		net_timestamp(skb);
#else
	net_timestamp(skb);
#endif

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			   set by sender, so that the second statement is
			   just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       skb2->protocol, dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype, skb->dev);
		}
	}
	rcu_read_unlock();
}
static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = sd->output_queue;
	sd->output_queue = q;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);
void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);
/**
 *	netif_device_detach - mark device as removed
 *	@dev: network device
 *
 *	Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 *	netif_device_attach - mark device as attached
 *	@dev: network device
 *
 *	Mark device as attached to the system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
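/*
 * Usage sketch (illustrative): a driver's power-management hooks would
 * typically bracket a hardware power transition with this pair,
 *
 *	netif_device_detach(dev);	// in ->suspend()
 *	... power the hardware down and back up ...
 *	netif_device_attach(dev);	// in ->resume()
 *
 * so the stack stops queueing while the hardware is unreachable and
 * restarts the watchdog and queues afterwards.
 */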
static bool can_checksum_protocol(unsigned long features, __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_IP_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_IPV6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)) ||
		((features & NETIF_F_FCOE_CRC) &&
		 protocol == htons(ETH_P_FCOE)));
}

static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
{
	if (can_checksum_protocol(dev->features, skb->protocol))
		return true;

	if (skb->protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		if (can_checksum_protocol(dev->features & dev->vlan_features,
					  veh->h_vlan_encapsulated_proto))
			return true;
	}

	return false;
}
/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		/* Let GSO fix up the checksum. */
		goto out_set_summed;
	}

	offset = skb->csum_start - skb_headroom(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
/**
 *	skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	int err;

	skb_reset_mac_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;
	__skb_pull(skb, skb->mac_len);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		struct net_device *dev = skb->dev;
		struct ethtool_drvinfo info = {};

		if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
			dev->ethtool_ops->get_drvinfo(dev, &info);

		WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
			"ip_summed=%d\n",
		     info.driver, dev ? dev->features : 0L,
		     skb->sk ? skb->sk->sk_route_caps : 0L,
		     skb->len, skb->data_len, skb->ip_summed);

		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				err = ptype->gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
			}
			segs = ptype->gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}

EXPORT_SYMBOL(skb_gso_segment);
/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		printk(KERN_ERR "%s: hw csum failure.\n",
			dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif
/* Actually, we should eliminate this check as soon as we know, that:
 * 1. IOMMU is present and allows to map all the memory.
 * 2. No high memory really exists on this machine.
 */

static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;

	if (dev->features & NETIF_F_HIGHDMA)
		return 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		if (PageHighMem(skb_shinfo(skb)->frags[i].page))
			return 1;

#endif
	return 0;
}
struct dev_gso_cb {
	void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
	struct dev_gso_cb *cb;

	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		kfree_skb(nskb);
	} while (skb->next);

	cb = DEV_GSO_CB(skb);
	if (cb->destructor)
		cb->destructor(skb);
}
/**
 *	dev_gso_segment - Perform emulated hardware segmentation on skb.
 *	@skb: buffer to segment
 *
 *	This function segments the given skb and stores the list of segments
 *	in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct sk_buff *segs;
	int features = dev->features & ~(illegal_highdma(dev, skb) ?
					 NETIF_F_SG : 0);

	segs = skb_gso_segment(skb, features);

	/* Verifying header integrity only. */
	if (!segs)
		return 0;

	if (IS_ERR(segs))
		return PTR_ERR(segs);

	skb->next = segs;
	DEV_GSO_CB(skb)->destructor = skb->destructor;
	skb->destructor = dev_gso_skb_destructor;

	return 0;
}
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
			struct netdev_queue *txq)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int rc;

	if (likely(!skb->next)) {
		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(skb, dev);

		if (netif_needs_gso(dev, skb)) {
			if (unlikely(dev_gso_segment(skb)))
				goto out_kfree_skb;
			if (skb->next)
				goto gso;
		}

		rc = ops->ndo_start_xmit(skb, dev);
		/*
		 * TODO: if skb_orphan() was called by
		 * dev->hard_start_xmit() (for example, the unmodified
		 * igb driver does that; bnx2 doesn't), then
		 * skb_tx_software_timestamp() will be unable to send
		 * back the time stamp.
		 *
		 * How can this be prevented? Always create another
		 * reference to the socket before calling
		 * dev->hard_start_xmit()? Prevent that skb_orphan()
		 * does anything in dev->hard_start_xmit() by clearing
		 * the skb destructor before the call and restoring it
		 * afterwards, then doing the skb_orphan() ourselves?
		 */
		return rc;
	}

gso:
	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		rc = ops->ndo_start_xmit(nskb, dev);
		if (unlikely(rc)) {
			nskb->next = skb->next;
			skb->next = nskb;
			return rc;
		}
		if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
			return NETDEV_TX_BUSY;
	} while (skb->next);

	skb->destructor = DEV_GSO_CB(skb)->destructor;

out_kfree_skb:
	kfree_skb(skb);
	return 0;
}
static u32 skb_tx_hashrnd;

u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
{
	u32 hash;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= dev->real_num_tx_queues))
			hash -= dev->real_num_tx_queues;
		return hash;
	}

	if (skb->sk && skb->sk->sk_hash)
		hash = skb->sk->sk_hash;
	else
		hash = skb->protocol;

	hash = jhash_1word(hash, skb_tx_hashrnd);

	return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
}
EXPORT_SYMBOL(skb_tx_hash);
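/*
 * Note (added explanation): the final line maps a 32-bit hash uniformly
 * onto [0, real_num_tx_queues) without a modulo: (hash * n) / 2^32.
 * For example, with n = 4 queues, hash 0x80000000 lands on queue
 * (0x80000000ULL * 4) >> 32 == 2.
 */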
static struct netdev_queue *dev_pick_tx(struct net_device *dev,
					struct sk_buff *skb)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	u16 queue_index = 0;

	if (ops->ndo_select_queue)
		queue_index = ops->ndo_select_queue(dev, skb);
	else if (dev->real_num_tx_queues > 1)
		queue_index = skb_tx_hash(dev, skb);

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}
/**
 *	dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *      I notice this method can also return errors from the queue disciplines,
 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
 *      be positive.
 *
 *      Regardless of the return value, the skb is consumed, so it is currently
 *      difficult to retry a send to this method.  (You can bump the ref count
 *      before sending to hold a reference for retry if you are careful.)
 *
 *      When calling this method, interrupts MUST be enabled.  This is because
 *      the BH enable code must have IRQs enabled so that it will not deadlock.
 */
int dev_queue_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;

	/* GSO will handle the following emulations directly. */
	if (netif_needs_gso(dev, skb))
		goto gso;

	if (skb_shinfo(skb)->frag_list &&
	    !(dev->features & NETIF_F_FRAGLIST) &&
	    __skb_linearize(skb))
		goto out_kfree_skb;

	/* Fragmented skb is linearized if device does not support SG,
	 * or if at least one of fragments is in highmem and device
	 * does not support DMA from it.
	 */
	if (skb_shinfo(skb)->nr_frags &&
	    (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
	    __skb_linearize(skb))
		goto out_kfree_skb;

	/* If packet is not checksummed and device does not support
	 * checksumming for this protocol, complete checksumming here.
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		skb_set_transport_header(skb, skb->csum_start -
					      skb_headroom(skb));
		if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
			goto out_kfree_skb;
	}

gso:
	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	txq = dev_pick_tx(dev, skb);
	q = rcu_dereference(txq->qdisc);

#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
	if (q->enqueue) {
		spinlock_t *root_lock = qdisc_lock(q);

		spin_lock(root_lock);

		if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
			kfree_skb(skb);
			rc = NET_XMIT_DROP;
		} else {
			rc = qdisc_enqueue_root(skb, q);
			qdisc_run(q);
		}
		spin_unlock(root_lock);

		goto out;
	}

	/* The device has no queue. Common case for software devices:
	   loopback, all the sorts of tunnels...

	   Really, it is unlikely that netif_tx_lock protection is necessary
	   here.  (f.e. loopback and IP tunnels are clean ignoring statistics
	   counters.)
	   However, it is possible, that they rely on protection
	   made by us here.

	   Check this and shot the lock. It is not prone to deadlocks.
	   Either shot noqueue qdisc, it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (txq->xmit_lock_owner != cpu) {

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_tx_queue_stopped(txq)) {
				rc = 0;
				if (!dev_hard_start_xmit(skb, dev, txq)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			if (net_ratelimit())
				printk(KERN_CRIT "Virtual device %s asks to "
				       "queue packet!\n", dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately */
			if (net_ratelimit())
				printk(KERN_CRIT "Dead loop on virtual device "
				       "%s, fix it urgently!\n", dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

out_kfree_skb:
	kfree_skb(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}
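/*
 * Usage sketch (illustrative only): a caller builds the frame, points it
 * at a device and hands it off; per the comment above, the skb is
 * consumed even on error.
 *
 *	skb->dev = dev;
 *	skb->protocol = htons(ETH_P_IP);	// example protocol
 *	rc = dev_queue_xmit(skb);		// do not touch skb afterwards
 */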
/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog __read_mostly = 1000;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;		/* old backlog weight */

DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped)
 */

int netif_rx(struct sk_buff *skb)
{
	struct softnet_data *queue;
	unsigned long flags;

	/* if netpoll wants it, pretend we never saw it */
	if (netpoll_rx(skb))
		return NET_RX_DROP;

	if (!skb->tstamp.tv64)
		net_timestamp(skb);

	/*
	 * The code is rearranged so that the path is shortest when the
	 * CPU is congested, but it still works.
	 */
	local_irq_save(flags);
	queue = &__get_cpu_var(softnet_data);

	__get_cpu_var(netdev_rx_stat).total++;
	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
		if (queue->input_pkt_queue.qlen) {
enqueue:
			__skb_queue_tail(&queue->input_pkt_queue, skb);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		napi_schedule(&queue->backlog);
		goto enqueue;
	}

	__get_cpu_var(netdev_rx_stat).dropped++;
	local_irq_restore(flags);

	kfree_skb(skb);
	return NET_RX_DROP;
}
int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	preempt_disable();
	err = netif_rx(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}

EXPORT_SYMBOL(netif_rx_ni);
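/*
 * Usage sketch (illustrative only): a non-NAPI driver's interrupt
 * handler pushes each received frame up with netif_rx(); netif_rx_ni()
 * is the variant for process context.
 *
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);		// from an ISR
 */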
static void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;
			clist = clist->next;

			WARN_ON(atomic_read(&skb->users));
			__kfree_skb(skb);
		}
	}

	if (sd->output_queue) {
		struct Qdisc *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		local_irq_enable();

		while (head) {
			struct Qdisc *q = head;
			spinlock_t *root_lock;

			head = head->next_sched;

			root_lock = qdisc_lock(q);
			if (spin_trylock(root_lock)) {
				smp_mb__before_clear_bit();
				clear_bit(__QDISC_STATE_SCHED,
					  &q->state);
				qdisc_run(q);
				spin_unlock(root_lock);
			} else {
				if (!test_bit(__QDISC_STATE_DEACTIVATED,
					      &q->state)) {
					__netif_reschedule(q);
				} else {
					smp_mb__before_clear_bit();
					clear_bit(__QDISC_STATE_SCHED,
						  &q->state);
				}
			}
		}
	}
}
static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}
#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
/* These hooks defined here for ATM */
struct net_bridge;
struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
						unsigned char *addr);
void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;

/*
 * If bridge module is loaded call bridging hook.
 *  returns NULL if packet was consumed.
 */
struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
					struct sk_buff *skb) __read_mostly;
static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
					    struct packet_type **pt_prev, int *ret,
					    struct net_device *orig_dev)
{
	struct net_bridge_port *port;

	if (skb->pkt_type == PACKET_LOOPBACK ||
	    (port = rcu_dereference(skb->dev->br_port)) == NULL)
		return skb;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	return br_handle_frame_hook(port, skb);
}
#else
#define handle_bridge(skb, pt_prev, ret, orig_dev)	(skb)
#endif
#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);

static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
					     struct packet_type **pt_prev,
					     int *ret,
					     struct net_device *orig_dev)
{
	if (skb->dev->macvlan_port == NULL)
		return skb;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}
	return macvlan_handle_frame_hook(skb);
}
#else
#define handle_macvlan(skb, pt_prev, ret, orig_dev)	(skb)
#endif
#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions
 * a compare and 2 stores extra right now if we don't have it on
 * but have CONFIG_NET_CLS_ACT
 * NOTE: This doesn't stop any functionality; if you don't have
 * the ingress scheduler, you just can't add policies on ingress.
 */
static int ing_filter(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	u32 ttl = G_TC_RTTL(skb->tc_verd);
	struct netdev_queue *rxq;
	int result = TC_ACT_OK;
	struct Qdisc *q;

	if (MAX_RED_LOOP < ttl++) {
		printk(KERN_WARNING
		       "Redir loop detected Dropping packet (%d->%d)\n",
		       skb->iif, dev->ifindex);
		return TC_ACT_SHOT;
	}

	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);

	rxq = &dev->rx_queue;

	q = rxq->qdisc;
	if (q != &noop_qdisc) {
		spin_lock(qdisc_lock(q));
		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
			result = qdisc_enqueue_root(skb, q);
		spin_unlock(qdisc_lock(q));
	}

	return result;
}

static inline struct sk_buff *handle_ing(struct sk_buff *skb,
					 struct packet_type **pt_prev,
					 int *ret, struct net_device *orig_dev)
{
	if (skb->dev->rx_queue.qdisc == &noop_qdisc)
		goto out;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	} else {
		/* Huh? Why does turning on AF_PACKET affect this? */
		skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
	}

	switch (ing_filter(skb)) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
		kfree_skb(skb);
		return NULL;
	}

out:
	skb->tc_verd = 0;
	return skb;
}
#endif
/**
 *	netif_nit_deliver - deliver received packets to network taps
 *	@skb: buffer
 *
 *	This function is used to deliver incoming packets to network
 *	taps. It should be used when the normal netif_receive_skb path
 *	is bypassed, for example because of VLAN acceleration.
 */
void netif_nit_deliver(struct sk_buff *skb)
{
	struct packet_type *ptype;

	if (list_empty(&ptype_all))
		return;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (!ptype->dev || ptype->dev == skb->dev)
			deliver_skb(skb, ptype, skb->dev);
	}
	rcu_read_unlock();
}
/**
 *	netif_receive_skb - process receive buffer from network
 *	@skb: buffer to process
 *
 *	netif_receive_skb() is the main receive data processing function.
 *	It always succeeds. The buffer may be dropped during processing
 *	for congestion control or by the protocol layers.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
	struct packet_type *ptype, *pt_prev;
	struct net_device *orig_dev;
	struct net_device *null_or_orig;
	int ret = NET_RX_DROP;
	__be16 type;

	if (skb->vlan_tci && vlan_hwaccel_do_receive(skb))
		return NET_RX_SUCCESS;

	/* if we've gotten here through NAPI, check netpoll */
	if (netpoll_receive_skb(skb))
		return NET_RX_DROP;

	if (!skb->tstamp.tv64)
		net_timestamp(skb);

	if (!skb->iif)
		skb->iif = skb->dev->ifindex;

	null_or_orig = NULL;
	orig_dev = skb->dev;
	if (orig_dev->master) {
		if (skb_bond_should_drop(skb))
			null_or_orig = orig_dev; /* deliver only exact match */
		else
			skb->dev = orig_dev->master;
	}

	__get_cpu_var(netdev_rx_stat).total++;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;

	pt_prev = NULL;

	rcu_read_lock();

#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		goto ncls;
	}
#endif

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
		    ptype->dev == orig_dev) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

#ifdef CONFIG_NET_CLS_ACT
	skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto out;
ncls:
#endif

	skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto out;
	skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto out;

	type = skb->protocol;
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type &&
		    (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
		     ptype->dev == orig_dev)) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

	if (pt_prev) {
		ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
	} else {
		kfree_skb(skb);
		/* Jamal, now you will not be able to escape explaining
		 * to me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

out:
	rcu_read_unlock();
	return ret;
}
/* Network device is going away, flush any packets still pending  */
static void flush_backlog(void *arg)
{
	struct net_device *dev = arg;
	struct softnet_data *queue = &__get_cpu_var(softnet_data);
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
		if (skb->dev == dev) {
			__skb_unlink(skb, &queue->input_pkt_queue);
			kfree_skb(skb);
		}
}
static int napi_gro_complete(struct sk_buff *skb)
{
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
	int err = -ENOENT;

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || ptype->dev || !ptype->gro_complete)
			continue;

		err = ptype->gro_complete(skb);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

out:
	return netif_receive_skb(skb);
}
void napi_gro_flush(struct napi_struct *napi)
{
	struct sk_buff *skb, *next;

	for (skb = napi->gro_list; skb; skb = next) {
		next = skb->next;
		skb->next = NULL;
		napi_gro_complete(skb);
	}

	napi->gro_count = 0;
	napi->gro_list = NULL;
}
EXPORT_SYMBOL(napi_gro_flush);
void *skb_gro_header(struct sk_buff *skb, unsigned int hlen)
{
	unsigned int offset = skb_gro_offset(skb);

	hlen += offset;
	if (unlikely(skb_headlen(skb) ||
		     skb_shinfo(skb)->frags[0].size < hlen ||
		     PageHighMem(skb_shinfo(skb)->frags[0].page)))
		return pskb_may_pull(skb, hlen) ? skb->data + offset : NULL;

	return page_address(skb_shinfo(skb)->frags[0].page) +
	       skb_shinfo(skb)->frags[0].page_offset + offset;
}
EXPORT_SYMBOL(skb_gro_header);
int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
	int same_flow;
	int mac_len;
	int ret;

	if (!(skb->dev->features & NETIF_F_GRO))
		goto normal;

	if (skb_is_gso(skb) || skb_shinfo(skb)->frag_list)
		goto normal;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || ptype->dev || !ptype->gro_receive)
			continue;

		skb_set_network_header(skb, skb_gro_offset(skb));
		mac_len = skb->network_header - skb->mac_header;
		skb->mac_len = mac_len;
		NAPI_GRO_CB(skb)->same_flow = 0;
		NAPI_GRO_CB(skb)->flush = 0;
		NAPI_GRO_CB(skb)->free = 0;

		pp = ptype->gro_receive(&napi->gro_list, skb);
		break;
	}
	rcu_read_unlock();

	if (&ptype->list == head)
		goto normal;

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		struct sk_buff *nskb = *pp;

		*pp = nskb->next;
		nskb->next = NULL;
		napi_gro_complete(nskb);
		napi->gro_count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
		goto normal;

	napi->gro_count++;
	NAPI_GRO_CB(skb)->count = 1;
	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	skb->next = napi->gro_list;
	napi->gro_list = skb;
	ret = GRO_HELD;

pull:
	if (unlikely(!pskb_may_pull(skb, skb_gro_offset(skb)))) {
		if (napi->gro_list == skb)
			napi->gro_list = skb->next;
		ret = GRO_DROP;
	}

ok:
	return ret;

normal:
	ret = GRO_NORMAL;
	goto pull;
}
EXPORT_SYMBOL(dev_gro_receive);
2473 static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2477 if (netpoll_rx_on(skb))
2480 for (p = napi->gro_list; p; p = p->next) {
2481 NAPI_GRO_CB(p)->same_flow = (p->dev == skb->dev)
2482 && !compare_ether_header(skb_mac_header(p),
2483 skb_gro_mac_header(skb));
2484 NAPI_GRO_CB(p)->flush = 0;
2487 return dev_gro_receive(napi, skb);
2490 int napi_skb_finish(int ret, struct sk_buff *skb)
2492 int err = NET_RX_SUCCESS;
2496 return netif_receive_skb(skb);
2502 case GRO_MERGED_FREE:
2509 EXPORT_SYMBOL(napi_skb_finish);
2511 int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2513 skb_gro_reset_offset(skb);
2515 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
2517 EXPORT_SYMBOL(napi_gro_receive);
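/*
 * Illustrative sketch: how a NAPI driver feeds its RX path into GRO.
 * Instead of calling netif_receive_skb() directly, the driver's ->poll()
 * handler calls napi_gro_receive(), letting dev_gro_receive() above merge
 * compatible frames. mydrv_rx_frame() is a hypothetical helper.
 */
static int mydrv_rx_frame(struct napi_struct *napi, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, napi->dev);
	return napi_gro_receive(napi, skb);
}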
2519 void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
2521 __skb_pull(skb, skb_headlen(skb));
2522 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
2526 EXPORT_SYMBOL(napi_reuse_skb);
2528 struct sk_buff *napi_get_frags(struct napi_struct *napi)
2530 struct net_device *dev = napi->dev;
2531 struct sk_buff *skb = napi->skb;
2534 skb = netdev_alloc_skb(dev, GRO_MAX_HEAD + NET_IP_ALIGN);
2538 skb_reserve(skb, NET_IP_ALIGN);
2546 EXPORT_SYMBOL(napi_get_frags);
2548 int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret)
2550 int err = NET_RX_SUCCESS;
2555 skb->protocol = eth_type_trans(skb, napi->dev);
2557 if (ret == GRO_NORMAL)
2558 return netif_receive_skb(skb);
2560 skb_gro_pull(skb, -ETH_HLEN);
2567 case GRO_MERGED_FREE:
2568 napi_reuse_skb(napi, skb);
2574 EXPORT_SYMBOL(napi_frags_finish);
2576 struct sk_buff *napi_frags_skb(struct napi_struct *napi)
2578 struct sk_buff *skb = napi->skb;
2583 skb_reset_mac_header(skb);
2584 skb_gro_reset_offset(skb);
2586 eth = skb_gro_header(skb, sizeof(*eth));
2588 napi_reuse_skb(napi, skb);
2593 skb_gro_pull(skb, sizeof(*eth));
2596 * This works because the only protocols we care about don't require
2597 * special handling. We'll fix it up properly at the end.
2599 skb->protocol = eth->h_proto;
2604 EXPORT_SYMBOL(napi_frags_skb);
2606 int napi_gro_frags(struct napi_struct *napi)
2608 struct sk_buff *skb = napi_frags_skb(napi);
2613 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
2615 EXPORT_SYMBOL(napi_gro_frags);
2617 static int process_backlog(struct napi_struct *napi, int quota)
2620 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2621 unsigned long start_time = jiffies;
2623 napi->weight = weight_p;
2625 struct sk_buff *skb;
2627 local_irq_disable();
2628 skb = __skb_dequeue(&queue->input_pkt_queue);
2630 __napi_complete(napi);
2636 netif_receive_skb(skb);
2637 } while (++work < quota && jiffies == start_time);
2643 * __napi_schedule - schedule for receive
2644 * @n: entry to schedule
2646 * The entry's receive function will be scheduled to run
2648 void __napi_schedule(struct napi_struct *n)
2650 unsigned long flags;
2652 local_irq_save(flags);
2653 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
2654 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2655 local_irq_restore(flags);
2657 EXPORT_SYMBOL(__napi_schedule);
2659 void __napi_complete(struct napi_struct *n)
2661 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
2662 BUG_ON(n->gro_list);
2664 list_del(&n->poll_list);
2665 smp_mb__before_clear_bit();
2666 clear_bit(NAPI_STATE_SCHED, &n->state);
2668 EXPORT_SYMBOL(__napi_complete);
2670 void napi_complete(struct napi_struct *n)
2672 unsigned long flags;
2675 * don't let napi dequeue from the cpu poll list
2676 * just in case it's running on a different cpu
2678 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
2682 local_irq_save(flags);
2684 local_irq_restore(flags);
2686 EXPORT_SYMBOL(napi_complete);
2688 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2689 int (*poll)(struct napi_struct *, int), int weight)
2691 INIT_LIST_HEAD(&napi->poll_list);
2692 napi->gro_count = 0;
2693 napi->gro_list = NULL;
2696 napi->weight = weight;
2697 list_add(&napi->dev_list, &dev->napi_list);
2699 #ifdef CONFIG_NETPOLL
2700 spin_lock_init(&napi->poll_lock);
2701 napi->poll_owner = -1;
2703 set_bit(NAPI_STATE_SCHED, &napi->state);
2705 EXPORT_SYMBOL(netif_napi_add);
2707 void netif_napi_del(struct napi_struct *napi)
2709 struct sk_buff *skb, *next;
2711 list_del_init(&napi->dev_list);
2712 napi_free_frags(napi);
2714 for (skb = napi->gro_list; skb; skb = next) {
2720 napi->gro_list = NULL;
2721 napi->gro_count = 0;
2723 EXPORT_SYMBOL(netif_napi_del);
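/*
 * Illustrative sketch of the canonical driver-side NAPI pattern built on
 * netif_napi_add()/napi_schedule()/napi_complete() above. All mydrv_*
 * names and the priv layout are hypothetical.
 */
struct mydrv_priv {
	struct napi_struct napi;
};

/* hypothetical RX cleanup; a real driver would drain its RX ring here */
static int mydrv_clean_rx(struct napi_struct *napi, int budget)
{
	return 0;
}

static irqreturn_t mydrv_interrupt(int irq, void *data)
{
	struct mydrv_priv *priv = data;

	/* mask the device's RX interrupt here, then defer work to softirq */
	napi_schedule(&priv->napi);
	return IRQ_HANDLED;
}

static int mydrv_poll(struct napi_struct *napi, int budget)
{
	int work_done = mydrv_clean_rx(napi, budget);

	if (work_done < budget) {
		napi_complete(napi);
		/* unmask the device's RX interrupt here */
	}
	return work_done;
}

/* at probe time: netif_napi_add(netdev, &priv->napi, mydrv_poll, 64); */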
2726 static void net_rx_action(struct softirq_action *h)
2728 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
2729 unsigned long time_limit = jiffies + 2;
2730 int budget = netdev_budget;
2733 local_irq_disable();
2735 while (!list_empty(list)) {
2736 struct napi_struct *n;
2739 /* If the softirq window is exhausted then punt.
2740 * Allow this to run for 2 jiffies, which allows
2741 * an average latency of 1.5/HZ.
2743 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
2748 /* Even though interrupts have been re-enabled, this
2749 * access is safe because interrupts can only add new
2750 * entries to the tail of this list, and only ->poll()
2751 * calls can remove this head entry from the list.
2753 n = list_entry(list->next, struct napi_struct, poll_list);
2755 have = netpoll_poll_lock(n);
2759 /* This NAPI_STATE_SCHED test is for avoiding a race
2760 * with netpoll's poll_napi(). Only the entity which
2761 * obtains the lock and sees NAPI_STATE_SCHED set will
2762 * actually make the ->poll() call. Therefore we avoid
2763 * accidentally calling ->poll() when NAPI is not scheduled.
2766 if (test_bit(NAPI_STATE_SCHED, &n->state))
2767 work = n->poll(n, weight);
2769 WARN_ON_ONCE(work > weight);
2773 local_irq_disable();
2775 /* Drivers must not modify the NAPI state if they
2776 * consume the entire weight. In such cases this code
2777 * still "owns" the NAPI instance and therefore can
2778 * move the instance around on the list at will.
2780 if (unlikely(work == weight)) {
2781 if (unlikely(napi_disable_pending(n)))
2784 list_move_tail(&n->poll_list, list);
2787 netpoll_poll_unlock(have);
2792 #ifdef CONFIG_NET_DMA
2794 * There may not be any more sk_buffs coming right now, so push
2795 * any pending DMA copies to hardware
2797 dma_issue_pending_all();
2803 __get_cpu_var(netdev_rx_stat).time_squeeze++;
2804 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2808 static gifconf_func_t *gifconf_list[NPROTO];
2811 * register_gifconf - register a SIOCGIF handler
2812 * @family: Address family
2813 * @gifconf: Function handler
2815 * Register protocol dependent address dumping routines. The handler
2816 * that is passed must not be freed or reused until it has been replaced
2817 * by another handler.
2819 int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
2821 if (family >= NPROTO)
2823 gifconf_list[family] = gifconf;
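/*
 * Illustrative sketch: a protocol registers its dumper so SIOCGIFCONF
 * (dev_ifconf() below) can emit per-device addresses for that family;
 * IPv4 does this from its init path. myproto_gifconf is hypothetical.
 * When bufptr is NULL the handler only reports how much space it needs.
 */
static int myproto_gifconf(struct net_device *dev, char __user *bufptr,
			   int len)
{
	/* write up to len bytes of struct ifreq entries to bufptr and
	 * return the number of bytes produced (or needed if !bufptr) */
	return 0;
}

/* from the protocol's init: register_gifconf(PF_INET, myproto_gifconf); */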
2829 * Map an interface index to its name (SIOCGIFNAME)
2833 * We need this ioctl for efficient implementation of the
2834 * if_indextoname() function required by the IPv6 API. Without
2835 * it, we would have to search all the interfaces to find a match.
2839 static int dev_ifname(struct net *net, struct ifreq __user *arg)
2841 struct net_device *dev;
2845 * Fetch the caller's info block.
2848 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2851 read_lock(&dev_base_lock);
2852 dev = __dev_get_by_index(net, ifr.ifr_ifindex);
2854 read_unlock(&dev_base_lock);
2858 strcpy(ifr.ifr_name, dev->name);
2859 read_unlock(&dev_base_lock);
2861 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
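/*
 * Illustrative userspace sketch of the ioctl served above (application
 * code, shown for context only):
 *
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	ifr.ifr_ifindex = 2;			// example ifindex
 *	if (ioctl(fd, SIOCGIFNAME, &ifr) == 0)
 *		printf("index 2 is %s\n", ifr.ifr_name);
 */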
2867 * Perform a SIOCGIFCONF call. This structure will change
2868 * size eventually, and there is nothing I can do about it.
2869 * Thus we will need a 'compatibility mode'.
2872 static int dev_ifconf(struct net *net, char __user *arg)
2875 struct net_device *dev;
2882 * Fetch the caller's info block.
2885 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
2892 * Loop over the interfaces, and write an info block for each.
2896 for_each_netdev(net, dev) {
2897 for (i = 0; i < NPROTO; i++) {
2898 if (gifconf_list[i]) {
2901 done = gifconf_list[i](dev, NULL, 0);
2903 done = gifconf_list[i](dev, pos + total,
2913 * All done. Write the updated control block back to the caller.
2915 ifc.ifc_len = total;
2918 * Both BSD and Solaris return 0 here, so we do too.
2920 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
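/*
 * Illustrative userspace sketch of SIOCGIFCONF as handled above
 * (application code, for context only). Passing a NULL buffer first
 * asks only for the required length, as the gifconf loop above shows:
 *
 *	struct ifconf ifc = { .ifc_buf = NULL };
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	ioctl(fd, SIOCGIFCONF, &ifc);		// ifc.ifc_len = bytes needed
 *	ifc.ifc_buf = malloc(ifc.ifc_len);
 *	ioctl(fd, SIOCGIFCONF, &ifc);		// fills struct ifreq entries
 */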
2923 #ifdef CONFIG_PROC_FS
2925 * This is invoked by the /proc filesystem handler to display a device
2928 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
2929 __acquires(dev_base_lock)
2931 struct net *net = seq_file_net(seq);
2933 struct net_device *dev;
2935 read_lock(&dev_base_lock);
2937 return SEQ_START_TOKEN;
2940 for_each_netdev(net, dev)
2947 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2949 struct net *net = seq_file_net(seq);
2951 return v == SEQ_START_TOKEN ?
2952 first_net_device(net) : next_net_device((struct net_device *)v);
2955 void dev_seq_stop(struct seq_file *seq, void *v)
2956 __releases(dev_base_lock)
2958 read_unlock(&dev_base_lock);
2961 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
2963 const struct net_device_stats *stats = dev_get_stats(dev);
2965 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
2966 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
2967 dev->name, stats->rx_bytes, stats->rx_packets,
2969 stats->rx_dropped + stats->rx_missed_errors,
2970 stats->rx_fifo_errors,
2971 stats->rx_length_errors + stats->rx_over_errors +
2972 stats->rx_crc_errors + stats->rx_frame_errors,
2973 stats->rx_compressed, stats->multicast,
2974 stats->tx_bytes, stats->tx_packets,
2975 stats->tx_errors, stats->tx_dropped,
2976 stats->tx_fifo_errors, stats->collisions,
2977 stats->tx_carrier_errors +
2978 stats->tx_aborted_errors +
2979 stats->tx_window_errors +
2980 stats->tx_heartbeat_errors,
2981 stats->tx_compressed);
2985 * Called from the PROCfs module. This now uses the new arbitrary sized
2986 * /proc/net interface to create /proc/net/dev
2988 static int dev_seq_show(struct seq_file *seq, void *v)
2990 if (v == SEQ_START_TOKEN)
2991 seq_puts(seq, "Inter-| Receive "
2993 " face |bytes packets errs drop fifo frame "
2994 "compressed multicast|bytes packets errs "
2995 "drop fifo colls carrier compressed\n");
2997 dev_seq_printf_stats(seq, v);
3001 static struct netif_rx_stats *softnet_get_online(loff_t *pos)
3003 struct netif_rx_stats *rc = NULL;
3005 while (*pos < nr_cpu_ids)
3006 if (cpu_online(*pos)) {
3007 rc = &per_cpu(netdev_rx_stat, *pos);
3014 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
3016 return softnet_get_online(pos);
3019 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3022 return softnet_get_online(pos);
3025 static void softnet_seq_stop(struct seq_file *seq, void *v)
3029 static int softnet_seq_show(struct seq_file *seq, void *v)
3031 struct netif_rx_stats *s = v;
3033 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
3034 s->total, s->dropped, s->time_squeeze, 0,
3035 0, 0, 0, 0, /* was fastroute */
3040 static const struct seq_operations dev_seq_ops = {
3041 .start = dev_seq_start,
3042 .next = dev_seq_next,
3043 .stop = dev_seq_stop,
3044 .show = dev_seq_show,
3047 static int dev_seq_open(struct inode *inode, struct file *file)
3049 return seq_open_net(inode, file, &dev_seq_ops,
3050 sizeof(struct seq_net_private));
3053 static const struct file_operations dev_seq_fops = {
3054 .owner = THIS_MODULE,
3055 .open = dev_seq_open,
3057 .llseek = seq_lseek,
3058 .release = seq_release_net,
3061 static const struct seq_operations softnet_seq_ops = {
3062 .start = softnet_seq_start,
3063 .next = softnet_seq_next,
3064 .stop = softnet_seq_stop,
3065 .show = softnet_seq_show,
3068 static int softnet_seq_open(struct inode *inode, struct file *file)
3070 return seq_open(file, &softnet_seq_ops);
3073 static const struct file_operations softnet_seq_fops = {
3074 .owner = THIS_MODULE,
3075 .open = softnet_seq_open,
3077 .llseek = seq_lseek,
3078 .release = seq_release,
3081 static void *ptype_get_idx(loff_t pos)
3083 struct packet_type *pt = NULL;
3087 list_for_each_entry_rcu(pt, &ptype_all, list) {
3093 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
3094 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
3103 static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
3107 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
3110 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3112 struct packet_type *pt;
3113 struct list_head *nxt;
3117 if (v == SEQ_START_TOKEN)
3118 return ptype_get_idx(0);
3121 nxt = pt->list.next;
3122 if (pt->type == htons(ETH_P_ALL)) {
3123 if (nxt != &ptype_all)
3126 nxt = ptype_base[0].next;
3128 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
3130 while (nxt == &ptype_base[hash]) {
3131 if (++hash >= PTYPE_HASH_SIZE)
3133 nxt = ptype_base[hash].next;
3136 return list_entry(nxt, struct packet_type, list);
3139 static void ptype_seq_stop(struct seq_file *seq, void *v)
3145 static int ptype_seq_show(struct seq_file *seq, void *v)
3147 struct packet_type *pt = v;
3149 if (v == SEQ_START_TOKEN)
3150 seq_puts(seq, "Type Device Function\n");
3151 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
3152 if (pt->type == htons(ETH_P_ALL))
3153 seq_puts(seq, "ALL ");
3155 seq_printf(seq, "%04x", ntohs(pt->type));
3157 seq_printf(seq, " %-8s %pF\n",
3158 pt->dev ? pt->dev->name : "", pt->func);
3164 static const struct seq_operations ptype_seq_ops = {
3165 .start = ptype_seq_start,
3166 .next = ptype_seq_next,
3167 .stop = ptype_seq_stop,
3168 .show = ptype_seq_show,
3171 static int ptype_seq_open(struct inode *inode, struct file *file)
3173 return seq_open_net(inode, file, &ptype_seq_ops,
3174 sizeof(struct seq_net_private));
3177 static const struct file_operations ptype_seq_fops = {
3178 .owner = THIS_MODULE,
3179 .open = ptype_seq_open,
3181 .llseek = seq_lseek,
3182 .release = seq_release_net,
3186 static int __net_init dev_proc_net_init(struct net *net)
3190 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
3192 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
3194 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
3197 if (wext_proc_init(net))
3203 proc_net_remove(net, "ptype");
3205 proc_net_remove(net, "softnet_stat");
3207 proc_net_remove(net, "dev");
3211 static void __net_exit dev_proc_net_exit(struct net *net)
3213 wext_proc_exit(net);
3215 proc_net_remove(net, "ptype");
3216 proc_net_remove(net, "softnet_stat");
3217 proc_net_remove(net, "dev");
3220 static struct pernet_operations __net_initdata dev_proc_ops = {
3221 .init = dev_proc_net_init,
3222 .exit = dev_proc_net_exit,
3225 static int __init dev_proc_init(void)
3227 return register_pernet_subsys(&dev_proc_ops);
3230 #define dev_proc_init() 0
3231 #endif /* CONFIG_PROC_FS */
3235 * netdev_set_master - set up master/slave pair
3236 * @slave: slave device
3237 * @master: new master device
3239 * Changes the master device of the slave. Pass %NULL to break the
3240 * bonding. The caller must hold the RTNL semaphore. On a failure
3241 * a negative errno code is returned. On success the reference counts
3242 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
3243 * function returns zero.
3245 int netdev_set_master(struct net_device *slave, struct net_device *master)
3247 struct net_device *old = slave->master;
3257 slave->master = master;
3265 slave->flags |= IFF_SLAVE;
3267 slave->flags &= ~IFF_SLAVE;
3269 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
3273 static void dev_change_rx_flags(struct net_device *dev, int flags)
3275 const struct net_device_ops *ops = dev->netdev_ops;
3277 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
3278 ops->ndo_change_rx_flags(dev, flags);
3281 static int __dev_set_promiscuity(struct net_device *dev, int inc)
3283 unsigned short old_flags = dev->flags;
3289 dev->flags |= IFF_PROMISC;
3290 dev->promiscuity += inc;
3291 if (dev->promiscuity == 0) {
3294 * If inc causes overflow, untouch promisc and return error.
3297 dev->flags &= ~IFF_PROMISC;
3299 dev->promiscuity -= inc;
3300 printk(KERN_WARNING "%s: promiscuity counter overflowed, "
3301 "set promiscuity failed; the promiscuous mode "
3302 "of this device may be unreliable.\n", dev->name);
3306 if (dev->flags != old_flags) {
3307 printk(KERN_INFO "device %s %s promiscuous mode\n",
3308 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
3310 if (audit_enabled) {
3311 current_uid_gid(&uid, &gid);
3312 audit_log(current->audit_context, GFP_ATOMIC,
3313 AUDIT_ANOM_PROMISCUOUS,
3314 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
3315 dev->name, (dev->flags & IFF_PROMISC),
3316 (old_flags & IFF_PROMISC),
3317 audit_get_loginuid(current),
3319 audit_get_sessionid(current));
3322 dev_change_rx_flags(dev, IFF_PROMISC);
3328 * dev_set_promiscuity - update promiscuity count on a device
3332 * Add or remove promiscuity from a device. While the count in the device
3333 * remains above zero the interface remains promiscuous. Once it hits zero
3334 * the device reverts to normal filtering operation. A negative inc
3335 * value is used to drop promiscuity on the device.
3336 * Return 0 if successful or a negative errno code on error.
3338 int dev_set_promiscuity(struct net_device *dev, int inc)
3340 unsigned short old_flags = dev->flags;
3343 err = __dev_set_promiscuity(dev, inc);
3346 if (dev->flags != old_flags)
3347 dev_set_rx_mode(dev);
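/*
 * Illustrative sketch: a feature that must see all traffic (a packet
 * tap, for instance) bumps the promiscuity count while active and drops
 * it on teardown, rather than toggling IFF_PROMISC itself. The mytap_*
 * names are hypothetical; like other device configuration this is
 * normally done under RTNL.
 */
static int mytap_attach(struct net_device *dev)
{
	return dev_set_promiscuity(dev, 1);
}

static void mytap_detach(struct net_device *dev)
{
	dev_set_promiscuity(dev, -1);
}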
3352 * dev_set_allmulti - update allmulti count on a device
3356 * Add or remove reception of all multicast frames to a device. While the
3357 * count in the device remains above zero the interface remains listening
3358 * to all multicast frames. Once it hits zero the device reverts to normal
3359 * filtering operation. A negative @inc value is used to drop the counter
3360 * when releasing a resource needing all multicasts.
3361 * Return 0 if successful or a negative errno code on error.
3364 int dev_set_allmulti(struct net_device *dev, int inc)
3366 unsigned short old_flags = dev->flags;
3370 dev->flags |= IFF_ALLMULTI;
3371 dev->allmulti += inc;
3372 if (dev->allmulti == 0) {
3375 * If inc causes overflow, untouch allmulti and return error.
3378 dev->flags &= ~IFF_ALLMULTI;
3380 dev->allmulti -= inc;
3381 printk(KERN_WARNING "%s: allmulti counter overflowed, "
3382 "set allmulti failed; the allmulti mode of "
3383 "this device may be unreliable.\n", dev->name);
3387 if (dev->flags ^ old_flags) {
3388 dev_change_rx_flags(dev, IFF_ALLMULTI);
3389 dev_set_rx_mode(dev);
3395 * Upload unicast and multicast address lists to device and
3396 * configure RX filtering. When the device doesn't support unicast
3397 * filtering it is put in promiscuous mode while unicast addresses
3400 void __dev_set_rx_mode(struct net_device *dev)
3402 const struct net_device_ops *ops = dev->netdev_ops;
3404 /* dev_open will call this function so the list will stay sane. */
3405 if (!(dev->flags&IFF_UP))
3408 if (!netif_device_present(dev))
3411 if (ops->ndo_set_rx_mode)
3412 ops->ndo_set_rx_mode(dev);
3414 /* Unicast address changes may only happen under the rtnl,
3415 * therefore calling __dev_set_promiscuity here is safe.
3417 if (dev->uc_count > 0 && !dev->uc_promisc) {
3418 __dev_set_promiscuity(dev, 1);
3419 dev->uc_promisc = 1;
3420 } else if (dev->uc_count == 0 && dev->uc_promisc) {
3421 __dev_set_promiscuity(dev, -1);
3422 dev->uc_promisc = 0;
3425 if (ops->ndo_set_multicast_list)
3426 ops->ndo_set_multicast_list(dev);
3430 void dev_set_rx_mode(struct net_device *dev)
3432 netif_addr_lock_bh(dev);
3433 __dev_set_rx_mode(dev);
3434 netif_addr_unlock_bh(dev);
3437 /* hw address list handling functions */
3439 static int __hw_addr_add(struct list_head *list, unsigned char *addr,
3440 int addr_len, unsigned char addr_type)
3442 struct netdev_hw_addr *ha;
3445 if (addr_len > MAX_ADDR_LEN)
3448 alloc_size = sizeof(*ha);
3449 if (alloc_size < L1_CACHE_BYTES)
3450 alloc_size = L1_CACHE_BYTES;
3451 ha = kmalloc(alloc_size, GFP_ATOMIC);
3454 memcpy(ha->addr, addr, addr_len);
3455 ha->type = addr_type;
3456 list_add_tail_rcu(&ha->list, list);
3460 static void ha_rcu_free(struct rcu_head *head)
3462 struct netdev_hw_addr *ha;
3464 ha = container_of(head, struct netdev_hw_addr, rcu_head);
3468 static int __hw_addr_del_ii(struct list_head *list, unsigned char *addr,
3469 int addr_len, unsigned char addr_type,
3472 struct netdev_hw_addr *ha;
3475 list_for_each_entry(ha, list, list) {
3476 if (i++ != ignore_index &&
3477 !memcmp(ha->addr, addr, addr_len) &&
3478 (ha->type == addr_type || !addr_type)) {
3479 list_del_rcu(&ha->list);
3480 call_rcu(&ha->rcu_head, ha_rcu_free);
3487 static int __hw_addr_add_multiple_ii(struct list_head *to_list,
3488 struct list_head *from_list,
3489 int addr_len, unsigned char addr_type,
3493 struct netdev_hw_addr *ha, *ha2;
3496 list_for_each_entry(ha, from_list, list) {
3497 type = addr_type ? addr_type : ha->type;
3498 err = __hw_addr_add(to_list, ha->addr, addr_len, type);
3505 list_for_each_entry(ha2, from_list, list) {
3508 type = addr_type ? addr_type : ha2->type;
3509 __hw_addr_del_ii(to_list, ha2->addr, addr_len, type,
3515 static void __hw_addr_del_multiple_ii(struct list_head *to_list,
3516 struct list_head *from_list,
3517 int addr_len, unsigned char addr_type,
3520 struct netdev_hw_addr *ha;
3523 list_for_each_entry(ha, from_list, list) {
3524 type = addr_type ? addr_type : ha->type;
3525 __hw_addr_del_ii(to_list, ha->addr, addr_len, addr_type,
3530 static void __hw_addr_flush(struct list_head *list)
3532 struct netdev_hw_addr *ha, *tmp;
3534 list_for_each_entry_safe(ha, tmp, list, list) {
3535 list_del_rcu(&ha->list);
3536 call_rcu(&ha->rcu_head, ha_rcu_free);
3540 /* Device address handling functions */
3542 static void dev_addr_flush(struct net_device *dev)
3544 /* rtnl_mutex must be held here */
3546 __hw_addr_flush(&dev->dev_addr_list);
3547 dev->dev_addr = NULL;
3550 static int dev_addr_init(struct net_device *dev)
3552 unsigned char addr[MAX_ADDR_LEN];
3553 struct netdev_hw_addr *ha;
3556 /* rtnl_mutex must be held here */
3558 INIT_LIST_HEAD(&dev->dev_addr_list);
3559 memset(addr, 0, sizeof(*addr));
3560 err = __hw_addr_add(&dev->dev_addr_list, addr, sizeof(*addr),
3561 NETDEV_HW_ADDR_T_LAN);
3564 * Get the first (previously created) address from the list
3565 * and set dev_addr pointer to this location.
3567 ha = list_first_entry(&dev->dev_addr_list,
3568 struct netdev_hw_addr, list);
3569 dev->dev_addr = ha->addr;
3575 * dev_addr_add - Add a device address
3577 * @addr: address to add
3578 * @addr_type: address type
3580 * Add a device address to the device or increase the reference count if
3581 * it already exists.
3583 * The caller must hold the rtnl_mutex.
3585 int dev_addr_add(struct net_device *dev, unsigned char *addr,
3586 unsigned char addr_type)
3592 err = __hw_addr_add(&dev->dev_addr_list, addr, dev->addr_len,
3595 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3598 EXPORT_SYMBOL(dev_addr_add);
3601 * dev_addr_del - Release a device address.
3603 * @addr: address to delete
3604 * @addr_type: address type
3606 * Release reference to a device address and remove it from the device
3607 * if the reference count drops to zero.
3609 * The caller must hold the rtnl_mutex.
3611 int dev_addr_del(struct net_device *dev, unsigned char *addr,
3612 unsigned char addr_type)
3618 err = __hw_addr_del_ii(&dev->dev_addr_list, addr, dev->addr_len,
3621 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3624 EXPORT_SYMBOL(dev_addr_del);
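/*
 * Illustrative sketch of the reference-counted address API above: a
 * driver or stacking device claims a secondary hardware address while a
 * feature is active. The address value and myfeature_* name are examples
 * only; rtnl_lock() satisfies the locking rule documented above.
 */
static int myfeature_claim_addr(struct net_device *dev)
{
	unsigned char addr[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	int err;

	rtnl_lock();
	err = dev_addr_add(dev, addr, NETDEV_HW_ADDR_T_LAN);
	rtnl_unlock();
	return err;
}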
3627 * dev_addr_add_multiple - Add device addresses from another device
3628 * @to_dev: device to which addresses will be added
3629 * @from_dev: device from which addresses will be added
3630 * @addr_type: address type - 0 means the type is taken from from_dev
3632 * Add the device addresses of one device to another.
3634 * The caller must hold the rtnl_mutex.
3636 int dev_addr_add_multiple(struct net_device *to_dev,
3637 struct net_device *from_dev,
3638 unsigned char addr_type)
3644 if (from_dev->addr_len != to_dev->addr_len)
3646 err = __hw_addr_add_multiple_ii(&to_dev->dev_addr_list,
3647 &from_dev->dev_addr_list,
3648 to_dev->addr_len, addr_type, 0);
3650 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3653 EXPORT_SYMBOL(dev_addr_add_multiple);
3656 * dev_addr_del_multiple - Delete device addresses by another device
3657 * @to_dev: device where the addresses will be deleted
3658 * @from_dev: device whose address list selects the addresses to delete
3659 * @addr_type: address type - 0 means the type is taken from from_dev
3661 * Deletes from the to device the addresses listed in the from device.
3663 * The caller must hold the rtnl_mutex.
3665 int dev_addr_del_multiple(struct net_device *to_dev,
3666 struct net_device *from_dev,
3667 unsigned char addr_type)
3671 if (from_dev->addr_len != to_dev->addr_len)
3673 __hw_addr_del_multiple_ii(&to_dev->dev_addr_list,
3674 &from_dev->dev_addr_list,
3675 to_dev->addr_len, addr_type, 0);
3676 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3679 EXPORT_SYMBOL(dev_addr_del_multiple);
3681 /* unicast and multicast address handling functions */
3683 int __dev_addr_delete(struct dev_addr_list **list, int *count,
3684 void *addr, int alen, int glbl)
3686 struct dev_addr_list *da;
3688 for (; (da = *list) != NULL; list = &da->next) {
3689 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3690 alen == da->da_addrlen) {
3692 int old_glbl = da->da_gusers;
3709 int __dev_addr_add(struct dev_addr_list **list, int *count,
3710 void *addr, int alen, int glbl)
3712 struct dev_addr_list *da;
3714 for (da = *list; da != NULL; da = da->next) {
3715 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3716 da->da_addrlen == alen) {
3718 int old_glbl = da->da_gusers;
3728 da = kzalloc(sizeof(*da), GFP_ATOMIC);
3731 memcpy(da->da_addr, addr, alen);
3732 da->da_addrlen = alen;
3734 da->da_gusers = glbl ? 1 : 0;
3742 * dev_unicast_delete - Release secondary unicast address.
3744 * @addr: address to delete
3745 * @alen: length of @addr
3747 * Release reference to a secondary unicast address and remove it
3748 * from the device if the reference count drops to zero.
3750 * The caller must hold the rtnl_mutex.
3752 int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
3758 netif_addr_lock_bh(dev);
3759 err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
3761 __dev_set_rx_mode(dev);
3762 netif_addr_unlock_bh(dev);
3765 EXPORT_SYMBOL(dev_unicast_delete);
3768 * dev_unicast_add - add a secondary unicast address
3770 * @addr: address to add
3771 * @alen: length of @addr
3773 * Add a secondary unicast address to the device or increase
3774 * the reference count if it already exists.
3776 * The caller must hold the rtnl_mutex.
3778 int dev_unicast_add(struct net_device *dev, void *addr, int alen)
3784 netif_addr_lock_bh(dev);
3785 err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
3787 __dev_set_rx_mode(dev);
3788 netif_addr_unlock_bh(dev);
3791 EXPORT_SYMBOL(dev_unicast_add);
3793 int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
3794 struct dev_addr_list **from, int *from_count)
3796 struct dev_addr_list *da, *next;
3800 while (da != NULL) {
3802 if (!da->da_synced) {
3803 err = __dev_addr_add(to, to_count,
3804 da->da_addr, da->da_addrlen, 0);
3809 } else if (da->da_users == 1) {
3810 __dev_addr_delete(to, to_count,
3811 da->da_addr, da->da_addrlen, 0);
3812 __dev_addr_delete(from, from_count,
3813 da->da_addr, da->da_addrlen, 0);
3820 void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
3821 struct dev_addr_list **from, int *from_count)
3823 struct dev_addr_list *da, *next;
3826 while (da != NULL) {
3828 if (da->da_synced) {
3829 __dev_addr_delete(to, to_count,
3830 da->da_addr, da->da_addrlen, 0);
3832 __dev_addr_delete(from, from_count,
3833 da->da_addr, da->da_addrlen, 0);
3840 * dev_unicast_sync - Synchronize device's unicast list to another device
3841 * @to: destination device
3842 * @from: source device
3844 * Add newly added addresses to the destination device and release
3845 * addresses that have no users left. The source device must be
3846 * locked by netif_addr_lock_bh.
3848 * This function is intended to be called from the dev->set_rx_mode
3849 * function of layered software devices.
3851 int dev_unicast_sync(struct net_device *to, struct net_device *from)
3855 netif_addr_lock_bh(to);
3856 err = __dev_addr_sync(&to->uc_list, &to->uc_count,
3857 &from->uc_list, &from->uc_count);
3859 __dev_set_rx_mode(to);
3860 netif_addr_unlock_bh(to);
3863 EXPORT_SYMBOL(dev_unicast_sync);
3866 * dev_unicast_unsync - Remove synchronized addresses from the destination device
3867 * @to: destination device
3868 * @from: source device
3870 * Remove all addresses that were added to the destination device by
3871 * dev_unicast_sync(). This function is intended to be called from the
3872 * dev->stop function of layered software devices.
3874 void dev_unicast_unsync(struct net_device *to, struct net_device *from)
3876 netif_addr_lock_bh(from);
3877 netif_addr_lock(to);
3879 __dev_addr_unsync(&to->uc_list, &to->uc_count,
3880 &from->uc_list, &from->uc_count);
3881 __dev_set_rx_mode(to);
3883 netif_addr_unlock(to);
3884 netif_addr_unlock_bh(from);
3886 EXPORT_SYMBOL(dev_unicast_unsync);
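/*
 * Illustrative sketch: a stacking driver (VLAN-like) keeps its lower
 * device's unicast filter in sync from the hooks named in the comments
 * above. The myupper_* names and priv layout are hypothetical; the
 * set_rx_mode hook runs via __dev_set_rx_mode() with the upper device's
 * address list already locked.
 */
struct myupper_priv {
	struct net_device *lower_dev;	/* the real device underneath */
};

static void myupper_set_rx_mode(struct net_device *dev)
{
	struct myupper_priv *priv = netdev_priv(dev);

	dev_unicast_sync(priv->lower_dev, dev);
}

static int myupper_stop(struct net_device *dev)
{
	struct myupper_priv *priv = netdev_priv(dev);

	dev_unicast_unsync(priv->lower_dev, dev);
	return 0;
}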
3888 static void __dev_addr_discard(struct dev_addr_list **list)
3890 struct dev_addr_list *tmp;
3892 while (*list != NULL) {
3895 if (tmp->da_users > tmp->da_gusers)
3896 printk(KERN_WARNING "__dev_addr_discard: address leakage! "
3897 "da_users=%d\n", tmp->da_users);
3902 static void dev_addr_discard(struct net_device *dev)
3904 netif_addr_lock_bh(dev);
3906 __dev_addr_discard(&dev->uc_list);
3909 __dev_addr_discard(&dev->mc_list);
3912 netif_addr_unlock_bh(dev);
3916 * dev_get_flags - get flags reported to userspace
3919 * Get the combination of flag bits exported through APIs to userspace.
3921 unsigned dev_get_flags(const struct net_device *dev)
3925 flags = (dev->flags & ~(IFF_PROMISC |
3930 (dev->gflags & (IFF_PROMISC |
3933 if (netif_running(dev)) {
3934 if (netif_oper_up(dev))
3935 flags |= IFF_RUNNING;
3936 if (netif_carrier_ok(dev))
3937 flags |= IFF_LOWER_UP;
3938 if (netif_dormant(dev))
3939 flags |= IFF_DORMANT;
3946 * dev_change_flags - change device settings
3948 * @flags: device state flags
3950 * Change settings on a device based on the given state flags. The flags are
3951 * in the userspace exported format.
3953 int dev_change_flags(struct net_device *dev, unsigned flags)
3956 int old_flags = dev->flags;
3961 * Set the flags on our device.
3964 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
3965 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
3967 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
3971 * Load in the correct multicast list now that the flags have changed.
3974 if ((old_flags ^ flags) & IFF_MULTICAST)
3975 dev_change_rx_flags(dev, IFF_MULTICAST);
3977 dev_set_rx_mode(dev);
3980 * Have we downed the interface? We handle IFF_UP ourselves
3981 * according to user attempts to set it, rather than blindly setting it.
3986 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
3987 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
3990 dev_set_rx_mode(dev);
3993 if (dev->flags & IFF_UP &&
3994 ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
3996 call_netdevice_notifiers(NETDEV_CHANGE, dev);
3998 if ((flags ^ dev->gflags) & IFF_PROMISC) {
3999 int inc = (flags & IFF_PROMISC) ? +1 : -1;
4000 dev->gflags ^= IFF_PROMISC;
4001 dev_set_promiscuity(dev, inc);
4004 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4005 is important. Some (broken) drivers set IFF_PROMISC when
4006 IFF_ALLMULTI is requested, without asking us and without reporting.
4008 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
4009 int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
4010 dev->gflags ^= IFF_ALLMULTI;
4011 dev_set_allmulti(dev, inc);
4014 /* Exclude state transition flags, already notified */
4015 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
4017 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
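/*
 * Illustrative userspace sketch: dev_change_flags() is normally reached
 * through the SIOCSIFFLAGS ioctl handled further below (application
 * code, for context only):
 *
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ioctl(fd, SIOCGIFFLAGS, &ifr);		// read-modify-write
 *	ifr.ifr_flags |= IFF_UP;
 *	ioctl(fd, SIOCSIFFLAGS, &ifr);		// brings the interface up
 */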
4023 * dev_set_mtu - Change maximum transfer unit
4025 * @new_mtu: new transfer unit
4027 * Change the maximum transfer size of the network device.
4029 int dev_set_mtu(struct net_device *dev, int new_mtu)
4031 const struct net_device_ops *ops = dev->netdev_ops;
4034 if (new_mtu == dev->mtu)
4037 /* MTU must be positive. */
4041 if (!netif_device_present(dev))
4045 if (ops->ndo_change_mtu)
4046 err = ops->ndo_change_mtu(dev, new_mtu);
4050 if (!err && dev->flags & IFF_UP)
4051 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
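/*
 * Illustrative userspace sketch: the usual path into dev_set_mtu() is
 * the SIOCSIFMTU ioctl (application code, for context only):
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_mtu = 9000;		// jumbo frames, if the driver allows
 *	ioctl(fd, SIOCSIFMTU, &ifr);
 */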
4056 * dev_set_mac_address - Change Media Access Control Address
4060 * Change the hardware (MAC) address of the device
4062 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4064 const struct net_device_ops *ops = dev->netdev_ops;
4067 if (!ops->ndo_set_mac_address)
4069 if (sa->sa_family != dev->type)
4071 if (!netif_device_present(dev))
4073 err = ops->ndo_set_mac_address(dev, sa);
4075 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4080 * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
4082 static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
4085 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
4091 case SIOCGIFFLAGS: /* Get interface flags */
4092 ifr->ifr_flags = dev_get_flags(dev);
4095 case SIOCGIFMETRIC: /* Get the metric on the interface
4096 (currently unused) */
4097 ifr->ifr_metric = 0;
4100 case SIOCGIFMTU: /* Get the MTU of a device */
4101 ifr->ifr_mtu = dev->mtu;
4106 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4108 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4109 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4110 ifr->ifr_hwaddr.sa_family = dev->type;
4118 ifr->ifr_map.mem_start = dev->mem_start;
4119 ifr->ifr_map.mem_end = dev->mem_end;
4120 ifr->ifr_map.base_addr = dev->base_addr;
4121 ifr->ifr_map.irq = dev->irq;
4122 ifr->ifr_map.dma = dev->dma;
4123 ifr->ifr_map.port = dev->if_port;
4127 ifr->ifr_ifindex = dev->ifindex;
4131 ifr->ifr_qlen = dev->tx_queue_len;
4135 /* dev_ioctl() should ensure this case is never reached */
4147 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
4149 static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4152 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
4153 const struct net_device_ops *ops;
4158 ops = dev->netdev_ops;
4161 case SIOCSIFFLAGS: /* Set interface flags */
4162 return dev_change_flags(dev, ifr->ifr_flags);
4164 case SIOCSIFMETRIC: /* Set the metric on the interface
4165 (currently unused) */
4168 case SIOCSIFMTU: /* Set the MTU of a device */
4169 return dev_set_mtu(dev, ifr->ifr_mtu);
4172 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
4174 case SIOCSIFHWBROADCAST:
4175 if (ifr->ifr_hwaddr.sa_family != dev->type)
4177 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4178 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4179 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4183 if (ops->ndo_set_config) {
4184 if (!netif_device_present(dev))
4186 return ops->ndo_set_config(dev, &ifr->ifr_map);
4191 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4192 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4194 if (!netif_device_present(dev))
4196 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
4200 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4201 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4203 if (!netif_device_present(dev))
4205 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
4209 if (ifr->ifr_qlen < 0)
4211 dev->tx_queue_len = ifr->ifr_qlen;
4215 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4216 return dev_change_name(dev, ifr->ifr_newname);
4219 * Unknown or private ioctl
4223 if ((cmd >= SIOCDEVPRIVATE &&
4224 cmd <= SIOCDEVPRIVATE + 15) ||
4225 cmd == SIOCBONDENSLAVE ||
4226 cmd == SIOCBONDRELEASE ||
4227 cmd == SIOCBONDSETHWADDR ||
4228 cmd == SIOCBONDSLAVEINFOQUERY ||
4229 cmd == SIOCBONDINFOQUERY ||
4230 cmd == SIOCBONDCHANGEACTIVE ||
4231 cmd == SIOCGMIIPHY ||
4232 cmd == SIOCGMIIREG ||
4233 cmd == SIOCSMIIREG ||
4234 cmd == SIOCBRADDIF ||
4235 cmd == SIOCBRDELIF ||
4236 cmd == SIOCSHWTSTAMP ||
4237 cmd == SIOCWANDEV) {
4239 if (ops->ndo_do_ioctl) {
4240 if (netif_device_present(dev))
4241 err = ops->ndo_do_ioctl(dev, ifr, cmd);
4253 * This function handles all "interface"-type I/O control requests. The actual
4254 * 'doing' part of this is dev_ifsioc above.
4258 * dev_ioctl - network device ioctl
4259 * @net: the applicable net namespace
4260 * @cmd: command to issue
4261 * @arg: pointer to a struct ifreq in user space
4263 * Issue ioctl functions to devices. This is normally called by the
4264 * user space syscall interfaces but can sometimes be useful for
4265 * other purposes. The return value is the return from the syscall if
4266 * positive or a negative errno code on error.
4269 int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
4275 /* One special case: SIOCGIFCONF takes ifconf argument
4276 and requires shared lock, because it sleeps writing the results. */
4280 if (cmd == SIOCGIFCONF) {
4282 ret = dev_ifconf(net, (char __user *) arg);
4286 if (cmd == SIOCGIFNAME)
4287 return dev_ifname(net, (struct ifreq __user *)arg);
4289 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4292 ifr.ifr_name[IFNAMSIZ-1] = 0;
4294 colon = strchr(ifr.ifr_name, ':');
4299 * See which interface the caller is talking about.
4304 * These ioctl calls:
4305 * - can be done by all.
4306 * - are atomic and do not require locking.
4317 dev_load(net, ifr.ifr_name);
4318 read_lock(&dev_base_lock);
4319 ret = dev_ifsioc_locked(net, &ifr, cmd);
4320 read_unlock(&dev_base_lock);
4324 if (copy_to_user(arg, &ifr,
4325 sizeof(struct ifreq)))
4331 dev_load(net, ifr.ifr_name);
4333 ret = dev_ethtool(net, &ifr);
4338 if (copy_to_user(arg, &ifr,
4339 sizeof(struct ifreq)))
4345 * These ioctl calls:
4346 * - require superuser power.
4347 * - require strict serialization.
4353 if (!capable(CAP_NET_ADMIN))
4355 dev_load(net, ifr.ifr_name);
4357 ret = dev_ifsioc(net, &ifr, cmd);
4362 if (copy_to_user(arg, &ifr,
4363 sizeof(struct ifreq)))
4369 * These ioctl calls:
4370 * - require superuser power.
4371 * - require strict serialization.
4372 * - do not return a value
4382 case SIOCSIFHWBROADCAST:
4385 case SIOCBONDENSLAVE:
4386 case SIOCBONDRELEASE:
4387 case SIOCBONDSETHWADDR:
4388 case SIOCBONDCHANGEACTIVE:
4392 if (!capable(CAP_NET_ADMIN))
4395 case SIOCBONDSLAVEINFOQUERY:
4396 case SIOCBONDINFOQUERY:
4397 dev_load(net, ifr.ifr_name);
4399 ret = dev_ifsioc(net, &ifr, cmd);
4404 /* Get the per device memory space. We can add this but
4405 * currently do not support it */
4407 /* Set the per device memory buffer space.
4408 * Not applicable in our case */
4413 * Unknown or private ioctl.
4416 if (cmd == SIOCWANDEV ||
4417 (cmd >= SIOCDEVPRIVATE &&
4418 cmd <= SIOCDEVPRIVATE + 15)) {
4419 dev_load(net, ifr.ifr_name);
4421 ret = dev_ifsioc(net, &ifr, cmd);
4423 if (!ret && copy_to_user(arg, &ifr,
4424 sizeof(struct ifreq)))
4428 /* Take care of Wireless Extensions */
4429 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
4430 return wext_handle_ioctl(net, &ifr, cmd, arg);
4437 * dev_new_index - allocate an ifindex
4438 * @net: the applicable net namespace
4440 * Returns a suitable unique value for a new device interface
4441 * number. The caller must hold the rtnl semaphore or the
4442 * dev_base_lock to be sure it remains unique.
4444 static int dev_new_index(struct net *net)
4450 if (!__dev_get_by_index(net, ifindex))
4455 /* Delayed registration/unregistration */
4456 static LIST_HEAD(net_todo_list);
4458 static void net_set_todo(struct net_device *dev)
4460 list_add_tail(&dev->todo_list, &net_todo_list);
4463 static void rollback_registered(struct net_device *dev)
4465 BUG_ON(dev_boot_phase);
4468 /* Some devices call this without ever having registered, to unwind initialization. */
4469 if (dev->reg_state == NETREG_UNINITIALIZED) {
4470 printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
4471 "was registered\n", dev->name, dev);
4477 BUG_ON(dev->reg_state != NETREG_REGISTERED);
4479 /* If device is running, close it first. */
4482 /* And unlink it from device chain. */
4483 unlist_netdevice(dev);
4485 dev->reg_state = NETREG_UNREGISTERING;
4489 /* Shutdown queueing discipline. */
4493 /* Notify protocols that we are about to destroy
4494 this device. They should clean up all of their state.
4496 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4499 * Flush the unicast and multicast chains
4501 dev_addr_discard(dev);
4503 if (dev->netdev_ops->ndo_uninit)
4504 dev->netdev_ops->ndo_uninit(dev);
4506 /* Notifier chain MUST detach us from master device. */
4507 WARN_ON(dev->master);
4509 /* Remove entries from kobject tree */
4510 netdev_unregister_kobject(dev);
4517 static void __netdev_init_queue_locks_one(struct net_device *dev,
4518 struct netdev_queue *dev_queue,
4521 spin_lock_init(&dev_queue->_xmit_lock);
4522 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
4523 dev_queue->xmit_lock_owner = -1;
4526 static void netdev_init_queue_locks(struct net_device *dev)
4528 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
4529 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
4532 unsigned long netdev_fix_features(unsigned long features, const char *name)
4534 /* Fix illegal SG+CSUM combinations. */
4535 if ((features & NETIF_F_SG) &&
4536 !(features & NETIF_F_ALL_CSUM)) {
4538 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
4539 "checksum feature.\n", name);
4540 features &= ~NETIF_F_SG;
4543 /* TSO requires that SG is present as well. */
4544 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
4546 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
4547 "SG feature.\n", name);
4548 features &= ~NETIF_F_TSO;
4551 if (features & NETIF_F_UFO) {
4552 if (!(features & NETIF_F_GEN_CSUM)) {
4554 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4555 "since no NETIF_F_HW_CSUM feature.\n",
4557 features &= ~NETIF_F_UFO;
4560 if (!(features & NETIF_F_SG)) {
4562 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4563 "since no NETIF_F_SG feature.\n", name);
4564 features &= ~NETIF_F_UFO;
4570 EXPORT_SYMBOL(netdev_fix_features);
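/*
 * Illustrative sketch: a driver that advertises an inconsistent feature
 * set gets it repaired by the helper above. mydrv_init_features is
 * hypothetical.
 */
static void mydrv_init_features(struct net_device *dev)
{
	/* TSO without SG is illegal; netdev_fix_features() drops TSO
	 * (with a notice) and leaves only the checksum offload set */
	dev->features = NETIF_F_TSO | NETIF_F_IP_CSUM;
	dev->features = netdev_fix_features(dev->features, dev->name);
}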
4572 /* Some devices need to (re-)set their netdev_ops inside
4573 * ->init() or similar. If that happens, we have to set up
4574 * the compat pointers again.
4576 void netdev_resync_ops(struct net_device *dev)
4578 #ifdef CONFIG_COMPAT_NET_DEV_OPS
4579 const struct net_device_ops *ops = dev->netdev_ops;
4581 dev->init = ops->ndo_init;
4582 dev->uninit = ops->ndo_uninit;
4583 dev->open = ops->ndo_open;
4584 dev->change_rx_flags = ops->ndo_change_rx_flags;
4585 dev->set_rx_mode = ops->ndo_set_rx_mode;
4586 dev->set_multicast_list = ops->ndo_set_multicast_list;
4587 dev->set_mac_address = ops->ndo_set_mac_address;
4588 dev->validate_addr = ops->ndo_validate_addr;
4589 dev->do_ioctl = ops->ndo_do_ioctl;
4590 dev->set_config = ops->ndo_set_config;
4591 dev->change_mtu = ops->ndo_change_mtu;
4592 dev->neigh_setup = ops->ndo_neigh_setup;
4593 dev->tx_timeout = ops->ndo_tx_timeout;
4594 dev->get_stats = ops->ndo_get_stats;
4595 dev->vlan_rx_register = ops->ndo_vlan_rx_register;
4596 dev->vlan_rx_add_vid = ops->ndo_vlan_rx_add_vid;
4597 dev->vlan_rx_kill_vid = ops->ndo_vlan_rx_kill_vid;
4598 #ifdef CONFIG_NET_POLL_CONTROLLER
4599 dev->poll_controller = ops->ndo_poll_controller;
4603 EXPORT_SYMBOL(netdev_resync_ops);
4606 * register_netdevice - register a network device
4607 * @dev: device to register
4609 * Take a completed network device structure and add it to the kernel
4610 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4611 * chain. 0 is returned on success. A negative errno code is returned
4612 * on a failure to set up the device, or if the name is a duplicate.
4614 * Callers must hold the rtnl semaphore. You may want
4615 * register_netdev() instead of this.
4618 * The locking appears insufficient to guarantee two parallel registers
4619 * will not get the same name.
4622 int register_netdevice(struct net_device *dev)
4624 struct hlist_head *head;
4625 struct hlist_node *p;
4627 struct net *net = dev_net(dev);
4629 BUG_ON(dev_boot_phase);
4634 /* When net_device's are persistent, this will be fatal. */
4635 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
4638 spin_lock_init(&dev->addr_list_lock);
4639 netdev_set_addr_lockdep_class(dev);
4640 netdev_init_queue_locks(dev);
4644 #ifdef CONFIG_COMPAT_NET_DEV_OPS
4645 /* Netdevice_ops API compatibility support.
4646 * This is temporary until all network devices are converted.
4648 if (dev->netdev_ops) {
4649 netdev_resync_ops(dev);
4651 char drivername[64];
4652 pr_info("%s (%s): not using net_device_ops yet\n",
4653 dev->name, netdev_drivername(dev, drivername, 64));
4655 /* This works only because net_device_ops and the
4656 compatibility structure are the same. */
4657 dev->netdev_ops = (void *) &(dev->init);
4661 /* Init, if this function is available */
4662 if (dev->netdev_ops->ndo_init) {
4663 ret = dev->netdev_ops->ndo_init(dev);
4671 if (!dev_valid_name(dev->name)) {
4676 dev->ifindex = dev_new_index(net);
4677 if (dev->iflink == -1)
4678 dev->iflink = dev->ifindex;
4680 /* Check for existence of name */
4681 head = dev_name_hash(net, dev->name);
4682 hlist_for_each(p, head) {
4683 struct net_device *d
4684 = hlist_entry(p, struct net_device, name_hlist);
4685 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
4691 /* Fix illegal checksum combinations */
4692 if ((dev->features & NETIF_F_HW_CSUM) &&
4693 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4694 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
4696 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4699 if ((dev->features & NETIF_F_NO_CSUM) &&
4700 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4701 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
4703 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
4706 dev->features = netdev_fix_features(dev->features, dev->name);
4708 /* Enable software GSO if SG is supported. */
4709 if (dev->features & NETIF_F_SG)
4710 dev->features |= NETIF_F_GSO;
4712 netdev_initialize_kobject(dev);
4713 ret = netdev_register_kobject(dev);
4716 dev->reg_state = NETREG_REGISTERED;
4719 * Default initial state at registration is that the
4720 * device is present.
4723 set_bit(__LINK_STATE_PRESENT, &dev->state);
4725 dev_init_scheduler(dev);
4727 list_netdevice(dev);
4729 /* Notify protocols that a new device appeared. */
4730 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
4731 ret = notifier_to_errno(ret);
4733 rollback_registered(dev);
4734 dev->reg_state = NETREG_UNREGISTERED;
4741 if (dev->netdev_ops->ndo_uninit)
4742 dev->netdev_ops->ndo_uninit(dev);
4747 * init_dummy_netdev - init a dummy network device for NAPI
4748 * @dev: device to init
4750 * This takes a network device structure and initializes the minimum
4751 * amount of fields so it can be used to schedule NAPI polls without
4752 * registering a full blown interface. This is to be used by drivers
4753 * that need to tie several hardware interfaces to a single NAPI
4754 * poll scheduler due to HW limitations.
4756 int init_dummy_netdev(struct net_device *dev)
4758 /* Clear everything. Note we don't initialize spinlocks
4759 * as they aren't supposed to be taken by any of the
4760 * NAPI code and this dummy netdev is supposed to be
4761 * only ever used for NAPI polls
4763 memset(dev, 0, sizeof(struct net_device));
4765 /* make sure we BUG if trying to hit standard
4766 * register/unregister code path
4768 dev->reg_state = NETREG_DUMMY;
4770 /* initialize the ref count */
4771 atomic_set(&dev->refcnt, 1);
4773 /* NAPI wants this */
4774 INIT_LIST_HEAD(&dev->napi_list);
4776 /* a dummy interface is started by default */
4777 set_bit(__LINK_STATE_PRESENT, &dev->state);
4778 set_bit(__LINK_STATE_START, &dev->state);
4782 EXPORT_SYMBOL_GPL(init_dummy_netdev);
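/*
 * Illustrative sketch of the helper above: hardware with several MACs
 * behind one interrupt can hang its NAPI context off a dummy netdev that
 * is never registered. The mydrv_* names are hypothetical.
 */
struct mydrv_card {
	struct net_device dummy_dev;	/* never registered */
	struct napi_struct napi;
};

static int mydrv_card_init(struct mydrv_card *card,
			   int (*poll)(struct napi_struct *, int))
{
	init_dummy_netdev(&card->dummy_dev);
	netif_napi_add(&card->dummy_dev, &card->napi, poll, 64);
	return 0;
}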
4786 * register_netdev - register a network device
4787 * @dev: device to register
4789 * Take a completed network device structure and add it to the kernel
4790 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4791 * chain. 0 is returned on success. A negative errno code is returned
4792 * on a failure to set up the device, or if the name is a duplicate.
4794 * This is a wrapper around register_netdevice that takes the rtnl semaphore
4795 * and expands the device name if you passed a format string to alloc_netdev.
4798 int register_netdev(struct net_device *dev)
4805 * If the name is a format string the caller wants us to do a name allocation.
4808 if (strchr(dev->name, '%')) {
4809 err = dev_alloc_name(dev, dev->name);
4814 err = register_netdevice(dev);
4819 EXPORT_SYMBOL(register_netdev);
4822 * netdev_wait_allrefs - wait until all references are gone.
4824 * This is called when unregistering network devices.
4826 * Any protocol or device that holds a reference should register
4827 * for netdevice notification, and clean up and put back the
4828 * reference if they receive an UNREGISTER event.
4829 * We can get stuck here if buggy protocols don't correctly call dev_put().
4832 static void netdev_wait_allrefs(struct net_device *dev)
4834 unsigned long rebroadcast_time, warning_time;
4836 rebroadcast_time = warning_time = jiffies;
4837 while (atomic_read(&dev->refcnt) != 0) {
4838 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
4841 /* Rebroadcast unregister notification */
4842 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4844 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
4846 /* We must not have linkwatch events
4847 * pending on unregister. If this
4848 * happens, we simply run the queue
4849 * unscheduled, resulting in a noop for this device.
4852 linkwatch_run_queue();
4857 rebroadcast_time = jiffies;
4862 if (time_after(jiffies, warning_time + 10 * HZ)) {
4863 printk(KERN_EMERG "unregister_netdevice: "
4864 "waiting for %s to become free. Usage "
4866 dev->name, atomic_read(&dev->refcnt));
4867 warning_time = jiffies;
4876 * register_netdevice(x1);
4877 * register_netdevice(x2);
4879 * unregister_netdevice(y1);
4880 * unregister_netdevice(y2);
4886 * We are invoked by rtnl_unlock().
4887 * This allows us to deal with problems:
4888 * 1) We can delete sysfs objects which invoke hotplug
4889 * without deadlocking with linkwatch via keventd.
4890 * 2) Since we run with the RTNL semaphore not held, we can sleep
4891 * safely in order to wait for the netdev refcnt to drop to zero.
4893 * We must not return until all unregister events added during
4894 * the interval the lock was held have been completed.
4896 void netdev_run_todo(void)
4898 struct list_head list;
4900 /* Snapshot list, allow later requests */
4901 list_replace_init(&net_todo_list, &list);
4905 while (!list_empty(&list)) {
4906 struct net_device *dev
4907 = list_entry(list.next, struct net_device, todo_list);
4908 list_del(&dev->todo_list);
4910 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
4911 printk(KERN_ERR "network todo '%s' but state %d\n",
4912 dev->name, dev->reg_state);
4917 dev->reg_state = NETREG_UNREGISTERED;
4919 on_each_cpu(flush_backlog, dev, 1);
4921 netdev_wait_allrefs(dev);
4924 BUG_ON(atomic_read(&dev->refcnt));
4925 WARN_ON(dev->ip_ptr);
4926 WARN_ON(dev->ip6_ptr);
4927 WARN_ON(dev->dn_ptr);
4929 if (dev->destructor)
4930 dev->destructor(dev);
4932 /* Free network device */
4933 kobject_put(&dev->dev.kobj);
4938 * dev_get_stats - get network device statistics
4939 * @dev: device to get statistics from
4941 * Get network statistics from device. The device driver may provide
4942 * its own method by setting dev->netdev_ops->ndo_get_stats; otherwise
4943 * the internal statistics structure is used.
4945 const struct net_device_stats *dev_get_stats(struct net_device *dev)
4947 const struct net_device_ops *ops = dev->netdev_ops;
4949 if (ops->ndo_get_stats)
4950 return ops->ndo_get_stats(dev);
4952 unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
4953 struct net_device_stats *stats = &dev->stats;
4955 struct netdev_queue *txq;
4957 for (i = 0; i < dev->num_tx_queues; i++) {
4958 txq = netdev_get_tx_queue(dev, i);
4959 tx_bytes += txq->tx_bytes;
4960 tx_packets += txq->tx_packets;
4961 tx_dropped += txq->tx_dropped;
4963 if (tx_bytes || tx_packets || tx_dropped) {
4964 stats->tx_bytes = tx_bytes;
4965 stats->tx_packets = tx_packets;
4966 stats->tx_dropped = tx_dropped;
4971 EXPORT_SYMBOL(dev_get_stats);
4973 static void netdev_init_one_queue(struct net_device *dev,
4974 struct netdev_queue *queue,
4980 static void netdev_init_queues(struct net_device *dev)
4982 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
4983 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
4984 spin_lock_init(&dev->tx_global_lock);
4988 * alloc_netdev_mq - allocate network device
4989 * @sizeof_priv: size of private data to allocate space for
4990 * @name: device name format string
4991 * @setup: callback to initialize device
4992 * @queue_count: the number of subqueues to allocate
4994 * Allocates a struct net_device with private data area for driver use
4995 * and performs basic initialization. Also allocates subqueue structs
4996 * for each queue on the device at the end of the netdevice.
4998 struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
4999 void (*setup)(struct net_device *), unsigned int queue_count)
5001 struct netdev_queue *tx;
5002 struct net_device *dev;
5006 BUG_ON(strlen(name) >= sizeof(dev->name));
5008 alloc_size = sizeof(struct net_device);
5010 /* ensure 32-byte alignment of private area */
5011 alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
5012 alloc_size += sizeof_priv;
5014 /* ensure 32-byte alignment of whole construct */
5015 alloc_size += NETDEV_ALIGN_CONST;
5017 p = kzalloc(alloc_size, GFP_KERNEL);
5019 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
5023 tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
5025 printk(KERN_ERR "alloc_netdev: Unable to allocate "
5030 dev = (struct net_device *)
5031 (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
5032 dev->padded = (char *)dev - (char *)p;
5034 if (dev_addr_init(dev))
5037 dev_net_set(dev, &init_net);
5040 dev->num_tx_queues = queue_count;
5041 dev->real_num_tx_queues = queue_count;
5043 dev->gso_max_size = GSO_MAX_SIZE;
5045 netdev_init_queues(dev);
5047 INIT_LIST_HEAD(&dev->napi_list);
5049 strcpy(dev->name, name);
5059 EXPORT_SYMBOL(alloc_netdev_mq);
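/*
 * Illustrative sketch of the allocation/registration lifecycle provided
 * by this file, using the alloc_etherdev() wrapper. mydrv_probe_netdev
 * is hypothetical and allocates no private area.
 */
static int mydrv_probe_netdev(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(0);	/* ether_setup-based device */
	if (!dev)
		return -ENOMEM;

	/* a real driver fills in dev->netdev_ops, dev_addr, features... */

	err = register_netdev(dev);	/* takes RTNL, resolves "eth%d" */
	if (err)
		free_netdev(dev);
	return err;
}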
5062 * free_netdev - free network device
5065 * This function does the last stage of destroying an allocated device
5066 * interface. The reference to the device object is released.
5067 * If this is the last reference then it will be freed.
5069 void free_netdev(struct net_device *dev)
5071 struct napi_struct *p, *n;
5073 release_net(dev_net(dev));
5077 /* Flush device addresses */
5078 dev_addr_flush(dev);
5080 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
5083 /* Compatibility with error handling in drivers */
5084 if (dev->reg_state == NETREG_UNINITIALIZED) {
5085 kfree((char *)dev - dev->padded);
5089 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
5090 dev->reg_state = NETREG_RELEASED;
5092 /* will free via device release */
5093 put_device(&dev->dev);
5097 * synchronize_net - Synchronize with packet receive processing
5099 * Wait for packets currently being received to be done.
5100 * Does not block later packets from starting.
5102 void synchronize_net(void)
5109 * unregister_netdevice - remove device from the kernel
5112 * This function shuts down a device interface and removes it
5113 * from the kernel tables.
5115 * Callers must hold the rtnl semaphore. You may want
5116 * unregister_netdev() instead of this.
5119 void unregister_netdevice(struct net_device *dev)
5123 rollback_registered(dev);
5124 /* Finish processing unregister after unlock */
5129 * unregister_netdev - remove device from the kernel
5132 * This function shuts down a device interface and removes it
5133 * from the kernel tables.
5135 * This is just a wrapper for unregister_netdevice that takes
5136 * the rtnl semaphore. In general you want to use this and not
5137 * unregister_netdevice.
5139 void unregister_netdev(struct net_device *dev)
5142 unregister_netdevice(dev);
5146 EXPORT_SYMBOL(unregister_netdev);
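/*
 * Example: the usual module-exit pairing for the wrapper above, on the
 * assumption that a hypothetical example_dev was registered by the
 * module's init function.
 */
#if 0
static struct net_device *example_dev;

static void __exit example_exit(void)
{
	unregister_netdev(example_dev);	/* takes and drops the rtnl lock */
	free_netdev(example_dev);
}
module_exit(example_exit);
#endif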
5149 * dev_change_net_namespace - move device to a different network namespace
5151 * @net: network namespace
5152 * @pat: if not NULL, name pattern to try if the current device name
5153 * is already taken in the destination network namespace.
5155 * This function shuts down a device interface and moves it
5156 * to a new network namespace. On success 0 is returned; on
5157 * failure a negative errno code is returned.
5159 * Callers must hold the rtnl semaphore.
5162 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
5165 const char *destname;
5170 /* Don't allow namespace local devices to be moved. */
5172 if (dev->features & NETIF_F_NETNS_LOCAL)
5176 /* Don't allow real devices to be moved when sysfs class support is not present. */
5180 if (dev->dev.parent)
5184 /* Ensure the device has been registered */
5186 if (dev->reg_state != NETREG_REGISTERED)
5189 /* Get out if there is nothing to do */
5191 if (net_eq(dev_net(dev), net))
5194 /* Pick the destination device name, and ensure
5195 * we can use it in the destination network namespace.
5198 destname = dev->name;
5199 if (__dev_get_by_name(net, destname)) {
5200 /* We get here if we can't use the current device name */
5203 if (!dev_valid_name(pat))
5205 if (strchr(pat, '%')) {
5206 if (__dev_alloc_name(net, pat, buf) < 0)
5211 if (__dev_get_by_name(net, destname))
5216 * And now a mini version of register_netdevice and unregister_netdevice.
5219 /* If device is running close it first. */
5222 /* And unlink it from device chain */
5224 unlist_netdevice(dev);
5228 /* Shutdown queueing discipline. */
5231 /* Notify protocols that we are about to destroy
5232 this device. They should clean up all of their state.
5234 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5237 * Flush the unicast and multicast chains
5239 dev_addr_discard(dev);
5241 netdev_unregister_kobject(dev);
5243 /* Actually switch the network namespace */
5244 dev_net_set(dev, net);
5246 /* Assign the new device name */
5247 if (destname != dev->name)
5248 strcpy(dev->name, destname);
5250 /* If there is an ifindex conflict assign a new one */
5251 if (__dev_get_by_index(net, dev->ifindex)) {
5252 int iflink = (dev->iflink == dev->ifindex);
5253 dev->ifindex = dev_new_index(net);
5255 dev->iflink = dev->ifindex;
5258 /* Fixup kobjects */
5259 err = netdev_register_kobject(dev);
5262 /* Add the device back in the hashes */
5263 list_netdevice(dev);
5265 /* Notify protocols that a new device appeared. */
5266 call_netdevice_notifiers(NETDEV_REGISTER, dev);
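/*
 * Example: how a caller might drive dev_change_net_namespace().  Sketch
 * only; how "net" is obtained (e.g. get_net_ns_by_pid()) and reference
 * counting are the caller's responsibility, and the rtnl semaphore must
 * be held as documented above.
 */
#if 0
static int example_move(struct net_device *dev, struct net *net)
{
	int err;

	rtnl_lock();
	/* a pattern with '%' is retried in the target namespace on a clash */
	err = dev_change_net_namespace(dev, net, "eth%d");
	rtnl_unlock();
	return err;
}
#endif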
5274 static int dev_cpu_callback(struct notifier_block *nfb,
5275 unsigned long action,
5278 struct sk_buff **list_skb;
5279 struct Qdisc **list_net;
5280 struct sk_buff *skb;
5281 unsigned int cpu, oldcpu = (unsigned long)ocpu;
5282 struct softnet_data *sd, *oldsd;
5284 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
5287 local_irq_disable();
5288 cpu = smp_processor_id();
5289 sd = &per_cpu(softnet_data, cpu);
5290 oldsd = &per_cpu(softnet_data, oldcpu);
5292 /* Find end of our completion_queue. */
5293 list_skb = &sd->completion_queue;
5295 list_skb = &(*list_skb)->next;
5296 /* Append completion queue from offline CPU. */
5297 *list_skb = oldsd->completion_queue;
5298 oldsd->completion_queue = NULL;
5300 /* Find end of our output_queue. */
5301 list_net = &sd->output_queue;
5303 list_net = &(*list_net)->next_sched;
5304 /* Append output queue from offline CPU. */
5305 *list_net = oldsd->output_queue;
5306 oldsd->output_queue = NULL;
5308 raise_softirq_irqoff(NET_TX_SOFTIRQ);
5311 /* Process offline CPU's input_pkt_queue */
5312 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
5320 * netdev_increment_features - increment feature set by one
5321 * @all: current feature set
5322 * @one: new feature set
5323 * @mask: mask feature set
5325 * Computes a new feature set after adding a device with feature set
5326 * @one to the master device with current feature set @all. Will not
5327 * enable anything that is off in @mask. Returns the new feature set.
5329 unsigned long netdev_increment_features(unsigned long all, unsigned long one,
5332 /* If device needs checksumming, downgrade to it. */
5333 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
5334 all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
5335 else if (mask & NETIF_F_ALL_CSUM) {
5336 /* If one device supports v4/v6 checksumming, set for all. */
5337 if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
5338 !(all & NETIF_F_GEN_CSUM)) {
5339 all &= ~NETIF_F_ALL_CSUM;
5340 all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
5343 /* If one device supports hw checksumming, set for all. */
5344 if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
5345 all &= ~NETIF_F_ALL_CSUM;
5346 all |= NETIF_F_HW_CSUM;
5350 one |= NETIF_F_ALL_CSUM;
5352 one |= all & NETIF_F_ONE_FOR_ALL;
5353 all &= one | NETIF_F_LLTX | NETIF_F_GSO;
5354 all |= one & mask & NETIF_F_ONE_FOR_ALL;
5358 EXPORT_SYMBOL(netdev_increment_features);
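/*
 * Example: recomputing a master device's feature set from its slaves,
 * the way a bonding-style driver would use the helper above.  Start
 * from the mask and fold each slave in; struct example_slave and the
 * slave list are hypothetical.
 */
#if 0
struct example_slave {
	struct list_head list;
	struct net_device *dev;
};

static unsigned long example_compute_features(struct list_head *slaves,
					      unsigned long mask)
{
	unsigned long features = mask;
	struct example_slave *s;

	list_for_each_entry(s, slaves, list)
		features = netdev_increment_features(features,
						     s->dev->features, mask);
	return features;
}
#endif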
5360 static struct hlist_head *netdev_create_hash(void)
5363 struct hlist_head *hash;
5365 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
5367 for (i = 0; i < NETDEV_HASHENTRIES; i++)
5368 INIT_HLIST_HEAD(&hash[i]);
5373 /* Initialize per network namespace state */
5374 static int __net_init netdev_init(struct net *net)
5376 INIT_LIST_HEAD(&net->dev_base_head);
5378 net->dev_name_head = netdev_create_hash();
5379 if (net->dev_name_head == NULL)
5382 net->dev_index_head = netdev_create_hash();
5383 if (net->dev_index_head == NULL)
5389 kfree(net->dev_name_head);
5395 * netdev_drivername - network driver for the device
5396 * @dev: network device
5397 * @buffer: buffer for resulting name
5398 * @len: size of buffer
5400 * Determine network driver for device.
5402 char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
5404 const struct device_driver *driver;
5405 const struct device *parent;
5407 if (len <= 0 || !buffer)
5411 parent = dev->dev.parent;
5416 driver = parent->driver;
5417 if (driver && driver->name)
5418 strlcpy(buffer, driver->name, len);
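/*
 * Example: the kind of diagnostic netdev_drivername() exists for, in
 * the style of the transmit watchdog.  Buffer size and message text
 * are illustrative.
 */
#if 0
static void example_report(struct net_device *dev)
{
	char drivername[64];

	printk(KERN_WARNING "NETDEV WATCHDOG: %s (%s): transmit timed out\n",
	       dev->name, netdev_drivername(dev, drivername, 64));
}
#endif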
5422 static void __net_exit netdev_exit(struct net *net)
5424 kfree(net->dev_name_head);
5425 kfree(net->dev_index_head);
5428 static struct pernet_operations __net_initdata netdev_net_ops = {
5429 .init = netdev_init,
5430 .exit = netdev_exit,
5433 static void __net_exit default_device_exit(struct net *net)
5435 struct net_device *dev;
5437 * Push all migratable network devices back to the
5438 * initial network namespace
5442 for_each_netdev(net, dev) {
5444 char fb_name[IFNAMSIZ];
5446 /* Ignore unmovable devices (e.g. loopback) */
5447 if (dev->features & NETIF_F_NETNS_LOCAL)
5450 /* Delete virtual devices */
5451 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) {
5452 dev->rtnl_link_ops->dellink(dev);
5456 /* Push remaining network devices to init_net */
5457 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
5458 err = dev_change_net_namespace(dev, &init_net, fb_name);
5460 printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
5461 __func__, dev->name, err);
5469 static struct pernet_operations __net_initdata default_device_ops = {
5470 .exit = default_device_exit,
5474 * Initialize the DEV module. At boot time this walks the device list and
5475 * unhooks any devices that fail to initialize (normally hardware not
5476 * present) and leaves us with a valid list of present and active devices.
5481 * This is called single threaded during boot, so no need
5482 * to take the rtnl semaphore.
5484 static int __init net_dev_init(void)
5486 int i, rc = -ENOMEM;
5488 BUG_ON(!dev_boot_phase);
5490 if (dev_proc_init())
5493 if (netdev_kobject_init())
5496 INIT_LIST_HEAD(&ptype_all);
5497 for (i = 0; i < PTYPE_HASH_SIZE; i++)
5498 INIT_LIST_HEAD(&ptype_base[i]);
5500 if (register_pernet_subsys(&netdev_net_ops))
5504 * Initialize the packet receive queues.
5507 for_each_possible_cpu(i) {
5508 struct softnet_data *queue;
5510 queue = &per_cpu(softnet_data, i);
5511 skb_queue_head_init(&queue->input_pkt_queue);
5512 queue->completion_queue = NULL;
5513 INIT_LIST_HEAD(&queue->poll_list);
5515 queue->backlog.poll = process_backlog;
5516 queue->backlog.weight = weight_p;
5517 queue->backlog.gro_list = NULL;
5518 queue->backlog.gro_count = 0;
5523 /* The loopback device is special: if any other network device
5524 * is present in a network namespace, the loopback device must
5525 * be present too. Since we now dynamically allocate and free the
5526 * loopback device, ensure this invariant is maintained by
5527 * keeping the loopback device the first device on the
5528 * list of network devices, so that it is the first device
5529 * that appears and the last network device that disappears. */
5532 if (register_pernet_device(&loopback_net_ops))
5535 if (register_pernet_device(&default_device_ops))
5538 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
5539 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
5541 hotcpu_notifier(dev_cpu_callback, 0);
5549 subsys_initcall(net_dev_init);
5551 static int __init initialize_hashrnd(void)
5553 get_random_bytes(&skb_tx_hashrnd, sizeof(skb_tx_hashrnd));
5557 late_initcall_sync(initialize_hashrnd);
5559 EXPORT_SYMBOL(__dev_get_by_index);
5560 EXPORT_SYMBOL(__dev_get_by_name);
5561 EXPORT_SYMBOL(__dev_remove_pack);
5562 EXPORT_SYMBOL(dev_valid_name);
5563 EXPORT_SYMBOL(dev_add_pack);
5564 EXPORT_SYMBOL(dev_alloc_name);
5565 EXPORT_SYMBOL(dev_close);
5566 EXPORT_SYMBOL(dev_get_by_flags);
5567 EXPORT_SYMBOL(dev_get_by_index);
5568 EXPORT_SYMBOL(dev_get_by_name);
5569 EXPORT_SYMBOL(dev_open);
5570 EXPORT_SYMBOL(dev_queue_xmit);
5571 EXPORT_SYMBOL(dev_remove_pack);
5572 EXPORT_SYMBOL(dev_set_allmulti);
5573 EXPORT_SYMBOL(dev_set_promiscuity);
5574 EXPORT_SYMBOL(dev_change_flags);
5575 EXPORT_SYMBOL(dev_set_mtu);
5576 EXPORT_SYMBOL(dev_set_mac_address);
5577 EXPORT_SYMBOL(free_netdev);
5578 EXPORT_SYMBOL(netdev_boot_setup_check);
5579 EXPORT_SYMBOL(netdev_set_master);
5580 EXPORT_SYMBOL(netdev_state_change);
5581 EXPORT_SYMBOL(netif_receive_skb);
5582 EXPORT_SYMBOL(netif_rx);
5583 EXPORT_SYMBOL(register_gifconf);
5584 EXPORT_SYMBOL(register_netdevice);
5585 EXPORT_SYMBOL(register_netdevice_notifier);
5586 EXPORT_SYMBOL(skb_checksum_help);
5587 EXPORT_SYMBOL(synchronize_net);
5588 EXPORT_SYMBOL(unregister_netdevice);
5589 EXPORT_SYMBOL(unregister_netdevice_notifier);
5590 EXPORT_SYMBOL(net_enable_timestamp);
5591 EXPORT_SYMBOL(net_disable_timestamp);
5592 EXPORT_SYMBOL(dev_get_flags);
5594 #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
5595 EXPORT_SYMBOL(br_handle_frame_hook);
5596 EXPORT_SYMBOL(br_fdb_get_hook);
5597 EXPORT_SYMBOL(br_fdb_put_hook);
5600 EXPORT_SYMBOL(dev_load);
5602 EXPORT_PER_CPU_SYMBOL(softnet_data);