 *	NET3	Protocol independent device support routines.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Derived from the non-IP parts of dev.c 1.0.19
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>

#include "net-sysfs.h"
/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16? Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *		sure which should go first, but I bet it won't make much
 *		difference if we are running VLANs.  The good news is that
 *		this protocol won't be in the list unless compiled in, so
 *		the average user (w/out VLANs) will not be adversely affected.
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */
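
/*
 * Illustrative sketch (not part of the original file): how a protocol
 * value maps to a ptype_base bucket.  ETH_P_IP is 0x0800, so its low
 * nibble after ntohs() is 0x0 and it lands in bucket 0.
 */
static inline struct list_head *ptype_head_example(__be16 type)
{
	/* e.g. ntohs(htons(ETH_P_IP)) & PTYPE_HASH_MASK == 0x0800 & 0xf == 0 */
	return &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
}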
/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);
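
/*
 * Illustrative sketch (not in the original source): the pure-reader
 * pattern the comment above describes.  Writers additionally take the
 * rtnl semaphore via rtnl_lock()/rtnl_unlock().
 */
static void example_walk_devices(struct net *net)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	for_each_netdev(net, dev)
		printk(KERN_DEBUG "%s\n", dev->name);
	read_unlock(&dev_base_lock);
}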
static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash & (NETDEV_HASHENTRIES - 1)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}
/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail(&dev->dev_list, &net->dev_base_head);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del(&dev->dev_list);
	hlist_del(&dev->name_hlist);
	hlist_del(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}
static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);
#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
	 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
	 ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
	 "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
	 "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/
/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if a protocol handler that mangles packets is
 *	first on the list, it cannot sense that the packet is cloned and
 *	should be copied-on-write; it will change it, and subsequent
 *	readers will get a broken packet.
 */

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all CPUs
 *	that are in the middle of receiving packets will see the new packet
 *	type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
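
/*
 * Illustrative usage sketch (not part of the original file): a module
 * registering a tap for all protocols.  The handler and variable names
 * are hypothetical; dev_add_pack()/dev_remove_pack() are the real APIs
 * defined above.
 */
static int example_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	/* inspect the frame here, then drop our clone */
	kfree_skb(skb);
	return 0;
}

static struct packet_type example_pt __read_mostly = {
	.type = __constant_htons(ETH_P_ALL),	/* see every protocol */
	.func = example_rcv,
};

/* dev_add_pack(&example_pt) at module init,
 * dev_remove_pack(&example_pt) at module exit. */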
/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine to
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}
/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);
/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_name_hash(net, name)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;
	}
	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);
/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
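
/*
 * Illustrative usage sketch (not in the original source): the hold/put
 * pairing the comment above requires.  The function name is
 * hypothetical; dev_get_by_name() and dev_put() are the real APIs.
 */
static void example_lookup(struct net *net)
{
	struct net_device *dev = dev_get_by_name(net, "eth0");

	if (dev) {
		printk(KERN_DEBUG "%s has ifindex %d\n",
		       dev->name, dev->ifindex);
		dev_put(dev);	/* release the reference taken for us */
	}
}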
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_index_hash(net, ifindex)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, index_hlist);
		if (dev->ifindex == ifindex)
			return dev;
	}
	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(net, ifindex);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);
/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(net, type);
	if (dev)
		dev_hold(dev);
	rtnl_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);
/**
 *	dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags,
				    unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	read_lock(&dev_base_lock);
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	read_unlock(&dev_base_lock);
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags);
/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}
EXPORT_SYMBOL(dev_valid_name);
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}
/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
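
/*
 * Illustrative usage sketch (not in the original source): drivers
 * typically pass a format such as "eth%d" and let the core pick the
 * first free unit number.  The function name is hypothetical.
 */
static int example_name_device(struct net_device *dev)
{
	int unit = dev_alloc_name(dev, "eth%d");

	if (unit < 0)
		return unit;	/* negative errno, e.g. -ENFILE */
	/* dev->name now holds e.g. "eth0" */
	return 0;
}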
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d".
 *	Returns 0 or error code.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!dev_valid_name(newname))
		return -EINVAL;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	if (strchr(newname, '%')) {
		err = dev_alloc_name(dev, newname);
		if (err < 0)
			return err;
	} else if (__dev_get_by_name(net, newname))
		return -EEXIST;
	else
		strlcpy(dev->name, newname, IFNAMSIZ);

rollback:
	/* For now only devices in the initial network namespace
	 * are in sysfs.
	 */
	if (net == &init_net) {
		ret = device_rename(&dev->dev, dev->name);
		if (ret) {
			memcpy(dev->name, oldname, IFNAMSIZ);
			return ret;
		}
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		if (err) {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		} else {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		}
	}

	return err;
}
/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set the ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!dev->ifalias)
		return -ENOMEM;

	strlcpy(dev->ifalias, alias, len+1);
	return 0;
}

/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);
/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}
EXPORT_SYMBOL(netdev_state_change);

void netdev_bonding_change(struct net_device *dev, unsigned long event)
{
	call_netdevice_notifiers(event, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);
/**
 *	dev_load 	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	read_unlock(&dev_base_lock);

	if (!dev && capable(CAP_NET_ADMIN))
		request_module("%s", name);
}
EXPORT_SYMBOL(dev_load);
/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	/*
	 *	Is it already up?
	 */
	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	/*
	 *	If it went open OK then:
	 */
	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Enable NET_DMA
		 */
		net_dmaengine_get();

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 *	... and announce new interface.
		 */
		call_netdevice_notifiers(NETDEV_UP, dev);
	}

	return ret;
}
EXPORT_SYMBOL(dev_open);
/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	ASSERT_RTNL();

	might_sleep();

	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for death while the device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (ops->ndo_stop)
		ops->ndo_stop(dev);

	/*
	 *	Device is now down.
	 */
	dev->flags &= ~IFF_UP;

	/*
	 *	Tell people we are down
	 */
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	/*
	 *	Shutdown NET_DMA
	 */
	net_dmaengine_put();

	return 0;
}
EXPORT_SYMBOL(dev_close);
/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);
static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered all registration and up events are replayed
 *	to the new notifier to allow device to have a race-free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
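
/*
 * Illustrative usage sketch (not in the original source): a minimal
 * notifier block.  The callback and variable names are hypothetical;
 * register_netdevice_notifier() above is the real API.
 */
static int example_netdev_event(struct notifier_block *this,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (event == NETDEV_UP)
		printk(KERN_DEBUG "%s is up\n", dev->name);
	return NOTIFY_DONE;
}

static struct notifier_block example_notifier = {
	.notifier_call = example_netdev_event,
};

/* register_netdevice_notifier(&example_notifier) at module init;
 * existing REGISTER/UP events are replayed to it, as described above. */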
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}
/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);

static inline void net_timestamp(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}
/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;

#ifdef CONFIG_NET_CLS_ACT
	if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
		net_timestamp(skb);
#else
	net_timestamp(skb);
#endif

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			 * set by sender, so that the second statement is
			 * just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       skb2->protocol, dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype, skb->dev);
		}
	}
	rcu_read_unlock();
}
static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = sd->output_queue;
	sd->output_queue = q;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);
/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached from system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
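
/*
 * Illustrative usage sketch (not in the original source): the classic
 * pairing in a driver's power-management hooks.  The function names
 * are hypothetical; detach/attach above are the real APIs.
 */
static int example_suspend(struct net_device *dev)
{
	netif_device_detach(dev);	/* stop queues, mark absent */
	return 0;
}

static int example_resume(struct net_device *dev)
{
	netif_device_attach(dev);	/* mark present, wake queues */
	return 0;
}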
static bool can_checksum_protocol(unsigned long features, __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_IP_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_IPV6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)) ||
		((features & NETIF_F_FCOE_CRC) &&
		 protocol == htons(ETH_P_FCOE)));
}

static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
{
	if (can_checksum_protocol(dev->features, skb->protocol))
		return true;

	if (skb->protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		if (can_checksum_protocol(dev->features & dev->vlan_features,
					  veh->h_vlan_encapsulated_proto))
			return true;
	}

	return false;
}
/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		/* Let GSO fix up the checksum. */
		goto out_set_summed;
	}

	offset = skb->csum_start - skb_headroom(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);
/**
 *	skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	int err;

	skb_reset_mac_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;
	__skb_pull(skb, skb->mac_len);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		struct net_device *dev = skb->dev;
		struct ethtool_drvinfo info = {};

		if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
			dev->ethtool_ops->get_drvinfo(dev, &info);

		WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
			"ip_summed=%d\n",
		     info.driver, dev ? dev->features : 0L,
		     skb->sk ? skb->sk->sk_route_caps : 0L,
		     skb->len, skb->data_len, skb->ip_summed);

		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				err = ptype->gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
			}
			segs = ptype->gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_gso_segment);
/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		printk(KERN_ERR "%s: hw csum failure.\n",
			dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif

/* Actually, we should eliminate this check as soon as we know that:
 * 1. IOMMU is present and allows mapping all the memory.
 * 2. No high memory really exists on this machine.
 */

static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;

	if (dev->features & NETIF_F_HIGHDMA)
		return 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		if (PageHighMem(skb_shinfo(skb)->frags[i].page))
			return 1;

#endif
	return 0;
}
struct dev_gso_cb {
	void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
	struct dev_gso_cb *cb;

	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		kfree_skb(nskb);
	} while (skb->next);

	cb = DEV_GSO_CB(skb);
	if (cb->destructor)
		cb->destructor(skb);
}

/**
 *	dev_gso_segment - Perform emulated hardware segmentation on skb.
 *	@skb: buffer to segment
 *
 *	This function segments the given skb and stores the list of segments
 *	in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct sk_buff *segs;
	int features = dev->features & ~(illegal_highdma(dev, skb) ?
					 NETIF_F_SG : 0);

	segs = skb_gso_segment(skb, features);

	/* Verifying header integrity only. */
	if (!segs)
		return 0;

	if (IS_ERR(segs))
		return PTR_ERR(segs);

	skb->next = segs;
	DEV_GSO_CB(skb)->destructor = skb->destructor;
	skb->destructor = dev_gso_skb_destructor;

	return 0;
}
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
			struct netdev_queue *txq)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int rc;

	if (likely(!skb->next)) {
		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(skb, dev);

		if (netif_needs_gso(dev, skb)) {
			if (unlikely(dev_gso_segment(skb)))
				goto out_kfree_skb;
			if (skb->next)
				goto gso;
		}

		/*
		 * If device doesn't need skb->dst, release it right now while
		 * it's hot in this cpu cache
		 */
		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
			skb_dst_drop(skb);

		rc = ops->ndo_start_xmit(skb, dev);
		if (rc == NETDEV_TX_OK)
			txq_trans_update(txq);
		/*
		 * TODO: if skb_orphan() was called by
		 * dev->hard_start_xmit() (for example, the unmodified
		 * igb driver does that; bnx2 doesn't), then
		 * skb_tx_software_timestamp() will be unable to send
		 * back the time stamp.
		 *
		 * How can this be prevented? Always create another
		 * reference to the socket before calling
		 * dev->hard_start_xmit()? Prevent that skb_orphan()
		 * does anything in dev->hard_start_xmit() by clearing
		 * the skb destructor before the call and restoring it
		 * afterwards, then doing the skb_orphan() ourselves?
		 */
		return rc;
	}

gso:
	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		rc = ops->ndo_start_xmit(nskb, dev);
		if (unlikely(rc != NETDEV_TX_OK)) {
			nskb->next = skb->next;
			skb->next = nskb;
			return rc;
		}
		txq_trans_update(txq);
		if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
			return NETDEV_TX_BUSY;
	} while (skb->next);

	skb->destructor = DEV_GSO_CB(skb)->destructor;

out_kfree_skb:
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
static u32 skb_tx_hashrnd;

u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
{
	u32 hash;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= dev->real_num_tx_queues))
			hash -= dev->real_num_tx_queues;
		return hash;
	}

	if (skb->sk && skb->sk->sk_hash)
		hash = skb->sk->sk_hash;
	else
		hash = skb->protocol;

	hash = jhash_1word(hash, skb_tx_hashrnd);

	return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
}
EXPORT_SYMBOL(skb_tx_hash);
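
/*
 * Illustrative sketch (not in the original source): the final line of
 * skb_tx_hash() maps a 32-bit hash uniformly onto [0, n) without a
 * modulo, since ((u64)hash * n) >> 32 == floor(hash * n / 2^32).
 * For example, with n = 4 and hash = 0xc0000000 it selects queue 3.
 */
static inline u16 example_scale_hash(u32 hash, u16 n)
{
	/* same distribution as hash % n, but a multiply instead of a divide */
	return (u16)(((u64)hash * n) >> 32);
}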
static struct netdev_queue *dev_pick_tx(struct net_device *dev,
					struct sk_buff *skb)
{
	u16 queue_index;
	struct sock *sk = skb->sk;

	if (sk_tx_queue_recorded(sk)) {
		queue_index = sk_tx_queue_get(sk);
	} else {
		const struct net_device_ops *ops = dev->netdev_ops;

		if (ops->ndo_select_queue) {
			queue_index = ops->ndo_select_queue(dev, skb);
		} else {
			queue_index = 0;
			if (dev->real_num_tx_queues > 1)
				queue_index = skb_tx_hash(dev, skb);

			if (sk && sk->sk_dst_cache)
				sk_tx_queue_set(sk, queue_index);
		}
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}
static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
				 struct net_device *dev,
				 struct netdev_queue *txq)
{
	spinlock_t *root_lock = qdisc_lock(q);
	int rc;

	spin_lock(root_lock);
	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
		kfree_skb(skb);
		rc = NET_XMIT_DROP;
	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
		   !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) {
		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out; and the qdisc is not running -
		 * xmit the skb directly.
		 */
		__qdisc_update_bstats(q, skb->len);
		if (sch_direct_xmit(skb, q, dev, txq, root_lock))
			__qdisc_run(q);
		else
			clear_bit(__QDISC_STATE_RUNNING, &q->state);

		rc = NET_XMIT_SUCCESS;
	} else {
		rc = qdisc_enqueue_root(skb, q);
		qdisc_run(q);
	}
	spin_unlock(root_lock);

	return rc;
}
/**
 *	dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *      I notice this method can also return errors from the queue disciplines,
 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
 *      be positive.
 *
 *      Regardless of the return value, the skb is consumed, so it is currently
 *      difficult to retry a send to this method.  (You can bump the ref count
 *      before sending to hold a reference for retry if you are careful.)
 *
 *      When calling this method, interrupts MUST be enabled.  This is because
 *      the BH enable code must have IRQs enabled so that it will not deadlock.
 */
int dev_queue_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;

	/* GSO will handle the following emulations directly. */
	if (netif_needs_gso(dev, skb))
		goto gso;

	if (skb_has_frags(skb) &&
	    !(dev->features & NETIF_F_FRAGLIST) &&
	    __skb_linearize(skb))
		goto out_kfree_skb;

	/* Fragmented skb is linearized if device does not support SG,
	 * or if at least one of fragments is in highmem and device
	 * does not support DMA from it.
	 */
	if (skb_shinfo(skb)->nr_frags &&
	    (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
	    __skb_linearize(skb))
		goto out_kfree_skb;

	/* If packet is not checksummed and device does not support
	 * checksumming for this protocol, complete checksumming here.
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		skb_set_transport_header(skb, skb->csum_start -
					      skb_headroom(skb));
		if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
			goto out_kfree_skb;
	}

gso:
	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	txq = dev_pick_tx(dev, skb);
	q = rcu_dereference(txq->qdisc);

#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
	if (q->enqueue) {
		rc = __dev_xmit_skb(skb, q, dev, txq);
		goto out;
	}

	/* The device has no queue. Common case for software devices:
	   loopback, all the sorts of tunnels...

	   Really, it is unlikely that netif_tx_lock protection is necessary
	   here.  (f.e. loopback and IP tunnels are clean ignoring statistics
	   counters.)
	   However, it is possible, that they rely on protection
	   made by us here.

	   Check this and shoot the lock. It is not prone to deadlocks.
	   Either shoot the noqueue qdisc, it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (txq->xmit_lock_owner != cpu) {

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_tx_queue_stopped(txq)) {
				rc = NET_XMIT_SUCCESS;
				if (!dev_hard_start_xmit(skb, dev, txq)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			if (net_ratelimit())
				printk(KERN_CRIT "Virtual device %s asks to "
				       "queue packet!\n", dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately
			 */
			if (net_ratelimit())
				printk(KERN_CRIT "Dead loop on virtual device "
				       "%s, fix it urgently!\n", dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

out_kfree_skb:
	kfree_skb(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}
EXPORT_SYMBOL(dev_queue_xmit);
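
/*
 * Illustrative usage sketch (not in the original source): minimal
 * transmit of a frame that has already been built.  Constructing a
 * complete, valid Ethernet skb is elided; the helper name is
 * hypothetical.
 */
static int example_xmit(struct net_device *dev, struct sk_buff *skb)
{
	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);
	/* On return the skb is consumed regardless of the outcome. */
	return dev_queue_xmit(skb);
}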
/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog __read_mostly = 1000;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;            /* old backlog weight */

DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped)
 *
 */

int netif_rx(struct sk_buff *skb)
{
	struct softnet_data *queue;
	unsigned long flags;

	/* if netpoll wants it, pretend we never saw it */
	if (netpoll_rx(skb))
		return NET_RX_DROP;

	if (!skb->tstamp.tv64)
		net_timestamp(skb);

	/*
	 * The code is rearranged so that the path is shortest
	 * when the CPU is congested, but it still operates.
	 */
	local_irq_save(flags);
	queue = &__get_cpu_var(softnet_data);

	__get_cpu_var(netdev_rx_stat).total++;
	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
		if (queue->input_pkt_queue.qlen) {
enqueue:
			__skb_queue_tail(&queue->input_pkt_queue, skb);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		napi_schedule(&queue->backlog);
		goto enqueue;
	}

	__get_cpu_var(netdev_rx_stat).dropped++;
	local_irq_restore(flags);

	kfree_skb(skb);
	return NET_RX_DROP;
}
EXPORT_SYMBOL(netif_rx);
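
/*
 * Illustrative usage sketch (not in the original source): the classic
 * interrupt-time receive path in a non-NAPI driver.  The function name
 * is hypothetical; eth_type_trans() and netif_rx() are real APIs.
 */
static void example_rx(struct net_device *dev, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);		/* queue for the softirq; always succeeds */
}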
int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	preempt_disable();
	err = netif_rx(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}
EXPORT_SYMBOL(netif_rx_ni);
static void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;
			clist = clist->next;

			WARN_ON(atomic_read(&skb->users));
			__kfree_skb(skb);
		}
	}

	if (sd->output_queue) {
		struct Qdisc *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		local_irq_enable();

		while (head) {
			struct Qdisc *q = head;
			spinlock_t *root_lock;

			head = head->next_sched;

			root_lock = qdisc_lock(q);
			if (spin_trylock(root_lock)) {
				smp_mb__before_clear_bit();
				clear_bit(__QDISC_STATE_SCHED,
					  &q->state);
				qdisc_run(q);
				spin_unlock(root_lock);
			} else {
				if (!test_bit(__QDISC_STATE_DEACTIVATED,
					      &q->state)) {
					__netif_reschedule(q);
				} else {
					smp_mb__before_clear_bit();
					clear_bit(__QDISC_STATE_SCHED,
						  &q->state);
				}
			}
		}
	}
}
static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}
#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)

#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
/* This hook is defined here for ATM LANE */
int (*br_fdb_test_addr_hook)(struct net_device *dev,
			     unsigned char *addr) __read_mostly;
EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
#endif

/*
 * If bridge module is loaded call bridging hook.
 *  returns NULL if packet was consumed.
 */
struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
					struct sk_buff *skb) __read_mostly;
EXPORT_SYMBOL_GPL(br_handle_frame_hook);

static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
					    struct packet_type **pt_prev, int *ret,
					    struct net_device *orig_dev)
{
	struct net_bridge_port *port;

	if (skb->pkt_type == PACKET_LOOPBACK ||
	    (port = rcu_dereference(skb->dev->br_port)) == NULL)
		return skb;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	return br_handle_frame_hook(port, skb);
}
#else
#define handle_bridge(skb, pt_prev, ret, orig_dev)	(skb)
#endif
#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);

static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
					     struct packet_type **pt_prev,
					     int *ret,
					     struct net_device *orig_dev)
{
	if (skb->dev->macvlan_port == NULL)
		return skb;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}
	return macvlan_handle_frame_hook(skb);
}
#else
#define handle_macvlan(skb, pt_prev, ret, orig_dev)	(skb)
#endif
#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * when CONFIG_NET_CLS_ACT is?  Otherwise we pay the cost of some
 * useless instructions (a compare and two extra stores) when ingress
 * is not compiled in but CONFIG_NET_CLS_ACT is.
 * NOTE: This doesn't stop any functionality; if you don't have
 * the ingress scheduler, you just can't add policies on ingress.
 */
static int ing_filter(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	u32 ttl = G_TC_RTTL(skb->tc_verd);
	struct netdev_queue *rxq;
	int result = TC_ACT_OK;
	struct Qdisc *q;

	if (MAX_RED_LOOP < ttl++) {
		printk(KERN_WARNING
		       "Redir loop detected Dropping packet (%d->%d)\n",
		       skb->iif, dev->ifindex);
		return TC_ACT_SHOT;
	}

	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);

	rxq = &dev->rx_queue;

	q = rxq->qdisc;
	if (q != &noop_qdisc) {
		spin_lock(qdisc_lock(q));
		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
			result = qdisc_enqueue_root(skb, q);
		spin_unlock(qdisc_lock(q));
	}

	return result;
}
static inline struct sk_buff *handle_ing(struct sk_buff *skb,
					 struct packet_type **pt_prev,
					 int *ret, struct net_device *orig_dev)
{
	if (skb->dev->rx_queue.qdisc == &noop_qdisc)
		goto out;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	} else {
		/* Huh? Why does turning on AF_PACKET affect this? */
		skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
	}

	switch (ing_filter(skb)) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
		kfree_skb(skb);
		return NULL;
	}

out:
	skb->tc_verd = 0;
	return skb;
}
#endif
/**
 *	netif_nit_deliver - deliver received packets to network taps
 *	@skb: buffer
 *
 *	This function is used to deliver incoming packets to network
 *	taps. It should be used when the normal netif_receive_skb path
 *	is bypassed, for example because of VLAN acceleration.
 */
void netif_nit_deliver(struct sk_buff *skb)
{
	struct packet_type *ptype;

	if (list_empty(&ptype_all))
		return;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (!ptype->dev || ptype->dev == skb->dev)
			deliver_skb(skb, ptype, skb->dev);
	}
	rcu_read_unlock();
}
2278 * netif_receive_skb - process receive buffer from network
2279 * @skb: buffer to process
2281 * netif_receive_skb() is the main receive data processing function.
2282 * It always succeeds. The buffer may be dropped during processing
2283 * for congestion control or by the protocol layers.
2285 * This function may only be called from softirq context and interrupts
2286 * should be enabled.
2288 * Return values (usually ignored):
2289 * NET_RX_SUCCESS: no congestion
2290 * NET_RX_DROP: packet was dropped
2292 int netif_receive_skb(struct sk_buff *skb)
2294 struct packet_type *ptype, *pt_prev;
2295 struct net_device *orig_dev;
2296 struct net_device *null_or_orig;
2297 int ret = NET_RX_DROP;
2300 if (!skb->tstamp.tv64)
2303 if (skb->vlan_tci && vlan_hwaccel_do_receive(skb))
2304 return NET_RX_SUCCESS;
2306 /* if we've gotten here through NAPI, check netpoll */
2307 if (netpoll_receive_skb(skb))
2311 skb->iif = skb->dev->ifindex;
2313 null_or_orig = NULL;
2314 orig_dev = skb->dev;
2315 if (orig_dev->master) {
2316 if (skb_bond_should_drop(skb))
2317 null_or_orig = orig_dev; /* deliver only exact match */
2319 skb->dev = orig_dev->master;
2322 __get_cpu_var(netdev_rx_stat).total++;
2324 skb_reset_network_header(skb);
2325 skb_reset_transport_header(skb);
2326 skb->mac_len = skb->network_header - skb->mac_header;
2332 #ifdef CONFIG_NET_CLS_ACT
2333 if (skb->tc_verd & TC_NCLS) {
2334 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2339 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2340 if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2341 ptype->dev == orig_dev) {
2343 ret = deliver_skb(skb, pt_prev, orig_dev);
2348 #ifdef CONFIG_NET_CLS_ACT
2349 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2355 skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
2358 skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
2362 type = skb->protocol;
2363 list_for_each_entry_rcu(ptype,
2364 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
2365 if (ptype->type == type &&
2366 (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2367 ptype->dev == orig_dev)) {
2369 ret = deliver_skb(skb, pt_prev, orig_dev);
2375 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2378 /* Jamal, now you will not be able to escape explaining
2379 * to me how you were going to use this. :-)
2388 EXPORT_SYMBOL(netif_receive_skb);
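/*
 * Editorial sketch (not part of the original file): a typical NAPI
 * driver hands completed receive buffers to netif_receive_skb() from
 * its ->poll() callback. All foo_* names below are hypothetical.
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_priv *fp = container_of(napi, struct foo_priv, napi);
 *		int work = 0;
 *
 *		while (work < budget) {
 *			struct sk_buff *skb = foo_next_rx_skb(fp);
 *
 *			if (!skb)
 *				break;
 *			skb->protocol = eth_type_trans(skb, fp->netdev);
 *			netif_receive_skb(skb);
 *			work++;
 *		}
 *		if (work < budget)
 *			napi_complete(napi);
 *		return work;
 *	}
 */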
2390 /* Network device is going away, flush any packets still pending */
2391 static void flush_backlog(void *arg)
2393 struct net_device *dev = arg;
2394 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2395 struct sk_buff *skb, *tmp;
2397 skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
2398 if (skb->dev == dev) {
2399 __skb_unlink(skb, &queue->input_pkt_queue);
2404 static int napi_gro_complete(struct sk_buff *skb)
2406 struct packet_type *ptype;
2407 __be16 type = skb->protocol;
2408 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
2411 if (NAPI_GRO_CB(skb)->count == 1) {
2412 skb_shinfo(skb)->gso_size = 0;
2417 list_for_each_entry_rcu(ptype, head, list) {
2418 if (ptype->type != type || ptype->dev || !ptype->gro_complete)
2421 err = ptype->gro_complete(skb);
2427 WARN_ON(&ptype->list == head);
2429 return NET_RX_SUCCESS;
2433 return netif_receive_skb(skb);
2436 void napi_gro_flush(struct napi_struct *napi)
2438 struct sk_buff *skb, *next;
2440 for (skb = napi->gro_list; skb; skb = next) {
2443 napi_gro_complete(skb);
2446 napi->gro_count = 0;
2447 napi->gro_list = NULL;
2449 EXPORT_SYMBOL(napi_gro_flush);
2451 int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2453 struct sk_buff **pp = NULL;
2454 struct packet_type *ptype;
2455 __be16 type = skb->protocol;
2456 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
2461 if (!(skb->dev->features & NETIF_F_GRO))
2464 if (skb_is_gso(skb) || skb_has_frags(skb))
2468 list_for_each_entry_rcu(ptype, head, list) {
2469 if (ptype->type != type || ptype->dev || !ptype->gro_receive)
2472 skb_set_network_header(skb, skb_gro_offset(skb));
2473 mac_len = skb->network_header - skb->mac_header;
2474 skb->mac_len = mac_len;
2475 NAPI_GRO_CB(skb)->same_flow = 0;
2476 NAPI_GRO_CB(skb)->flush = 0;
2477 NAPI_GRO_CB(skb)->free = 0;
2479 pp = ptype->gro_receive(&napi->gro_list, skb);
2484 if (&ptype->list == head)
2487 same_flow = NAPI_GRO_CB(skb)->same_flow;
2488 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
2491 struct sk_buff *nskb = *pp;
2495 napi_gro_complete(nskb);
2502 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
2506 NAPI_GRO_CB(skb)->count = 1;
2507 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
2508 skb->next = napi->gro_list;
2509 napi->gro_list = skb;
2513 if (skb_headlen(skb) < skb_gro_offset(skb)) {
2514 int grow = skb_gro_offset(skb) - skb_headlen(skb);
2516 BUG_ON(skb->end - skb->tail < grow);
2518 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
2521 skb->data_len -= grow;
2523 skb_shinfo(skb)->frags[0].page_offset += grow;
2524 skb_shinfo(skb)->frags[0].size -= grow;
2526 if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
2527 put_page(skb_shinfo(skb)->frags[0].page);
2528 memmove(skb_shinfo(skb)->frags,
2529 skb_shinfo(skb)->frags + 1,
2530 --skb_shinfo(skb)->nr_frags);
2541 EXPORT_SYMBOL(dev_gro_receive);
2543 static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2547 if (netpoll_rx_on(skb))
2550 for (p = napi->gro_list; p; p = p->next) {
2551 NAPI_GRO_CB(p)->same_flow = (p->dev == skb->dev)
2552 && !compare_ether_header(skb_mac_header(p),
2553 skb_gro_mac_header(skb));
2554 NAPI_GRO_CB(p)->flush = 0;
2557 return dev_gro_receive(napi, skb);
2560 int napi_skb_finish(int ret, struct sk_buff *skb)
2562 int err = NET_RX_SUCCESS;
2566 return netif_receive_skb(skb);
2572 case GRO_MERGED_FREE:
2579 EXPORT_SYMBOL(napi_skb_finish);
2581 void skb_gro_reset_offset(struct sk_buff *skb)
2583 NAPI_GRO_CB(skb)->data_offset = 0;
2584 NAPI_GRO_CB(skb)->frag0 = NULL;
2585 NAPI_GRO_CB(skb)->frag0_len = 0;
2587 if (skb->mac_header == skb->tail &&
2588 !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
2589 NAPI_GRO_CB(skb)->frag0 =
2590 page_address(skb_shinfo(skb)->frags[0].page) +
2591 skb_shinfo(skb)->frags[0].page_offset;
2592 NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
2595 EXPORT_SYMBOL(skb_gro_reset_offset);
2597 int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2599 skb_gro_reset_offset(skb);
2601 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
2603 EXPORT_SYMBOL(napi_gro_receive);
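/*
 * Editorial sketch: a GRO-capable driver simply substitutes
 * napi_gro_receive() for netif_receive_skb() in the poll loop shown
 * earlier:
 *
 *	skb->protocol = eth_type_trans(skb, netdev);
 *	napi_gro_receive(napi, skb);
 *
 * Packets held back on napi->gro_list are pushed up when the driver
 * calls napi_complete(), which in this kernel flushes the GRO list
 * via napi_gro_flush().
 */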
2605 void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
2607 __skb_pull(skb, skb_headlen(skb));
2608 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
2612 EXPORT_SYMBOL(napi_reuse_skb);
2614 struct sk_buff *napi_get_frags(struct napi_struct *napi)
2616 struct sk_buff *skb = napi->skb;
2619 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
2625 EXPORT_SYMBOL(napi_get_frags);
2627 int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret)
2629 int err = NET_RX_SUCCESS;
2634 skb->protocol = eth_type_trans(skb, napi->dev);
2636 if (ret == GRO_NORMAL)
2637 return netif_receive_skb(skb);
2639 skb_gro_pull(skb, -ETH_HLEN);
2646 case GRO_MERGED_FREE:
2647 napi_reuse_skb(napi, skb);
2653 EXPORT_SYMBOL(napi_frags_finish);
2655 struct sk_buff *napi_frags_skb(struct napi_struct *napi)
2657 struct sk_buff *skb = napi->skb;
2664 skb_reset_mac_header(skb);
2665 skb_gro_reset_offset(skb);
2667 off = skb_gro_offset(skb);
2668 hlen = off + sizeof(*eth);
2669 eth = skb_gro_header_fast(skb, off);
2670 if (skb_gro_header_hard(skb, hlen)) {
2671 eth = skb_gro_header_slow(skb, hlen, off);
2672 if (unlikely(!eth)) {
2673 napi_reuse_skb(napi, skb);
2679 skb_gro_pull(skb, sizeof(*eth));
2682 * This works because the only protocols we care about don't require
2683 * special handling. We'll fix it up properly at the end.
2685 skb->protocol = eth->h_proto;
2690 EXPORT_SYMBOL(napi_frags_skb);
2692 int napi_gro_frags(struct napi_struct *napi)
2694 struct sk_buff *skb = napi_frags_skb(napi);
2699 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
2701 EXPORT_SYMBOL(napi_gro_frags);
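/*
 * Editorial sketch: drivers that receive directly into pages use the
 * napi_get_frags()/napi_gro_frags() pair instead of building a linear
 * skb themselves. The page/offset/len values are assumed to come from
 * the (hypothetical) hardware descriptor.
 *
 *	struct sk_buff *skb = napi_get_frags(napi);
 *
 *	if (!skb)
 *		return;
 *	skb_fill_page_desc(skb, 0, page, offset, len);
 *	skb->len += len;
 *	skb->data_len += len;
 *	skb->truesize += len;
 *	napi_gro_frags(napi);
 */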
2703 static int process_backlog(struct napi_struct *napi, int quota)
2706 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2707 unsigned long start_time = jiffies;
2709 napi->weight = weight_p;
2711 struct sk_buff *skb;
2713 local_irq_disable();
2714 skb = __skb_dequeue(&queue->input_pkt_queue);
2716 __napi_complete(napi);
2722 netif_receive_skb(skb);
2723 } while (++work < quota && jiffies == start_time);
2729 * __napi_schedule - schedule for receive
2730 * @n: entry to schedule
2732 * The entry's receive function will be scheduled to run
2734 void __napi_schedule(struct napi_struct *n)
2736 unsigned long flags;
2738 local_irq_save(flags);
2739 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
2740 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2741 local_irq_restore(flags);
2743 EXPORT_SYMBOL(__napi_schedule);
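/*
 * Editorial sketch: interrupt handlers normally reach __napi_schedule()
 * through napi_schedule_prep(), which atomically tests and sets
 * NAPI_STATE_SCHED so the poll is scheduled exactly once. foo_* names
 * are hypothetical.
 *
 *	static irqreturn_t foo_intr(int irq, void *dev_id)
 *	{
 *		struct foo_priv *fp = dev_id;
 *
 *		if (napi_schedule_prep(&fp->napi)) {
 *			foo_disable_rx_irq(fp);
 *			__napi_schedule(&fp->napi);
 *		}
 *		return IRQ_HANDLED;
 *	}
 */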
2745 void __napi_complete(struct napi_struct *n)
2747 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
2748 BUG_ON(n->gro_list);
2750 list_del(&n->poll_list);
2751 smp_mb__before_clear_bit();
2752 clear_bit(NAPI_STATE_SCHED, &n->state);
2754 EXPORT_SYMBOL(__napi_complete);
2756 void napi_complete(struct napi_struct *n)
2758 unsigned long flags;
2761 * don't let napi dequeue from the cpu poll list
2762 * just in case it's running on a different cpu
2764 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
2768 local_irq_save(flags);
2770 local_irq_restore(flags);
2772 EXPORT_SYMBOL(napi_complete);
2774 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2775 int (*poll)(struct napi_struct *, int), int weight)
2777 INIT_LIST_HEAD(&napi->poll_list);
2778 napi->gro_count = 0;
2779 napi->gro_list = NULL;
2782 napi->weight = weight;
2783 list_add(&napi->dev_list, &dev->napi_list);
2785 #ifdef CONFIG_NETPOLL
2786 spin_lock_init(&napi->poll_lock);
2787 napi->poll_owner = -1;
2789 set_bit(NAPI_STATE_SCHED, &napi->state);
2791 EXPORT_SYMBOL(netif_napi_add);
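/*
 * Editorial sketch: netif_napi_add() is typically called once at probe
 * time, before register_netdev(); a weight of 64 is the conventional
 * choice for Ethernet drivers.
 *
 *	netif_napi_add(netdev, &fp->napi, foo_poll, 64);
 */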
2793 void netif_napi_del(struct napi_struct *napi)
2795 struct sk_buff *skb, *next;
2797 list_del_init(&napi->dev_list);
2798 napi_free_frags(napi);
2800 for (skb = napi->gro_list; skb; skb = next) {
2806 napi->gro_list = NULL;
2807 napi->gro_count = 0;
2809 EXPORT_SYMBOL(netif_napi_del);
2812 static void net_rx_action(struct softirq_action *h)
2814 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
2815 unsigned long time_limit = jiffies + 2;
2816 int budget = netdev_budget;
2819 local_irq_disable();
2821 while (!list_empty(list)) {
2822 struct napi_struct *n;
2825 /* If the softirq window is exhausted then punt.
2826 * Allow this to run for 2 jiffies, which allows
2827 * an average latency of 1.5/HZ.
2829 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
2834 /* Even though interrupts have been re-enabled, this
2835 * access is safe because interrupts can only add new
2836 * entries to the tail of this list, and only ->poll()
2837 * calls can remove this head entry from the list.
2839 n = list_entry(list->next, struct napi_struct, poll_list);
2841 have = netpoll_poll_lock(n);
2845 /* This NAPI_STATE_SCHED test is for avoiding a race
2846 * with netpoll's poll_napi(). Only the entity which
2847 * obtains the lock and sees NAPI_STATE_SCHED set will
2848 * actually make the ->poll() call. Therefore we avoid
2849 * accidentally calling ->poll() when NAPI is not scheduled.
2852 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
2853 work = n->poll(n, weight);
2857 WARN_ON_ONCE(work > weight);
2861 local_irq_disable();
2863 /* Drivers must not modify the NAPI state if they
2864 * consume the entire weight. In such cases this code
2865 * still "owns" the NAPI instance and therefore can
2866 * move the instance around on the list at-will.
2868 if (unlikely(work == weight)) {
2869 if (unlikely(napi_disable_pending(n))) {
2872 local_irq_disable();
2874 list_move_tail(&n->poll_list, list);
2877 netpoll_poll_unlock(have);
2882 #ifdef CONFIG_NET_DMA
2884 * There may not be any more sk_buffs coming right now, so push
2885 * any pending DMA copies to hardware
2887 dma_issue_pending_all();
2893 __get_cpu_var(netdev_rx_stat).time_squeeze++;
2894 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2898 static gifconf_func_t *gifconf_list[NPROTO];
2901 * register_gifconf - register a SIOCGIF handler
2902 * @family: Address family
2903 * @gifconf: Function handler
2905 * Register protocol dependent address dumping routines. The handler
2906 * that is passed must not be freed or reused until it has been replaced
2907 * by another handler.
2909 int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
2911 if (family >= NPROTO)
2913 gifconf_list[family] = gifconf;
2916 EXPORT_SYMBOL(register_gifconf);
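/*
 * Editorial note: IPv4, for example, registers its SIOCGIFCONF handler
 * from devinet.c in this era roughly as:
 *
 *	register_gifconf(PF_INET, inet_gifconf);
 */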
2920 * Map an interface index to its name (SIOCGIFNAME)
2924 * We need this ioctl for efficient implementation of the
2925 * if_indextoname() function required by the IPv6 API. Without
2926 * it, we would have to search all the interfaces to find a match.
2930 static int dev_ifname(struct net *net, struct ifreq __user *arg)
2932 struct net_device *dev;
2936 * Fetch the caller's info block.
2939 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2942 read_lock(&dev_base_lock);
2943 dev = __dev_get_by_index(net, ifr.ifr_ifindex);
2945 read_unlock(&dev_base_lock);
2949 strcpy(ifr.ifr_name, dev->name);
2950 read_unlock(&dev_base_lock);
2952 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
2958 * Perform a SIOCGIFCONF call. This structure will change
2959 * size eventually, and there is nothing I can do about it.
2960 * Thus we will need a 'compatibility mode'.
2963 static int dev_ifconf(struct net *net, char __user *arg)
2966 struct net_device *dev;
2973 * Fetch the caller's info block.
2976 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
2983 * Loop over the interfaces, and write an info block for each.
2987 for_each_netdev(net, dev) {
2988 for (i = 0; i < NPROTO; i++) {
2989 if (gifconf_list[i]) {
2992 done = gifconf_list[i](dev, NULL, 0);
2994 done = gifconf_list[i](dev, pos + total,
3004 * All done. Write the updated control block back to the caller.
3006 ifc.ifc_len = total;
3009 * Both BSD and Solaris return 0 here, so we do too.
3011 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
3014 #ifdef CONFIG_PROC_FS
3016 * This is invoked by the /proc filesystem handler to display a device in detail.
3019 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
3020 __acquires(dev_base_lock)
3022 struct net *net = seq_file_net(seq);
3024 struct net_device *dev;
3026 read_lock(&dev_base_lock);
3028 return SEQ_START_TOKEN;
3031 for_each_netdev(net, dev)
3038 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3040 struct net *net = seq_file_net(seq);
3042 return v == SEQ_START_TOKEN ?
3043 first_net_device(net) : next_net_device((struct net_device *)v);
3046 void dev_seq_stop(struct seq_file *seq, void *v)
3047 __releases(dev_base_lock)
3049 read_unlock(&dev_base_lock);
3052 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
3054 const struct net_device_stats *stats = dev_get_stats(dev);
3056 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
3057 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
3058 dev->name, stats->rx_bytes, stats->rx_packets,
3060 stats->rx_dropped + stats->rx_missed_errors,
3061 stats->rx_fifo_errors,
3062 stats->rx_length_errors + stats->rx_over_errors +
3063 stats->rx_crc_errors + stats->rx_frame_errors,
3064 stats->rx_compressed, stats->multicast,
3065 stats->tx_bytes, stats->tx_packets,
3066 stats->tx_errors, stats->tx_dropped,
3067 stats->tx_fifo_errors, stats->collisions,
3068 stats->tx_carrier_errors +
3069 stats->tx_aborted_errors +
3070 stats->tx_window_errors +
3071 stats->tx_heartbeat_errors,
3072 stats->tx_compressed);
3076 * Called from the PROCfs module. This now uses the new arbitrary sized
3077 * /proc/net interface to create /proc/net/dev
3079 static int dev_seq_show(struct seq_file *seq, void *v)
3081 if (v == SEQ_START_TOKEN)
3082 seq_puts(seq, "Inter-| Receive "
3084 " face |bytes packets errs drop fifo frame "
3085 "compressed multicast|bytes packets errs "
3086 "drop fifo colls carrier compressed\n");
3088 dev_seq_printf_stats(seq, v);
3092 static struct netif_rx_stats *softnet_get_online(loff_t *pos)
3094 struct netif_rx_stats *rc = NULL;
3096 while (*pos < nr_cpu_ids)
3097 if (cpu_online(*pos)) {
3098 rc = &per_cpu(netdev_rx_stat, *pos);
3105 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
3107 return softnet_get_online(pos);
3110 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3113 return softnet_get_online(pos);
3116 static void softnet_seq_stop(struct seq_file *seq, void *v)
3120 static int softnet_seq_show(struct seq_file *seq, void *v)
3122 struct netif_rx_stats *s = v;
3124 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
3125 s->total, s->dropped, s->time_squeeze, 0,
3126 0, 0, 0, 0, /* was fastroute */
3131 static const struct seq_operations dev_seq_ops = {
3132 .start = dev_seq_start,
3133 .next = dev_seq_next,
3134 .stop = dev_seq_stop,
3135 .show = dev_seq_show,
3138 static int dev_seq_open(struct inode *inode, struct file *file)
3140 return seq_open_net(inode, file, &dev_seq_ops,
3141 sizeof(struct seq_net_private));
3144 static const struct file_operations dev_seq_fops = {
3145 .owner = THIS_MODULE,
3146 .open = dev_seq_open,
3148 .llseek = seq_lseek,
3149 .release = seq_release_net,
3152 static const struct seq_operations softnet_seq_ops = {
3153 .start = softnet_seq_start,
3154 .next = softnet_seq_next,
3155 .stop = softnet_seq_stop,
3156 .show = softnet_seq_show,
3159 static int softnet_seq_open(struct inode *inode, struct file *file)
3161 return seq_open(file, &softnet_seq_ops);
3164 static const struct file_operations softnet_seq_fops = {
3165 .owner = THIS_MODULE,
3166 .open = softnet_seq_open,
3168 .llseek = seq_lseek,
3169 .release = seq_release,
3172 static void *ptype_get_idx(loff_t pos)
3174 struct packet_type *pt = NULL;
3178 list_for_each_entry_rcu(pt, &ptype_all, list) {
3184 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
3185 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
3194 static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
3198 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
3201 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3203 struct packet_type *pt;
3204 struct list_head *nxt;
3208 if (v == SEQ_START_TOKEN)
3209 return ptype_get_idx(0);
3212 nxt = pt->list.next;
3213 if (pt->type == htons(ETH_P_ALL)) {
3214 if (nxt != &ptype_all)
3217 nxt = ptype_base[0].next;
3219 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
3221 while (nxt == &ptype_base[hash]) {
3222 if (++hash >= PTYPE_HASH_SIZE)
3224 nxt = ptype_base[hash].next;
3227 return list_entry(nxt, struct packet_type, list);
3230 static void ptype_seq_stop(struct seq_file *seq, void *v)
3236 static int ptype_seq_show(struct seq_file *seq, void *v)
3238 struct packet_type *pt = v;
3240 if (v == SEQ_START_TOKEN)
3241 seq_puts(seq, "Type Device Function\n");
3242 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
3243 if (pt->type == htons(ETH_P_ALL))
3244 seq_puts(seq, "ALL ");
3246 seq_printf(seq, "%04x", ntohs(pt->type));
3248 seq_printf(seq, " %-8s %pF\n",
3249 pt->dev ? pt->dev->name : "", pt->func);
3255 static const struct seq_operations ptype_seq_ops = {
3256 .start = ptype_seq_start,
3257 .next = ptype_seq_next,
3258 .stop = ptype_seq_stop,
3259 .show = ptype_seq_show,
3262 static int ptype_seq_open(struct inode *inode, struct file *file)
3264 return seq_open_net(inode, file, &ptype_seq_ops,
3265 sizeof(struct seq_net_private));
3268 static const struct file_operations ptype_seq_fops = {
3269 .owner = THIS_MODULE,
3270 .open = ptype_seq_open,
3272 .llseek = seq_lseek,
3273 .release = seq_release_net,
3277 static int __net_init dev_proc_net_init(struct net *net)
3281 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
3283 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
3285 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
3288 if (wext_proc_init(net))
3294 proc_net_remove(net, "ptype");
3296 proc_net_remove(net, "softnet_stat");
3298 proc_net_remove(net, "dev");
3302 static void __net_exit dev_proc_net_exit(struct net *net)
3304 wext_proc_exit(net);
3306 proc_net_remove(net, "ptype");
3307 proc_net_remove(net, "softnet_stat");
3308 proc_net_remove(net, "dev");
3311 static struct pernet_operations __net_initdata dev_proc_ops = {
3312 .init = dev_proc_net_init,
3313 .exit = dev_proc_net_exit,
3316 static int __init dev_proc_init(void)
3318 return register_pernet_subsys(&dev_proc_ops);
3321 #define dev_proc_init() 0
3322 #endif /* CONFIG_PROC_FS */
3326 * netdev_set_master - set up master/slave pair
3327 * @slave: slave device
3328 * @master: new master device
3330 * Changes the master device of the slave. Pass %NULL to break the
3331 * bonding. The caller must hold the RTNL semaphore. On a failure
3332 * a negative errno code is returned. On success the reference counts
3333 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
3334 * function returns zero.
3336 int netdev_set_master(struct net_device *slave, struct net_device *master)
3338 struct net_device *old = slave->master;
3348 slave->master = master;
3356 slave->flags |= IFF_SLAVE;
3358 slave->flags &= ~IFF_SLAVE;
3360 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
3363 EXPORT_SYMBOL(netdev_set_master);
3365 static void dev_change_rx_flags(struct net_device *dev, int flags)
3367 const struct net_device_ops *ops = dev->netdev_ops;
3369 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
3370 ops->ndo_change_rx_flags(dev, flags);
3373 static int __dev_set_promiscuity(struct net_device *dev, int inc)
3375 unsigned short old_flags = dev->flags;
3381 dev->flags |= IFF_PROMISC;
3382 dev->promiscuity += inc;
3383 if (dev->promiscuity == 0) {
3386 * If inc causes an overflow, leave promisc untouched and return an error.
3389 dev->flags &= ~IFF_PROMISC;
3391 dev->promiscuity -= inc;
3392 printk(KERN_WARNING "%s: promiscuity counter overflowed, "
3393 "set promiscuity failed; promiscuity feature "
3394 "of device might be broken.\n", dev->name);
3398 if (dev->flags != old_flags) {
3399 printk(KERN_INFO "device %s %s promiscuous mode\n",
3400 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
3402 if (audit_enabled) {
3403 current_uid_gid(&uid, &gid);
3404 audit_log(current->audit_context, GFP_ATOMIC,
3405 AUDIT_ANOM_PROMISCUOUS,
3406 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
3407 dev->name, (dev->flags & IFF_PROMISC),
3408 (old_flags & IFF_PROMISC),
3409 audit_get_loginuid(current),
3411 audit_get_sessionid(current));
3414 dev_change_rx_flags(dev, IFF_PROMISC);
3420 * dev_set_promiscuity - update promiscuity count on a device
3424 * Add or remove promiscuity from a device. While the count in the device
3425 * remains above zero the interface remains promiscuous. Once it hits zero
3426 * the device reverts to normal filtering operation. A negative inc
3427 * value is used to drop promiscuity on the device.
3428 * Return 0 if successful or a negative errno code on error.
3430 int dev_set_promiscuity(struct net_device *dev, int inc)
3432 unsigned short old_flags = dev->flags;
3435 err = __dev_set_promiscuity(dev, inc);
3438 if (dev->flags != old_flags)
3439 dev_set_rx_mode(dev);
3442 EXPORT_SYMBOL(dev_set_promiscuity);
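/*
 * Editorial sketch: a user such as a packet tap takes one promiscuity
 * reference on attach and drops it on detach; the device leaves
 * promiscuous mode only once every such reference is gone.
 *
 *	dev_set_promiscuity(dev, 1);	(on attach)
 *	dev_set_promiscuity(dev, -1);	(on detach)
 */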
3445 * dev_set_allmulti - update allmulti count on a device
3449 * Add or remove reception of all multicast frames to a device. While the
3450 * count in the device remains above zero the interface keeps receiving
3451 * all multicast frames. Once it hits zero the device reverts to normal
3452 * filtering operation. A negative @inc value is used to drop the counter
3453 * when releasing a resource needing all multicasts.
3454 * Return 0 if successful or a negative errno code on error.
3457 int dev_set_allmulti(struct net_device *dev, int inc)
3459 unsigned short old_flags = dev->flags;
3463 dev->flags |= IFF_ALLMULTI;
3464 dev->allmulti += inc;
3465 if (dev->allmulti == 0) {
3468 * If inc causes an overflow, leave allmulti untouched and return an error.
3471 dev->flags &= ~IFF_ALLMULTI;
3473 dev->allmulti -= inc;
3474 printk(KERN_WARNING "%s: allmulti counter overflowed, "
3475 "set allmulti failed; allmulti feature of "
3476 "device might be broken.\n", dev->name);
3480 if (dev->flags ^ old_flags) {
3481 dev_change_rx_flags(dev, IFF_ALLMULTI);
3482 dev_set_rx_mode(dev);
3486 EXPORT_SYMBOL(dev_set_allmulti);
3489 * Upload unicast and multicast address lists to device and
3490 * configure RX filtering. When the device doesn't support unicast
3491 * filtering it is put in promiscuous mode while unicast addresses are present.
3494 void __dev_set_rx_mode(struct net_device *dev)
3496 const struct net_device_ops *ops = dev->netdev_ops;
3498 /* dev_open will call this function so the list will stay sane. */
3499 if (!(dev->flags&IFF_UP))
3502 if (!netif_device_present(dev))
3505 if (ops->ndo_set_rx_mode)
3506 ops->ndo_set_rx_mode(dev);
3508 /* Unicast address changes may only happen under the rtnl,
3509 * therefore calling __dev_set_promiscuity here is safe.
3511 if (dev->uc.count > 0 && !dev->uc_promisc) {
3512 __dev_set_promiscuity(dev, 1);
3513 dev->uc_promisc = 1;
3514 } else if (dev->uc.count == 0 && dev->uc_promisc) {
3515 __dev_set_promiscuity(dev, -1);
3516 dev->uc_promisc = 0;
3519 if (ops->ndo_set_multicast_list)
3520 ops->ndo_set_multicast_list(dev);
3524 void dev_set_rx_mode(struct net_device *dev)
3526 netif_addr_lock_bh(dev);
3527 __dev_set_rx_mode(dev);
3528 netif_addr_unlock_bh(dev);
3531 /* hw addresses list handling functions */
3533 static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
3534 int addr_len, unsigned char addr_type)
3536 struct netdev_hw_addr *ha;
3539 if (addr_len > MAX_ADDR_LEN)
3542 list_for_each_entry(ha, &list->list, list) {
3543 if (!memcmp(ha->addr, addr, addr_len) &&
3544 ha->type == addr_type) {
3551 alloc_size = sizeof(*ha);
3552 if (alloc_size < L1_CACHE_BYTES)
3553 alloc_size = L1_CACHE_BYTES;
3554 ha = kmalloc(alloc_size, GFP_ATOMIC);
3557 memcpy(ha->addr, addr, addr_len);
3558 ha->type = addr_type;
3561 list_add_tail_rcu(&ha->list, &list->list);
3566 static void ha_rcu_free(struct rcu_head *head)
3568 struct netdev_hw_addr *ha;
3570 ha = container_of(head, struct netdev_hw_addr, rcu_head);
3574 static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr,
3575 int addr_len, unsigned char addr_type)
3577 struct netdev_hw_addr *ha;
3579 list_for_each_entry(ha, &list->list, list) {
3580 if (!memcmp(ha->addr, addr, addr_len) &&
3581 (ha->type == addr_type || !addr_type)) {
3584 list_del_rcu(&ha->list);
3585 call_rcu(&ha->rcu_head, ha_rcu_free);
3593 static int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
3594 struct netdev_hw_addr_list *from_list,
3596 unsigned char addr_type)
3599 struct netdev_hw_addr *ha, *ha2;
3602 list_for_each_entry(ha, &from_list->list, list) {
3603 type = addr_type ? addr_type : ha->type;
3604 err = __hw_addr_add(to_list, ha->addr, addr_len, type);
3611 list_for_each_entry(ha2, &from_list->list, list) {
3614 type = addr_type ? addr_type : ha2->type;
3615 __hw_addr_del(to_list, ha2->addr, addr_len, type);
3620 static void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
3621 struct netdev_hw_addr_list *from_list,
3623 unsigned char addr_type)
3625 struct netdev_hw_addr *ha;
3628 list_for_each_entry(ha, &from_list->list, list) {
3629 type = addr_type ? addr_type : ha->type;
3630 __hw_addr_del(to_list, ha->addr, addr_len, addr_type);
3634 static int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
3635 struct netdev_hw_addr_list *from_list,
3639 struct netdev_hw_addr *ha, *tmp;
3641 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
3643 err = __hw_addr_add(to_list, ha->addr,
3644 addr_len, ha->type);
3649 } else if (ha->refcount == 1) {
3650 __hw_addr_del(to_list, ha->addr, addr_len, ha->type);
3651 __hw_addr_del(from_list, ha->addr, addr_len, ha->type);
3657 static void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
3658 struct netdev_hw_addr_list *from_list,
3661 struct netdev_hw_addr *ha, *tmp;
3663 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
3665 __hw_addr_del(to_list, ha->addr,
3666 addr_len, ha->type);
3668 __hw_addr_del(from_list, ha->addr,
3669 addr_len, ha->type);
3674 static void __hw_addr_flush(struct netdev_hw_addr_list *list)
3676 struct netdev_hw_addr *ha, *tmp;
3678 list_for_each_entry_safe(ha, tmp, &list->list, list) {
3679 list_del_rcu(&ha->list);
3680 call_rcu(&ha->rcu_head, ha_rcu_free);
3685 static void __hw_addr_init(struct netdev_hw_addr_list *list)
3687 INIT_LIST_HEAD(&list->list);
3691 /* Device addresses handling functions */
3693 static void dev_addr_flush(struct net_device *dev)
3695 /* rtnl_mutex must be held here */
3697 __hw_addr_flush(&dev->dev_addrs);
3698 dev->dev_addr = NULL;
3701 static int dev_addr_init(struct net_device *dev)
3703 unsigned char addr[MAX_ADDR_LEN];
3704 struct netdev_hw_addr *ha;
3707 /* rtnl_mutex must be held here */
3709 __hw_addr_init(&dev->dev_addrs);
3710 memset(addr, 0, sizeof(addr));
3711 err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
3712 NETDEV_HW_ADDR_T_LAN);
3715 * Get the first (previously created) address from the list
3716 * and set dev_addr pointer to this location.
3718 ha = list_first_entry(&dev->dev_addrs.list,
3719 struct netdev_hw_addr, list);
3720 dev->dev_addr = ha->addr;
3726 * dev_addr_add - Add a device address
3728 * @addr: address to add
3729 * @addr_type: address type
3731 * Add a device address to the device or increase the reference count if
3732 * it already exists.
3734 * The caller must hold the rtnl_mutex.
3736 int dev_addr_add(struct net_device *dev, unsigned char *addr,
3737 unsigned char addr_type)
3743 err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
3745 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3748 EXPORT_SYMBOL(dev_addr_add);
3751 * dev_addr_del - Release a device address.
3753 * @addr: address to delete
3754 * @addr_type: address type
3756 * Release reference to a device address and remove it from the device
3757 * if the reference count drops to zero.
3759 * The caller must hold the rtnl_mutex.
3761 int dev_addr_del(struct net_device *dev, unsigned char *addr,
3762 unsigned char addr_type)
3765 struct netdev_hw_addr *ha;
3770 * We cannot remove the first address from the list because
3771 * dev->dev_addr points to that.
3773 ha = list_first_entry(&dev->dev_addrs.list,
3774 struct netdev_hw_addr, list);
3775 if (ha->addr == dev->dev_addr && ha->refcount == 1)
3778 err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
3781 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3784 EXPORT_SYMBOL(dev_addr_del);
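/*
 * Editorial sketch: both calls require the rtnl lock, e.g. a
 * (hypothetical) driver adding and later removing a secondary
 * hardware address:
 *
 *	rtnl_lock();
 *	err = dev_addr_add(dev, addr, NETDEV_HW_ADDR_T_LAN);
 *	rtnl_unlock();
 *	...
 *	rtnl_lock();
 *	dev_addr_del(dev, addr, NETDEV_HW_ADDR_T_LAN);
 *	rtnl_unlock();
 */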
3787 * dev_addr_add_multiple - Add device addresses from another device
3788 * @to_dev: device to which addresses will be added
3789 * @from_dev: device from which addresses will be added
3790 * @addr_type: address type - 0 means type will be used from from_dev
3792 * Add the device addresses of one device to another.
3794 * The caller must hold the rtnl_mutex.
3796 int dev_addr_add_multiple(struct net_device *to_dev,
3797 struct net_device *from_dev,
3798 unsigned char addr_type)
3804 if (from_dev->addr_len != to_dev->addr_len)
3806 err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
3807 to_dev->addr_len, addr_type);
3809 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3812 EXPORT_SYMBOL(dev_addr_add_multiple);
3815 * dev_addr_del_multiple - Delete device addresses by another device
3816 * @to_dev: device where the addresses will be deleted
3817 * @from_dev: device whose address list selects the addresses to delete
3818 * @addr_type: address type - 0 means the type will be taken from from_dev
3820 * Deletes from the to device the addresses listed in the from device.
3822 * The caller must hold the rtnl_mutex.
3824 int dev_addr_del_multiple(struct net_device *to_dev,
3825 struct net_device *from_dev,
3826 unsigned char addr_type)
3830 if (from_dev->addr_len != to_dev->addr_len)
3832 __hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
3833 to_dev->addr_len, addr_type);
3834 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3837 EXPORT_SYMBOL(dev_addr_del_multiple);
3839 /* multicast addresses handling functions */
3841 int __dev_addr_delete(struct dev_addr_list **list, int *count,
3842 void *addr, int alen, int glbl)
3844 struct dev_addr_list *da;
3846 for (; (da = *list) != NULL; list = &da->next) {
3847 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3848 alen == da->da_addrlen) {
3850 int old_glbl = da->da_gusers;
3867 int __dev_addr_add(struct dev_addr_list **list, int *count,
3868 void *addr, int alen, int glbl)
3870 struct dev_addr_list *da;
3872 for (da = *list; da != NULL; da = da->next) {
3873 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3874 da->da_addrlen == alen) {
3876 int old_glbl = da->da_gusers;
3886 da = kzalloc(sizeof(*da), GFP_ATOMIC);
3889 memcpy(da->da_addr, addr, alen);
3890 da->da_addrlen = alen;
3892 da->da_gusers = glbl ? 1 : 0;
3900 * dev_unicast_delete - Release secondary unicast address.
3902 * @addr: address to delete
3904 * Release reference to a secondary unicast address and remove it
3905 * from the device if the reference count drops to zero.
3907 * The caller must hold the rtnl_mutex.
3909 int dev_unicast_delete(struct net_device *dev, void *addr)
3915 netif_addr_lock_bh(dev);
3916 err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
3917 NETDEV_HW_ADDR_T_UNICAST);
3919 __dev_set_rx_mode(dev);
3920 netif_addr_unlock_bh(dev);
3923 EXPORT_SYMBOL(dev_unicast_delete);
3926 * dev_unicast_add - add a secondary unicast address
3928 * @addr: address to add
3930 * Add a secondary unicast address to the device or increase
3931 * the reference count if it already exists.
3933 * The caller must hold the rtnl_mutex.
3935 int dev_unicast_add(struct net_device *dev, void *addr)
3941 netif_addr_lock_bh(dev);
3942 err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
3943 NETDEV_HW_ADDR_T_UNICAST);
3945 __dev_set_rx_mode(dev);
3946 netif_addr_unlock_bh(dev);
3949 EXPORT_SYMBOL(dev_unicast_add);
3951 int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
3952 struct dev_addr_list **from, int *from_count)
3954 struct dev_addr_list *da, *next;
3958 while (da != NULL) {
3960 if (!da->da_synced) {
3961 err = __dev_addr_add(to, to_count,
3962 da->da_addr, da->da_addrlen, 0);
3967 } else if (da->da_users == 1) {
3968 __dev_addr_delete(to, to_count,
3969 da->da_addr, da->da_addrlen, 0);
3970 __dev_addr_delete(from, from_count,
3971 da->da_addr, da->da_addrlen, 0);
3977 EXPORT_SYMBOL_GPL(__dev_addr_sync);
3979 void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
3980 struct dev_addr_list **from, int *from_count)
3982 struct dev_addr_list *da, *next;
3985 while (da != NULL) {
3987 if (da->da_synced) {
3988 __dev_addr_delete(to, to_count,
3989 da->da_addr, da->da_addrlen, 0);
3991 __dev_addr_delete(from, from_count,
3992 da->da_addr, da->da_addrlen, 0);
3997 EXPORT_SYMBOL_GPL(__dev_addr_unsync);
4000 * dev_unicast_sync - Synchronize device's unicast list to another device
4001 * @to: destination device
4002 * @from: source device
4004 * Add newly added addresses to the destination device and release
4005 * addresses that have no users left. The source device must be
4006 * locked by netif_addr_lock_bh.
4008 * This function is intended to be called from the dev->set_rx_mode
4009 * function of layered software devices.
4011 int dev_unicast_sync(struct net_device *to, struct net_device *from)
4015 if (to->addr_len != from->addr_len)
4018 netif_addr_lock_bh(to);
4019 err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
4021 __dev_set_rx_mode(to);
4022 netif_addr_unlock_bh(to);
4025 EXPORT_SYMBOL(dev_unicast_sync);
4028 * dev_unicast_unsync - Remove synchronized addresses from the destination device
4029 * @to: destination device
4030 * @from: source device
4032 * Remove all addresses that were added to the destination device by
4033 * dev_unicast_sync(). This function is intended to be called from the
4034 * dev->stop function of layered software devices.
4036 void dev_unicast_unsync(struct net_device *to, struct net_device *from)
4038 if (to->addr_len != from->addr_len)
4041 netif_addr_lock_bh(from);
4042 netif_addr_lock(to);
4043 __hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
4044 __dev_set_rx_mode(to);
4045 netif_addr_unlock(to);
4046 netif_addr_unlock_bh(from);
4048 EXPORT_SYMBOL(dev_unicast_unsync);
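/*
 * Editorial sketch: a layered device such as a VLAN keeps the lower
 * device's unicast filter in sync from its own rx_mode and stop
 * callbacks. foo_* names are hypothetical.
 *
 *	static void foo_set_rx_mode(struct net_device *dev)
 *	{
 *		dev_unicast_sync(foo_lower_dev(dev), dev);
 *	}
 *
 *	static int foo_stop(struct net_device *dev)
 *	{
 *		dev_unicast_unsync(foo_lower_dev(dev), dev);
 *		return 0;
 *	}
 */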
4050 static void dev_unicast_flush(struct net_device *dev)
4052 netif_addr_lock_bh(dev);
4053 __hw_addr_flush(&dev->uc);
4054 netif_addr_unlock_bh(dev);
4057 static void dev_unicast_init(struct net_device *dev)
4059 __hw_addr_init(&dev->uc);
4063 static void __dev_addr_discard(struct dev_addr_list **list)
4065 struct dev_addr_list *tmp;
4067 while (*list != NULL) {
4070 if (tmp->da_users > tmp->da_gusers)
4071 printk("__dev_addr_discard: address leakage! "
4072 "da_users=%d\n", tmp->da_users);
4077 static void dev_addr_discard(struct net_device *dev)
4079 netif_addr_lock_bh(dev);
4081 __dev_addr_discard(&dev->mc_list);
4084 netif_addr_unlock_bh(dev);
4088 * dev_get_flags - get flags reported to userspace
4091 * Get the combination of flag bits exported through APIs to userspace.
4093 unsigned dev_get_flags(const struct net_device *dev)
4097 flags = (dev->flags & ~(IFF_PROMISC |
4102 (dev->gflags & (IFF_PROMISC |
4105 if (netif_running(dev)) {
4106 if (netif_oper_up(dev))
4107 flags |= IFF_RUNNING;
4108 if (netif_carrier_ok(dev))
4109 flags |= IFF_LOWER_UP;
4110 if (netif_dormant(dev))
4111 flags |= IFF_DORMANT;
4116 EXPORT_SYMBOL(dev_get_flags);
4119 * dev_change_flags - change device settings
4121 * @flags: device state flags
4123 * Change settings on a device based on state flags. The flags are
4124 * in the userspace exported format.
4126 int dev_change_flags(struct net_device *dev, unsigned flags)
4129 int old_flags = dev->flags;
4134 * Set the flags on our device.
4137 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4138 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4140 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4144 * Load in the correct multicast list now the flags have changed.
4147 if ((old_flags ^ flags) & IFF_MULTICAST)
4148 dev_change_rx_flags(dev, IFF_MULTICAST);
4150 dev_set_rx_mode(dev);
4153 * Have we downed the interface? We handle IFF_UP ourselves
4154 * according to user attempts to set it, rather than blindly setting it.
4159 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
4160 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
4163 dev_set_rx_mode(dev);
4166 if (dev->flags & IFF_UP &&
4167 ((old_flags ^ dev->flags) & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
4169 call_netdevice_notifiers(NETDEV_CHANGE, dev);
4171 if ((flags ^ dev->gflags) & IFF_PROMISC) {
4172 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4174 dev->gflags ^= IFF_PROMISC;
4175 dev_set_promiscuity(dev, inc);
4178 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4179 is important. Some (broken) drivers set IFF_PROMISC when
4180 IFF_ALLMULTI is requested, without asking us and without reporting it.
4182 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
4183 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4185 dev->gflags ^= IFF_ALLMULTI;
4186 dev_set_allmulti(dev, inc);
4189 /* Exclude state transition flags, already notified */
4190 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
4192 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
4196 EXPORT_SYMBOL(dev_change_flags);
4199 * dev_set_mtu - Change maximum transfer unit
4201 * @new_mtu: new transfer unit
4203 * Change the maximum transfer size of the network device.
4205 int dev_set_mtu(struct net_device *dev, int new_mtu)
4207 const struct net_device_ops *ops = dev->netdev_ops;
4210 if (new_mtu == dev->mtu)
4213 /* MTU must be positive. */
4217 if (!netif_device_present(dev))
4221 if (ops->ndo_change_mtu)
4222 err = ops->ndo_change_mtu(dev, new_mtu);
4226 if (!err && dev->flags & IFF_UP)
4227 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
4230 EXPORT_SYMBOL(dev_set_mtu);
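/*
 * Editorial sketch: in-kernel callers invoke this under the rtnl lock,
 * just as the ioctl path below does:
 *
 *	rtnl_lock();
 *	err = dev_set_mtu(dev, 9000);
 *	rtnl_unlock();
 */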
4233 * dev_set_mac_address - Change Media Access Control Address
4237 * Change the hardware (MAC) address of the device
4239 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4241 const struct net_device_ops *ops = dev->netdev_ops;
4244 if (!ops->ndo_set_mac_address)
4246 if (sa->sa_family != dev->type)
4248 if (!netif_device_present(dev))
4250 err = ops->ndo_set_mac_address(dev, sa);
4252 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4255 EXPORT_SYMBOL(dev_set_mac_address);
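/*
 * Editorial sketch: the new address travels in a struct sockaddr whose
 * sa_family must match dev->type; new_mac is a hypothetical buffer of
 * dev->addr_len bytes.
 *
 *	struct sockaddr sa;
 *
 *	sa.sa_family = dev->type;
 *	memcpy(sa.sa_data, new_mac, dev->addr_len);
 *	err = dev_set_mac_address(dev, &sa);
 */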
4258 * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
4260 static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
4263 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
4269 case SIOCGIFFLAGS: /* Get interface flags */
4270 ifr->ifr_flags = (short) dev_get_flags(dev);
4273 case SIOCGIFMETRIC: /* Get the metric on the interface
4274 (currently unused) */
4275 ifr->ifr_metric = 0;
4278 case SIOCGIFMTU: /* Get the MTU of a device */
4279 ifr->ifr_mtu = dev->mtu;
4284 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof(ifr->ifr_hwaddr.sa_data));
4286 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4287 min(sizeof(ifr->ifr_hwaddr.sa_data), (size_t)dev->addr_len));
4288 ifr->ifr_hwaddr.sa_family = dev->type;
4296 ifr->ifr_map.mem_start = dev->mem_start;
4297 ifr->ifr_map.mem_end = dev->mem_end;
4298 ifr->ifr_map.base_addr = dev->base_addr;
4299 ifr->ifr_map.irq = dev->irq;
4300 ifr->ifr_map.dma = dev->dma;
4301 ifr->ifr_map.port = dev->if_port;
4305 ifr->ifr_ifindex = dev->ifindex;
4309 ifr->ifr_qlen = dev->tx_queue_len;
4313 /* dev_ioctl() should ensure this case is never reached */
4325 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
4327 static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4330 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
4331 const struct net_device_ops *ops;
4336 ops = dev->netdev_ops;
4339 case SIOCSIFFLAGS: /* Set interface flags */
4340 return dev_change_flags(dev, ifr->ifr_flags);
4342 case SIOCSIFMETRIC: /* Set the metric on the interface
4343 (currently unused) */
4346 case SIOCSIFMTU: /* Set the MTU of a device */
4347 return dev_set_mtu(dev, ifr->ifr_mtu);
4350 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
4352 case SIOCSIFHWBROADCAST:
4353 if (ifr->ifr_hwaddr.sa_family != dev->type)
4355 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4356 min(sizeof(ifr->ifr_hwaddr.sa_data), (size_t)dev->addr_len));
4357 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4361 if (ops->ndo_set_config) {
4362 if (!netif_device_present(dev))
4364 return ops->ndo_set_config(dev, &ifr->ifr_map);
4369 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4370 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4372 if (!netif_device_present(dev))
4374 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
4378 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4379 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4381 if (!netif_device_present(dev))
4383 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
4387 if (ifr->ifr_qlen < 0)
4389 dev->tx_queue_len = ifr->ifr_qlen;
4393 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4394 return dev_change_name(dev, ifr->ifr_newname);
4397 * Unknown or private ioctl
4400 if ((cmd >= SIOCDEVPRIVATE &&
4401 cmd <= SIOCDEVPRIVATE + 15) ||
4402 cmd == SIOCBONDENSLAVE ||
4403 cmd == SIOCBONDRELEASE ||
4404 cmd == SIOCBONDSETHWADDR ||
4405 cmd == SIOCBONDSLAVEINFOQUERY ||
4406 cmd == SIOCBONDINFOQUERY ||
4407 cmd == SIOCBONDCHANGEACTIVE ||
4408 cmd == SIOCGMIIPHY ||
4409 cmd == SIOCGMIIREG ||
4410 cmd == SIOCSMIIREG ||
4411 cmd == SIOCBRADDIF ||
4412 cmd == SIOCBRDELIF ||
4413 cmd == SIOCSHWTSTAMP ||
4414 cmd == SIOCWANDEV) {
4416 if (ops->ndo_do_ioctl) {
4417 if (netif_device_present(dev))
4418 err = ops->ndo_do_ioctl(dev, ifr, cmd);
4430 * This function handles all "interface"-type I/O control requests. The actual
4431 * 'doing' part of this is dev_ifsioc above.
4435 * dev_ioctl - network device ioctl
4436 * @net: the applicable net namespace
4437 * @cmd: command to issue
4438 * @arg: pointer to a struct ifreq in user space
4440 * Issue ioctl functions to devices. This is normally called by the
4441 * user space syscall interfaces but can sometimes be useful for
4442 * other purposes. The return value is the return from the syscall if
4443 * positive or a negative errno code on error.
4446 int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
4452 /* One special case: SIOCGIFCONF takes ifconf argument
4453 and requires shared lock, because it sleeps writing the answer.
4457 if (cmd == SIOCGIFCONF) {
4459 ret = dev_ifconf(net, (char __user *) arg);
4463 if (cmd == SIOCGIFNAME)
4464 return dev_ifname(net, (struct ifreq __user *)arg);
4466 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4469 ifr.ifr_name[IFNAMSIZ-1] = 0;
4471 colon = strchr(ifr.ifr_name, ':');
4476 * See which interface the caller is talking about.
4481 * These ioctl calls:
4482 * - can be done by all.
4483 * - atomic and do not require locking.
4494 dev_load(net, ifr.ifr_name);
4495 read_lock(&dev_base_lock);
4496 ret = dev_ifsioc_locked(net, &ifr, cmd);
4497 read_unlock(&dev_base_lock);
4501 if (copy_to_user(arg, &ifr,
4502 sizeof(struct ifreq)))
4508 dev_load(net, ifr.ifr_name);
4510 ret = dev_ethtool(net, &ifr);
4515 if (copy_to_user(arg, &ifr,
4516 sizeof(struct ifreq)))
4522 * These ioctl calls:
4523 * - require superuser power.
4524 * - require strict serialization.
4530 if (!capable(CAP_NET_ADMIN))
4532 dev_load(net, ifr.ifr_name);
4534 ret = dev_ifsioc(net, &ifr, cmd);
4539 if (copy_to_user(arg, &ifr,
4540 sizeof(struct ifreq)))
4546 * These ioctl calls:
4547 * - require superuser power.
4548 * - require strict serialization.
4549 * - do not return a value
4559 case SIOCSIFHWBROADCAST:
4562 case SIOCBONDENSLAVE:
4563 case SIOCBONDRELEASE:
4564 case SIOCBONDSETHWADDR:
4565 case SIOCBONDCHANGEACTIVE:
4569 if (!capable(CAP_NET_ADMIN))
4572 case SIOCBONDSLAVEINFOQUERY:
4573 case SIOCBONDINFOQUERY:
4574 dev_load(net, ifr.ifr_name);
4576 ret = dev_ifsioc(net, &ifr, cmd);
4581 /* Get the per device memory space. We can add this but
4582 * currently do not support it */
4584 /* Set the per device memory buffer space.
4585 * Not applicable in our case */
4590 * Unknown or private ioctl.
4593 if (cmd == SIOCWANDEV ||
4594 (cmd >= SIOCDEVPRIVATE &&
4595 cmd <= SIOCDEVPRIVATE + 15)) {
4596 dev_load(net, ifr.ifr_name);
4598 ret = dev_ifsioc(net, &ifr, cmd);
4600 if (!ret && copy_to_user(arg, &ifr,
4601 sizeof(struct ifreq)))
4605 /* Take care of Wireless Extensions */
4606 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
4607 return wext_handle_ioctl(net, &ifr, cmd, arg);
4614 * dev_new_index - allocate an ifindex
4615 * @net: the applicable net namespace
4617 * Returns a suitable unique value for a new device interface
4618 * number. The caller must hold the rtnl semaphore or the
4619 * dev_base_lock to be sure it remains unique.
4621 static int dev_new_index(struct net *net)
4627 if (!__dev_get_by_index(net, ifindex))
4632 /* Delayed registration/unregistration */
4633 static LIST_HEAD(net_todo_list);
4635 static void net_set_todo(struct net_device *dev)
4637 list_add_tail(&dev->todo_list, &net_todo_list);
4640 static void rollback_registered(struct net_device *dev)
4642 BUG_ON(dev_boot_phase);
4645 /* Some devices call this without ever having registered, to unwind a failed initialization. */
4646 if (dev->reg_state == NETREG_UNINITIALIZED) {
4647 printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
4648 "was registered\n", dev->name, dev);
4654 BUG_ON(dev->reg_state != NETREG_REGISTERED);
4656 /* If device is running, close it first. */
4659 /* And unlink it from device chain. */
4660 unlist_netdevice(dev);
4662 dev->reg_state = NETREG_UNREGISTERING;
4666 /* Shutdown queueing discipline. */
4670 /* Notify protocols that we are about to destroy
4671 this device. They should clean up all their state.
4673 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4676 * Flush the unicast and multicast chains
4678 dev_unicast_flush(dev);
4679 dev_addr_discard(dev);
4681 if (dev->netdev_ops->ndo_uninit)
4682 dev->netdev_ops->ndo_uninit(dev);
4684 /* Notifier chain MUST detach us from master device. */
4685 WARN_ON(dev->master);
4687 /* Remove entries from kobject tree */
4688 netdev_unregister_kobject(dev);
4695 static void __netdev_init_queue_locks_one(struct net_device *dev,
4696 struct netdev_queue *dev_queue,
4699 spin_lock_init(&dev_queue->_xmit_lock);
4700 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
4701 dev_queue->xmit_lock_owner = -1;
4704 static void netdev_init_queue_locks(struct net_device *dev)
4706 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
4707 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
4710 unsigned long netdev_fix_features(unsigned long features, const char *name)
4712 /* Fix illegal SG+CSUM combinations. */
4713 if ((features & NETIF_F_SG) &&
4714 !(features & NETIF_F_ALL_CSUM)) {
4716 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
4717 "checksum feature.\n", name);
4718 features &= ~NETIF_F_SG;
4721 /* TSO requires that SG is present as well. */
4722 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
4724 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
4725 "SG feature.\n", name);
4726 features &= ~NETIF_F_TSO;
4729 if (features & NETIF_F_UFO) {
4730 if (!(features & NETIF_F_GEN_CSUM)) {
4732 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4733 "since no NETIF_F_HW_CSUM feature.\n",
4735 features &= ~NETIF_F_UFO;
4738 if (!(features & NETIF_F_SG)) {
4740 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4741 "since no NETIF_F_SG feature.\n", name);
4742 features &= ~NETIF_F_UFO;
4748 EXPORT_SYMBOL(netdev_fix_features);
4751 * register_netdevice - register a network device
4752 * @dev: device to register
4754 * Take a completed network device structure and add it to the kernel
4755 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4756 * chain. 0 is returned on success. A negative errno code is returned
4757 * on a failure to set up the device, or if the name is a duplicate.
4759 * Callers must hold the rtnl semaphore. You may want
4760 * register_netdev() instead of this.
4763 * The locking appears insufficient to guarantee two parallel registers
4764 * will not get the same name.
4767 int register_netdevice(struct net_device *dev)
4769 struct hlist_head *head;
4770 struct hlist_node *p;
4772 struct net *net = dev_net(dev);
4774 BUG_ON(dev_boot_phase);
4779 /* When net_devices are persistent, this will be fatal. */
4780 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
4783 spin_lock_init(&dev->addr_list_lock);
4784 netdev_set_addr_lockdep_class(dev);
4785 netdev_init_queue_locks(dev);
4789 /* Init, if this function is available */
4790 if (dev->netdev_ops->ndo_init) {
4791 ret = dev->netdev_ops->ndo_init(dev);
4799 if (!dev_valid_name(dev->name)) {
4804 dev->ifindex = dev_new_index(net);
4805 if (dev->iflink == -1)
4806 dev->iflink = dev->ifindex;
4808 /* Check for existence of name */
4809 head = dev_name_hash(net, dev->name);
4810 hlist_for_each(p, head) {
4811 struct net_device *d
4812 = hlist_entry(p, struct net_device, name_hlist);
4813 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
4819 /* Fix illegal checksum combinations */
4820 if ((dev->features & NETIF_F_HW_CSUM) &&
4821 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4822 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
4824 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4827 if ((dev->features & NETIF_F_NO_CSUM) &&
4828 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4829 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
4831 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
4834 dev->features = netdev_fix_features(dev->features, dev->name);
4836 /* Enable software GSO if SG is supported. */
4837 if (dev->features & NETIF_F_SG)
4838 dev->features |= NETIF_F_GSO;
4840 netdev_initialize_kobject(dev);
4842 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
4843 ret = notifier_to_errno(ret);
4847 ret = netdev_register_kobject(dev);
4850 dev->reg_state = NETREG_REGISTERED;
4853 * Default initial state at registration is that the
4854 * device is present.
4857 set_bit(__LINK_STATE_PRESENT, &dev->state);
4859 dev_init_scheduler(dev);
4861 list_netdevice(dev);
4863 /* Notify protocols, that a new device appeared. */
4864 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
4865 ret = notifier_to_errno(ret);
4867 rollback_registered(dev);
4868 dev->reg_state = NETREG_UNREGISTERED;
4875 if (dev->netdev_ops->ndo_uninit)
4876 dev->netdev_ops->ndo_uninit(dev);
4879 EXPORT_SYMBOL(register_netdevice);
4882 * init_dummy_netdev - init a dummy network device for NAPI
4883 * @dev: device to init
4885 * This takes a network device structure and initializes the minimum
4886 * number of fields so it can be used to schedule NAPI polls without
4887 * registering a full blown interface. This is to be used by drivers
4888 * that need to tie several hardware interfaces to a single NAPI
4889 * poll scheduler due to HW limitations.
4891 int init_dummy_netdev(struct net_device *dev)
4893 /* Clear everything. Note we don't initialize spinlocks
4894 * as they aren't supposed to be taken by any of the
4895 * NAPI code and this dummy netdev is supposed to be
4896 * only ever used for NAPI polls
4898 memset(dev, 0, sizeof(struct net_device));
4900 /* make sure we BUG if trying to hit standard
4901 * register/unregister code path
4903 dev->reg_state = NETREG_DUMMY;
4905 /* initialize the ref count */
4906 atomic_set(&dev->refcnt, 1);
4908 /* NAPI wants this */
4909 INIT_LIST_HEAD(&dev->napi_list);
4911 /* a dummy interface is started by default */
4912 set_bit(__LINK_STATE_PRESENT, &dev->state);
4913 set_bit(__LINK_STATE_START, &dev->state);
4917 EXPORT_SYMBOL_GPL(init_dummy_netdev);
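/*
 * Editorial sketch: a driver multiplexing several hardware channels
 * onto one poll routine can back its NAPI context with a dummy netdev:
 *
 *	init_dummy_netdev(&fp->dummy_dev);
 *	netif_napi_add(&fp->dummy_dev, &fp->napi, foo_poll, 64);
 */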
4921 * register_netdev - register a network device
4922 * @dev: device to register
4924 * Take a completed network device structure and add it to the kernel
4925 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4926 * chain. 0 is returned on success. A negative errno code is returned
4927 * on a failure to set up the device, or if the name is a duplicate.
4929 * This is a wrapper around register_netdevice that takes the rtnl semaphore
4930 * and expands the device name if you passed a format string to alloc_netdev().
4933 int register_netdev(struct net_device *dev)
4940 * If the name is a format string the caller wants us to do a name allocation.
4943 if (strchr(dev->name, '%')) {
4944 err = dev_alloc_name(dev, dev->name);
4949 err = register_netdevice(dev);
4954 EXPORT_SYMBOL(register_netdev);
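/*
 * Editorial sketch: the usual probe-time sequence for an Ethernet
 * driver, with foo_* names hypothetical:
 *
 *	dev = alloc_etherdev(sizeof(struct foo_priv));
 *	if (!dev)
 *		return -ENOMEM;
 *	dev->netdev_ops = &foo_netdev_ops;
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 */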
4957 * netdev_wait_allrefs - wait until all references are gone.
4959 * This is called when unregistering network devices.
4961 * Any protocol or device that holds a reference should register
4962 * for netdevice notification, and cleanup and put back the
4963 * reference if they receive an UNREGISTER event.
4964 * We can get stuck here if buggy protocols don't correctly call dev_put().
4967 static void netdev_wait_allrefs(struct net_device *dev)
4969 unsigned long rebroadcast_time, warning_time;
4971 rebroadcast_time = warning_time = jiffies;
4972 while (atomic_read(&dev->refcnt) != 0) {
4973 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
4976 /* Rebroadcast unregister notification */
4977 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4979 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
4981 /* We must not have linkwatch events
4982 * pending on unregister. If this
4983 * happens, we simply run the queue
4984 * unscheduled, resulting in a noop for this device.
4987 linkwatch_run_queue();
4992 rebroadcast_time = jiffies;
4997 if (time_after(jiffies, warning_time + 10 * HZ)) {
4998 printk(KERN_EMERG "unregister_netdevice: "
4999 "waiting for %s to become free. Usage "
5001 dev->name, atomic_read(&dev->refcnt));
5002 warning_time = jiffies;
5011 * register_netdevice(x1);
5012 * register_netdevice(x2);
5014 * unregister_netdevice(y1);
5015 * unregister_netdevice(y2);
5021 * We are invoked by rtnl_unlock().
5022 * This allows us to deal with problems:
5023 * 1) We can delete sysfs objects which invoke hotplug
5024 * without deadlocking with linkwatch via keventd.
5025 * 2) Since we run with the RTNL semaphore not held, we can sleep
5026 * safely in order to wait for the netdev refcnt to drop to zero.
5028 * We must not return until all unregister events added during
5029 * the interval the lock was held have been completed.
5031 void netdev_run_todo(void)
5033 struct list_head list;
5035 /* Snapshot list, allow later requests */
5036 list_replace_init(&net_todo_list, &list);
5040 while (!list_empty(&list)) {
5041 struct net_device *dev
5042 = list_entry(list.next, struct net_device, todo_list);
5043 list_del(&dev->todo_list);
5045 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
5046 printk(KERN_ERR "network todo '%s' but state %d\n",
5047 dev->name, dev->reg_state);
5052 dev->reg_state = NETREG_UNREGISTERED;
5054 on_each_cpu(flush_backlog, dev, 1);
5056 netdev_wait_allrefs(dev);
5059 BUG_ON(atomic_read(&dev->refcnt));
5060 WARN_ON(dev->ip_ptr);
5061 WARN_ON(dev->ip6_ptr);
5062 WARN_ON(dev->dn_ptr);
5064 if (dev->destructor)
5065 dev->destructor(dev);
5067 /* Free network device */
5068 kobject_put(&dev->dev.kobj);
5073 * dev_get_stats - get network device statistics
5074 * @dev: device to get statistics from
5076 * Get network statistics from device. The device driver may provide
5077 * its own method by setting dev->netdev_ops->get_stats; otherwise
5078 * the internal statistics structure is used.
5080 const struct net_device_stats *dev_get_stats(struct net_device *dev)
5082 const struct net_device_ops *ops = dev->netdev_ops;
5084 if (ops->ndo_get_stats)
5085 return ops->ndo_get_stats(dev);
5087 unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
5088 struct net_device_stats *stats = &dev->stats;
5090 struct netdev_queue *txq;
5092 for (i = 0; i < dev->num_tx_queues; i++) {
5093 txq = netdev_get_tx_queue(dev, i);
5094 tx_bytes += txq->tx_bytes;
5095 tx_packets += txq->tx_packets;
5096 tx_dropped += txq->tx_dropped;
5098 if (tx_bytes || tx_packets || tx_dropped) {
5099 stats->tx_bytes = tx_bytes;
5100 stats->tx_packets = tx_packets;
5101 stats->tx_dropped = tx_dropped;
5106 EXPORT_SYMBOL(dev_get_stats);
5108 static void netdev_init_one_queue(struct net_device *dev,
5109 struct netdev_queue *queue,
5115 static void netdev_init_queues(struct net_device *dev)
5117 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
5118 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
5119 spin_lock_init(&dev->tx_global_lock);
/**
 *	alloc_netdev_mq - allocate network device
 *	@sizeof_priv:	size of private data to allocate space for
 *	@name:		device name format string
 *	@setup:		callback to initialize device
 *	@queue_count:	the number of subqueues to allocate
 *
 *	Allocates a struct net_device with private data area for driver use
 *	and performs basic initialization.  Also allocates subqueue structs
 *	for each queue on the device at the end of the netdevice.
 */
struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
		void (*setup)(struct net_device *), unsigned int queue_count)
{
	struct netdev_queue *tx;
	struct net_device *dev;
	size_t alloc_size;
	struct net_device *p;

	BUG_ON(strlen(name) >= sizeof(dev->name));

	alloc_size = sizeof(struct net_device);
	if (sizeof_priv) {
		/* ensure 32-byte alignment of private area */
		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct */
	alloc_size += NETDEV_ALIGN - 1;

	p = kzalloc(alloc_size, GFP_KERNEL);
	if (!p) {
		printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
		return NULL;
	}

	tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
	if (!tx) {
		printk(KERN_ERR "alloc_netdev: Unable to allocate "
		       "tx qdiscs.\n");
		goto free_p;
	}

	dev = PTR_ALIGN(p, NETDEV_ALIGN);
	dev->padded = (char *)dev - (char *)p;

	if (dev_addr_init(dev))
		goto free_tx;

	dev_unicast_init(dev);

	dev_net_set(dev, &init_net);

	dev->_tx = tx;
	dev->num_tx_queues = queue_count;
	dev->real_num_tx_queues = queue_count;

	dev->gso_max_size = GSO_MAX_SIZE;

	netdev_init_queues(dev);

	INIT_LIST_HEAD(&dev->napi_list);
	dev->priv_flags = IFF_XMIT_DST_RELEASE;
	setup(dev);
	strcpy(dev->name, name);
	return dev;

free_tx:
	kfree(tx);

free_p:
	kfree(p);
	return NULL;
}
EXPORT_SYMBOL(alloc_netdev_mq);
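
/*
 * Example (minimal sketch; struct foo_priv is hypothetical): allocating
 * an ethernet-style device with four TX subqueues:
 *
 *	dev = alloc_netdev_mq(sizeof(struct foo_priv), "foo%d",
 *			      ether_setup, 4);
 *	if (!dev)
 *		return -ENOMEM;
 *
 * The "%d" in the name format is expanded by dev_alloc_name() when the
 * device is registered.
 */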
/**
 *	free_netdev - free network device
 *	@dev: device
 *
 *	This function does the last stage of destroying an allocated device
 *	interface. The reference to the device object is released.
 *	If this is the last reference then it will be freed.
 */
void free_netdev(struct net_device *dev)
{
	struct napi_struct *p, *n;

	release_net(dev_net(dev));

	kfree(dev->_tx);

	/* Flush device addresses */
	dev_addr_flush(dev);

	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
		netif_napi_del(p);

	/* Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		kfree((char *)dev - dev->padded);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

	/* will free via device release */
	put_device(&dev->dev);
}
EXPORT_SYMBOL(free_netdev);
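
/*
 * Example (sketch): the NETREG_UNINITIALIZED branch above is what makes
 * the common driver error path safe:
 *
 *	dev = alloc_netdev_mq(...);
 *	err = register_netdevice(dev);
 *	if (err) {
 *		free_netdev(dev);	never registered: freed directly
 *		return err;
 *	}
 */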
/**
 *	synchronize_net -  Synchronize with packet receive processing
 *
 *	Wait for packets currently being received to be done.
 *	Does not block later packets from starting.
 */
void synchronize_net(void)
{
	might_sleep();
	synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);
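
/*
 * Example (illustrative sketch; my_ptype is hypothetical): after
 * unlinking an object the receive path may still be traversing under
 * rcu_read_lock(), wait for the readers to drain before freeing:
 *
 *	__dev_remove_pack(&my_ptype);	unlink; in-flight RX may still see it
 *	synchronize_net();		all such RX is now finished
 *	kfree(my_ptype.af_packet_priv);	now safe to free private data
 */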
/**
 *	unregister_netdevice - remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *
 *	Callers must hold the rtnl semaphore.  You may want
 *	unregister_netdev() instead of this.
 */
void unregister_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	rollback_registered(dev);
	/* Finish processing unregister after unlock */
	net_set_todo(dev);
}
EXPORT_SYMBOL(unregister_netdevice);
/**
 *	unregister_netdev - remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *
 *	This is just a wrapper for unregister_netdevice that takes
 *	the rtnl semaphore.  In general you want to use this and not
 *	unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);
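
/*
 * Example (minimal sketch; foo_dev is a hypothetical module-global):
 * typical module teardown pairs unregister_netdev() with free_netdev(),
 * in that order:
 *
 *	static void __exit foo_exit(void)
 *	{
 *		unregister_netdev(foo_dev);	takes and drops RTNL itself
 *		free_netdev(foo_dev);
 *	}
 */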
/**
 *	dev_change_net_namespace - move device to different network namespace
 *	@dev: device
 *	@net: network namespace
 *	@pat: If not NULL name pattern to try if the current device name
 *	      is already taken in the destination network namespace.
 *
 *	This function shuts down a device interface and moves it
 *	to a new network namespace. On success 0 is returned, on
 *	a failure a negative errno code is returned.
 *
 *	Callers must hold the rtnl semaphore.
 */
int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
	char buf[IFNAMSIZ];
	const char *destname;
	int err;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

	/* Don't allow real devices to be moved when sysfs
	 * is enabled.
	 */
	err = -EINVAL;
	if (dev->dev.parent)
		goto out;

	/* Ensure the device has been registered */
	err = -EINVAL;
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing to do */
	err = 0;
	if (net_eq(dev_net(dev), net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	destname = dev->name;
	if (__dev_get_by_name(net, destname)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		if (!dev_valid_name(pat))
			goto out;
		if (strchr(pat, '%')) {
			if (__dev_alloc_name(net, pat, buf) < 0)
				goto out;
			destname = buf;
		} else
			destname = pat;
		if (__dev_get_by_name(net, destname))
			goto out;
	}

	/*
	 * And now a mini version of register_netdevice unregister_netdevice.
	 */

	/* If device is running close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	err = -ENODEV;
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols, that we are about to destroy
	   this device. They should clean all the things.
	*/
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

	/*
	 *	Flush the unicast and multicast chains
	 */
	dev_unicast_flush(dev);
	dev_addr_discard(dev);

	netdev_unregister_kobject(dev);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);

	/* Assign the new device name */
	if (destname != dev->name)
		strcpy(dev->name, destname);

	/* If there is an ifindex conflict assign a new one */
	if (__dev_get_by_index(net, dev->ifindex)) {
		int iflink = (dev->iflink == dev->ifindex);
		dev->ifindex = dev_new_index(net);
		if (iflink)
			dev->iflink = dev->ifindex;
	}

	/* Fixup kobjects */
	err = netdev_register_kobject(dev);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols, that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	synchronize_net();
	err = 0;
out:
	return err;
}
EXPORT_SYMBOL_GPL(dev_change_net_namespace);
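
/*
 * Example (sketch): move a device into another namespace, falling back
 * to a "dev%d"-style name if its current name is taken there:
 *
 *	err = dev_change_net_namespace(dev, net, "dev%d");
 *
 * This is essentially what default_device_exit() below does when a
 * network namespace is torn down.
 */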
static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct Qdisc **list_net;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Find end of our output_queue. */
	list_net = &sd->output_queue;
	while (*list_net)
		list_net = &(*list_net)->next_sched;
	/* Append output queue from offline CPU. */
	*list_net = oldsd->output_queue;
	oldsd->output_queue = NULL;

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
		netif_rx(skb);

	return NOTIFY_OK;
}
/**
 *	netdev_increment_features - increment feature set by one
 *	@all: current feature set
 *	@one: new feature set
 *	@mask: mask feature set
 *
 *	Computes a new feature set after adding a device with feature set
 *	@one to the master device with current feature set @all.  Will not
 *	enable anything that is off in @mask. Returns the new feature set.
 */
unsigned long netdev_increment_features(unsigned long all, unsigned long one,
					unsigned long mask)
{
	/* If device needs checksumming, downgrade to it. */
	if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
		all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
	else if (mask & NETIF_F_ALL_CSUM) {
		/* If one device supports v4/v6 checksumming, set for all. */
		if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
		    !(all & NETIF_F_GEN_CSUM)) {
			all &= ~NETIF_F_ALL_CSUM;
			all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
		}

		/* If one device supports hw checksumming, set for all. */
		if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
			all &= ~NETIF_F_ALL_CSUM;
			all |= NETIF_F_HW_CSUM;
		}
	}

	one |= NETIF_F_ALL_CSUM;

	one |= all & NETIF_F_ONE_FOR_ALL;
	all &= one | NETIF_F_LLTX | NETIF_F_GSO | NETIF_F_UFO;
	all |= one & mask & NETIF_F_ONE_FOR_ALL;

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);
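
/*
 * Example (illustrative sketch, loosely after what a master driver such
 * as bonding does; the iteration below is pseudocode): a master device
 * recomputes its feature set by folding in each slave:
 *
 *	features = NETIF_F_ONE_FOR_ALL;
 *	for each slave:
 *		features = netdev_increment_features(features,
 *						     slave->features, mask);
 */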
static struct hlist_head *netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}
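
/*
 * Example (illustrative): these buckets back the per-namespace device
 * lookups used throughout this file:
 *
 *	dev = __dev_get_by_name(net, "eth0");	hashes into dev_name_head
 *	dev = __dev_get_by_index(net, ifindex);	hashes into dev_index_head
 */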
/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}
/**
 *	netdev_drivername - network driver for the device
 *	@dev: network device
 *	@buffer: buffer for resulting name
 *	@len: size of buffer
 *
 *	Determine network driver for device.
 */
char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
{
	const struct device_driver *driver;
	const struct device *parent;

	if (len <= 0 || !buffer)
		return buffer;
	buffer[0] = 0;

	parent = dev->dev.parent;
	if (!parent)
		return buffer;

	driver = parent->driver;
	if (driver && driver->name)
		strlcpy(buffer, driver->name, len);
	return buffer;
}
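
/*
 * Example (sketch of the watchdog-style caller this was written for;
 * drivername is a caller-supplied buffer):
 *
 *	char drivername[64];
 *	printk(KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit timed out\n",
 *	       dev->name, netdev_drivername(dev, drivername, 64));
 */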
static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};
static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
restart:
	for_each_netdev(net, dev) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmoveable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Delete virtual devices */
		if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) {
			dev->rtnl_link_ops->dellink(dev);
			goto restart;
		}

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
			       __func__, dev->name, err);
			BUG();
		}
		goto restart;
	}
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
};
/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 *	This is called single threaded during boot, so no need
 *	to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 *	Initialise the packet receive queues.
	 */
	for_each_possible_cpu(i) {
		struct softnet_data *queue;

		queue = &per_cpu(softnet_data, i);
		skb_queue_head_init(&queue->input_pkt_queue);
		queue->completion_queue = NULL;
		INIT_LIST_HEAD(&queue->poll_list);

		queue->backlog.poll = process_backlog;
		queue->backlog.weight = weight_p;
		queue->backlog.gro_list = NULL;
		queue->backlog.gro_count = 0;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too. Since we now dynamically allocate and free
	 * the loopback device, ensure this invariant is maintained by
	 * keeping the loopback device as the first device on the list
	 * of network devices, so that it is the first device to appear
	 * and the last network device to disappear.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	dev_mcast_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);
static int __init initialize_hashrnd(void)
{
	get_random_bytes(&skb_tx_hashrnd, sizeof(skb_tx_hashrnd));
	return 0;
}

late_initcall_sync(initialize_hashrnd);