/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain :	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>

#include "net-sysfs.h"
/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)
static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);

EXPORT_SYMBOL(dev_base_lock);
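/*
 * A minimal sketch (not part of the original file) of the pure-reader
 * pattern the comment above describes; count_running_devices() is a
 * hypothetical name, everything else is the real API.  Kept under
 * "#if 0" so it is illustration only.
 */
#if 0
static int count_running_devices(struct net *net)
{
	struct net_device *dev;
	int count = 0;

	read_lock(&dev_base_lock);	/* pure reader: no rtnl needed */
	for_each_netdev(net, dev)
		if (dev->flags & IFF_UP)
			count++;
	read_unlock(&dev_base_lock);
	return count;
}
#endif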
#define NETDEV_HASHBITS	8
#define NETDEV_HASHENTRIES	(1 << NETDEV_HASHBITS)

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail(&dev->dev_list, &net->dev_base_head);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del(&dev->dev_list);
	hlist_del(&dev->name_hlist);
	hlist_del(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}
/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);
#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
	 ARPHRD_PHONET_PIPE, ARPHRD_VOID, ARPHRD_NONE};

static const char *netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
	 "_xmit_PHONET_PIPE", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif
/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if a protocol handler that mangles packets
 *	were first on the list, it could not sense that the packet is
 *	cloned and should be copied-on-write; it would change the packet,
 *	and subsequent readers would get a broken packet.
 */
/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}
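/*
 * A minimal sketch (not part of the original file) of a protocol
 * registering with dev_add_pack(); example_rcv and example_pt are
 * hypothetical names.  Handlers for a specific ethertype land in the
 * ptype_base hash above, ETH_P_ALL taps go on ptype_all.  Kept under
 * "#if 0" so it is illustration only.
 */
#if 0
static int example_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	/* a real handler would parse the skb here */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type example_pt __read_mostly = {
	.type = __constant_htons(ETH_P_IP),
	.func = example_rcv,
};
/* dev_add_pack(&example_pt) links it in; dev_remove_pack() unlinks it. */
#endif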
/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}
/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
/*******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine to
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}
/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}
/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
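/*
 * Worked example (not part of the original file) of the "netdev=" boot
 * parameter parsed above: up to four integers followed by a name, e.g.
 *
 *	netdev=5,0x340,0,0,eth0
 *
 * get_options() leaves the value count in ints[0] and "eth0" in str, so
 * the irq/base_addr/mem_start/mem_end assignments above fill the ifmap
 * that netdev_boot_setup_check() later copies into the matching device.
 */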
/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_name_hash(net, name)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;
	}
	return NULL;
}
/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}
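/*
 * A minimal sketch (not part of the original file) of the hold/put
 * discipline dev_get_by_name() expects of its callers;
 * example_query_mtu() is a hypothetical name.  Kept under "#if 0" so
 * it is illustration only.
 */
#if 0
static int example_query_mtu(struct net *net)
{
	struct net_device *dev = dev_get_by_name(net, "lo");
	int mtu;

	if (!dev)
		return -ENODEV;
	mtu = dev->mtu;
	dev_put(dev);		/* drop the reference taken for us */
	return mtu;
}
#endif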
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_index_hash(net, ifindex)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, index_hlist);
		if (dev->ifindex == ifindex)
			return dev;
	}
	return NULL;
}


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(net, ifindex);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}
/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}

EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}

EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(net, type);
	if (dev)
		dev_hold(dev);
	rtnl_unlock();
	return dev;
}

EXPORT_SYMBOL(dev_getfirstbyhwtype);
/**
 *	dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	read_lock(&dev_base_lock);
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	read_unlock(&dev_base_lock);
	return ret;
}
/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}
/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
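/*
 * Usage note (not part of the original file): a driver typically calls
 * dev_alloc_name(dev, "eth%d") before register_netdevice(); on success
 * dev->name holds the chosen name (e.g. "eth0") and the unit number is
 * returned, otherwise a negative errno such as -EINVAL or -ENFILE.
 */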
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device; a format string such as "eth%d" can be
 *	passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!dev_valid_name(newname))
		return -EINVAL;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	if (strchr(newname, '%')) {
		err = dev_alloc_name(dev, newname);
		if (err < 0)
			return err;
	}
	else if (__dev_get_by_name(net, newname))
		return -EEXIST;
	else
		strlcpy(dev->name, newname, IFNAMSIZ);

rollback:
	/* For now only devices in the initial network namespace
	 * are in sysfs.
	 */
	if (net == &init_net) {
		ret = device_rename(&dev->dev, dev->name);
		if (ret) {
			memcpy(dev->name, oldname, IFNAMSIZ);
			return ret;
		}
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		if (err) {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		} else {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		}
	}

	return err;
}
/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	dev->ifalias = krealloc(dev->ifalias, len+1, GFP_KERNEL);
	if (!dev->ifalias)
		return -ENOMEM;

	strlcpy(dev->ifalias, alias, len+1);
	return 0;
}
/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}

void netdev_bonding_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);
/**
 *	dev_load 	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	read_unlock(&dev_base_lock);

	if (!dev && capable(CAP_SYS_MODULE))
		request_module("%s", name);
}
/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret = 0;

	ASSERT_RTNL();

	/*
	 *	Is it already up?
	 */
	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	/*
	 *	If it went open OK then:
	 */
	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Enable NET_DMA
		 */
		net_dmaengine_get();

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 *	... and announce new interface.
		 */
		call_netdevice_notifiers(NETDEV_UP, dev);
	}

	return ret;
}
/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	ASSERT_RTNL();

	might_sleep();

	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare to die while the device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (ops->ndo_stop)
		ops->ndo_stop(dev);

	/*
	 *	Device is now down.
	 */
	dev->flags &= ~IFF_UP;

	/*
	 *	Tell people we are down
	 */
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	/*
	 *	Shutdown NET_DMA
	 */
	net_dmaengine_put();

	return 0;
}
/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);
static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered, all registration and up events are replayed
 *	to the new notifier to allow it a race-free view of the current
 *	network device list.
 */
int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
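/*
 * A minimal sketch (not part of the original file) of a netdev notifier;
 * example_event and example_nb are hypothetical names.  Note how the
 * replay logic above guarantees this callback sees NETDEV_REGISTER and
 * NETDEV_UP for devices that already existed at registration time.
 * Kept under "#if 0" so it is illustration only.
 */
#if 0
static int example_event(struct notifier_block *nb, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = ptr;

	if (event == NETDEV_UP)
		printk(KERN_INFO "%s is up\n", dev->name);
	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call = example_event,
};
/* Paired with register_netdevice_notifier(&example_nb) and
 * unregister_netdevice_notifier(&example_nb). */
#endif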
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}
/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}

static inline void net_timestamp(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}
/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;

	net_timestamp(skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			   set by sender, so that the second statement is
			   just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       skb2->protocol, dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype, skb->dev);
		}
	}
	rcu_read_unlock();
}
static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = sd->output_queue;
	sd->output_queue = q;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);
/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_stop_queue(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached from system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_wake_queue(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
static bool can_checksum_protocol(unsigned long features, __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_IP_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_IPV6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)));
}

static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
{
	if (can_checksum_protocol(dev->features, skb->protocol))
		return true;

	if (skb->protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		if (can_checksum_protocol(dev->features & dev->vlan_features,
					  veh->h_vlan_encapsulated_proto))
			return true;
	}

	return false;
}
/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		/* Let GSO fix up the checksum. */
		goto out_set_summed;
	}

	offset = skb->csum_start - skb_headroom(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
/**
 *	skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	int err;

	skb_reset_mac_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;
	__skb_pull(skb, skb->mac_len);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		struct net_device *dev = skb->dev;
		struct ethtool_drvinfo info = {};

		if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
			dev->ethtool_ops->get_drvinfo(dev, &info);

		WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
			"ip_summed=%d",
		     info.driver, dev ? dev->features : 0L,
		     skb->sk ? skb->sk->sk_route_caps : 0L,
		     skb->len, skb->data_len, skb->ip_summed);

		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				err = ptype->gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
			}
			segs = ptype->gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}

EXPORT_SYMBOL(skb_gso_segment);
/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		printk(KERN_ERR "%s: hw csum failure.\n",
			dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif

/* Actually, we should eliminate this check as soon as we know, that:
 * 1. IOMMU is present and allows to map all the memory.
 * 2. No high memory really exists on this machine.
 */

static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;

	if (dev->features & NETIF_F_HIGHDMA)
		return 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		if (PageHighMem(skb_shinfo(skb)->frags[i].page))
			return 1;

#endif
	return 0;
}
struct dev_gso_cb {
	void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
	struct dev_gso_cb *cb;

	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		kfree_skb(nskb);
	} while (skb->next);

	cb = DEV_GSO_CB(skb);
	if (cb->destructor)
		cb->destructor(skb);
}

/**
 *	dev_gso_segment - Perform emulated hardware segmentation on skb.
 *	@skb: buffer to segment
 *
 *	This function segments the given skb and stores the list of segments
 *	in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct sk_buff *segs;
	int features = dev->features & ~(illegal_highdma(dev, skb) ?
					 NETIF_F_SG : 0);

	segs = skb_gso_segment(skb, features);

	/* Verifying header integrity only. */
	if (!segs)
		return 0;

	if (IS_ERR(segs))
		return PTR_ERR(segs);

	skb->next = segs;
	DEV_GSO_CB(skb)->destructor = skb->destructor;
	skb->destructor = dev_gso_skb_destructor;

	return 0;
}
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
			struct netdev_queue *txq)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	prefetch(&dev->netdev_ops->ndo_start_xmit);
	if (likely(!skb->next)) {
		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(skb, dev);

		if (netif_needs_gso(dev, skb)) {
			if (unlikely(dev_gso_segment(skb)))
				goto out_kfree_skb;
			if (skb->next)
				goto gso;
		}

		return ops->ndo_start_xmit(skb, dev);
	}

gso:
	do {
		struct sk_buff *nskb = skb->next;
		int rc;

		skb->next = nskb->next;
		nskb->next = NULL;
		rc = ops->ndo_start_xmit(nskb, dev);
		if (unlikely(rc)) {
			nskb->next = skb->next;
			skb->next = nskb;
			return rc;
		}
		if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
			return NETDEV_TX_BUSY;
	} while (skb->next);

	skb->destructor = DEV_GSO_CB(skb)->destructor;

out_kfree_skb:
	kfree_skb(skb);
	return 0;
}
static u32 simple_tx_hashrnd;
static int simple_tx_hashrnd_initialized = 0;

static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb)
{
	u32 addr1, addr2, ports;
	u32 hash, ihl;
	u8 ip_proto = 0;

	if (unlikely(!simple_tx_hashrnd_initialized)) {
		get_random_bytes(&simple_tx_hashrnd, 4);
		simple_tx_hashrnd_initialized = 1;
	}

	if (skb_rx_queue_recorded(skb)) {
		u32 val = skb_get_rx_queue(skb);

		hash = jhash_1word(val, simple_tx_hashrnd);
		goto out;
	}

	if (skb->sk && skb->sk->sk_hash) {
		u32 val = skb->sk->sk_hash;

		hash = jhash_1word(val, simple_tx_hashrnd);
		goto out;
	}

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		if (!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)))
			ip_proto = ip_hdr(skb)->protocol;
		addr1 = ip_hdr(skb)->saddr;
		addr2 = ip_hdr(skb)->daddr;
		ihl = ip_hdr(skb)->ihl;
		break;
	case htons(ETH_P_IPV6):
		ip_proto = ipv6_hdr(skb)->nexthdr;
		addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3];
		addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3];
		ihl = (40 >> 2);
		break;
	default:
		return 0;
	}

	switch (ip_proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
	case IPPROTO_DCCP:
	case IPPROTO_ESP:
	case IPPROTO_AH:
	case IPPROTO_SCTP:
	case IPPROTO_UDPLITE:
		ports = *((u32 *) (skb_network_header(skb) + (ihl * 4)));
		break;

	default:
		ports = 0;
		break;
	}

	hash = jhash_3words(addr1, addr2, ports, simple_tx_hashrnd);

out:
	return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
}
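/*
 * Worked example (not part of the original file) of the scaling trick in
 * the return statement above: with real_num_tx_queues == 4 and a 32-bit
 * hash of 0x80000000, ((u64)0x80000000 * 4) >> 32 == 2, so the full
 * 32-bit hash range [0, 2^32) spreads evenly over queues 0..3 without
 * a modulo operation.
 */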
static struct netdev_queue *dev_pick_tx(struct net_device *dev,
					struct sk_buff *skb)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	u16 queue_index = 0;

	if (ops->ndo_select_queue)
		queue_index = ops->ndo_select_queue(dev, skb);
	else if (dev->real_num_tx_queues > 1)
		queue_index = simple_tx_hash(dev, skb);

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}
/**
 *	dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------
 *      I notice this method can also return errors from the queue
 *      disciplines, including NET_XMIT_DROP, which is a positive value.
 *      So, errors can also be positive.
 *
 *      Regardless of the return value, the skb is consumed, so it is
 *      currently difficult to retry a send to this method.  (You can bump
 *      the ref count before sending to hold a reference for retry if you
 *      are careful.)
 *
 *      When calling this method, interrupts MUST be enabled.  This is
 *      because the BH enable code must have IRQs enabled so that it will
 *      not deadlock.
 */
int dev_queue_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;

	/* GSO will handle the following emulations directly. */
	if (netif_needs_gso(dev, skb))
		goto gso;

	if (skb_shinfo(skb)->frag_list &&
	    !(dev->features & NETIF_F_FRAGLIST) &&
	    __skb_linearize(skb))
		goto out_kfree_skb;

	/* Fragmented skb is linearized if device does not support SG,
	 * or if at least one of fragments is in highmem and device
	 * does not support DMA from it.
	 */
	if (skb_shinfo(skb)->nr_frags &&
	    (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
	    __skb_linearize(skb))
		goto out_kfree_skb;

	/* If packet is not checksummed and device does not support
	 * checksumming for this protocol, complete checksumming here.
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		skb_set_transport_header(skb, skb->csum_start -
					      skb_headroom(skb));
		if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
			goto out_kfree_skb;
	}

gso:
	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	txq = dev_pick_tx(dev, skb);
	q = rcu_dereference(txq->qdisc);

#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
	if (q->enqueue) {
		spinlock_t *root_lock = qdisc_lock(q);

		spin_lock(root_lock);

		if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
			kfree_skb(skb);
			rc = NET_XMIT_DROP;
		} else {
			rc = qdisc_enqueue_root(skb, q);
			qdisc_run(q);
		}
		spin_unlock(root_lock);

		goto out;
	}

	/* The device has no queue. Common case for software devices:
	   loopback, all the sorts of tunnels...

	   Really, it is unlikely that netif_tx_lock protection is necessary
	   here.  (f.e. loopback and IP tunnels are clean ignoring statistics
	   counters.)
	   However, it is possible, that they rely on protection
	   made by us here.

	   Check this and take the lock. It is not prone to deadlocks.
	   Or take the noqueue qdisc path, it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (txq->xmit_lock_owner != cpu) {

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_tx_queue_stopped(txq)) {
				rc = 0;
				if (!dev_hard_start_xmit(skb, dev, txq)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			if (net_ratelimit())
				printk(KERN_CRIT "Virtual device %s asks to "
				       "queue packet!\n", dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately */
			if (net_ratelimit())
				printk(KERN_CRIT "Dead loop on virtual device "
				       "%s, fix it urgently!\n", dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

out_kfree_skb:
	kfree_skb(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}
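/*
 * A minimal sketch (not part of the original file) of a caller honouring
 * the contract documented above: the skb is consumed either way, and the
 * return value may be a positive NET_XMIT_* code.  example_send() is a
 * hypothetical name; the skb is assumed to already carry full headers.
 * Kept under "#if 0" so it is illustration only.
 */
#if 0
static int example_send(struct net_device *dev, struct sk_buff *skb)
{
	skb->dev = dev;
	return dev_queue_xmit(skb);	/* no kfree_skb() here: consumed */
}
#endif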
/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog __read_mostly = 1000;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;		/* old backlog weight */

DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped)
 */

int netif_rx(struct sk_buff *skb)
{
	struct softnet_data *queue;
	unsigned long flags;

	/* if netpoll wants it, pretend we never saw it */
	if (netpoll_rx(skb))
		return NET_RX_DROP;

	if (!skb->tstamp.tv64)
		net_timestamp(skb);

	/*
	 * The code is rearranged so that the path is shortest when the
	 * CPU is congested, while it is still operating.
	 */
	local_irq_save(flags);
	queue = &__get_cpu_var(softnet_data);

	__get_cpu_var(netdev_rx_stat).total++;
	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
		if (queue->input_pkt_queue.qlen) {
enqueue:
			__skb_queue_tail(&queue->input_pkt_queue, skb);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		napi_schedule(&queue->backlog);
		goto enqueue;
	}

	__get_cpu_var(netdev_rx_stat).dropped++;
	local_irq_restore(flags);

	kfree_skb(skb);
	return NET_RX_DROP;
}
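/*
 * A minimal sketch (not part of the original file) of the classic driver
 * receive path feeding netif_rx(); example_rx() and the data/len source
 * are hypothetical.  Kept under "#if 0" so it is illustration only.
 */
#if 0
static void example_rx(struct net_device *dev, const void *data, int len)
{
	struct sk_buff *skb = dev_alloc_skb(len + NET_IP_ALIGN);

	if (!skb)
		return;				/* silently dropped */
	skb_reserve(skb, NET_IP_ALIGN);		/* align the IP header */
	memcpy(skb_put(skb, len), data, len);
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);			/* queues on this CPU's backlog */
}
#endif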
int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	preempt_disable();
	err = netif_rx(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}

EXPORT_SYMBOL(netif_rx_ni);
static void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;
			clist = clist->next;

			WARN_ON(atomic_read(&skb->users));
			__kfree_skb(skb);
		}
	}

	if (sd->output_queue) {
		struct Qdisc *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		local_irq_enable();

		while (head) {
			struct Qdisc *q = head;
			spinlock_t *root_lock;

			head = head->next_sched;

			root_lock = qdisc_lock(q);
			if (spin_trylock(root_lock)) {
				smp_mb__before_clear_bit();
				clear_bit(__QDISC_STATE_SCHED,
					  &q->state);
				qdisc_run(q);
				spin_unlock(root_lock);
			} else {
				if (!test_bit(__QDISC_STATE_DEACTIVATED,
					      &q->state)) {
					__netif_reschedule(q);
				} else {
					smp_mb__before_clear_bit();
					clear_bit(__QDISC_STATE_SCHED,
						  &q->state);
				}
			}
		}
	}
}
static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
/* These hooks are defined here for ATM */
struct net_bridge;
struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
						unsigned char *addr);
void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;

/*
 * If bridge module is loaded call bridging hook.
 *  returns NULL if packet was consumed.
 */
struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
					struct sk_buff *skb) __read_mostly;
static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
					    struct packet_type **pt_prev, int *ret,
					    struct net_device *orig_dev)
{
	struct net_bridge_port *port;

	if (skb->pkt_type == PACKET_LOOPBACK ||
	    (port = rcu_dereference(skb->dev->br_port)) == NULL)
		return skb;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	return br_handle_frame_hook(port, skb);
}
#else
#define handle_bridge(skb, pt_prev, ret, orig_dev)	(skb)
#endif

#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);

static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
					     struct packet_type **pt_prev,
					     int *ret,
					     struct net_device *orig_dev)
{
	if (skb->dev->macvlan_port == NULL)
		return skb;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}
	return macvlan_handle_frame_hook(skb);
}
#else
#define handle_macvlan(skb, pt_prev, ret, orig_dev)	(skb)
#endif
#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * when CONFIG_NET_CLS_ACT is?  Otherwise we pay some useless
 * instructions (a compare and two stores) if it is off while
 * CONFIG_NET_CLS_ACT is on.
 * NOTE: This doesn't stop any functionality; if you don't have
 * the ingress scheduler, you just can't add policies on ingress.
 */
static int ing_filter(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	u32 ttl = G_TC_RTTL(skb->tc_verd);
	struct netdev_queue *rxq;
	int result = TC_ACT_OK;
	struct Qdisc *q;

	if (MAX_RED_LOOP < ttl++) {
		printk(KERN_WARNING
		       "Redir loop detected Dropping packet (%d->%d)\n",
		       skb->iif, dev->ifindex);
		return TC_ACT_SHOT;
	}

	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);

	rxq = &dev->rx_queue;

	q = rxq->qdisc;
	if (q != &noop_qdisc) {
		spin_lock(qdisc_lock(q));
		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
			result = qdisc_enqueue_root(skb, q);
		spin_unlock(qdisc_lock(q));
	}

	return result;
}

static inline struct sk_buff *handle_ing(struct sk_buff *skb,
					 struct packet_type **pt_prev,
					 int *ret, struct net_device *orig_dev)
{
	if (skb->dev->rx_queue.qdisc == &noop_qdisc)
		goto out;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	} else {
		/* Huh? Why does turning on AF_PACKET affect this? */
		skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
	}

	switch (ing_filter(skb)) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
		kfree_skb(skb);
		return NULL;
	}

out:
	skb->tc_verd = 0;
	return skb;
}
#endif
/**
 *	netif_nit_deliver - deliver received packets to network taps
 *	@skb: buffer
 *
 *	This function is used to deliver incoming packets to network
 *	taps. It should be used when the normal netif_receive_skb path
 *	is bypassed, for example because of VLAN acceleration.
 */
void netif_nit_deliver(struct sk_buff *skb)
{
	struct packet_type *ptype;

	if (list_empty(&ptype_all))
		return;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (!ptype->dev || ptype->dev == skb->dev)
			deliver_skb(skb, ptype, skb->dev);
	}
	rcu_read_unlock();
}
/**
 *	netif_receive_skb - process receive buffer from network
 *	@skb: buffer to process
 *
 *	netif_receive_skb() is the main receive data processing function.
 *	It always succeeds. The buffer may be dropped during processing
 *	for congestion control or by the protocol layers.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
	struct packet_type *ptype, *pt_prev;
	struct net_device *orig_dev;
	struct net_device *null_or_orig;
	int ret = NET_RX_DROP;
	__be16 type;

	if (skb->vlan_tci && vlan_hwaccel_do_receive(skb))
		return NET_RX_SUCCESS;

	/* if we've gotten here through NAPI, check netpoll */
	if (netpoll_receive_skb(skb))
		return NET_RX_DROP;

	if (!skb->tstamp.tv64)
		net_timestamp(skb);

	if (!skb->iif)
		skb->iif = skb->dev->ifindex;

	null_or_orig = NULL;
	orig_dev = skb->dev;
	if (orig_dev->master) {
		if (skb_bond_should_drop(skb))
			null_or_orig = orig_dev; /* deliver only exact match */
		else
			skb->dev = orig_dev->master;
	}

	__get_cpu_var(netdev_rx_stat).total++;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;

	pt_prev = NULL;

	rcu_read_lock();

	/* Don't receive packets in an exiting network namespace */
	if (!net_alive(dev_net(skb->dev))) {
		kfree_skb(skb);
		goto out;
	}

#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		goto ncls;
	}
#endif

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
		    ptype->dev == orig_dev) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

#ifdef CONFIG_NET_CLS_ACT
	skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto out;
ncls:
#endif

	skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto out;
	skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto out;

	type = skb->protocol;
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type &&
		    (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
		     ptype->dev == orig_dev)) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

	if (pt_prev) {
		ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
	} else {
		kfree_skb(skb);
		/* Jamal, now you will not be able to escape explaining
		 * to me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

out:
	rcu_read_unlock();
	return ret;
}
/* Network device is going away, flush any packets still pending  */
static void flush_backlog(void *arg)
{
	struct net_device *dev = arg;
	struct softnet_data *queue = &__get_cpu_var(softnet_data);
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
		if (skb->dev == dev) {
			__skb_unlink(skb, &queue->input_pkt_queue);
			kfree_skb(skb);
		}
}
static int napi_gro_complete(struct sk_buff *skb)
{
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
	int err = -ENOENT;

	if (NAPI_GRO_CB(skb)->count == 1)
		goto out;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || ptype->dev || !ptype->gro_complete)
			continue;

		err = ptype->gro_complete(skb);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

out:
	skb_shinfo(skb)->gso_size = 0;
	__skb_push(skb, -skb_network_offset(skb));
	return netif_receive_skb(skb);
}

void napi_gro_flush(struct napi_struct *napi)
{
	struct sk_buff *skb, *next;

	for (skb = napi->gro_list; skb; skb = next) {
		next = skb->next;
		skb->next = NULL;
		napi_gro_complete(skb);
	}

	napi->gro_list = NULL;
}
EXPORT_SYMBOL(napi_gro_flush);
int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
	int count = 0;
	int same_flow;
	int mac_len;
	int free;

	if (!(skb->dev->features & NETIF_F_GRO))
		goto normal;

	if (skb_is_gso(skb) || skb_shinfo(skb)->frag_list)
		goto normal;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		struct sk_buff *p;

		if (ptype->type != type || ptype->dev || !ptype->gro_receive)
			continue;

		skb_reset_network_header(skb);
		mac_len = skb->network_header - skb->mac_header;
		skb->mac_len = mac_len;
		NAPI_GRO_CB(skb)->same_flow = 0;
		NAPI_GRO_CB(skb)->flush = 0;
		NAPI_GRO_CB(skb)->free = 0;

		for (p = napi->gro_list; p; p = p->next) {
			count++;

			if (!NAPI_GRO_CB(p)->same_flow)
				continue;

			if (p->mac_len != mac_len ||
			    memcmp(skb_mac_header(p), skb_mac_header(skb),
				   mac_len))
				NAPI_GRO_CB(p)->same_flow = 0;
		}

		pp = ptype->gro_receive(&napi->gro_list, skb);
		break;
	}
	rcu_read_unlock();

	if (&ptype->list == head)
		goto normal;

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	free = NAPI_GRO_CB(skb)->free;

	if (pp) {
		struct sk_buff *nskb = *pp;

		*pp = nskb->next;
		nskb->next = NULL;
		napi_gro_complete(nskb);
		count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush || count >= MAX_GRO_SKBS) {
		__skb_push(skb, -skb_network_offset(skb));
		goto normal;
	}

	NAPI_GRO_CB(skb)->count = 1;
	skb_shinfo(skb)->gso_size = skb->len;
	skb->next = napi->gro_list;
	napi->gro_list = skb;

ok:
	return free;

normal:
	return -1;
}
EXPORT_SYMBOL(dev_gro_receive);
2492 static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2496 for (p = napi->gro_list; p; p = p->next) {
2497 NAPI_GRO_CB(p)->same_flow = 1;
2498 NAPI_GRO_CB(p)->flush = 0;
2501 return dev_gro_receive(napi, skb);
2504 int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2506 switch (__napi_gro_receive(napi, skb)) {
2508 return netif_receive_skb(skb);
2515 return NET_RX_SUCCESS;
2517 EXPORT_SYMBOL(napi_gro_receive);
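/*
 * Example (illustrative sketch): how a NAPI driver's poll() routine
 * might feed received frames to GRO with napi_gro_receive(). The
 * mydrv_* names and struct are hypothetical; only napi_gro_receive()
 * and napi_complete() come from this file.
 */
struct mydrv_priv {				/* hypothetical per-device state */
	struct napi_struct napi;
};

static struct sk_buff *mydrv_fetch_skb(struct net_device *dev);	/* hypothetical */
static void mydrv_enable_rx_irq(struct net_device *dev);	/* hypothetical */

static int mydrv_poll(struct napi_struct *napi, int budget)
{
	struct sk_buff *skb;
	int work = 0;

	while (work < budget && (skb = mydrv_fetch_skb(napi->dev)) != NULL) {
		napi_gro_receive(napi, skb);	/* merge into a flow or deliver */
		work++;
	}

	if (work < budget) {			/* ring drained: stop polling */
		napi_complete(napi);
		mydrv_enable_rx_irq(napi->dev);
	}
	return work;
}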
2519 void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
2521 __skb_pull(skb, skb_headlen(skb));
2522 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
2526 EXPORT_SYMBOL(napi_reuse_skb);
2528 struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi,
2529 struct napi_gro_fraginfo *info)
2531 struct net_device *dev = napi->dev;
2532 struct sk_buff *skb = napi->skb;
2537 skb = netdev_alloc_skb(dev, GRO_MAX_HEAD + NET_IP_ALIGN);
2541 skb_reserve(skb, NET_IP_ALIGN);
2544 BUG_ON(info->nr_frags > MAX_SKB_FRAGS);
2545 skb_shinfo(skb)->nr_frags = info->nr_frags;
2546 memcpy(skb_shinfo(skb)->frags, info->frags, sizeof(info->frags));
2548 skb->data_len = info->len;
2549 skb->len += info->len;
2550 skb->truesize += info->len;
2552 if (!pskb_may_pull(skb, ETH_HLEN)) {
2553 napi_reuse_skb(napi, skb);
2558 skb->protocol = eth_type_trans(skb, dev);
2560 skb->ip_summed = info->ip_summed;
2561 skb->csum = info->csum;
2566 EXPORT_SYMBOL(napi_fraginfo_skb);
2568 int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
2570 struct sk_buff *skb = napi_fraginfo_skb(napi, info);
2571 int err = NET_RX_DROP;
2576 err = NET_RX_SUCCESS;
2578 switch (__napi_gro_receive(napi, skb)) {
2580 return netif_receive_skb(skb);
2586 napi_reuse_skb(napi, skb);
2591 EXPORT_SYMBOL(napi_gro_frags);
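/*
 * Example (illustrative sketch): a page-based receive path feeding GRO
 * through napi_gro_frags(). The driver fills in a napi_gro_fraginfo
 * from its rx ring instead of building an skb itself; "mydrv_rx_page"
 * is hypothetical.
 */
static void mydrv_rx_page(struct napi_struct *napi, struct page *page,
			  unsigned int len)
{
	struct napi_gro_fraginfo info = {
		.nr_frags  = 1,
		.len	   = len,
		.ip_summed = CHECKSUM_NONE,
	};

	info.frags[0].page = page;
	info.frags[0].page_offset = 0;
	info.frags[0].size = len;

	napi_gro_frags(napi, &info);	/* may merge, deliver, or recycle */
}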
2593 static int process_backlog(struct napi_struct *napi, int quota)
2596 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2597 unsigned long start_time = jiffies;
2599 napi->weight = weight_p;
2601 struct sk_buff *skb;
2603 local_irq_disable();
2604 skb = __skb_dequeue(&queue->input_pkt_queue);
2606 __napi_complete(napi);
2612 napi_gro_receive(napi, skb);
2613 } while (++work < quota && jiffies == start_time);
2615 napi_gro_flush(napi);
2621 * __napi_schedule - schedule for receive
2622 * @n: entry to schedule
2624 * The entry's receive function will be scheduled to run
2626 void __napi_schedule(struct napi_struct *n)
2628 unsigned long flags;
2630 local_irq_save(flags);
2631 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
2632 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2633 local_irq_restore(flags);
2635 EXPORT_SYMBOL(__napi_schedule);
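/*
 * Example (illustrative sketch): the usual interrupt-side pairing for
 * __napi_schedule(). napi_schedule_prep() is the netdevice.h helper
 * that atomically test-and-sets NAPI_STATE_SCHED; the IRQ-masking
 * helper is hypothetical.
 */
static void mydrv_disable_rx_irq(struct mydrv_priv *priv);	/* hypothetical */

static irqreturn_t mydrv_interrupt(int irq, void *dev_id)
{
	struct mydrv_priv *priv = dev_id;

	if (napi_schedule_prep(&priv->napi)) {
		mydrv_disable_rx_irq(priv);	/* quiesce until poll() finishes */
		__napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}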
2637 void __napi_complete(struct napi_struct *n)
2639 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
2640 BUG_ON(n->gro_list);
2642 list_del(&n->poll_list);
2643 smp_mb__before_clear_bit();
2644 clear_bit(NAPI_STATE_SCHED, &n->state);
2646 EXPORT_SYMBOL(__napi_complete);
2648 void napi_complete(struct napi_struct *n)
2650 unsigned long flags;
2653 * don't let napi dequeue from the cpu poll list
2654 * just in case it's running on a different cpu
2656 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
2660 local_irq_save(flags);
2662 local_irq_restore(flags);
2664 EXPORT_SYMBOL(napi_complete);
2666 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2667 int (*poll)(struct napi_struct *, int), int weight)
2669 INIT_LIST_HEAD(&napi->poll_list);
2670 napi->gro_list = NULL;
2673 napi->weight = weight;
2674 list_add(&napi->dev_list, &dev->napi_list);
2676 #ifdef CONFIG_NETPOLL
2677 spin_lock_init(&napi->poll_lock);
2678 napi->poll_owner = -1;
2680 set_bit(NAPI_STATE_SCHED, &napi->state);
2682 EXPORT_SYMBOL(netif_napi_add);
2684 void netif_napi_del(struct napi_struct *napi)
2686 struct sk_buff *skb, *next;
2688 list_del_init(&napi->dev_list);
2691 for (skb = napi->gro_list; skb; skb = next) {
2697 napi->gro_list = NULL;
2699 EXPORT_SYMBOL(netif_napi_del);
2702 static void net_rx_action(struct softirq_action *h)
2704 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
2705 unsigned long time_limit = jiffies + 2;
2706 int budget = netdev_budget;
2709 local_irq_disable();
2711 while (!list_empty(list)) {
2712 struct napi_struct *n;
2715 /* If the softirq window is exhausted then punt.
2716 * Allow this to run for 2 jiffies, which gives
2717 * an average latency of 1.5/HZ.
2719 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
2724 /* Even though interrupts have been re-enabled, this
2725 * access is safe because interrupts can only add new
2726 * entries to the tail of this list, and only ->poll()
2727 * calls can remove this head entry from the list.
2729 n = list_entry(list->next, struct napi_struct, poll_list);
2731 have = netpoll_poll_lock(n);
2735 /* This NAPI_STATE_SCHED test is for avoiding a race
2736 * with netpoll's poll_napi(). Only the entity which
2737 * obtains the lock and sees NAPI_STATE_SCHED set will
2738 * actually make the ->poll() call. Therefore we avoid
2739 * accidentally calling ->poll() when NAPI is not scheduled.
2742 if (test_bit(NAPI_STATE_SCHED, &n->state))
2743 work = n->poll(n, weight);
2745 WARN_ON_ONCE(work > weight);
2749 local_irq_disable();
2751 /* Drivers must not modify the NAPI state if they
2752 * consume the entire weight. In such cases this code
2753 * still "owns" the NAPI instance and therefore can
2754 * move the instance around on the list at will.
2756 if (unlikely(work == weight)) {
2757 if (unlikely(napi_disable_pending(n)))
2760 list_move_tail(&n->poll_list, list);
2763 netpoll_poll_unlock(have);
2768 #ifdef CONFIG_NET_DMA
2770 * There may not be any more sk_buffs coming right now, so push
2771 * any pending DMA copies to hardware
2773 dma_issue_pending_all();
2779 __get_cpu_var(netdev_rx_stat).time_squeeze++;
2780 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2784 static gifconf_func_t * gifconf_list [NPROTO];
2787 * register_gifconf - register a SIOCGIF handler
2788 * @family: Address family
2789 * @gifconf: Function handler
2791 * Register protocol dependent address dumping routines. The handler
2792 * that is passed must not be freed or reused until it has been replaced
2793 * by another handler.
2795 int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
2797 if (family >= NPROTO)
2799 gifconf_list[family] = gifconf;
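/*
 * Example (illustrative sketch): the shape of a gifconf handler an
 * address family registers here. With buf == NULL it must report how
 * much space it needs (see dev_ifconf() below). "myfam_gifconf" is
 * hypothetical; the PF_INET slot is used purely for illustration.
 */
static int myfam_gifconf(struct net_device *dev, char __user *buf, int len)
{
	/* write one struct ifreq per address this family owns on @dev and
	 * return the bytes used; when buf == NULL return the bytes needed */
	return 0;
}

static int __init myfam_init(void)
{
	return register_gifconf(PF_INET, myfam_gifconf);
}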
2805 * Map an interface index to its name (SIOCGIFNAME)
2809 * We need this ioctl for efficient implementation of the
2810 * if_indextoname() function required by the IPv6 API. Without
2811 * it, we would have to search all the interfaces to find a match.
2815 static int dev_ifname(struct net *net, struct ifreq __user *arg)
2817 struct net_device *dev;
2821 * Fetch the caller's info block.
2824 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2827 read_lock(&dev_base_lock);
2828 dev = __dev_get_by_index(net, ifr.ifr_ifindex);
2830 read_unlock(&dev_base_lock);
2834 strcpy(ifr.ifr_name, dev->name);
2835 read_unlock(&dev_base_lock);
2837 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
2843 * Perform a SIOCGIFCONF call. This structure will change
2844 * size eventually, and there is nothing I can do about it.
2845 * Thus we will need a 'compatibility mode'.
2848 static int dev_ifconf(struct net *net, char __user *arg)
2851 struct net_device *dev;
2858 * Fetch the caller's info block.
2861 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
2868 * Loop over the interfaces, and write an info block for each.
2872 for_each_netdev(net, dev) {
2873 for (i = 0; i < NPROTO; i++) {
2874 if (gifconf_list[i]) {
2877 done = gifconf_list[i](dev, NULL, 0);
2879 done = gifconf_list[i](dev, pos + total,
2889 * All done. Write the updated control block back to the caller.
2891 ifc.ifc_len = total;
2894 * Both BSD and Solaris return 0 here, so we do too.
2896 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
2899 #ifdef CONFIG_PROC_FS
2901 * This is invoked by the /proc filesystem handler to display a device in detail.
2904 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
2905 __acquires(dev_base_lock)
2907 struct net *net = seq_file_net(seq);
2909 struct net_device *dev;
2911 read_lock(&dev_base_lock);
2913 return SEQ_START_TOKEN;
2916 for_each_netdev(net, dev)
2923 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2925 struct net *net = seq_file_net(seq);
2927 return v == SEQ_START_TOKEN ?
2928 first_net_device(net) : next_net_device((struct net_device *)v);
2931 void dev_seq_stop(struct seq_file *seq, void *v)
2932 __releases(dev_base_lock)
2934 read_unlock(&dev_base_lock);
2937 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
2939 const struct net_device_stats *stats = dev_get_stats(dev);
2941 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
2942 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
2943 dev->name, stats->rx_bytes, stats->rx_packets,
2945 stats->rx_dropped + stats->rx_missed_errors,
2946 stats->rx_fifo_errors,
2947 stats->rx_length_errors + stats->rx_over_errors +
2948 stats->rx_crc_errors + stats->rx_frame_errors,
2949 stats->rx_compressed, stats->multicast,
2950 stats->tx_bytes, stats->tx_packets,
2951 stats->tx_errors, stats->tx_dropped,
2952 stats->tx_fifo_errors, stats->collisions,
2953 stats->tx_carrier_errors +
2954 stats->tx_aborted_errors +
2955 stats->tx_window_errors +
2956 stats->tx_heartbeat_errors,
2957 stats->tx_compressed);
2961 * Called from the PROCfs module. This now uses the new arbitrary-sized
2962 * /proc/net interface to create /proc/net/dev
2964 static int dev_seq_show(struct seq_file *seq, void *v)
2966 if (v == SEQ_START_TOKEN)
2967 seq_puts(seq, "Inter-| Receive "
2969 " face |bytes packets errs drop fifo frame "
2970 "compressed multicast|bytes packets errs "
2971 "drop fifo colls carrier compressed\n");
2973 dev_seq_printf_stats(seq, v);
2977 static struct netif_rx_stats *softnet_get_online(loff_t *pos)
2979 struct netif_rx_stats *rc = NULL;
2981 while (*pos < nr_cpu_ids)
2982 if (cpu_online(*pos)) {
2983 rc = &per_cpu(netdev_rx_stat, *pos);
2990 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
2992 return softnet_get_online(pos);
2995 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2998 return softnet_get_online(pos);
3001 static void softnet_seq_stop(struct seq_file *seq, void *v)
3005 static int softnet_seq_show(struct seq_file *seq, void *v)
3007 struct netif_rx_stats *s = v;
3009 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
3010 s->total, s->dropped, s->time_squeeze, 0,
3011 0, 0, 0, 0, /* was fastroute */
3016 static const struct seq_operations dev_seq_ops = {
3017 .start = dev_seq_start,
3018 .next = dev_seq_next,
3019 .stop = dev_seq_stop,
3020 .show = dev_seq_show,
3023 static int dev_seq_open(struct inode *inode, struct file *file)
3025 return seq_open_net(inode, file, &dev_seq_ops,
3026 sizeof(struct seq_net_private));
3029 static const struct file_operations dev_seq_fops = {
3030 .owner = THIS_MODULE,
3031 .open = dev_seq_open,
3033 .llseek = seq_lseek,
3034 .release = seq_release_net,
3037 static const struct seq_operations softnet_seq_ops = {
3038 .start = softnet_seq_start,
3039 .next = softnet_seq_next,
3040 .stop = softnet_seq_stop,
3041 .show = softnet_seq_show,
3044 static int softnet_seq_open(struct inode *inode, struct file *file)
3046 return seq_open(file, &softnet_seq_ops);
3049 static const struct file_operations softnet_seq_fops = {
3050 .owner = THIS_MODULE,
3051 .open = softnet_seq_open,
3053 .llseek = seq_lseek,
3054 .release = seq_release,
3057 static void *ptype_get_idx(loff_t pos)
3059 struct packet_type *pt = NULL;
3063 list_for_each_entry_rcu(pt, &ptype_all, list) {
3069 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
3070 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
3079 static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
3083 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
3086 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3088 struct packet_type *pt;
3089 struct list_head *nxt;
3093 if (v == SEQ_START_TOKEN)
3094 return ptype_get_idx(0);
3097 nxt = pt->list.next;
3098 if (pt->type == htons(ETH_P_ALL)) {
3099 if (nxt != &ptype_all)
3102 nxt = ptype_base[0].next;
3104 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
3106 while (nxt == &ptype_base[hash]) {
3107 if (++hash >= PTYPE_HASH_SIZE)
3109 nxt = ptype_base[hash].next;
3112 return list_entry(nxt, struct packet_type, list);
3115 static void ptype_seq_stop(struct seq_file *seq, void *v)
3121 static int ptype_seq_show(struct seq_file *seq, void *v)
3123 struct packet_type *pt = v;
3125 if (v == SEQ_START_TOKEN)
3126 seq_puts(seq, "Type Device Function\n");
3127 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
3128 if (pt->type == htons(ETH_P_ALL))
3129 seq_puts(seq, "ALL ");
3131 seq_printf(seq, "%04x", ntohs(pt->type));
3133 seq_printf(seq, " %-8s %pF\n",
3134 pt->dev ? pt->dev->name : "", pt->func);
3140 static const struct seq_operations ptype_seq_ops = {
3141 .start = ptype_seq_start,
3142 .next = ptype_seq_next,
3143 .stop = ptype_seq_stop,
3144 .show = ptype_seq_show,
3147 static int ptype_seq_open(struct inode *inode, struct file *file)
3149 return seq_open_net(inode, file, &ptype_seq_ops,
3150 sizeof(struct seq_net_private));
3153 static const struct file_operations ptype_seq_fops = {
3154 .owner = THIS_MODULE,
3155 .open = ptype_seq_open,
3157 .llseek = seq_lseek,
3158 .release = seq_release_net,
3162 static int __net_init dev_proc_net_init(struct net *net)
3166 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
3168 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
3170 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
3173 if (wext_proc_init(net))
3179 proc_net_remove(net, "ptype");
3181 proc_net_remove(net, "softnet_stat");
3183 proc_net_remove(net, "dev");
3187 static void __net_exit dev_proc_net_exit(struct net *net)
3189 wext_proc_exit(net);
3191 proc_net_remove(net, "ptype");
3192 proc_net_remove(net, "softnet_stat");
3193 proc_net_remove(net, "dev");
3196 static struct pernet_operations __net_initdata dev_proc_ops = {
3197 .init = dev_proc_net_init,
3198 .exit = dev_proc_net_exit,
3201 static int __init dev_proc_init(void)
3203 return register_pernet_subsys(&dev_proc_ops);
3206 #define dev_proc_init() 0
3207 #endif /* CONFIG_PROC_FS */
3211 * netdev_set_master - set up master/slave pair
3212 * @slave: slave device
3213 * @master: new master device
3215 * Changes the master device of the slave. Pass %NULL to break the
3216 * bonding. The caller must hold the RTNL semaphore. On a failure
3217 * a negative errno code is returned. On success the reference counts
3218 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
3219 * function returns zero.
3221 int netdev_set_master(struct net_device *slave, struct net_device *master)
3223 struct net_device *old = slave->master;
3233 slave->master = master;
3241 slave->flags |= IFF_SLAVE;
3243 slave->flags &= ~IFF_SLAVE;
3245 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
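/*
 * Example (illustrative sketch): how a bonding-style driver enslaves
 * and releases a device with netdev_set_master(), under RTNL as the
 * comment above requires. "mybond" is hypothetical.
 */
static int mybond_enslave(struct net_device *bond_dev,
			  struct net_device *slave_dev)
{
	ASSERT_RTNL();
	return netdev_set_master(slave_dev, bond_dev);
}

static void mybond_release(struct net_device *slave_dev)
{
	ASSERT_RTNL();
	netdev_set_master(slave_dev, NULL);	/* NULL breaks the pairing */
}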
3249 static void dev_change_rx_flags(struct net_device *dev, int flags)
3251 const struct net_device_ops *ops = dev->netdev_ops;
3253 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
3254 ops->ndo_change_rx_flags(dev, flags);
3257 static int __dev_set_promiscuity(struct net_device *dev, int inc)
3259 unsigned short old_flags = dev->flags;
3265 dev->flags |= IFF_PROMISC;
3266 dev->promiscuity += inc;
3267 if (dev->promiscuity == 0) {
3270 * If inc causes an overflow, leave promiscuity untouched and return an error.
3273 dev->flags &= ~IFF_PROMISC;
3275 dev->promiscuity -= inc;
3276 printk(KERN_WARNING "%s: promiscuity touches roof, "
3277 "set promiscuity failed, promiscuity feature "
3278 "of device might be broken.\n", dev->name);
3282 if (dev->flags != old_flags) {
3283 printk(KERN_INFO "device %s %s promiscuous mode\n",
3284 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
3286 if (audit_enabled) {
3287 current_uid_gid(&uid, &gid);
3288 audit_log(current->audit_context, GFP_ATOMIC,
3289 AUDIT_ANOM_PROMISCUOUS,
3290 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
3291 dev->name, (dev->flags & IFF_PROMISC),
3292 (old_flags & IFF_PROMISC),
3293 audit_get_loginuid(current),
3295 audit_get_sessionid(current));
3298 dev_change_rx_flags(dev, IFF_PROMISC);
3304 * dev_set_promiscuity - update promiscuity count on a device
3308 * Add or remove promiscuity from a device. While the count in the device
3309 * remains above zero the interface remains promiscuous. Once it hits zero
3310 * the device reverts back to normal filtering operation. A negative inc
3311 * value is used to drop promiscuity on the device.
3312 * Return 0 if successful or a negative errno code on error.
3314 int dev_set_promiscuity(struct net_device *dev, int inc)
3316 unsigned short old_flags = dev->flags;
3319 err = __dev_set_promiscuity(dev, inc);
3322 if (dev->flags != old_flags)
3323 dev_set_rx_mode(dev);
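/*
 * Example (illustrative sketch): a capture-style user of the
 * promiscuity counter. Holding a +1 reference keeps the device
 * promiscuous exactly as long as the (hypothetical) tap is attached.
 */
static int mytap_attach(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_promiscuity(dev, 1);	/* +1: enter promiscuous mode */
	rtnl_unlock();
	return err;
}

static void mytap_detach(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, -1);		/* -1: drop our reference */
	rtnl_unlock();
}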
3328 * dev_set_allmulti - update allmulti count on a device
3332 * Add or remove reception of all multicast frames to a device. While the
3333 * count in the device remains above zero the interface remains listening
3334 * to all multicast frames. Once it hits zero the device reverts back to normal
3335 * filtering operation. A negative @inc value is used to drop the counter
3336 * when releasing a resource needing all multicasts.
3337 * Return 0 if successful or a negative errno code on error.
3340 int dev_set_allmulti(struct net_device *dev, int inc)
3342 unsigned short old_flags = dev->flags;
3346 dev->flags |= IFF_ALLMULTI;
3347 dev->allmulti += inc;
3348 if (dev->allmulti == 0) {
3351 * If inc causes an overflow, leave allmulti untouched and return an error.
3354 dev->flags &= ~IFF_ALLMULTI;
3356 dev->allmulti -= inc;
3357 printk(KERN_WARNING "%s: allmulti touches roof, "
3358 "set allmulti failed, allmulti feature of "
3359 "device might be broken.\n", dev->name);
3363 if (dev->flags ^ old_flags) {
3364 dev_change_rx_flags(dev, IFF_ALLMULTI);
3365 dev_set_rx_mode(dev);
3371 * Upload unicast and multicast address lists to device and
3372 * configure RX filtering. When the device doesn't support unicast
3373 * filtering it is put in promiscuous mode while unicast addresses are used.
3376 void __dev_set_rx_mode(struct net_device *dev)
3378 const struct net_device_ops *ops = dev->netdev_ops;
3380 /* dev_open will call this function so the list will stay sane. */
3381 if (!(dev->flags&IFF_UP))
3384 if (!netif_device_present(dev))
3387 if (ops->ndo_set_rx_mode)
3388 ops->ndo_set_rx_mode(dev);
3390 /* Unicast address changes may only happen under the rtnl,
3391 * therefore calling __dev_set_promiscuity here is safe.
3393 if (dev->uc_count > 0 && !dev->uc_promisc) {
3394 __dev_set_promiscuity(dev, 1);
3395 dev->uc_promisc = 1;
3396 } else if (dev->uc_count == 0 && dev->uc_promisc) {
3397 __dev_set_promiscuity(dev, -1);
3398 dev->uc_promisc = 0;
3401 if (ops->ndo_set_multicast_list)
3402 ops->ndo_set_multicast_list(dev);
3406 void dev_set_rx_mode(struct net_device *dev)
3408 netif_addr_lock_bh(dev);
3409 __dev_set_rx_mode(dev);
3410 netif_addr_unlock_bh(dev);
3413 int __dev_addr_delete(struct dev_addr_list **list, int *count,
3414 void *addr, int alen, int glbl)
3416 struct dev_addr_list *da;
3418 for (; (da = *list) != NULL; list = &da->next) {
3419 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3420 alen == da->da_addrlen) {
3422 int old_glbl = da->da_gusers;
3439 int __dev_addr_add(struct dev_addr_list **list, int *count,
3440 void *addr, int alen, int glbl)
3442 struct dev_addr_list *da;
3444 for (da = *list; da != NULL; da = da->next) {
3445 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3446 da->da_addrlen == alen) {
3448 int old_glbl = da->da_gusers;
3458 da = kzalloc(sizeof(*da), GFP_ATOMIC);
3461 memcpy(da->da_addr, addr, alen);
3462 da->da_addrlen = alen;
3464 da->da_gusers = glbl ? 1 : 0;
3472 * dev_unicast_delete - Release secondary unicast address.
3474 * @addr: address to delete
3475 * @alen: length of @addr
3477 * Release reference to a secondary unicast address and remove it
3478 * from the device if the reference count drops to zero.
3480 * The caller must hold the rtnl_mutex.
3482 int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
3488 netif_addr_lock_bh(dev);
3489 err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
3491 __dev_set_rx_mode(dev);
3492 netif_addr_unlock_bh(dev);
3495 EXPORT_SYMBOL(dev_unicast_delete);
3498 * dev_unicast_add - add a secondary unicast address
3500 * @addr: address to add
3501 * @alen: length of @addr
3503 * Add a secondary unicast address to the device or increase
3504 * the reference count if it already exists.
3506 * The caller must hold the rtnl_mutex.
3508 int dev_unicast_add(struct net_device *dev, void *addr, int alen)
3514 netif_addr_lock_bh(dev);
3515 err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
3517 __dev_set_rx_mode(dev);
3518 netif_addr_unlock_bh(dev);
3521 EXPORT_SYMBOL(dev_unicast_add);
3523 int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
3524 struct dev_addr_list **from, int *from_count)
3526 struct dev_addr_list *da, *next;
3530 while (da != NULL) {
3532 if (!da->da_synced) {
3533 err = __dev_addr_add(to, to_count,
3534 da->da_addr, da->da_addrlen, 0);
3539 } else if (da->da_users == 1) {
3540 __dev_addr_delete(to, to_count,
3541 da->da_addr, da->da_addrlen, 0);
3542 __dev_addr_delete(from, from_count,
3543 da->da_addr, da->da_addrlen, 0);
3550 void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
3551 struct dev_addr_list **from, int *from_count)
3553 struct dev_addr_list *da, *next;
3556 while (da != NULL) {
3558 if (da->da_synced) {
3559 __dev_addr_delete(to, to_count,
3560 da->da_addr, da->da_addrlen, 0);
3562 __dev_addr_delete(from, from_count,
3563 da->da_addr, da->da_addrlen, 0);
3570 * dev_unicast_sync - Synchronize device's unicast list to another device
3571 * @to: destination device
3572 * @from: source device
3574 * Add newly added addresses to the destination device and release
3575 * addresses that have no users left. The source device must be
3576 * locked by netif_addr_lock_bh.
3578 * This function is intended to be called from the dev->set_rx_mode
3579 * function of layered software devices.
3581 int dev_unicast_sync(struct net_device *to, struct net_device *from)
3585 netif_addr_lock_bh(to);
3586 err = __dev_addr_sync(&to->uc_list, &to->uc_count,
3587 &from->uc_list, &from->uc_count);
3589 __dev_set_rx_mode(to);
3590 netif_addr_unlock_bh(to);
3593 EXPORT_SYMBOL(dev_unicast_sync);
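/*
 * Example (illustrative sketch): the layered-device rx-mode handler
 * described above, in the style of macvlan/vlan. "myvirt" and its
 * private struct are hypothetical; lowerdev is the real device
 * underneath.
 */
struct myvirt_priv {
	struct net_device *lowerdev;
};

static void myvirt_set_rx_mode(struct net_device *dev)
{
	struct myvirt_priv *priv = netdev_priv(dev);

	/* push any new secondary unicast addresses down to the real device */
	dev_unicast_sync(priv->lowerdev, dev);
}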
3596 * dev_unicast_unsync - Remove synchronized addresses from the destination device
3597 * @to: destination device
3598 * @from: source device
3600 * Remove all addresses that were added to the destination device by
3601 * dev_unicast_sync(). This function is intended to be called from the
3602 * dev->stop function of layered software devices.
3604 void dev_unicast_unsync(struct net_device *to, struct net_device *from)
3606 netif_addr_lock_bh(from);
3607 netif_addr_lock(to);
3609 __dev_addr_unsync(&to->uc_list, &to->uc_count,
3610 &from->uc_list, &from->uc_count);
3611 __dev_set_rx_mode(to);
3613 netif_addr_unlock(to);
3614 netif_addr_unlock_bh(from);
3616 EXPORT_SYMBOL(dev_unicast_unsync);
3618 static void __dev_addr_discard(struct dev_addr_list **list)
3620 struct dev_addr_list *tmp;
3622 while (*list != NULL) {
3625 if (tmp->da_users > tmp->da_gusers)
3626 printk("__dev_addr_discard: address leakage! "
3627 "da_users=%d\n", tmp->da_users);
3632 static void dev_addr_discard(struct net_device *dev)
3634 netif_addr_lock_bh(dev);
3636 __dev_addr_discard(&dev->uc_list);
3639 __dev_addr_discard(&dev->mc_list);
3642 netif_addr_unlock_bh(dev);
3646 * dev_get_flags - get flags reported to userspace
3649 * Get the combination of flag bits exported through APIs to userspace.
3651 unsigned dev_get_flags(const struct net_device *dev)
3655 flags = (dev->flags & ~(IFF_PROMISC |
3660 (dev->gflags & (IFF_PROMISC |
3663 if (netif_running(dev)) {
3664 if (netif_oper_up(dev))
3665 flags |= IFF_RUNNING;
3666 if (netif_carrier_ok(dev))
3667 flags |= IFF_LOWER_UP;
3668 if (netif_dormant(dev))
3669 flags |= IFF_DORMANT;
3676 * dev_change_flags - change device settings
3678 * @flags: device state flags
3680 * Change settings on a device based on state flags. The flags are
3681 * in the userspace-exported format.
3683 int dev_change_flags(struct net_device *dev, unsigned flags)
3686 int old_flags = dev->flags;
3691 * Set the flags on our device.
3694 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
3695 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
3697 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
3701 * Load in the correct multicast list now that the flags have changed.
3704 if ((old_flags ^ flags) & IFF_MULTICAST)
3705 dev_change_rx_flags(dev, IFF_MULTICAST);
3707 dev_set_rx_mode(dev);
3710 * Have we downed the interface? We handle IFF_UP ourselves
3711 * according to user attempts to set it, rather than blindly setting it.
3716 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
3717 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
3720 dev_set_rx_mode(dev);
3723 if (dev->flags & IFF_UP &&
3724 ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
3726 call_netdevice_notifiers(NETDEV_CHANGE, dev);
3728 if ((flags ^ dev->gflags) & IFF_PROMISC) {
3729 int inc = (flags & IFF_PROMISC) ? +1 : -1;
3730 dev->gflags ^= IFF_PROMISC;
3731 dev_set_promiscuity(dev, inc);
3734 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
3735 is important. Some (broken) drivers set IFF_PROMISC when
3736 IFF_ALLMULTI is requested, without asking us and without reporting it.
3738 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
3739 int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
3740 dev->gflags ^= IFF_ALLMULTI;
3741 dev_set_allmulti(dev, inc);
3744 /* Exclude state transition flags, already notified */
3745 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
3747 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
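/*
 * Example (illustrative sketch): bringing an interface up the way the
 * ioctl layer does, by OR-ing IFF_UP into the userspace-format flags
 * from dev_get_flags(). Must run under RTNL.
 */
static int myctl_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
	rtnl_unlock();
	return err;
}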
3753 * dev_set_mtu - Change maximum transfer unit
3755 * @new_mtu: new transfer unit
3757 * Change the maximum transfer size of the network device.
3759 int dev_set_mtu(struct net_device *dev, int new_mtu)
3761 const struct net_device_ops *ops = dev->netdev_ops;
3764 if (new_mtu == dev->mtu)
3767 /* MTU must be positive. */
3771 if (!netif_device_present(dev))
3775 if (ops->ndo_change_mtu)
3776 err = ops->ndo_change_mtu(dev, new_mtu);
3780 if (!err && dev->flags & IFF_UP)
3781 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
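/*
 * Example (illustrative sketch): the kind of ndo_change_mtu hook
 * invoked above. The 68..9000 bounds are hypothetical hardware limits
 * (68 being the classic IPv4 minimum).
 */
static int mydrv_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < 68 || new_mtu > 9000)
		return -EINVAL;
	dev->mtu = new_mtu;	/* no hardware reprogramming needed here */
	return 0;
}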
3786 * dev_set_mac_address - Change Media Access Control Address
3790 * Change the hardware (MAC) address of the device
3792 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
3794 const struct net_device_ops *ops = dev->netdev_ops;
3797 if (!ops->ndo_set_mac_address)
3799 if (sa->sa_family != dev->type)
3801 if (!netif_device_present(dev))
3803 err = ops->ndo_set_mac_address(dev, sa);
3805 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3810 * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
3812 static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
3815 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
3821 case SIOCGIFFLAGS: /* Get interface flags */
3822 ifr->ifr_flags = dev_get_flags(dev);
3825 case SIOCGIFMETRIC: /* Get the metric on the interface
3826 (currently unused) */
3827 ifr->ifr_metric = 0;
3830 case SIOCGIFMTU: /* Get the MTU of a device */
3831 ifr->ifr_mtu = dev->mtu;
3836 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
3838 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
3839 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
3840 ifr->ifr_hwaddr.sa_family = dev->type;
3848 ifr->ifr_map.mem_start = dev->mem_start;
3849 ifr->ifr_map.mem_end = dev->mem_end;
3850 ifr->ifr_map.base_addr = dev->base_addr;
3851 ifr->ifr_map.irq = dev->irq;
3852 ifr->ifr_map.dma = dev->dma;
3853 ifr->ifr_map.port = dev->if_port;
3857 ifr->ifr_ifindex = dev->ifindex;
3861 ifr->ifr_qlen = dev->tx_queue_len;
3865 /* dev_ioctl() should ensure this case is never reached */
3877 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
3879 static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
3882 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
3883 const struct net_device_ops *ops;
3888 ops = dev->netdev_ops;
3891 case SIOCSIFFLAGS: /* Set interface flags */
3892 return dev_change_flags(dev, ifr->ifr_flags);
3894 case SIOCSIFMETRIC: /* Set the metric on the interface
3895 (currently unused) */
3898 case SIOCSIFMTU: /* Set the MTU of a device */
3899 return dev_set_mtu(dev, ifr->ifr_mtu);
3902 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
3904 case SIOCSIFHWBROADCAST:
3905 if (ifr->ifr_hwaddr.sa_family != dev->type)
3907 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
3908 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
3909 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3913 if (ops->ndo_set_config) {
3914 if (!netif_device_present(dev))
3916 return ops->ndo_set_config(dev, &ifr->ifr_map);
3921 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
3922 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3924 if (!netif_device_present(dev))
3926 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
3930 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
3931 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3933 if (!netif_device_present(dev))
3935 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
3939 if (ifr->ifr_qlen < 0)
3941 dev->tx_queue_len = ifr->ifr_qlen;
3945 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
3946 return dev_change_name(dev, ifr->ifr_newname);
3949 * Unknown or private ioctl
3953 if ((cmd >= SIOCDEVPRIVATE &&
3954 cmd <= SIOCDEVPRIVATE + 15) ||
3955 cmd == SIOCBONDENSLAVE ||
3956 cmd == SIOCBONDRELEASE ||
3957 cmd == SIOCBONDSETHWADDR ||
3958 cmd == SIOCBONDSLAVEINFOQUERY ||
3959 cmd == SIOCBONDINFOQUERY ||
3960 cmd == SIOCBONDCHANGEACTIVE ||
3961 cmd == SIOCGMIIPHY ||
3962 cmd == SIOCGMIIREG ||
3963 cmd == SIOCSMIIREG ||
3964 cmd == SIOCBRADDIF ||
3965 cmd == SIOCBRDELIF ||
3966 cmd == SIOCWANDEV) {
3968 if (ops->ndo_do_ioctl) {
3969 if (netif_device_present(dev))
3970 err = ops->ndo_do_ioctl(dev, ifr, cmd);
3982 * This function handles all "interface"-type I/O control requests. The actual
3983 * 'doing' part of this is dev_ifsioc above.
3987 * dev_ioctl - network device ioctl
3988 * @net: the applicable net namespace
3989 * @cmd: command to issue
3990 * @arg: pointer to a struct ifreq in user space
3992 * Issue ioctl functions to devices. This is normally called by the
3993 * user space syscall interfaces but can sometimes be useful for
3994 * other purposes. The return value is the return from the syscall if
3995 * positive or a negative errno code on error.
3998 int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
4004 /* One special case: SIOCGIFCONF takes ifconf argument
4005 and requires shared lock, because it sleeps writing to user space. */
4009 if (cmd == SIOCGIFCONF) {
4011 ret = dev_ifconf(net, (char __user *) arg);
4015 if (cmd == SIOCGIFNAME)
4016 return dev_ifname(net, (struct ifreq __user *)arg);
4018 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4021 ifr.ifr_name[IFNAMSIZ-1] = 0;
4023 colon = strchr(ifr.ifr_name, ':');
4028 * See which interface the caller is talking about.
4033 * These ioctl calls:
4034 * - can be done by all.
4035 * - atomic and do not require locking.
4046 dev_load(net, ifr.ifr_name);
4047 read_lock(&dev_base_lock);
4048 ret = dev_ifsioc_locked(net, &ifr, cmd);
4049 read_unlock(&dev_base_lock);
4053 if (copy_to_user(arg, &ifr,
4054 sizeof(struct ifreq)))
4060 dev_load(net, ifr.ifr_name);
4062 ret = dev_ethtool(net, &ifr);
4067 if (copy_to_user(arg, &ifr,
4068 sizeof(struct ifreq)))
4074 * These ioctl calls:
4075 * - require superuser power.
4076 * - require strict serialization.
4082 if (!capable(CAP_NET_ADMIN))
4084 dev_load(net, ifr.ifr_name);
4086 ret = dev_ifsioc(net, &ifr, cmd);
4091 if (copy_to_user(arg, &ifr,
4092 sizeof(struct ifreq)))
4098 * These ioctl calls:
4099 * - require superuser power.
4100 * - require strict serialization.
4101 * - do not return a value
4111 case SIOCSIFHWBROADCAST:
4114 case SIOCBONDENSLAVE:
4115 case SIOCBONDRELEASE:
4116 case SIOCBONDSETHWADDR:
4117 case SIOCBONDCHANGEACTIVE:
4120 if (!capable(CAP_NET_ADMIN))
4123 case SIOCBONDSLAVEINFOQUERY:
4124 case SIOCBONDINFOQUERY:
4125 dev_load(net, ifr.ifr_name);
4127 ret = dev_ifsioc(net, &ifr, cmd);
4132 /* Get the per device memory space. We can add this but
4133 * currently do not support it */
4135 /* Set the per device memory buffer space.
4136 * Not applicable in our case */
4141 * Unknown or private ioctl.
4144 if (cmd == SIOCWANDEV ||
4145 (cmd >= SIOCDEVPRIVATE &&
4146 cmd <= SIOCDEVPRIVATE + 15)) {
4147 dev_load(net, ifr.ifr_name);
4149 ret = dev_ifsioc(net, &ifr, cmd);
4151 if (!ret && copy_to_user(arg, &ifr,
4152 sizeof(struct ifreq)))
4156 /* Take care of Wireless Extensions */
4157 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
4158 return wext_handle_ioctl(net, &ifr, cmd, arg);
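/*
 * Example (illustrative, userspace side): what the path above looks
 * like from the other end of the syscall -- querying an MTU with
 * SIOCGIFMTU, which lands in dev_ifsioc_locked(). Compiled out, since
 * this is user code, not kernel code.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>

static int get_mtu(int sock, const char *name)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
	if (ioctl(sock, SIOCGIFMTU, &ifr) < 0)
		return -1;		/* errno set by the kernel path above */
	return ifr.ifr_mtu;
}
#endif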
4165 * dev_new_index - allocate an ifindex
4166 * @net: the applicable net namespace
4168 * Returns a suitable unique value for a new device interface
4169 * number. The caller must hold the rtnl semaphore or the
4170 * dev_base_lock to be sure it remains unique.
4172 static int dev_new_index(struct net *net)
4178 if (!__dev_get_by_index(net, ifindex))
4183 /* Delayed registration/unregistration */
4184 static LIST_HEAD(net_todo_list);
4186 static void net_set_todo(struct net_device *dev)
4188 list_add_tail(&dev->todo_list, &net_todo_list);
4191 static void rollback_registered(struct net_device *dev)
4193 BUG_ON(dev_boot_phase);
4196 /* Some devices call this without having registered, to unwind a failed initialization. */
4197 if (dev->reg_state == NETREG_UNINITIALIZED) {
4198 printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
4199 "was registered\n", dev->name, dev);
4205 BUG_ON(dev->reg_state != NETREG_REGISTERED);
4207 /* If device is running, close it first. */
4210 /* And unlink it from device chain. */
4211 unlist_netdevice(dev);
4213 dev->reg_state = NETREG_UNREGISTERING;
4217 /* Shutdown queueing discipline. */
4221 /* Notify protocols that we are about to destroy
4222 this device. They should clean up all of their state.
4224 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4227 * Flush the unicast and multicast chains
4229 dev_addr_discard(dev);
4231 if (dev->netdev_ops->ndo_uninit)
4232 dev->netdev_ops->ndo_uninit(dev);
4234 /* Notifier chain MUST detach us from master device. */
4235 WARN_ON(dev->master);
4237 /* Remove entries from kobject tree */
4238 netdev_unregister_kobject(dev);
4245 static void __netdev_init_queue_locks_one(struct net_device *dev,
4246 struct netdev_queue *dev_queue,
4249 spin_lock_init(&dev_queue->_xmit_lock);
4250 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
4251 dev_queue->xmit_lock_owner = -1;
4254 static void netdev_init_queue_locks(struct net_device *dev)
4256 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
4257 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
4260 unsigned long netdev_fix_features(unsigned long features, const char *name)
4262 /* Fix illegal SG+CSUM combinations. */
4263 if ((features & NETIF_F_SG) &&
4264 !(features & NETIF_F_ALL_CSUM)) {
4266 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
4267 "checksum feature.\n", name);
4268 features &= ~NETIF_F_SG;
4271 /* TSO requires that SG is present as well. */
4272 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
4274 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
4275 "SG feature.\n", name);
4276 features &= ~NETIF_F_TSO;
4279 if (features & NETIF_F_UFO) {
4280 if (!(features & NETIF_F_GEN_CSUM)) {
4282 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4283 "since no NETIF_F_HW_CSUM feature.\n",
4285 features &= ~NETIF_F_UFO;
4288 if (!(features & NETIF_F_SG)) {
4290 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4291 "since no NETIF_F_SG feature.\n", name);
4292 features &= ~NETIF_F_UFO;
4298 EXPORT_SYMBOL(netdev_fix_features);
4301 * register_netdevice - register a network device
4302 * @dev: device to register
4304 * Take a completed network device structure and add it to the kernel
4305 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4306 * chain. 0 is returned on success. A negative errno code is returned
4307 * on a failure to set up the device, or if the name is a duplicate.
4309 * Callers must hold the rtnl semaphore. You may want
4310 * register_netdev() instead of this.
4313 * The locking appears insufficient to guarantee two parallel registers
4314 * will not get the same name.
4317 int register_netdevice(struct net_device *dev)
4319 struct hlist_head *head;
4320 struct hlist_node *p;
4322 struct net *net = dev_net(dev);
4324 BUG_ON(dev_boot_phase);
4329 /* When net_devices are persistent, this will be fatal. */
4330 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
4333 spin_lock_init(&dev->addr_list_lock);
4334 netdev_set_addr_lockdep_class(dev);
4335 netdev_init_queue_locks(dev);
4339 #ifdef CONFIG_COMPAT_NET_DEV_OPS
4340 /* Netdevice_ops API compatibility support.
4341 * This is temporary until all network devices are converted.
4343 if (dev->netdev_ops) {
4344 const struct net_device_ops *ops = dev->netdev_ops;
4346 dev->init = ops->ndo_init;
4347 dev->uninit = ops->ndo_uninit;
4348 dev->open = ops->ndo_open;
4349 dev->change_rx_flags = ops->ndo_change_rx_flags;
4350 dev->set_rx_mode = ops->ndo_set_rx_mode;
4351 dev->set_multicast_list = ops->ndo_set_multicast_list;
4352 dev->set_mac_address = ops->ndo_set_mac_address;
4353 dev->validate_addr = ops->ndo_validate_addr;
4354 dev->do_ioctl = ops->ndo_do_ioctl;
4355 dev->set_config = ops->ndo_set_config;
4356 dev->change_mtu = ops->ndo_change_mtu;
4357 dev->tx_timeout = ops->ndo_tx_timeout;
4358 dev->get_stats = ops->ndo_get_stats;
4359 dev->vlan_rx_register = ops->ndo_vlan_rx_register;
4360 dev->vlan_rx_add_vid = ops->ndo_vlan_rx_add_vid;
4361 dev->vlan_rx_kill_vid = ops->ndo_vlan_rx_kill_vid;
4362 #ifdef CONFIG_NET_POLL_CONTROLLER
4363 dev->poll_controller = ops->ndo_poll_controller;
4366 char drivername[64];
4367 pr_info("%s (%s): not using net_device_ops yet\n",
4368 dev->name, netdev_drivername(dev, drivername, 64));
4370 /* This works only because net_device_ops and the
4371 compatibility structure are the same. */
4372 dev->netdev_ops = (void *) &(dev->init);
4376 /* Init, if this function is available */
4377 if (dev->netdev_ops->ndo_init) {
4378 ret = dev->netdev_ops->ndo_init(dev);
4386 if (!dev_valid_name(dev->name)) {
4391 dev->ifindex = dev_new_index(net);
4392 if (dev->iflink == -1)
4393 dev->iflink = dev->ifindex;
4395 /* Check for existence of name */
4396 head = dev_name_hash(net, dev->name);
4397 hlist_for_each(p, head) {
4398 struct net_device *d
4399 = hlist_entry(p, struct net_device, name_hlist);
4400 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
4406 /* Fix illegal checksum combinations */
4407 if ((dev->features & NETIF_F_HW_CSUM) &&
4408 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4409 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
4411 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4414 if ((dev->features & NETIF_F_NO_CSUM) &&
4415 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4416 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
4418 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
4421 dev->features = netdev_fix_features(dev->features, dev->name);
4423 /* Enable software GSO if SG is supported. */
4424 if (dev->features & NETIF_F_SG)
4425 dev->features |= NETIF_F_GSO;
4427 netdev_initialize_kobject(dev);
4428 ret = netdev_register_kobject(dev);
4431 dev->reg_state = NETREG_REGISTERED;
4434 * Default initial state at registration is that the
4435 * device is present.
4438 set_bit(__LINK_STATE_PRESENT, &dev->state);
4440 dev_init_scheduler(dev);
4442 list_netdevice(dev);
4444 /* Notify protocols that a new device appeared. */
4445 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
4446 ret = notifier_to_errno(ret);
4448 rollback_registered(dev);
4449 dev->reg_state = NETREG_UNREGISTERED;
4456 if (dev->netdev_ops->ndo_uninit)
4457 dev->netdev_ops->ndo_uninit(dev);
4462 * init_dummy_netdev - init a dummy network device for NAPI
4463 * @dev: device to init
4465 * This takes a network device structure and initializes the minimum
4466 * set of fields so it can be used to schedule NAPI polls without
4467 * registering a full-blown interface. This is to be used by drivers
4468 * that need to tie several hardware interfaces to a single NAPI
4469 * poll scheduler due to HW limitations.
4471 int init_dummy_netdev(struct net_device *dev)
4473 /* Clear everything. Note we don't initialize spinlocks
4474 * as they aren't supposed to be taken by any of the
4475 * NAPI code, and this dummy netdev is supposed to be
4476 * used only for NAPI polls
4478 memset(dev, 0, sizeof(struct net_device));
4480 /* make sure we BUG if trying to hit standard
4481 * register/unregister code path
4483 dev->reg_state = NETREG_DUMMY;
4485 /* initialize the ref count */
4486 atomic_set(&dev->refcnt, 1);
4488 /* NAPI wants this */
4489 INIT_LIST_HEAD(&dev->napi_list);
4491 /* a dummy interface is started by default */
4492 set_bit(__LINK_STATE_PRESENT, &dev->state);
4493 set_bit(__LINK_STATE_START, &dev->state);
4497 EXPORT_SYMBOL_GPL(init_dummy_netdev);
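/*
 * Example (illustrative sketch): the use case init_dummy_netdev() was
 * added for -- a driver with several hardware channels behind one
 * poller hangs its NAPI context off a dummy netdev rather than a real
 * one. mydrv_poll() is the hypothetical poll routine sketched earlier.
 */
static struct net_device mydrv_dummy_dev;

static void mydrv_setup_shared_napi(struct mydrv_priv *priv)
{
	init_dummy_netdev(&mydrv_dummy_dev);
	netif_napi_add(&mydrv_dummy_dev, &priv->napi, mydrv_poll, 64);
}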
4501 * register_netdev - register a network device
4502 * @dev: device to register
4504 * Take a completed network device structure and add it to the kernel
4505 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4506 * chain. 0 is returned on success. A negative errno code is returned
4507 * on a failure to set up the device, or if the name is a duplicate.
4509 * This is a wrapper around register_netdevice that takes the rtnl semaphore
4510 * and expands the device name if you passed a format string to
4513 int register_netdev(struct net_device *dev)
4520 * If the name is a format string the caller wants us to do a name allocation.
4523 if (strchr(dev->name, '%')) {
4524 err = dev_alloc_name(dev, dev->name);
4529 err = register_netdevice(dev);
4534 EXPORT_SYMBOL(register_netdev);
4537 * netdev_wait_allrefs - wait until all references are gone.
4539 * This is called when unregistering network devices.
4541 * Any protocol or device that holds a reference should register
4542 * for netdevice notification, and clean up and put back the
4543 * reference if they receive an UNREGISTER event.
4544 * We can get stuck here if buggy protocols don't correctly call dev_put.
4547 static void netdev_wait_allrefs(struct net_device *dev)
4549 unsigned long rebroadcast_time, warning_time;
4551 rebroadcast_time = warning_time = jiffies;
4552 while (atomic_read(&dev->refcnt) != 0) {
4553 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
4556 /* Rebroadcast unregister notification */
4557 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4559 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
4561 /* We must not have linkwatch events
4562 * pending on unregister. If this
4563 * happens, we simply run the queue
4564 * unscheduled, resulting in a noop
4567 linkwatch_run_queue();
4572 rebroadcast_time = jiffies;
4577 if (time_after(jiffies, warning_time + 10 * HZ)) {
4578 printk(KERN_EMERG "unregister_netdevice: "
4579 "waiting for %s to become free. Usage "
4581 dev->name, atomic_read(&dev->refcnt));
4582 warning_time = jiffies;
4591 * register_netdevice(x1);
4592 * register_netdevice(x2);
4594 * unregister_netdevice(y1);
4595 * unregister_netdevice(y2);
4601 * We are invoked by rtnl_unlock().
4602 * This allows us to deal with problems:
4603 * 1) We can delete sysfs objects which invoke hotplug
4604 * without deadlocking with linkwatch via keventd.
4605 * 2) Since we run with the RTNL semaphore not held, we can sleep
4606 * safely in order to wait for the netdev refcnt to drop to zero.
4608 * We must not return until all unregister events added during
4609 * the interval the lock was held have been completed.
4611 void netdev_run_todo(void)
4613 struct list_head list;
4615 /* Snapshot list, allow later requests */
4616 list_replace_init(&net_todo_list, &list);
4620 while (!list_empty(&list)) {
4621 struct net_device *dev
4622 = list_entry(list.next, struct net_device, todo_list);
4623 list_del(&dev->todo_list);
4625 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
4626 printk(KERN_ERR "network todo '%s' but state %d\n",
4627 dev->name, dev->reg_state);
4632 dev->reg_state = NETREG_UNREGISTERED;
4634 on_each_cpu(flush_backlog, dev, 1);
4636 netdev_wait_allrefs(dev);
4639 BUG_ON(atomic_read(&dev->refcnt));
4640 WARN_ON(dev->ip_ptr);
4641 WARN_ON(dev->ip6_ptr);
4642 WARN_ON(dev->dn_ptr);
4644 if (dev->destructor)
4645 dev->destructor(dev);
4647 /* Free network device */
4648 kobject_put(&dev->dev.kobj);
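/*
 * Example (illustrative sketch): the notifier pattern that keeps
 * netdev_wait_allrefs() from spinning forever. A subsystem caching a
 * device reference must drop it on NETDEV_UNREGISTER; "mysub" is
 * hypothetical.
 */
static struct net_device *mysub_cached_dev;

static int mysub_netdev_event(struct notifier_block *nb,
			      unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (event == NETDEV_UNREGISTER && mysub_cached_dev == dev) {
		mysub_cached_dev = NULL;
		dev_put(dev);		/* let the refcnt reach zero */
	}
	return NOTIFY_DONE;
}

static struct notifier_block mysub_notifier = {
	.notifier_call = mysub_netdev_event,
};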
4653 * dev_get_stats - get network device statistics
4654 * @dev: device to get statistics from
4656 * Get network statistics from device. The device driver may provide
4657 * its own method by setting dev->netdev_ops->ndo_get_stats; otherwise
4658 * the internal statistics structure is used.
4660 const struct net_device_stats *dev_get_stats(struct net_device *dev)
4662 const struct net_device_ops *ops = dev->netdev_ops;
4664 if (ops->ndo_get_stats)
4665 return ops->ndo_get_stats(dev);
4669 EXPORT_SYMBOL(dev_get_stats);
4671 static void netdev_init_one_queue(struct net_device *dev,
4672 struct netdev_queue *queue,
4678 static void netdev_init_queues(struct net_device *dev)
4680 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
4681 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
4682 spin_lock_init(&dev->tx_global_lock);
4686 * alloc_netdev_mq - allocate network device
4687 * @sizeof_priv: size of private data to allocate space for
4688 * @name: device name format string
4689 * @setup: callback to initialize device
4690 * @queue_count: the number of subqueues to allocate
4692 * Allocates a struct net_device with private data area for driver use
4693 * and performs basic initialization. Also allocates subqueue structs
4694 * for each queue on the device at the end of the netdevice.
4696 struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
4697 void (*setup)(struct net_device *), unsigned int queue_count)
4699 struct netdev_queue *tx;
4700 struct net_device *dev;
4704 BUG_ON(strlen(name) >= sizeof(dev->name));
4706 alloc_size = sizeof(struct net_device);
4708 /* ensure 32-byte alignment of private area */
4709 alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
4710 alloc_size += sizeof_priv;
4712 /* ensure 32-byte alignment of whole construct */
4713 alloc_size += NETDEV_ALIGN_CONST;
4715 p = kzalloc(alloc_size, GFP_KERNEL);
4717 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
4721 tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
4723 printk(KERN_ERR "alloc_netdev: Unable to allocate "
4729 dev = (struct net_device *)
4730 (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
4731 dev->padded = (char *)dev - (char *)p;
4732 dev_net_set(dev, &init_net);
4735 dev->num_tx_queues = queue_count;
4736 dev->real_num_tx_queues = queue_count;
4738 dev->gso_max_size = GSO_MAX_SIZE;
4740 netdev_init_queues(dev);
4742 INIT_LIST_HEAD(&dev->napi_list);
4744 strcpy(dev->name, name);
4747 EXPORT_SYMBOL(alloc_netdev_mq);
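/*
 * Example (illustrative sketch): the usual allocate/register/teardown
 * lifecycle built from alloc_netdev_mq(), register_netdev() and
 * free_netdev(). ether_setup() comes from etherdevice.h; "mydrv" and
 * the "myeth%d" name template are hypothetical.
 */
static struct net_device *mydrv_create(void)
{
	struct net_device *dev;

	dev = alloc_netdev_mq(sizeof(struct mydrv_priv), "myeth%d",
			      ether_setup, 1);
	if (!dev)
		return NULL;

	if (register_netdev(dev)) {	/* takes rtnl, resolves the "%d" */
		free_netdev(dev);
		return NULL;
	}
	return dev;
}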
4750 * free_netdev - free network device
4753 * This function does the last stage of destroying an allocated device
4754 * interface. The reference to the device object is released.
4755 * If this is the last reference then it will be freed.
4757 void free_netdev(struct net_device *dev)
4759 struct napi_struct *p, *n;
4761 release_net(dev_net(dev));
4765 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
4768 /* Compatibility with error handling in drivers */
4769 if (dev->reg_state == NETREG_UNINITIALIZED) {
4770 kfree((char *)dev - dev->padded);
4774 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
4775 dev->reg_state = NETREG_RELEASED;
4777 /* will free via device release */
4778 put_device(&dev->dev);
4782 * synchronize_net - Synchronize with packet receive processing
4784 * Wait for packets currently being received to be done.
4785 * Does not block later packets from starting.
4787 void synchronize_net(void)
4794 * unregister_netdevice - remove device from the kernel
4797 * This function shuts down a device interface and removes it
4798 * from the kernel tables.
4800 * Callers must hold the rtnl semaphore. You may want
4801 * unregister_netdev() instead of this.
4804 void unregister_netdevice(struct net_device *dev)
4808 rollback_registered(dev);
4809 /* Finish processing unregister after unlock */
4814 * unregister_netdev - remove device from the kernel
4817 * This function shuts down a device interface and removes it
4818 * from the kernel tables.
4820 * This is just a wrapper for unregister_netdevice that takes
4821 * the rtnl semaphore. In general you want to use this and not
4822 * unregister_netdevice.
4824 void unregister_netdev(struct net_device *dev)
4827 unregister_netdevice(dev);
4831 EXPORT_SYMBOL(unregister_netdev);
4834 * dev_change_net_namespace - move device to a different network namespace
4836 * @net: network namespace
4837 * @pat: If not NULL name pattern to try if the current device name
4838 * is already taken in the destination network namespace.
4840 * This function shuts down a device interface and moves it
4841 * to a new network namespace. On success 0 is returned, on
4842 * a failure a negative errno code is returned.
4844 * Callers must hold the rtnl semaphore.
4847 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
4850 const char *destname;
4855 /* Don't allow namespace local devices to be moved. */
4857 if (dev->features & NETIF_F_NETNS_LOCAL)
4861 /* Don't allow real devices to be moved when sysfs is enabled. */
4865 if (dev->dev.parent)
4869 /* Ensure the device has been registered */
4871 if (dev->reg_state != NETREG_REGISTERED)
4874 /* Get out if there is nothing to do */
4876 if (net_eq(dev_net(dev), net))
4879 /* Pick the destination device name, and ensure
4880 * we can use it in the destination network namespace.
4883 destname = dev->name;
4884 if (__dev_get_by_name(net, destname)) {
4885 /* We get here if we can't use the current device name */
4888 if (!dev_valid_name(pat))
4890 if (strchr(pat, '%')) {
4891 if (__dev_alloc_name(net, pat, buf) < 0)
4896 if (__dev_get_by_name(net, destname))
4901 * And now a mini version of register_netdevice/unregister_netdevice.
4904 /* If device is running close it first. */
4907 /* And unlink it from device chain */
4909 unlist_netdevice(dev);
4913 /* Shutdown queueing discipline. */
4916 /* Notify protocols that we are about to destroy
4917 this device. They should clean up all of their state.
4919 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4922 * Flush the unicast and multicast chains
4924 dev_addr_discard(dev);
4926 netdev_unregister_kobject(dev);
4928 /* Actually switch the network namespace */
4929 dev_net_set(dev, net);
4931 /* Assign the new device name */
4932 if (destname != dev->name)
4933 strcpy(dev->name, destname);
4935 /* If there is an ifindex conflict assign a new one */
4936 if (__dev_get_by_index(net, dev->ifindex)) {
4937 int iflink = (dev->iflink == dev->ifindex);
4938 dev->ifindex = dev_new_index(net);
4940 dev->iflink = dev->ifindex;
4943 /* Fixup kobjects */
4944 err = netdev_register_kobject(dev);
4947 /* Add the device back in the hashes */
4948 list_netdevice(dev);
4950 /* Notify protocols that a new device appeared. */
4951 call_netdevice_notifiers(NETDEV_REGISTER, dev);
4959 static int dev_cpu_callback(struct notifier_block *nfb,
4960 unsigned long action,
4963 struct sk_buff **list_skb;
4964 struct Qdisc **list_net;
4965 struct sk_buff *skb;
4966 unsigned int cpu, oldcpu = (unsigned long)ocpu;
4967 struct softnet_data *sd, *oldsd;
4969 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
4972 local_irq_disable();
4973 cpu = smp_processor_id();
4974 sd = &per_cpu(softnet_data, cpu);
4975 oldsd = &per_cpu(softnet_data, oldcpu);
4977 /* Find end of our completion_queue. */
4978 list_skb = &sd->completion_queue;
4980 list_skb = &(*list_skb)->next;
4981 /* Append completion queue from offline CPU. */
4982 *list_skb = oldsd->completion_queue;
4983 oldsd->completion_queue = NULL;
4985 /* Find end of our output_queue. */
4986 list_net = &sd->output_queue;
4988 list_net = &(*list_net)->next_sched;
4989 /* Append output queue from offline CPU. */
4990 *list_net = oldsd->output_queue;
4991 oldsd->output_queue = NULL;
4993 raise_softirq_irqoff(NET_TX_SOFTIRQ);
4996 /* Process offline CPU's input_pkt_queue */
4997 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
5005 * netdev_increment_features - increment feature set by one
5006 * @all: current feature set
5007 * @one: new feature set
5008 * @mask: mask feature set
5010 * Computes a new feature set after adding a device with feature set
5011 * @one to the master device with current feature set @all. Will not
5012 * enable anything that is off in @mask. Returns the new feature set.
5014 unsigned long netdev_increment_features(unsigned long all, unsigned long one,
5017 /* If device needs checksumming, downgrade to it. */
5018 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
5019 all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
5020 else if (mask & NETIF_F_ALL_CSUM) {
5021 /* If one device supports v4/v6 checksumming, set for all. */
5022 if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
5023 !(all & NETIF_F_GEN_CSUM)) {
5024 all &= ~NETIF_F_ALL_CSUM;
5025 all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
5028 /* If one device supports hw checksumming, set for all. */
5029 if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
5030 all &= ~NETIF_F_ALL_CSUM;
5031 all |= NETIF_F_HW_CSUM;
5035 one |= NETIF_F_ALL_CSUM;
5037 one |= all & NETIF_F_ONE_FOR_ALL;
5038 all &= one | NETIF_F_LLTX | NETIF_F_GSO;
5039 all |= one & mask & NETIF_F_ONE_FOR_ALL;
5043 EXPORT_SYMBOL(netdev_increment_features);
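/*
 * Example (illustrative sketch): how a master device might fold each
 * slave's feature set into its own with netdev_increment_features(),
 * in the style of bonding. "myslave" and the slave list are
 * hypothetical.
 */
struct myslave {
	struct list_head list;
	struct net_device *dev;
};

static void mybond_compute_features(struct net_device *bond_dev,
				    struct list_head *slaves)
{
	unsigned long features = NETIF_F_ALL_CSUM | NETIF_F_SG | NETIF_F_GSO;
	struct myslave *s;

	list_for_each_entry(s, slaves, list)
		features = netdev_increment_features(features,
						     s->dev->features,
						     NETIF_F_ONE_FOR_ALL);

	bond_dev->features = netdev_fix_features(features, bond_dev->name);
}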
5045 static struct hlist_head *netdev_create_hash(void)
5048 struct hlist_head *hash;
5050 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
5052 for (i = 0; i < NETDEV_HASHENTRIES; i++)
5053 INIT_HLIST_HEAD(&hash[i]);
5058 /* Initialize per network namespace state */
5059 static int __net_init netdev_init(struct net *net)
5061 INIT_LIST_HEAD(&net->dev_base_head);
5063 net->dev_name_head = netdev_create_hash();
5064 if (net->dev_name_head == NULL)
5067 net->dev_index_head = netdev_create_hash();
5068 if (net->dev_index_head == NULL)
5074 kfree(net->dev_name_head);
5080 * netdev_drivername - network driver for the device
5081 * @dev: network device
5082 * @buffer: buffer for resulting name
5083 * @len: size of buffer
5085 * Determine network driver for device.
5087 char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
5089 const struct device_driver *driver;
5090 const struct device *parent;
5092 if (len <= 0 || !buffer)
5096 parent = dev->dev.parent;
5101 driver = parent->driver;
5102 if (driver && driver->name)
5103 strlcpy(buffer, driver->name, len);
5107 static void __net_exit netdev_exit(struct net *net)
5109 kfree(net->dev_name_head);
5110 kfree(net->dev_index_head);
5113 static struct pernet_operations __net_initdata netdev_net_ops = {
5114 .init = netdev_init,
5115 .exit = netdev_exit,
5118 static void __net_exit default_device_exit(struct net *net)
5120 struct net_device *dev;
5122 * Push all migratable network devices back to the
5123 * initial network namespace
5127 for_each_netdev(net, dev) {
5129 char fb_name[IFNAMSIZ];
5131 /* Ignore unmovable devices (e.g. loopback) */
5132 if (dev->features & NETIF_F_NETNS_LOCAL)
5135 /* Delete virtual devices */
5136 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) {
5137 dev->rtnl_link_ops->dellink(dev);
5141 /* Push remaining network devices to init_net */
5142 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
5143 err = dev_change_net_namespace(dev, &init_net, fb_name);
5145 printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
5146 __func__, dev->name, err);

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
};

/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 *	This is called single threaded during boot, so no need
 *	to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 *	Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct softnet_data *queue;

		queue = &per_cpu(softnet_data, i);
		skb_queue_head_init(&queue->input_pkt_queue);
		queue->completion_queue = NULL;
		INIT_LIST_HEAD(&queue->poll_list);

		queue->backlog.poll = process_backlog;
		queue->backlog.weight = weight_p;
		queue->backlog.gro_list = NULL;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too. Since we now dynamically allocate and free the
	 * loopback device, ensure this invariant is maintained by
	 * keeping the loopback device as the first device on the
	 * list of network devices.  Ensuring the loopback device
	 * is the first device that appears and the last network device
	 * that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	dev_mcast_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);
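
/*
 * Sketch of how the per-CPU softnet_data initialised in net_dev_init() is
 * reached on the receive hot path (modelled on the netif_rx() pattern of
 * this era; abbreviated, not the full enqueue logic):
 *
 *	unsigned long flags;
 *	struct softnet_data *queue;
 *
 *	local_irq_save(flags);
 *	queue = &__get_cpu_var(softnet_data);
 *	if (queue->input_pkt_queue.qlen <= netdev_max_backlog)
 *		__skb_queue_tail(&queue->input_pkt_queue, skb);
 *	local_irq_restore(flags);
 */
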
EXPORT_SYMBOL(__dev_get_by_index);
EXPORT_SYMBOL(__dev_get_by_name);
EXPORT_SYMBOL(__dev_remove_pack);
EXPORT_SYMBOL(dev_valid_name);
EXPORT_SYMBOL(dev_add_pack);
EXPORT_SYMBOL(dev_alloc_name);
EXPORT_SYMBOL(dev_close);
EXPORT_SYMBOL(dev_get_by_flags);
EXPORT_SYMBOL(dev_get_by_index);
EXPORT_SYMBOL(dev_get_by_name);
EXPORT_SYMBOL(dev_open);
EXPORT_SYMBOL(dev_queue_xmit);
EXPORT_SYMBOL(dev_remove_pack);
EXPORT_SYMBOL(dev_set_allmulti);
EXPORT_SYMBOL(dev_set_promiscuity);
EXPORT_SYMBOL(dev_change_flags);
EXPORT_SYMBOL(dev_set_mtu);
EXPORT_SYMBOL(dev_set_mac_address);
EXPORT_SYMBOL(free_netdev);
EXPORT_SYMBOL(netdev_boot_setup_check);
EXPORT_SYMBOL(netdev_set_master);
EXPORT_SYMBOL(netdev_state_change);
EXPORT_SYMBOL(netif_receive_skb);
EXPORT_SYMBOL(netif_rx);
EXPORT_SYMBOL(register_gifconf);
EXPORT_SYMBOL(register_netdevice);
EXPORT_SYMBOL(register_netdevice_notifier);
EXPORT_SYMBOL(skb_checksum_help);
EXPORT_SYMBOL(synchronize_net);
EXPORT_SYMBOL(unregister_netdevice);
EXPORT_SYMBOL(unregister_netdevice_notifier);
EXPORT_SYMBOL(net_enable_timestamp);
EXPORT_SYMBOL(net_disable_timestamp);
EXPORT_SYMBOL(dev_get_flags);

#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
EXPORT_SYMBOL(br_handle_frame_hook);
EXPORT_SYMBOL(br_fdb_get_hook);
EXPORT_SYMBOL(br_fdb_put_hook);
#endif

EXPORT_SYMBOL(dev_load);

EXPORT_PER_CPU_SYMBOL(softnet_data);