/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>

#include "net-sysfs.h"
/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 */
#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)
static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);
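
/*
 * Illustrative sketch (not part of the original file) of the pure-reader
 * pattern described above; "eth0" is an arbitrary example name, and the
 * device must not be used after the lock is dropped unless a reference
 * was taken with dev_hold():
 *
 *	struct net_device *dev;
 *
 *	read_lock(&dev_base_lock);
 *	dev = __dev_get_by_name(&init_net, "eth0");
 *	if (dev)
 *		pr_info("ifindex %d\n", dev->ifindex);
 *	read_unlock(&dev_base_lock);
 */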
static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash & (NETDEV_HASHENTRIES - 1)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}
/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal
 * caller must respect an RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}
static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);
#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
	 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
	 ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
	 "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
	 "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif
/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if a protocol handler that mangles packets
 *	were first on the list, it could not detect that the packet is
 *	cloned and must be copied-on-write; it would change the packet
 *	in place and subsequent readers would see a broken packet.
 */
/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep and therefore cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
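
/*
 * Illustrative sketch (not part of the original file) of how a module
 * might register a handler; ETH_P_EXAMPLE and example_rcv() are made-up
 * names for the sake of the example:
 *
 *	static int example_rcv(struct sk_buff *skb, struct net_device *dev,
 *			       struct packet_type *pt,
 *			       struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);		// consume the packet
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type example_ptype __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_EXAMPLE),
 *		.func = example_rcv,
 *	};
 *
 *	dev_add_pack(&example_ptype);	// register
 *	...
 *	dev_remove_pack(&example_ptype);	// unregister; this sleeps
 */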
/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
/*******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add - add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine to
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}
/**
 *	netdev_boot_setup_check - check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq = s[i].map.irq;
			dev->base_addr = s[i].map.base_addr;
			dev->mem_start = s[i].map.mem_start;
			dev->mem_end = s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);
/**
 *	netdev_boot_base - get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
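
/*
 * Illustrative note (not part of the original file): with the parser
 * above, a boot command line such as
 *
 *	netdev=5,0x340,0xd0000,0xd4000,eth0
 *
 * stores irq=5, base_addr=0x340, mem_start=0xd0000 and mem_end=0xd4000
 * for the device named eth0, to be picked up later by
 * netdev_boot_setup_check() during device probing.
 */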
/*******************************************************************************

		Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);
/**
 *	dev_get_by_name_rcu - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);
/**
 *	dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
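
/*
 * Illustrative usage sketch (not part of the original file); "eth0" is
 * an arbitrary example name:
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *	if (dev) {
 *		// ... use dev safely, a reference is held ...
 *		dev_put(dev);	// drop the reference taken above
 *	}
 */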
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);
/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);
/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);
/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(net, type);
	if (dev)
		dev_hold(dev);
	rtnl_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);
/**
 *	dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags,
				    unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	read_lock(&dev_base_lock);
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	read_unlock(&dev_base_lock);
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags);
/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}
EXPORT_SYMBOL(dev_valid_name);
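
/*
 * Illustrative examples (not part of the original file):
 *
 *	dev_valid_name("eth0")       -> 1
 *	dev_valid_name("my device")  -> 0	(whitespace)
 *	dev_valid_name("a/b")        -> 0	(slash breaks sysfs paths)
 *	dev_valid_name("..")         -> 0	(reserved file name)
 */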
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}
/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
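
/*
 * Illustrative sketch (not part of the original file): a driver that
 * wants the next free "eth%d" slot before registering its device:
 *
 *	err = dev_alloc_name(dev, "eth%d");	// e.g. fills in "eth2"
 *	if (err < 0)
 *		goto fail;	// -EINVAL, -ENOMEM or -ENFILE
 */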
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change the name of a device. A format string such as "eth%d"
 *	may be passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!dev_valid_name(newname))
		return -EINVAL;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	if (strchr(newname, '%')) {
		err = dev_alloc_name(dev, newname);
		if (err < 0)
			return err;
	} else if (__dev_get_by_name(net, newname))
		return -EEXIST;
	else
		strlcpy(dev->name, newname, IFNAMSIZ);

rollback:
	/* For now only devices in the initial network namespace
	 * are in sysfs.
	 */
	if (net == &init_net) {
		ret = device_rename(&dev->dev, dev->name);
		if (ret) {
			memcpy(dev->name, oldname, IFNAMSIZ);
			return ret;
		}
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		if (err) {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		} else {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		}
	}

	return err;
}
/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set the ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!dev->ifalias)
		return -ENOMEM;

	strlcpy(dev->ifalias, alias, len+1);
	return 0;
}
/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}
EXPORT_SYMBOL(netdev_state_change);

void netdev_bonding_change(struct net_device *dev, unsigned long event)
{
	call_netdevice_notifiers(event, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);
/**
 *	dev_load 	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	rcu_read_unlock();

	if (!dev && capable(CAP_NET_ADMIN))
		request_module("%s", name);
}
EXPORT_SYMBOL(dev_load);
/**
 *	dev_open	- prepare an interface for use.
 *	@dev: device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	/*
	 *	Is it already up?
	 */
	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	/*
	 *	If it went open OK then:
	 */
	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;

		net_dmaengine_get();

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 *	... and announce new interface.
		 */
		call_netdevice_notifiers(NETDEV_UP, dev);
	}

	return ret;
}
EXPORT_SYMBOL(dev_open);
/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	ASSERT_RTNL();

	might_sleep();

	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for its death while the device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (ops->ndo_stop)
		ops->ndo_stop(dev);

	/*
	 *	Device is now down.
	 */
	dev->flags &= ~IFF_UP;

	/*
	 *	Tell people we are down
	 */
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	net_dmaengine_put();

	return 0;
}
EXPORT_SYMBOL(dev_close);
/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);
static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 * 	When registered all registration and up events are replayed
 *	to the new notifier to allow device to have a race free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}
unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
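
/*
 * Illustrative sketch (not part of the original file) of a notifier
 * block; my_netdev_event is a made-up handler name. The void pointer
 * passed to the callback is the struct net_device itself:
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			printk(KERN_INFO "%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_nb);
 */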
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);
/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}

/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);

static inline void net_timestamp(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}
/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;

#ifdef CONFIG_NET_CLS_ACT
	if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
		net_timestamp(skb);
#else
	net_timestamp(skb);
#endif

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			   set by sender, so that the second statement is
			   just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       skb2->protocol, dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype, skb->dev);
		}
	}
	rcu_read_unlock();
}
static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = sd->output_queue;
	sd->output_queue = q;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);
/**
 *	netif_device_detach - mark device as removed
 *	@dev: network device
 *
 *	Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 *	netif_device_attach - mark device as attached
 *	@dev: network device
 *
 *	Mark device as attached to the system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
static bool can_checksum_protocol(unsigned long features, __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_IP_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_IPV6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)) ||
		((features & NETIF_F_FCOE_CRC) &&
		 protocol == htons(ETH_P_FCOE)));
}

static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
{
	if (can_checksum_protocol(dev->features, skb->protocol))
		return true;

	if (skb->protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		if (can_checksum_protocol(dev->features & dev->vlan_features,
					  veh->h_vlan_encapsulated_proto))
			return true;
	}

	return false;
}
/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		/* Let GSO fix up the checksum. */
		goto out_set_summed;
	}

	offset = skb->csum_start - skb_headroom(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);
/**
 *	skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	int err;

	skb_reset_mac_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;
	__skb_pull(skb, skb->mac_len);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		struct net_device *dev = skb->dev;
		struct ethtool_drvinfo info = {};

		if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
			dev->ethtool_ops->get_drvinfo(dev, &info);

		WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
			"ip_summed=%d",
		     info.driver, dev ? dev->features : 0L,
		     skb->sk ? skb->sk->sk_route_caps : 0L,
		     skb->len, skb->data_len, skb->ip_summed);

		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				err = ptype->gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
			}
			segs = ptype->gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_gso_segment);
/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		printk(KERN_ERR "%s: hw csum failure.\n",
			dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif
/* Actually, we should eliminate this check as soon as we know that:
 * 1. The IOMMU is present and allows mapping all the memory.
 * 2. No high memory really exists on this machine.
 */

static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;

	if (dev->features & NETIF_F_HIGHDMA)
		return 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		if (PageHighMem(skb_shinfo(skb)->frags[i].page))
			return 1;

#endif
	return 0;
}

struct dev_gso_cb {
	void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
	struct dev_gso_cb *cb;

	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		kfree_skb(nskb);
	} while (skb->next);

	cb = DEV_GSO_CB(skb);
	if (cb->destructor)
		cb->destructor(skb);
}
/**
 *	dev_gso_segment - Perform emulated hardware segmentation on skb.
 *	@skb: buffer to segment
 *
 *	This function segments the given skb and stores the list of segments
 *	in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct sk_buff *segs;
	int features = dev->features & ~(illegal_highdma(dev, skb) ?
					 NETIF_F_SG : 0);

	segs = skb_gso_segment(skb, features);

	/* Verifying header integrity only. */
	if (!segs)
		return 0;

	if (IS_ERR(segs))
		return PTR_ERR(segs);

	skb->next = segs;
	DEV_GSO_CB(skb)->destructor = skb->destructor;
	skb->destructor = dev_gso_skb_destructor;

	return 0;
}
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
			struct netdev_queue *txq)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int rc = NETDEV_TX_OK;

	if (likely(!skb->next)) {
		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(skb, dev);

		if (netif_needs_gso(dev, skb)) {
			if (unlikely(dev_gso_segment(skb)))
				goto out_kfree_skb;
			if (skb->next)
				goto gso;
		}

		/*
		 * If device doesn't need skb->dst, release it right now while
		 * it's hot in this cpu cache
		 */
		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
			skb_dst_drop(skb);

		rc = ops->ndo_start_xmit(skb, dev);
		if (rc == NETDEV_TX_OK)
			txq_trans_update(txq);
		/*
		 * TODO: if skb_orphan() was called by
		 * dev->hard_start_xmit() (for example, the unmodified
		 * igb driver does that; bnx2 doesn't), then
		 * skb_tx_software_timestamp() will be unable to send
		 * back the time stamp.
		 *
		 * How can this be prevented? Always create another
		 * reference to the socket before calling
		 * dev->hard_start_xmit()? Prevent that skb_orphan()
		 * does anything in dev->hard_start_xmit() by clearing
		 * the skb destructor before the call and restoring it
		 * afterwards, then doing the skb_orphan() ourselves?
		 */
		return rc;
	}

gso:
	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		rc = ops->ndo_start_xmit(nskb, dev);
		if (unlikely(rc != NETDEV_TX_OK)) {
			nskb->next = skb->next;
			skb->next = nskb;
			return rc;
		}
		txq_trans_update(txq);
		if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
			return NETDEV_TX_BUSY;
	} while (skb->next);

	skb->destructor = DEV_GSO_CB(skb)->destructor;

out_kfree_skb:
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
static u32 skb_tx_hashrnd;

u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
{
	u32 hash;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= dev->real_num_tx_queues))
			hash -= dev->real_num_tx_queues;
		return hash;
	}

	if (skb->sk && skb->sk->sk_hash)
		hash = skb->sk->sk_hash;
	else
		hash = skb->protocol;

	hash = jhash_1word(hash, skb_tx_hashrnd);

	return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
}
EXPORT_SYMBOL(skb_tx_hash);
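
/*
 * Illustrative note (not part of the original file): the final line maps
 * the 32-bit hash onto [0, real_num_tx_queues) without a modulus. Since
 * hash < 2^32, we have hash * n < n * 2^32, so (hash * n) >> 32 < n.
 * For example, with n = 4 queues and hash = 0x80000000:
 *
 *	((u64)0x80000000 * 4) >> 32 == 2
 */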
static struct netdev_queue *dev_pick_tx(struct net_device *dev,
					struct sk_buff *skb)
{
	u16 queue_index;
	struct sock *sk = skb->sk;

	if (sk_tx_queue_recorded(sk)) {
		queue_index = sk_tx_queue_get(sk);
	} else {
		const struct net_device_ops *ops = dev->netdev_ops;

		if (ops->ndo_select_queue) {
			queue_index = ops->ndo_select_queue(dev, skb);
		} else {
			queue_index = 0;
			if (dev->real_num_tx_queues > 1)
				queue_index = skb_tx_hash(dev, skb);

			if (sk && sk->sk_dst_cache)
				sk_tx_queue_set(sk, queue_index);
		}
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}
static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
				 struct net_device *dev,
				 struct netdev_queue *txq)
{
	spinlock_t *root_lock = qdisc_lock(q);
	int rc;

	spin_lock(root_lock);
	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
		kfree_skb(skb);
		rc = NET_XMIT_DROP;
	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
		   !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) {
		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out; and the qdisc is not running -
		 * xmit the skb directly.
		 */
		__qdisc_update_bstats(q, skb->len);
		if (sch_direct_xmit(skb, q, dev, txq, root_lock))
			__qdisc_run(q);
		else
			clear_bit(__QDISC_STATE_RUNNING, &q->state);

		rc = NET_XMIT_SUCCESS;
	} else {
		rc = qdisc_enqueue_root(skb, q);
		qdisc_run(q);
	}
	spin_unlock(root_lock);

	return rc;
}
/**
 *	dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *	I notice this method can also return errors from the queue disciplines,
 *	including NET_XMIT_DROP, which is a positive value.  So, errors can also
 *	be positive.
 *
 *	Regardless of the return value, the skb is consumed, so it is currently
 *	difficult to retry a send to this method.  (You can bump the ref count
 *	before sending to hold a reference for retry if you are careful.)
 *
 *	When calling this method, interrupts MUST be enabled.  This is because
 *	the BH enable code must have IRQs enabled so that it will not deadlock.
 */
int dev_queue_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;

	/* GSO will handle the following emulations directly. */
	if (netif_needs_gso(dev, skb))
		goto gso;

	if (skb_has_frags(skb) &&
	    !(dev->features & NETIF_F_FRAGLIST) &&
	    __skb_linearize(skb))
		goto out_kfree_skb;

	/* Fragmented skb is linearized if device does not support SG,
	 * or if at least one of fragments is in highmem and device
	 * does not support DMA from it.
	 */
	if (skb_shinfo(skb)->nr_frags &&
	    (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
	    __skb_linearize(skb))
		goto out_kfree_skb;

	/* If packet is not checksummed and device does not support
	 * checksumming for this protocol, complete checksumming here.
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		skb_set_transport_header(skb, skb->csum_start -
					      skb_headroom(skb));
		if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
			goto out_kfree_skb;
	}

gso:
	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	txq = dev_pick_tx(dev, skb);
	q = rcu_dereference(txq->qdisc);

#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
	if (q->enqueue) {
		rc = __dev_xmit_skb(skb, q, dev, txq);
		goto out;
	}

	/* The device has no queue. Common case for software devices:
	 * loopback, all the sorts of tunnels...
	 *
	 * Really, it is unlikely that netif_tx_lock protection is necessary
	 * here. (f.e. loopback and IP tunnels are clean ignoring statistics
	 * counters.)
	 * However, it is possible that they rely on the protection
	 * made by us here.
	 *
	 * So check this and take the lock anyway; it is not prone to
	 * deadlocks. Either way the noqueue qdisc case is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (txq->xmit_lock_owner != cpu) {

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_tx_queue_stopped(txq)) {
				rc = NET_XMIT_SUCCESS;
				if (!dev_hard_start_xmit(skb, dev, txq)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			if (net_ratelimit())
				printk(KERN_CRIT "Virtual device %s asks to "
				       "queue packet!\n", dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately
			 */
			if (net_ratelimit())
				printk(KERN_CRIT "Dead loop on virtual device "
				       "%s, fix it urgently!\n", dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

out_kfree_skb:
	kfree_skb(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}
EXPORT_SYMBOL(dev_queue_xmit);
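
/*
 * Illustrative sketch (not part of the original file) of a typical
 * caller: a protocol builds the skb, sets skb->dev, and hands it off.
 * Note that negative errnos and positive NET_XMIT_* codes both mean the
 * frame may not have hit the wire:
 *
 *	skb->dev = dev;
 *	skb->protocol = htons(ETH_P_IP);
 *	err = dev_queue_xmit(skb);	// consumes skb, even on error
 *	if (err)
 *		// dropped, congested, or shaped; do not touch skb again
 */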
/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog __read_mostly = 1000;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;            /* old backlog weight */

DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped)
 *
 */

int netif_rx(struct sk_buff *skb)
{
	struct softnet_data *queue;
	unsigned long flags;

	/* if netpoll wants it, pretend we never saw it */
	if (netpoll_rx(skb))
		return NET_RX_DROP;

	if (!skb->tstamp.tv64)
		net_timestamp(skb);

	/*
	 * The code is rearranged so that the path is shortest when
	 * the CPU is congested, but it is still operating.
	 */
	local_irq_save(flags);
	queue = &__get_cpu_var(softnet_data);

	__get_cpu_var(netdev_rx_stat).total++;
	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
		if (queue->input_pkt_queue.qlen) {
enqueue:
			__skb_queue_tail(&queue->input_pkt_queue, skb);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		napi_schedule(&queue->backlog);
		goto enqueue;
	}

	__get_cpu_var(netdev_rx_stat).dropped++;
	local_irq_restore(flags);

	kfree_skb(skb);
	return NET_RX_DROP;
}
EXPORT_SYMBOL(netif_rx);
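
/*
 * Illustrative sketch (not part of the original file): a non-NAPI driver's
 * receive interrupt typically builds an skb and posts it here; priv,
 * pkt_len and my_hw_read_frame() are made-up names:
 *
 *	skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
 *	if (!skb)
 *		return;		// drop; bump the rx_dropped counter
 *	skb_reserve(skb, NET_IP_ALIGN);
 *	my_hw_read_frame(priv, skb_put(skb, pkt_len));
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */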
int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	preempt_disable();
	err = netif_rx(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}
EXPORT_SYMBOL(netif_rx_ni);
static void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;
			clist = clist->next;

			WARN_ON(atomic_read(&skb->users));
			__kfree_skb(skb);
		}
	}

	if (sd->output_queue) {
		struct Qdisc *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		local_irq_enable();

		while (head) {
			struct Qdisc *q = head;
			spinlock_t *root_lock;

			head = head->next_sched;

			root_lock = qdisc_lock(q);
			if (spin_trylock(root_lock)) {
				smp_mb__before_clear_bit();
				clear_bit(__QDISC_STATE_SCHED,
					  &q->state);
				qdisc_run(q);
				spin_unlock(root_lock);
			} else {
				if (!test_bit(__QDISC_STATE_DEACTIVATED,
					      &q->state)) {
					__netif_reschedule(q);
				} else {
					smp_mb__before_clear_bit();
					clear_bit(__QDISC_STATE_SCHED,
						  &q->state);
				}
			}
		}
	}
}
static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)

#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
/* This hook is defined here for ATM LANE */
int (*br_fdb_test_addr_hook)(struct net_device *dev,
			     unsigned char *addr) __read_mostly;
EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
#endif

/*
 * If bridge module is loaded call bridging hook.
 *  returns NULL if packet was consumed.
 */
struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
					struct sk_buff *skb) __read_mostly;
EXPORT_SYMBOL_GPL(br_handle_frame_hook);

static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
					    struct packet_type **pt_prev, int *ret,
					    struct net_device *orig_dev)
{
	struct net_bridge_port *port;

	if (skb->pkt_type == PACKET_LOOPBACK ||
	    (port = rcu_dereference(skb->dev->br_port)) == NULL)
		return skb;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	return br_handle_frame_hook(port, skb);
}
#else
#define handle_bridge(skb, pt_prev, ret, orig_dev)	(skb)
#endif

#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);

static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
					     struct packet_type **pt_prev,
					     int *ret,
					     struct net_device *orig_dev)
{
	if (skb->dev->macvlan_port == NULL)
		return skb;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}
	return macvlan_handle_frame_hook(skb);
}
#else
#define handle_macvlan(skb, pt_prev, ret, orig_dev)	(skb)
#endif
#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * when CONFIG_NET_CLS_ACT is? Otherwise we execute a few useless
 * instructions (a compare and two stores) when sch_ingress is not
 * loaded but CONFIG_NET_CLS_ACT is set.
 * NOTE: This doesn't stop any functionality; without the ingress
 * scheduler you just can't add policies on ingress.
 */
static int ing_filter(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	u32 ttl = G_TC_RTTL(skb->tc_verd);
	struct netdev_queue *rxq;
	int result = TC_ACT_OK;
	struct Qdisc *q;

	if (MAX_RED_LOOP < ttl++) {
		printk(KERN_WARNING
		       "Redir loop detected Dropping packet (%d->%d)\n",
		       skb->iif, dev->ifindex);
		return TC_ACT_SHOT;
	}

	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);

	rxq = &dev->rx_queue;

	q = rxq->qdisc;
	if (q != &noop_qdisc) {
		spin_lock(qdisc_lock(q));
		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
			result = qdisc_enqueue_root(skb, q);
		spin_unlock(qdisc_lock(q));
	}

	return result;
}

static inline struct sk_buff *handle_ing(struct sk_buff *skb,
					 struct packet_type **pt_prev,
					 int *ret, struct net_device *orig_dev)
{
	if (skb->dev->rx_queue.qdisc == &noop_qdisc)
		goto out;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	} else {
		/* Huh? Why does turning on AF_PACKET affect this? */
		skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
	}

	switch (ing_filter(skb)) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
		kfree_skb(skb);
		return NULL;
	}

out:
	skb->tc_verd = 0;
	return skb;
}
#endif
/**
 *	netif_nit_deliver - deliver received packets to network taps
 *	@skb: buffer
 *
 *	This function is used to deliver incoming packets to network
 *	taps. It should be used when the normal netif_receive_skb path
 *	is bypassed, for example because of VLAN acceleration.
 */
void netif_nit_deliver(struct sk_buff *skb)
{
	struct packet_type *ptype;

	if (list_empty(&ptype_all))
		return;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (!ptype->dev || ptype->dev == skb->dev)
			deliver_skb(skb, ptype, skb->dev);
	}
	rcu_read_unlock();
}
2337 * netif_receive_skb - process receive buffer from network
2338 * @skb: buffer to process
2340 * netif_receive_skb() is the main receive data processing function.
2341 * It always succeeds. The buffer may be dropped during processing
2342 * for congestion control or by the protocol layers.
2344 * This function may only be called from softirq context and interrupts
2345 * should be enabled.
2347 * Return values (usually ignored):
2348 * NET_RX_SUCCESS: no congestion
2349 * NET_RX_DROP: packet was dropped
2351 int netif_receive_skb(struct sk_buff *skb)
2353 struct packet_type *ptype, *pt_prev;
2354 struct net_device *orig_dev;
2355 struct net_device *null_or_orig;
2356 int ret = NET_RX_DROP;
2359 if (!skb->tstamp.tv64)
2362 if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb))
2363 return NET_RX_SUCCESS;
2365 /* if we've gotten here through NAPI, check netpoll */
2366 if (netpoll_receive_skb(skb))
2370 skb->iif = skb->dev->ifindex;
2372 null_or_orig = NULL;
2373 orig_dev = skb->dev;
2374 if (orig_dev->master) {
2375 if (skb_bond_should_drop(skb))
2376 null_or_orig = orig_dev; /* deliver only exact match */
2378 skb->dev = orig_dev->master;
2381 __get_cpu_var(netdev_rx_stat).total++;
2383 skb_reset_network_header(skb);
2384 skb_reset_transport_header(skb);
2385 skb->mac_len = skb->network_header - skb->mac_header;
2391 #ifdef CONFIG_NET_CLS_ACT
2392 if (skb->tc_verd & TC_NCLS) {
2393 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2398 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2399 if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2400 ptype->dev == orig_dev) {
2402 ret = deliver_skb(skb, pt_prev, orig_dev);
2407 #ifdef CONFIG_NET_CLS_ACT
2408 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2414 skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
2417 skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
2421 type = skb->protocol;
2422 list_for_each_entry_rcu(ptype,
2423 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
2424 if (ptype->type == type &&
2425 (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2426 ptype->dev == orig_dev)) {
2428 ret = deliver_skb(skb, pt_prev, orig_dev);
2434 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2437 /* Jamal, now you will not be able to escape explaining
2438 * to me how you were going to use this. :-)
2447 EXPORT_SYMBOL(netif_receive_skb);
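/*
 * Illustrative sketch (not part of the original file): how a simple
 * driver's receive path hands a frame to netif_receive_skb() from softirq
 * context. my_rx_one_frame() and the way the frame bytes arrive are
 * hypothetical; netdev_alloc_skb_ip_align(), eth_type_trans() and
 * netif_receive_skb() are the real APIs used here.
 */
static void my_rx_one_frame(struct net_device *dev, void *data, unsigned int len)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(dev, len);
	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}
	memcpy(skb_put(skb, len), data, len);	/* copy out of the hardware buffer */
	skb->protocol = eth_type_trans(skb, dev);
	netif_receive_skb(skb);	/* must run in softirq context, see above */
}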
2449 /* Network device is going away, flush any packets still pending */
2450 static void flush_backlog(void *arg)
2452 struct net_device *dev = arg;
2453 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2454 struct sk_buff *skb, *tmp;
2456 skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
2457 if (skb->dev == dev) {
2458 __skb_unlink(skb, &queue->input_pkt_queue);
2463 static int napi_gro_complete(struct sk_buff *skb)
2465 struct packet_type *ptype;
2466 __be16 type = skb->protocol;
2467 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
2470 if (NAPI_GRO_CB(skb)->count == 1) {
2471 skb_shinfo(skb)->gso_size = 0;
2476 list_for_each_entry_rcu(ptype, head, list) {
2477 if (ptype->type != type || ptype->dev || !ptype->gro_complete)
2480 err = ptype->gro_complete(skb);
2486 WARN_ON(&ptype->list == head);
2488 return NET_RX_SUCCESS;
2492 return netif_receive_skb(skb);
2495 void napi_gro_flush(struct napi_struct *napi)
2497 struct sk_buff *skb, *next;
2499 for (skb = napi->gro_list; skb; skb = next) {
2502 napi_gro_complete(skb);
2505 napi->gro_count = 0;
2506 napi->gro_list = NULL;
2508 EXPORT_SYMBOL(napi_gro_flush);
2510 enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2512 struct sk_buff **pp = NULL;
2513 struct packet_type *ptype;
2514 __be16 type = skb->protocol;
2515 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
2518 enum gro_result ret;
2520 if (!(skb->dev->features & NETIF_F_GRO))
2523 if (skb_is_gso(skb) || skb_has_frags(skb))
2527 list_for_each_entry_rcu(ptype, head, list) {
2528 if (ptype->type != type || ptype->dev || !ptype->gro_receive)
2531 skb_set_network_header(skb, skb_gro_offset(skb));
2532 mac_len = skb->network_header - skb->mac_header;
2533 skb->mac_len = mac_len;
2534 NAPI_GRO_CB(skb)->same_flow = 0;
2535 NAPI_GRO_CB(skb)->flush = 0;
2536 NAPI_GRO_CB(skb)->free = 0;
2538 pp = ptype->gro_receive(&napi->gro_list, skb);
2543 if (&ptype->list == head)
2546 same_flow = NAPI_GRO_CB(skb)->same_flow;
2547 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
2550 struct sk_buff *nskb = *pp;
2554 napi_gro_complete(nskb);
2561 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
2565 NAPI_GRO_CB(skb)->count = 1;
2566 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
2567 skb->next = napi->gro_list;
2568 napi->gro_list = skb;
2572 if (skb_headlen(skb) < skb_gro_offset(skb)) {
2573 int grow = skb_gro_offset(skb) - skb_headlen(skb);
2575 BUG_ON(skb->end - skb->tail < grow);
2577 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
2580 skb->data_len -= grow;
2582 skb_shinfo(skb)->frags[0].page_offset += grow;
2583 skb_shinfo(skb)->frags[0].size -= grow;
2585 if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
2586 put_page(skb_shinfo(skb)->frags[0].page);
2587 memmove(skb_shinfo(skb)->frags,
2588 skb_shinfo(skb)->frags + 1,
2589 --skb_shinfo(skb)->nr_frags);
2600 EXPORT_SYMBOL(dev_gro_receive);
2603 __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2607 if (netpoll_rx_on(skb))
2610 for (p = napi->gro_list; p; p = p->next) {
2611 NAPI_GRO_CB(p)->same_flow = (p->dev == skb->dev)
2612 && !compare_ether_header(skb_mac_header(p),
2613 skb_gro_mac_header(skb));
2614 NAPI_GRO_CB(p)->flush = 0;
2617 return dev_gro_receive(napi, skb);
2620 gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
2624 if (netif_receive_skb(skb))
2629 case GRO_MERGED_FREE:
2640 EXPORT_SYMBOL(napi_skb_finish);
2642 void skb_gro_reset_offset(struct sk_buff *skb)
2644 NAPI_GRO_CB(skb)->data_offset = 0;
2645 NAPI_GRO_CB(skb)->frag0 = NULL;
2646 NAPI_GRO_CB(skb)->frag0_len = 0;
2648 if (skb->mac_header == skb->tail &&
2649 !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
2650 NAPI_GRO_CB(skb)->frag0 =
2651 page_address(skb_shinfo(skb)->frags[0].page) +
2652 skb_shinfo(skb)->frags[0].page_offset;
2653 NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
2656 EXPORT_SYMBOL(skb_gro_reset_offset);
2658 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2660 skb_gro_reset_offset(skb);
2662 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
2664 EXPORT_SYMBOL(napi_gro_receive);
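/*
 * Illustrative sketch: a NAPI poll handler feeding received frames through
 * GRO with napi_gro_receive(). struct my_gro_priv, my_fetch_skb() and
 * my_rx_irq_enable() are hypothetical driver details; napi_gro_receive()
 * and napi_complete() are the real entry points.
 */
struct my_gro_priv {
	struct napi_struct napi;
	struct net_device *dev;
};

static struct sk_buff *my_fetch_skb(struct my_gro_priv *priv);	/* hypothetical */
static void my_rx_irq_enable(struct my_gro_priv *priv);		/* hypothetical */

static int my_gro_poll(struct napi_struct *napi, int budget)
{
	struct my_gro_priv *priv = container_of(napi, struct my_gro_priv, napi);
	int work = 0;

	while (work < budget) {
		struct sk_buff *skb = my_fetch_skb(priv);

		if (!skb)
			break;
		skb->protocol = eth_type_trans(skb, priv->dev);
		/* GRO may merge, hold or immediately deliver the frame */
		napi_gro_receive(napi, skb);
		work++;
	}

	if (work < budget) {
		napi_complete(napi);		/* all done, leave polled mode */
		my_rx_irq_enable(priv);		/* re-arm the RX interrupt */
	}
	return work;
}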
2666 void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
2668 __skb_pull(skb, skb_headlen(skb));
2669 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
2673 EXPORT_SYMBOL(napi_reuse_skb);
2675 struct sk_buff *napi_get_frags(struct napi_struct *napi)
2677 struct sk_buff *skb = napi->skb;
2680 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
2686 EXPORT_SYMBOL(napi_get_frags);
2688 gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
2694 skb->protocol = eth_type_trans(skb, napi->dev);
2696 if (ret == GRO_HELD)
2697 skb_gro_pull(skb, -ETH_HLEN);
2698 else if (netif_receive_skb(skb))
2703 case GRO_MERGED_FREE:
2704 napi_reuse_skb(napi, skb);
2713 EXPORT_SYMBOL(napi_frags_finish);
2715 struct sk_buff *napi_frags_skb(struct napi_struct *napi)
2717 struct sk_buff *skb = napi->skb;
2724 skb_reset_mac_header(skb);
2725 skb_gro_reset_offset(skb);
2727 off = skb_gro_offset(skb);
2728 hlen = off + sizeof(*eth);
2729 eth = skb_gro_header_fast(skb, off);
2730 if (skb_gro_header_hard(skb, hlen)) {
2731 eth = skb_gro_header_slow(skb, hlen, off);
2732 if (unlikely(!eth)) {
2733 napi_reuse_skb(napi, skb);
2739 skb_gro_pull(skb, sizeof(*eth));
2742 * This works because the only protocols we care about don't require
2743 * special handling. We'll fix it up properly at the end.
2745 skb->protocol = eth->h_proto;
2750 EXPORT_SYMBOL(napi_frags_skb);
2752 gro_result_t napi_gro_frags(struct napi_struct *napi)
2754 struct sk_buff *skb = napi_frags_skb(napi);
2759 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
2761 EXPORT_SYMBOL(napi_gro_frags);
2763 static int process_backlog(struct napi_struct *napi, int quota)
2766 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2767 unsigned long start_time = jiffies;
2769 napi->weight = weight_p;
2771 struct sk_buff *skb;
2773 local_irq_disable();
2774 skb = __skb_dequeue(&queue->input_pkt_queue);
2776 __napi_complete(napi);
2782 netif_receive_skb(skb);
2783 } while (++work < quota && jiffies == start_time);
2789 * __napi_schedule - schedule for receive
2790 * @n: entry to schedule
2792 * The entry's receive function will be scheduled to run
2794 void __napi_schedule(struct napi_struct *n)
2796 unsigned long flags;
2798 local_irq_save(flags);
2799 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
2800 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2801 local_irq_restore(flags);
2803 EXPORT_SYMBOL(__napi_schedule);
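/*
 * Illustrative sketch: the interrupt-handler counterpart of
 * __napi_schedule(). struct my_irq_priv and my_hw_irq_disable() are
 * hypothetical; napi_schedule_prep()/__napi_schedule() are the real pair
 * (most drivers use the napi_schedule() wrapper that combines them).
 */
struct my_irq_priv {
	struct napi_struct napi;
	void *hw;
};

static void my_hw_irq_disable(void *hw);	/* hypothetical */

static irqreturn_t my_isr(int irq, void *dev_id)
{
	struct my_irq_priv *priv = dev_id;

	if (napi_schedule_prep(&priv->napi)) {
		my_hw_irq_disable(priv->hw);	/* quiesce RX interrupts */
		__napi_schedule(&priv->napi);	/* poll runs from NET_RX_SOFTIRQ */
	}
	return IRQ_HANDLED;
}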
2805 void __napi_complete(struct napi_struct *n)
2807 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
2808 BUG_ON(n->gro_list);
2810 list_del(&n->poll_list);
2811 smp_mb__before_clear_bit();
2812 clear_bit(NAPI_STATE_SCHED, &n->state);
2814 EXPORT_SYMBOL(__napi_complete);
2816 void napi_complete(struct napi_struct *n)
2818 unsigned long flags;
2821 * don't let napi dequeue from the cpu poll list
2822 * just in case its running on a different cpu
2824 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
2828 local_irq_save(flags);
2830 local_irq_restore(flags);
2832 EXPORT_SYMBOL(napi_complete);
2834 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2835 int (*poll)(struct napi_struct *, int), int weight)
2837 INIT_LIST_HEAD(&napi->poll_list);
2838 napi->gro_count = 0;
2839 napi->gro_list = NULL;
2842 napi->weight = weight;
2843 list_add(&napi->dev_list, &dev->napi_list);
2845 #ifdef CONFIG_NETPOLL
2846 spin_lock_init(&napi->poll_lock);
2847 napi->poll_owner = -1;
2849 set_bit(NAPI_STATE_SCHED, &napi->state);
2851 EXPORT_SYMBOL(netif_napi_add);
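/*
 * Illustrative sketch: registering the NAPI context at probe time. The
 * weight of 64 is the conventional value for Ethernet drivers; struct
 * my_probe_priv and my_probe_poll() are hypothetical.
 */
struct my_probe_priv {
	struct napi_struct napi;
};

static int my_probe_poll(struct napi_struct *napi, int budget);	/* hypothetical */

static int my_probe(struct net_device *dev)
{
	struct my_probe_priv *priv = netdev_priv(dev);

	/* netif_napi_add() leaves NAPI_STATE_SCHED set, so the instance
	 * cannot fire until napi_enable() is called, typically from ndo_open.
	 */
	netif_napi_add(dev, &priv->napi, my_probe_poll, 64);
	return register_netdev(dev);
}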
2853 void netif_napi_del(struct napi_struct *napi)
2855 struct sk_buff *skb, *next;
2857 list_del_init(&napi->dev_list);
2858 napi_free_frags(napi);
2860 for (skb = napi->gro_list; skb; skb = next) {
2866 napi->gro_list = NULL;
2867 napi->gro_count = 0;
2869 EXPORT_SYMBOL(netif_napi_del);
2872 static void net_rx_action(struct softirq_action *h)
2874 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
2875 unsigned long time_limit = jiffies + 2;
2876 int budget = netdev_budget;
2879 local_irq_disable();
2881 while (!list_empty(list)) {
2882 struct napi_struct *n;
2885 /* If the softirq window is exhausted then punt.
2886 * Allow this to run for 2 jiffies, which allows
2887 * an average latency of 1.5/HZ.
2889 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
2894 /* Even though interrupts have been re-enabled, this
2895 * access is safe because interrupts can only add new
2896 * entries to the tail of this list, and only ->poll()
2897 * calls can remove this head entry from the list.
2899 n = list_entry(list->next, struct napi_struct, poll_list);
2901 have = netpoll_poll_lock(n);
2905 /* This NAPI_STATE_SCHED test is for avoiding a race
2906 * with netpoll's poll_napi(). Only the entity which
2907 * obtains the lock and sees NAPI_STATE_SCHED set will
2908 * actually make the ->poll() call. Therefore we avoid
2909 * accidentally calling ->poll() when NAPI is not scheduled.
2912 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
2913 work = n->poll(n, weight);
2917 WARN_ON_ONCE(work > weight);
2921 local_irq_disable();
2923 /* Drivers must not modify the NAPI state if they
2924 * consume the entire weight. In such cases this code
2925 * still "owns" the NAPI instance and therefore can
2926 * move the instance around on the list at-will.
2928 if (unlikely(work == weight)) {
2929 if (unlikely(napi_disable_pending(n))) {
2932 local_irq_disable();
2934 list_move_tail(&n->poll_list, list);
2937 netpoll_poll_unlock(have);
2942 #ifdef CONFIG_NET_DMA
2944 * There may not be any more sk_buffs coming right now, so push
2945 * any pending DMA copies to hardware
2947 dma_issue_pending_all();
2953 __get_cpu_var(netdev_rx_stat).time_squeeze++;
2954 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2958 static gifconf_func_t *gifconf_list[NPROTO];
2961 * register_gifconf - register a SIOCGIF handler
2962 * @family: Address family
2963 * @gifconf: Function handler
2965 * Register protocol dependent address dumping routines. The handler
2966 * that is passed must not be freed or reused until it has been replaced
2967 * by another handler.
2969 int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
2971 if (family >= NPROTO)
2973 gifconf_list[family] = gifconf;
2976 EXPORT_SYMBOL(register_gifconf);
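/*
 * Illustrative sketch: registering a per-family SIOCGIFCONF dumper. The
 * contract, visible in dev_ifconf() below, is: with a NULL buffer return
 * the space needed for @dev, otherwise write at most @len bytes of ifreq
 * entries and return the number written. my_family_gifconf() and the
 * family number 27 are hypothetical; AF_INET registers a real analogue.
 */
static int my_family_gifconf(struct net_device *dev, char __user *buf, int len)
{
	if (!buf)
		return sizeof(struct ifreq);		/* sizing pass */
	if (len < (int)sizeof(struct ifreq))
		return 0;				/* no room left */
	/* ... copy_to_user() one struct ifreq describing @dev ... */
	return sizeof(struct ifreq);
}

static int __init my_family_init(void)
{
	return register_gifconf(27 /* hypothetical family */, my_family_gifconf);
}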
2980 * Map an interface index to its name (SIOCGIFNAME)
2984 * We need this ioctl for efficient implementation of the
2985 * if_indextoname() function required by the IPv6 API. Without
2986 * it, we would have to search all the interfaces to find a match.
2990 static int dev_ifname(struct net *net, struct ifreq __user *arg)
2992 struct net_device *dev;
2996 * Fetch the caller's info block.
2999 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3003 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
3009 strcpy(ifr.ifr_name, dev->name);
3012 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
3018 * Perform a SIOCGIFCONF call. This structure will change
3019 * size eventually, and there is nothing I can do about it.
3020 * Thus we will need a 'compatibility mode'.
3023 static int dev_ifconf(struct net *net, char __user *arg)
3026 struct net_device *dev;
3033 * Fetch the caller's info block.
3036 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
3043 * Loop over the interfaces, and write an info block for each.
3047 for_each_netdev(net, dev) {
3048 for (i = 0; i < NPROTO; i++) {
3049 if (gifconf_list[i]) {
3052 done = gifconf_list[i](dev, NULL, 0);
3054 done = gifconf_list[i](dev, pos + total,
3064 * All done. Write the updated control block back to the caller.
3066 ifc.ifc_len = total;
3069 * Both BSD and Solaris return 0 here, so we do too.
3071 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
3074 #ifdef CONFIG_PROC_FS
3076 * This is invoked by the /proc filesystem handler to display a device in detail.
3079 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
3080 __acquires(dev_base_lock)
3082 struct net *net = seq_file_net(seq);
3084 struct net_device *dev;
3086 read_lock(&dev_base_lock);
3088 return SEQ_START_TOKEN;
3091 for_each_netdev(net, dev)
3098 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3100 struct net *net = seq_file_net(seq);
3102 return v == SEQ_START_TOKEN ?
3103 first_net_device(net) : next_net_device((struct net_device *)v);
3106 void dev_seq_stop(struct seq_file *seq, void *v)
3107 __releases(dev_base_lock)
3109 read_unlock(&dev_base_lock);
3112 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
3114 const struct net_device_stats *stats = dev_get_stats(dev);
3116 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
3117 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
3118 dev->name, stats->rx_bytes, stats->rx_packets,
3120 stats->rx_dropped + stats->rx_missed_errors,
3121 stats->rx_fifo_errors,
3122 stats->rx_length_errors + stats->rx_over_errors +
3123 stats->rx_crc_errors + stats->rx_frame_errors,
3124 stats->rx_compressed, stats->multicast,
3125 stats->tx_bytes, stats->tx_packets,
3126 stats->tx_errors, stats->tx_dropped,
3127 stats->tx_fifo_errors, stats->collisions,
3128 stats->tx_carrier_errors +
3129 stats->tx_aborted_errors +
3130 stats->tx_window_errors +
3131 stats->tx_heartbeat_errors,
3132 stats->tx_compressed);
3136 * Called from the PROCfs module. This now uses the new arbitrary sized
3137 * /proc/net interface to create /proc/net/dev
3139 static int dev_seq_show(struct seq_file *seq, void *v)
3141 if (v == SEQ_START_TOKEN)
3142 seq_puts(seq, "Inter-| Receive "
3144 " face |bytes packets errs drop fifo frame "
3145 "compressed multicast|bytes packets errs "
3146 "drop fifo colls carrier compressed\n");
3148 dev_seq_printf_stats(seq, v);
3152 static struct netif_rx_stats *softnet_get_online(loff_t *pos)
3154 struct netif_rx_stats *rc = NULL;
3156 while (*pos < nr_cpu_ids)
3157 if (cpu_online(*pos)) {
3158 rc = &per_cpu(netdev_rx_stat, *pos);
3165 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
3167 return softnet_get_online(pos);
3170 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3173 return softnet_get_online(pos);
3176 static void softnet_seq_stop(struct seq_file *seq, void *v)
3180 static int softnet_seq_show(struct seq_file *seq, void *v)
3182 struct netif_rx_stats *s = v;
3184 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
3185 s->total, s->dropped, s->time_squeeze, 0,
3186 0, 0, 0, 0, /* was fastroute */
3191 static const struct seq_operations dev_seq_ops = {
3192 .start = dev_seq_start,
3193 .next = dev_seq_next,
3194 .stop = dev_seq_stop,
3195 .show = dev_seq_show,
3198 static int dev_seq_open(struct inode *inode, struct file *file)
3200 return seq_open_net(inode, file, &dev_seq_ops,
3201 sizeof(struct seq_net_private));
3204 static const struct file_operations dev_seq_fops = {
3205 .owner = THIS_MODULE,
3206 .open = dev_seq_open,
3208 .llseek = seq_lseek,
3209 .release = seq_release_net,
3212 static const struct seq_operations softnet_seq_ops = {
3213 .start = softnet_seq_start,
3214 .next = softnet_seq_next,
3215 .stop = softnet_seq_stop,
3216 .show = softnet_seq_show,
3219 static int softnet_seq_open(struct inode *inode, struct file *file)
3221 return seq_open(file, &softnet_seq_ops);
3224 static const struct file_operations softnet_seq_fops = {
3225 .owner = THIS_MODULE,
3226 .open = softnet_seq_open,
3228 .llseek = seq_lseek,
3229 .release = seq_release,
3232 static void *ptype_get_idx(loff_t pos)
3234 struct packet_type *pt = NULL;
3238 list_for_each_entry_rcu(pt, &ptype_all, list) {
3244 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
3245 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
3254 static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
3258 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
3261 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3263 struct packet_type *pt;
3264 struct list_head *nxt;
3268 if (v == SEQ_START_TOKEN)
3269 return ptype_get_idx(0);
3272 nxt = pt->list.next;
3273 if (pt->type == htons(ETH_P_ALL)) {
3274 if (nxt != &ptype_all)
3277 nxt = ptype_base[0].next;
3279 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
3281 while (nxt == &ptype_base[hash]) {
3282 if (++hash >= PTYPE_HASH_SIZE)
3284 nxt = ptype_base[hash].next;
3287 return list_entry(nxt, struct packet_type, list);
3290 static void ptype_seq_stop(struct seq_file *seq, void *v)
3296 static int ptype_seq_show(struct seq_file *seq, void *v)
3298 struct packet_type *pt = v;
3300 if (v == SEQ_START_TOKEN)
3301 seq_puts(seq, "Type Device Function\n");
3302 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
3303 if (pt->type == htons(ETH_P_ALL))
3304 seq_puts(seq, "ALL ");
3306 seq_printf(seq, "%04x", ntohs(pt->type));
3308 seq_printf(seq, " %-8s %pF\n",
3309 pt->dev ? pt->dev->name : "", pt->func);
3315 static const struct seq_operations ptype_seq_ops = {
3316 .start = ptype_seq_start,
3317 .next = ptype_seq_next,
3318 .stop = ptype_seq_stop,
3319 .show = ptype_seq_show,
3322 static int ptype_seq_open(struct inode *inode, struct file *file)
3324 return seq_open_net(inode, file, &ptype_seq_ops,
3325 sizeof(struct seq_net_private));
3328 static const struct file_operations ptype_seq_fops = {
3329 .owner = THIS_MODULE,
3330 .open = ptype_seq_open,
3332 .llseek = seq_lseek,
3333 .release = seq_release_net,
3337 static int __net_init dev_proc_net_init(struct net *net)
3341 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
3343 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
3345 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
3348 if (wext_proc_init(net))
3354 proc_net_remove(net, "ptype");
3356 proc_net_remove(net, "softnet_stat");
3358 proc_net_remove(net, "dev");
3362 static void __net_exit dev_proc_net_exit(struct net *net)
3364 wext_proc_exit(net);
3366 proc_net_remove(net, "ptype");
3367 proc_net_remove(net, "softnet_stat");
3368 proc_net_remove(net, "dev");
3371 static struct pernet_operations __net_initdata dev_proc_ops = {
3372 .init = dev_proc_net_init,
3373 .exit = dev_proc_net_exit,
3376 static int __init dev_proc_init(void)
3378 return register_pernet_subsys(&dev_proc_ops);
3381 #define dev_proc_init() 0
3382 #endif /* CONFIG_PROC_FS */
3386 * netdev_set_master - set up master/slave pair
3387 * @slave: slave device
3388 * @master: new master device
3390 * Changes the master device of the slave. Pass %NULL to break the
3391 * bonding. The caller must hold the RTNL semaphore. On a failure
3392 * a negative errno code is returned. On success the reference counts
3393 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
3394 * function returns zero.
3396 int netdev_set_master(struct net_device *slave, struct net_device *master)
3398 struct net_device *old = slave->master;
3408 slave->master = master;
3416 slave->flags |= IFF_SLAVE;
3418 slave->flags &= ~IFF_SLAVE;
3420 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
3423 EXPORT_SYMBOL(netdev_set_master);
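/*
 * Illustrative sketch: how a bonding-style driver pairs and unpairs
 * devices with netdev_set_master(). The real enslave path does much more
 * (MTU/MAC fixups, dev_open(), ...); my_enslave()/my_release_slave() are
 * hypothetical and ASSERT_RTNL() documents the locking rule above.
 */
static int my_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
{
	int err;

	ASSERT_RTNL();				/* caller holds the RTNL semaphore */
	err = netdev_set_master(slave_dev, bond_dev);
	if (err)
		return err;
	/* ... driver-specific slave setup ... */
	return 0;
}

static void my_release_slave(struct net_device *slave_dev)
{
	ASSERT_RTNL();
	netdev_set_master(slave_dev, NULL);	/* pass NULL to break the bonding */
}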
3425 static void dev_change_rx_flags(struct net_device *dev, int flags)
3427 const struct net_device_ops *ops = dev->netdev_ops;
3429 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
3430 ops->ndo_change_rx_flags(dev, flags);
3433 static int __dev_set_promiscuity(struct net_device *dev, int inc)
3435 unsigned short old_flags = dev->flags;
3441 dev->flags |= IFF_PROMISC;
3442 dev->promiscuity += inc;
3443 if (dev->promiscuity == 0) {
3446 * If inc causes overflow, untouch promisc and return error.
3449 dev->flags &= ~IFF_PROMISC;
3451 dev->promiscuity -= inc;
3452 printk(KERN_WARNING "%s: promiscuity counter overflowed, "
3453 "set promiscuity failed; the promiscuity feature "
3454 "of the device might be broken.\n", dev->name);
3458 if (dev->flags != old_flags) {
3459 printk(KERN_INFO "device %s %s promiscuous mode\n",
3460 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
3462 if (audit_enabled) {
3463 current_uid_gid(&uid, &gid);
3464 audit_log(current->audit_context, GFP_ATOMIC,
3465 AUDIT_ANOM_PROMISCUOUS,
3466 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
3467 dev->name, (dev->flags & IFF_PROMISC),
3468 (old_flags & IFF_PROMISC),
3469 audit_get_loginuid(current),
3471 audit_get_sessionid(current));
3474 dev_change_rx_flags(dev, IFF_PROMISC);
3480 * dev_set_promiscuity - update promiscuity count on a device
3484 * Add or remove promiscuity from a device. While the count in the device
3485 * remains above zero the interface remains promiscuous. Once it hits zero
3486 * the device reverts back to normal filtering operation. A negative inc
3487 * value is used to drop promiscuity on the device.
3488 * Return 0 if successful or a negative errno code on error.
3490 int dev_set_promiscuity(struct net_device *dev, int inc)
3492 unsigned short old_flags = dev->flags;
3495 err = __dev_set_promiscuity(dev, inc);
3498 if (dev->flags != old_flags)
3499 dev_set_rx_mode(dev);
3502 EXPORT_SYMBOL(dev_set_promiscuity);
3505 * dev_set_allmulti - update allmulti count on a device
3509 * Add or remove reception of all multicast frames to a device. While the
3510 * count in the device remains above zero the interface remains listening
3511 * to all multicast frames. Once it hits zero the device reverts to normal
3512 * filtering operation. A negative @inc value is used to drop the counter
3513 * when releasing a resource needing all multicasts.
3514 * Return 0 if successful or a negative errno code on error.
3517 int dev_set_allmulti(struct net_device *dev, int inc)
3519 unsigned short old_flags = dev->flags;
3523 dev->flags |= IFF_ALLMULTI;
3524 dev->allmulti += inc;
3525 if (dev->allmulti == 0) {
3528 * If inc causes overflow, untouch allmulti and return error.
3531 dev->flags &= ~IFF_ALLMULTI;
3533 dev->allmulti -= inc;
3534 printk(KERN_WARNING "%s: allmulti counter overflowed, "
3535 "set allmulti failed; the allmulti feature of "
3536 "the device might be broken.\n", dev->name);
3540 if (dev->flags ^ old_flags) {
3541 dev_change_rx_flags(dev, IFF_ALLMULTI);
3542 dev_set_rx_mode(dev);
3546 EXPORT_SYMBOL(dev_set_allmulti);
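/*
 * Illustrative sketch: because both counters above are reference counts,
 * every +1 must be paired with a -1. A layered device would typically do
 * this from its open/stop callbacks. my_lower() and the two callbacks are
 * hypothetical.
 */
static struct net_device *my_lower(struct net_device *dev);	/* hypothetical */

static int my_layered_open(struct net_device *dev)
{
	int err;

	err = dev_set_promiscuity(my_lower(dev), 1);	/* take references */
	if (err)
		return err;
	err = dev_set_allmulti(my_lower(dev), 1);
	if (err)
		dev_set_promiscuity(my_lower(dev), -1);	/* unwind on failure */
	return err;
}

static int my_layered_stop(struct net_device *dev)
{
	dev_set_allmulti(my_lower(dev), -1);		/* drop the references */
	dev_set_promiscuity(my_lower(dev), -1);
	return 0;
}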
3549 * Upload unicast and multicast address lists to device and
3550 * configure RX filtering. When the device doesn't support unicast
3551 * filtering it is put in promiscuous mode while unicast addresses
3554 void __dev_set_rx_mode(struct net_device *dev)
3556 const struct net_device_ops *ops = dev->netdev_ops;
3558 /* dev_open will call this function so the list will stay sane. */
3559 if (!(dev->flags&IFF_UP))
3562 if (!netif_device_present(dev))
3565 if (ops->ndo_set_rx_mode)
3566 ops->ndo_set_rx_mode(dev);
3568 /* Unicast address changes may only happen under the rtnl,
3569 * therefore calling __dev_set_promiscuity here is safe.
3571 if (dev->uc.count > 0 && !dev->uc_promisc) {
3572 __dev_set_promiscuity(dev, 1);
3573 dev->uc_promisc = 1;
3574 } else if (dev->uc.count == 0 && dev->uc_promisc) {
3575 __dev_set_promiscuity(dev, -1);
3576 dev->uc_promisc = 0;
3579 if (ops->ndo_set_multicast_list)
3580 ops->ndo_set_multicast_list(dev);
3584 void dev_set_rx_mode(struct net_device *dev)
3586 netif_addr_lock_bh(dev);
3587 __dev_set_rx_mode(dev);
3588 netif_addr_unlock_bh(dev);
3591 /* hw addresses list handling functions */
3593 static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
3594 int addr_len, unsigned char addr_type)
3596 struct netdev_hw_addr *ha;
3599 if (addr_len > MAX_ADDR_LEN)
3602 list_for_each_entry(ha, &list->list, list) {
3603 if (!memcmp(ha->addr, addr, addr_len) &&
3604 ha->type == addr_type) {
3611 alloc_size = sizeof(*ha);
3612 if (alloc_size < L1_CACHE_BYTES)
3613 alloc_size = L1_CACHE_BYTES;
3614 ha = kmalloc(alloc_size, GFP_ATOMIC);
3617 memcpy(ha->addr, addr, addr_len);
3618 ha->type = addr_type;
3621 list_add_tail_rcu(&ha->list, &list->list);
3626 static void ha_rcu_free(struct rcu_head *head)
3628 struct netdev_hw_addr *ha;
3630 ha = container_of(head, struct netdev_hw_addr, rcu_head);
3634 static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr,
3635 int addr_len, unsigned char addr_type)
3637 struct netdev_hw_addr *ha;
3639 list_for_each_entry(ha, &list->list, list) {
3640 if (!memcmp(ha->addr, addr, addr_len) &&
3641 (ha->type == addr_type || !addr_type)) {
3644 list_del_rcu(&ha->list);
3645 call_rcu(&ha->rcu_head, ha_rcu_free);
3653 static int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
3654 struct netdev_hw_addr_list *from_list,
3656 unsigned char addr_type)
3659 struct netdev_hw_addr *ha, *ha2;
3662 list_for_each_entry(ha, &from_list->list, list) {
3663 type = addr_type ? addr_type : ha->type;
3664 err = __hw_addr_add(to_list, ha->addr, addr_len, type);
3671 list_for_each_entry(ha2, &from_list->list, list) {
3674 type = addr_type ? addr_type : ha2->type;
3675 __hw_addr_del(to_list, ha2->addr, addr_len, type);
3680 static void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
3681 struct netdev_hw_addr_list *from_list,
3683 unsigned char addr_type)
3685 struct netdev_hw_addr *ha;
3688 list_for_each_entry(ha, &from_list->list, list) {
3689 type = addr_type ? addr_type : ha->type;
3690 __hw_addr_del(to_list, ha->addr, addr_len, addr_type);
3694 static int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
3695 struct netdev_hw_addr_list *from_list,
3699 struct netdev_hw_addr *ha, *tmp;
3701 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
3703 err = __hw_addr_add(to_list, ha->addr,
3704 addr_len, ha->type);
3709 } else if (ha->refcount == 1) {
3710 __hw_addr_del(to_list, ha->addr, addr_len, ha->type);
3711 __hw_addr_del(from_list, ha->addr, addr_len, ha->type);
3717 static void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
3718 struct netdev_hw_addr_list *from_list,
3721 struct netdev_hw_addr *ha, *tmp;
3723 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
3725 __hw_addr_del(to_list, ha->addr,
3726 addr_len, ha->type);
3728 __hw_addr_del(from_list, ha->addr,
3729 addr_len, ha->type);
3734 static void __hw_addr_flush(struct netdev_hw_addr_list *list)
3736 struct netdev_hw_addr *ha, *tmp;
3738 list_for_each_entry_safe(ha, tmp, &list->list, list) {
3739 list_del_rcu(&ha->list);
3740 call_rcu(&ha->rcu_head, ha_rcu_free);
3745 static void __hw_addr_init(struct netdev_hw_addr_list *list)
3747 INIT_LIST_HEAD(&list->list);
3751 /* Device addresses handling functions */
3753 static void dev_addr_flush(struct net_device *dev)
3755 /* rtnl_mutex must be held here */
3757 __hw_addr_flush(&dev->dev_addrs);
3758 dev->dev_addr = NULL;
3761 static int dev_addr_init(struct net_device *dev)
3763 unsigned char addr[MAX_ADDR_LEN];
3764 struct netdev_hw_addr *ha;
3767 /* rtnl_mutex must be held here */
3769 __hw_addr_init(&dev->dev_addrs);
3770 memset(addr, 0, sizeof(addr));
3771 err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
3772 NETDEV_HW_ADDR_T_LAN);
3775 * Get the first (previously created) address from the list
3776 * and set dev_addr pointer to this location.
3778 ha = list_first_entry(&dev->dev_addrs.list,
3779 struct netdev_hw_addr, list);
3780 dev->dev_addr = ha->addr;
3786 * dev_addr_add - Add a device address
3788 * @addr: address to add
3789 * @addr_type: address type
3791 * Add a device address to the device or increase the reference count if
3792 * it already exists.
3794 * The caller must hold the rtnl_mutex.
3796 int dev_addr_add(struct net_device *dev, unsigned char *addr,
3797 unsigned char addr_type)
3803 err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
3805 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3808 EXPORT_SYMBOL(dev_addr_add);
3811 * dev_addr_del - Release a device address.
3813 * @addr: address to delete
3814 * @addr_type: address type
3816 * Release reference to a device address and remove it from the device
3817 * if the reference count drops to zero.
3819 * The caller must hold the rtnl_mutex.
3821 int dev_addr_del(struct net_device *dev, unsigned char *addr,
3822 unsigned char addr_type)
3825 struct netdev_hw_addr *ha;
3830 * We cannot remove the first address from the list because
3831 * dev->dev_addr points to that.
3833 ha = list_first_entry(&dev->dev_addrs.list,
3834 struct netdev_hw_addr, list);
3835 if (ha->addr == dev->dev_addr && ha->refcount == 1)
3838 err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
3841 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3844 EXPORT_SYMBOL(dev_addr_del);
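/*
 * Illustrative sketch: taking and releasing a secondary hardware address
 * with dev_addr_add()/dev_addr_del(). The locally administered address is
 * made up; NETDEV_HW_ADDR_T_LAN is the same type dev_addr_init() uses above.
 */
static int my_use_secondary_addr(struct net_device *dev)
{
	unsigned char addr[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 0x01 };
	int err;

	ASSERT_RTNL();				/* both helpers need the rtnl_mutex */
	err = dev_addr_add(dev, addr, NETDEV_HW_ADDR_T_LAN);
	if (err)
		return err;
	/* ... address in use; when finished: */
	return dev_addr_del(dev, addr, NETDEV_HW_ADDR_T_LAN);
}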
3847 * dev_addr_add_multiple - Add device addresses from another device
3848 * @to_dev: device to which addresses will be added
3849 * @from_dev: device from which addresses will be added
3850 * @addr_type: address type - 0 means the type will be taken from from_dev
3852 * Add the device addresses of one device to another.
3854 * The caller must hold the rtnl_mutex.
3856 int dev_addr_add_multiple(struct net_device *to_dev,
3857 struct net_device *from_dev,
3858 unsigned char addr_type)
3864 if (from_dev->addr_len != to_dev->addr_len)
3866 err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
3867 to_dev->addr_len, addr_type);
3869 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3872 EXPORT_SYMBOL(dev_addr_add_multiple);
3875 * dev_addr_del_multiple - Delete device addresses by another device
3876 * @to_dev: device where the addresses will be deleted
3877 * @from_dev: device whose address list selects the addresses to delete
3878 * @addr_type: address type - 0 means the type will be taken from from_dev
3880 * Deletes the addresses in @to_dev that appear in @from_dev's address list.
3882 * The caller must hold the rtnl_mutex.
3884 int dev_addr_del_multiple(struct net_device *to_dev,
3885 struct net_device *from_dev,
3886 unsigned char addr_type)
3890 if (from_dev->addr_len != to_dev->addr_len)
3892 __hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
3893 to_dev->addr_len, addr_type);
3894 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3897 EXPORT_SYMBOL(dev_addr_del_multiple);
3899 /* multicast addresses handling functions */
3901 int __dev_addr_delete(struct dev_addr_list **list, int *count,
3902 void *addr, int alen, int glbl)
3904 struct dev_addr_list *da;
3906 for (; (da = *list) != NULL; list = &da->next) {
3907 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3908 alen == da->da_addrlen) {
3910 int old_glbl = da->da_gusers;
3927 int __dev_addr_add(struct dev_addr_list **list, int *count,
3928 void *addr, int alen, int glbl)
3930 struct dev_addr_list *da;
3932 for (da = *list; da != NULL; da = da->next) {
3933 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3934 da->da_addrlen == alen) {
3936 int old_glbl = da->da_gusers;
3946 da = kzalloc(sizeof(*da), GFP_ATOMIC);
3949 memcpy(da->da_addr, addr, alen);
3950 da->da_addrlen = alen;
3952 da->da_gusers = glbl ? 1 : 0;
3960 * dev_unicast_delete - Release secondary unicast address.
3962 * @addr: address to delete
3964 * Release reference to a secondary unicast address and remove it
3965 * from the device if the reference count drops to zero.
3967 * The caller must hold the rtnl_mutex.
3969 int dev_unicast_delete(struct net_device *dev, void *addr)
3975 netif_addr_lock_bh(dev);
3976 err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
3977 NETDEV_HW_ADDR_T_UNICAST);
3979 __dev_set_rx_mode(dev);
3980 netif_addr_unlock_bh(dev);
3983 EXPORT_SYMBOL(dev_unicast_delete);
3986 * dev_unicast_add - add a secondary unicast address
3988 * @addr: address to add
3990 * Add a secondary unicast address to the device or increase
3991 * the reference count if it already exists.
3993 * The caller must hold the rtnl_mutex.
3995 int dev_unicast_add(struct net_device *dev, void *addr)
4001 netif_addr_lock_bh(dev);
4002 err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
4003 NETDEV_HW_ADDR_T_UNICAST);
4005 __dev_set_rx_mode(dev);
4006 netif_addr_unlock_bh(dev);
4009 EXPORT_SYMBOL(dev_unicast_add);
4011 int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
4012 struct dev_addr_list **from, int *from_count)
4014 struct dev_addr_list *da, *next;
4018 while (da != NULL) {
4020 if (!da->da_synced) {
4021 err = __dev_addr_add(to, to_count,
4022 da->da_addr, da->da_addrlen, 0);
4027 } else if (da->da_users == 1) {
4028 __dev_addr_delete(to, to_count,
4029 da->da_addr, da->da_addrlen, 0);
4030 __dev_addr_delete(from, from_count,
4031 da->da_addr, da->da_addrlen, 0);
4037 EXPORT_SYMBOL_GPL(__dev_addr_sync);
4039 void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
4040 struct dev_addr_list **from, int *from_count)
4042 struct dev_addr_list *da, *next;
4045 while (da != NULL) {
4047 if (da->da_synced) {
4048 __dev_addr_delete(to, to_count,
4049 da->da_addr, da->da_addrlen, 0);
4051 __dev_addr_delete(from, from_count,
4052 da->da_addr, da->da_addrlen, 0);
4057 EXPORT_SYMBOL_GPL(__dev_addr_unsync);
4060 * dev_unicast_sync - Synchronize device's unicast list to another device
4061 * @to: destination device
4062 * @from: source device
4064 * Add newly added addresses to the destination device and release
4065 * addresses that have no users left. The source device must be
4066 * locked by netif_addr_lock_bh.
4068 * This function is intended to be called from the dev->set_rx_mode
4069 * function of layered software devices.
4071 int dev_unicast_sync(struct net_device *to, struct net_device *from)
4075 if (to->addr_len != from->addr_len)
4078 netif_addr_lock_bh(to);
4079 err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
4081 __dev_set_rx_mode(to);
4082 netif_addr_unlock_bh(to);
4085 EXPORT_SYMBOL(dev_unicast_sync);
4088 * dev_unicast_unsync - Remove synchronized addresses from the destination device
4089 * @to: destination device
4090 * @from: source device
4092 * Remove all addresses that were added to the destination device by
4093 * dev_unicast_sync(). This function is intended to be called from the
4094 * dev->stop function of layered software devices.
4096 void dev_unicast_unsync(struct net_device *to, struct net_device *from)
4098 if (to->addr_len != from->addr_len)
4101 netif_addr_lock_bh(from);
4102 netif_addr_lock(to);
4103 __hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
4104 __dev_set_rx_mode(to);
4105 netif_addr_unlock(to);
4106 netif_addr_unlock_bh(from);
4108 EXPORT_SYMBOL(dev_unicast_unsync);
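/*
 * Illustrative sketch: the intended sync/unsync pairing in a layered
 * device, mirroring what the VLAN code does. my_lower_dev() is
 * hypothetical; the sync runs from ndo_set_rx_mode (source address list
 * already locked, as the kernel-doc above requires) and the unsync from
 * ndo_stop.
 */
static struct net_device *my_lower_dev(struct net_device *dev);	/* hypothetical */

static void my_layered_set_rx_mode(struct net_device *dev)
{
	/* push our unicast list down to the underlying device */
	dev_unicast_sync(my_lower_dev(dev), dev);
}

static int my_layered_close(struct net_device *dev)
{
	/* remove every address the sync above added below us */
	dev_unicast_unsync(my_lower_dev(dev), dev);
	return 0;
}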
4110 static void dev_unicast_flush(struct net_device *dev)
4112 netif_addr_lock_bh(dev);
4113 __hw_addr_flush(&dev->uc);
4114 netif_addr_unlock_bh(dev);
4117 static void dev_unicast_init(struct net_device *dev)
4119 __hw_addr_init(&dev->uc);
4123 static void __dev_addr_discard(struct dev_addr_list **list)
4125 struct dev_addr_list *tmp;
4127 while (*list != NULL) {
4130 if (tmp->da_users > tmp->da_gusers)
4131 printk("__dev_addr_discard: address leakage! "
4132 "da_users=%d\n", tmp->da_users);
4137 static void dev_addr_discard(struct net_device *dev)
4139 netif_addr_lock_bh(dev);
4141 __dev_addr_discard(&dev->mc_list);
4144 netif_addr_unlock_bh(dev);
4148 * dev_get_flags - get flags reported to userspace
4151 * Get the combination of flag bits exported through APIs to userspace.
4153 unsigned dev_get_flags(const struct net_device *dev)
4157 flags = (dev->flags & ~(IFF_PROMISC |
4162 (dev->gflags & (IFF_PROMISC |
4165 if (netif_running(dev)) {
4166 if (netif_oper_up(dev))
4167 flags |= IFF_RUNNING;
4168 if (netif_carrier_ok(dev))
4169 flags |= IFF_LOWER_UP;
4170 if (netif_dormant(dev))
4171 flags |= IFF_DORMANT;
4176 EXPORT_SYMBOL(dev_get_flags);
4179 * dev_change_flags - change device settings
4181 * @flags: device state flags
4183 * Change settings on device based state flags. The flags are
4184 * in the userspace exported format.
4186 int dev_change_flags(struct net_device *dev, unsigned flags)
4189 int old_flags = dev->flags;
4194 * Set the flags on our device.
4197 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4198 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4200 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4204 * Load in the correct multicast list now the flags have changed.
4207 if ((old_flags ^ flags) & IFF_MULTICAST)
4208 dev_change_rx_flags(dev, IFF_MULTICAST);
4210 dev_set_rx_mode(dev);
4213 * Have we downed the interface? We handle IFF_UP ourselves
4214 * according to user attempts to set it, rather than blindly setting it.
4219 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
4220 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
4223 dev_set_rx_mode(dev);
4226 if (dev->flags & IFF_UP &&
4227 ((old_flags ^ dev->flags) & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
4229 call_netdevice_notifiers(NETDEV_CHANGE, dev);
4231 if ((flags ^ dev->gflags) & IFF_PROMISC) {
4232 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4234 dev->gflags ^= IFF_PROMISC;
4235 dev_set_promiscuity(dev, inc);
4238 /* NOTE: the order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4239 is important. Some (broken) drivers set IFF_PROMISC when
4240 IFF_ALLMULTI is requested, without asking us and without reporting it.
4242 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
4243 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4245 dev->gflags ^= IFF_ALLMULTI;
4246 dev_set_allmulti(dev, inc);
4249 /* Exclude state transition flags, already notified */
4250 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
4252 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
4256 EXPORT_SYMBOL(dev_change_flags);
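/*
 * Illustrative sketch: bringing an interface up from kernel code with
 * dev_change_flags(). Reading the current flags through dev_get_flags()
 * keeps the userspace-format bits (including the gflags-backed
 * IFF_PROMISC/IFF_ALLMULTI) intact; my_bring_up() is hypothetical.
 */
static int my_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
	rtnl_unlock();
	return err;
}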
4259 * dev_set_mtu - Change maximum transfer unit
4261 * @new_mtu: new transfer unit
4263 * Change the maximum transfer size of the network device.
4265 int dev_set_mtu(struct net_device *dev, int new_mtu)
4267 const struct net_device_ops *ops = dev->netdev_ops;
4270 if (new_mtu == dev->mtu)
4273 /* MTU must be positive. */
4277 if (!netif_device_present(dev))
4281 if (ops->ndo_change_mtu)
4282 err = ops->ndo_change_mtu(dev, new_mtu);
4286 if (!err && dev->flags & IFF_UP)
4287 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
4290 EXPORT_SYMBOL(dev_set_mtu);
4293 * dev_set_mac_address - Change Media Access Control Address
4297 * Change the hardware (MAC) address of the device
4299 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4301 const struct net_device_ops *ops = dev->netdev_ops;
4304 if (!ops->ndo_set_mac_address)
4306 if (sa->sa_family != dev->type)
4308 if (!netif_device_present(dev))
4310 err = ops->ndo_set_mac_address(dev, sa);
4312 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4315 EXPORT_SYMBOL(dev_set_mac_address);
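/*
 * Illustrative sketch: driving dev_set_mtu() and dev_set_mac_address()
 * from kernel code, as the SIOCSIFMTU/SIOCSIFHWADDR handlers below do.
 * The jumbo MTU and the 02:... address are made-up example values.
 */
static int my_reconfigure(struct net_device *dev)
{
	struct sockaddr sa;
	int err;

	ASSERT_RTNL();
	err = dev_set_mtu(dev, 9000);		/* e.g. enable jumbo frames */
	if (err)
		return err;
	sa.sa_family = dev->type;		/* must match, see the check above */
	memcpy(sa.sa_data, "\x02\x00\x00\x00\x00\x02", ETH_ALEN);
	return dev_set_mac_address(dev, &sa);
}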
4318 * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
4320 static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
4323 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
4329 case SIOCGIFFLAGS: /* Get interface flags */
4330 ifr->ifr_flags = (short) dev_get_flags(dev);
4333 case SIOCGIFMETRIC: /* Get the metric on the interface
4334 (currently unused) */
4335 ifr->ifr_metric = 0;
4338 case SIOCGIFMTU: /* Get the MTU of a device */
4339 ifr->ifr_mtu = dev->mtu;
4344 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4346 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4347 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4348 ifr->ifr_hwaddr.sa_family = dev->type;
4356 ifr->ifr_map.mem_start = dev->mem_start;
4357 ifr->ifr_map.mem_end = dev->mem_end;
4358 ifr->ifr_map.base_addr = dev->base_addr;
4359 ifr->ifr_map.irq = dev->irq;
4360 ifr->ifr_map.dma = dev->dma;
4361 ifr->ifr_map.port = dev->if_port;
4365 ifr->ifr_ifindex = dev->ifindex;
4369 ifr->ifr_qlen = dev->tx_queue_len;
4373 /* dev_ioctl() should ensure this case
4385 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
4387 static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4390 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
4391 const struct net_device_ops *ops;
4396 ops = dev->netdev_ops;
4399 case SIOCSIFFLAGS: /* Set interface flags */
4400 return dev_change_flags(dev, ifr->ifr_flags);
4402 case SIOCSIFMETRIC: /* Set the metric on the interface
4403 (currently unused) */
4406 case SIOCSIFMTU: /* Set the MTU of a device */
4407 return dev_set_mtu(dev, ifr->ifr_mtu);
4410 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
4412 case SIOCSIFHWBROADCAST:
4413 if (ifr->ifr_hwaddr.sa_family != dev->type)
4415 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4416 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4417 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4421 if (ops->ndo_set_config) {
4422 if (!netif_device_present(dev))
4424 return ops->ndo_set_config(dev, &ifr->ifr_map);
4429 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4430 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4432 if (!netif_device_present(dev))
4434 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
4438 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4439 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4441 if (!netif_device_present(dev))
4443 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
4447 if (ifr->ifr_qlen < 0)
4449 dev->tx_queue_len = ifr->ifr_qlen;
4453 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4454 return dev_change_name(dev, ifr->ifr_newname);
4457 * Unknown or private ioctl
4460 if ((cmd >= SIOCDEVPRIVATE &&
4461 cmd <= SIOCDEVPRIVATE + 15) ||
4462 cmd == SIOCBONDENSLAVE ||
4463 cmd == SIOCBONDRELEASE ||
4464 cmd == SIOCBONDSETHWADDR ||
4465 cmd == SIOCBONDSLAVEINFOQUERY ||
4466 cmd == SIOCBONDINFOQUERY ||
4467 cmd == SIOCBONDCHANGEACTIVE ||
4468 cmd == SIOCGMIIPHY ||
4469 cmd == SIOCGMIIREG ||
4470 cmd == SIOCSMIIREG ||
4471 cmd == SIOCBRADDIF ||
4472 cmd == SIOCBRDELIF ||
4473 cmd == SIOCSHWTSTAMP ||
4474 cmd == SIOCWANDEV) {
4476 if (ops->ndo_do_ioctl) {
4477 if (netif_device_present(dev))
4478 err = ops->ndo_do_ioctl(dev, ifr, cmd);
4490 * This function handles all "interface"-type I/O control requests. The actual
4491 * 'doing' part of this is dev_ifsioc above.
4495 * dev_ioctl - network device ioctl
4496 * @net: the applicable net namespace
4497 * @cmd: command to issue
4498 * @arg: pointer to a struct ifreq in user space
4500 * Issue ioctl functions to devices. This is normally called by the
4501 * user space syscall interfaces but can sometimes be useful for
4502 * other purposes. The return value is the return from the syscall if
4503 * positive or a negative errno code on error.
4506 int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
4512 /* One special case: SIOCGIFCONF takes ifconf argument
4513 and requires shared lock, because it sleeps writing the buffer.
4517 if (cmd == SIOCGIFCONF) {
4519 ret = dev_ifconf(net, (char __user *) arg);
4523 if (cmd == SIOCGIFNAME)
4524 return dev_ifname(net, (struct ifreq __user *)arg);
4526 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4529 ifr.ifr_name[IFNAMSIZ-1] = 0;
4531 colon = strchr(ifr.ifr_name, ':');
4536 * See which interface the caller is talking about.
4541 * These ioctl calls:
4542 * - can be done by all.
4543 * - atomic and do not require locking.
4554 dev_load(net, ifr.ifr_name);
4555 read_lock(&dev_base_lock);
4556 ret = dev_ifsioc_locked(net, &ifr, cmd);
4557 read_unlock(&dev_base_lock);
4561 if (copy_to_user(arg, &ifr,
4562 sizeof(struct ifreq)))
4568 dev_load(net, ifr.ifr_name);
4570 ret = dev_ethtool(net, &ifr);
4575 if (copy_to_user(arg, &ifr,
4576 sizeof(struct ifreq)))
4582 * These ioctl calls:
4583 * - require superuser power.
4584 * - require strict serialization.
4590 if (!capable(CAP_NET_ADMIN))
4592 dev_load(net, ifr.ifr_name);
4594 ret = dev_ifsioc(net, &ifr, cmd);
4599 if (copy_to_user(arg, &ifr,
4600 sizeof(struct ifreq)))
4606 * These ioctl calls:
4607 * - require superuser power.
4608 * - require strict serialization.
4609 * - do not return a value
4619 case SIOCSIFHWBROADCAST:
4622 case SIOCBONDENSLAVE:
4623 case SIOCBONDRELEASE:
4624 case SIOCBONDSETHWADDR:
4625 case SIOCBONDCHANGEACTIVE:
4629 if (!capable(CAP_NET_ADMIN))
4632 case SIOCBONDSLAVEINFOQUERY:
4633 case SIOCBONDINFOQUERY:
4634 dev_load(net, ifr.ifr_name);
4636 ret = dev_ifsioc(net, &ifr, cmd);
4641 /* Get the per device memory space. We can add this but
4642 * currently do not support it */
4644 /* Set the per device memory buffer space.
4645 * Not applicable in our case */
4650 * Unknown or private ioctl.
4653 if (cmd == SIOCWANDEV ||
4654 (cmd >= SIOCDEVPRIVATE &&
4655 cmd <= SIOCDEVPRIVATE + 15)) {
4656 dev_load(net, ifr.ifr_name);
4658 ret = dev_ifsioc(net, &ifr, cmd);
4660 if (!ret && copy_to_user(arg, &ifr,
4661 sizeof(struct ifreq)))
4665 /* Take care of Wireless Extensions */
4666 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
4667 return wext_handle_ioctl(net, &ifr, cmd, arg);
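/*
 * Illustrative userspace counterpart (not kernel code): exercising the
 * SIOCGIFNAME branch above, which ends up in dev_ifname(). The ifindex 1
 * is just an example (usually the loopback device).
 */
#if 0	/* standalone userspace program, not part of the kernel build */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <unistd.h>

int main(void)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_ifindex = 1;			/* look up interface index 1 */
	if (ioctl(fd, SIOCGIFNAME, &ifr) == 0)
		printf("ifindex 1 is %s\n", ifr.ifr_name);
	close(fd);
	return 0;
}
#endif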
4674 * dev_new_index - allocate an ifindex
4675 * @net: the applicable net namespace
4677 * Returns a suitable unique value for a new device interface
4678 * number. The caller must hold the rtnl semaphore or the
4679 * dev_base_lock to be sure it remains unique.
4681 static int dev_new_index(struct net *net)
4687 if (!__dev_get_by_index(net, ifindex))
4692 /* Delayed registration/unregisteration */
4693 static LIST_HEAD(net_todo_list);
4695 static void net_set_todo(struct net_device *dev)
4697 list_add_tail(&dev->todo_list, &net_todo_list);
4700 static void rollback_registered_many(struct list_head *head)
4702 struct net_device *dev;
4704 BUG_ON(dev_boot_phase);
4707 list_for_each_entry(dev, head, unreg_list) {
4708 /* Some devices call this without ever having registered,
4709 * in order to unwind a failed initialization.
4711 if (dev->reg_state == NETREG_UNINITIALIZED) {
4712 pr_debug("unregister_netdevice: device %s/%p never "
4713 "was registered\n", dev->name, dev);
4719 BUG_ON(dev->reg_state != NETREG_REGISTERED);
4721 /* If device is running, close it first. */
4724 /* And unlink it from device chain. */
4725 unlist_netdevice(dev);
4727 dev->reg_state = NETREG_UNREGISTERING;
4732 list_for_each_entry(dev, head, unreg_list) {
4733 /* Shutdown queueing discipline. */
4737 /* Notify protocols, that we are about to destroy
4738 this device. They should clean all the things.
4740 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4743 * Flush the unicast and multicast chains
4745 dev_unicast_flush(dev);
4746 dev_addr_discard(dev);
4748 if (dev->netdev_ops->ndo_uninit)
4749 dev->netdev_ops->ndo_uninit(dev);
4751 /* Notifier chain MUST detach us from master device. */
4752 WARN_ON(dev->master);
4754 /* Remove entries from kobject tree */
4755 netdev_unregister_kobject(dev);
4760 list_for_each_entry(dev, head, unreg_list)
4764 static void rollback_registered(struct net_device *dev)
4768 list_add(&dev->unreg_list, &single);
4769 rollback_registered_many(&single);
4772 static void __netdev_init_queue_locks_one(struct net_device *dev,
4773 struct netdev_queue *dev_queue,
4776 spin_lock_init(&dev_queue->_xmit_lock);
4777 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
4778 dev_queue->xmit_lock_owner = -1;
4781 static void netdev_init_queue_locks(struct net_device *dev)
4783 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
4784 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
4787 unsigned long netdev_fix_features(unsigned long features, const char *name)
4789 /* Fix illegal SG+CSUM combinations. */
4790 if ((features & NETIF_F_SG) &&
4791 !(features & NETIF_F_ALL_CSUM)) {
4793 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
4794 "checksum feature.\n", name);
4795 features &= ~NETIF_F_SG;
4798 /* TSO requires that SG is present as well. */
4799 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
4801 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
4802 "SG feature.\n", name);
4803 features &= ~NETIF_F_TSO;
4806 if (features & NETIF_F_UFO) {
4807 if (!(features & NETIF_F_GEN_CSUM)) {
4809 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4810 "since no NETIF_F_HW_CSUM feature.\n",
4812 features &= ~NETIF_F_UFO;
4815 if (!(features & NETIF_F_SG)) {
4817 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4818 "since no NETIF_F_SG feature.\n", name);
4819 features &= ~NETIF_F_UFO;
4825 EXPORT_SYMBOL(netdev_fix_features);
4828 * register_netdevice - register a network device
4829 * @dev: device to register
4831 * Take a completed network device structure and add it to the kernel
4832 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4833 * chain. 0 is returned on success. A negative errno code is returned
4834 * on a failure to set up the device, or if the name is a duplicate.
4836 * Callers must hold the rtnl semaphore. You may want
4837 * register_netdev() instead of this.
4840 * The locking appears insufficient to guarantee two parallel registers
4841 * will not get the same name.
4844 int register_netdevice(struct net_device *dev)
4846 struct hlist_head *head;
4847 struct hlist_node *p;
4849 struct net *net = dev_net(dev);
4851 BUG_ON(dev_boot_phase);
4856 /* When net_device's are persistent, this will be fatal. */
4857 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
4860 spin_lock_init(&dev->addr_list_lock);
4861 netdev_set_addr_lockdep_class(dev);
4862 netdev_init_queue_locks(dev);
4866 /* Init, if this function is available */
4867 if (dev->netdev_ops->ndo_init) {
4868 ret = dev->netdev_ops->ndo_init(dev);
4876 if (!dev_valid_name(dev->name)) {
4881 dev->ifindex = dev_new_index(net);
4882 if (dev->iflink == -1)
4883 dev->iflink = dev->ifindex;
4885 /* Check for existence of name */
4886 head = dev_name_hash(net, dev->name);
4887 hlist_for_each(p, head) {
4888 struct net_device *d
4889 = hlist_entry(p, struct net_device, name_hlist);
4890 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
4896 /* Fix illegal checksum combinations */
4897 if ((dev->features & NETIF_F_HW_CSUM) &&
4898 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4899 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
4901 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4904 if ((dev->features & NETIF_F_NO_CSUM) &&
4905 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4906 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
4908 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
4911 dev->features = netdev_fix_features(dev->features, dev->name);
4913 /* Enable software GSO if SG is supported. */
4914 if (dev->features & NETIF_F_SG)
4915 dev->features |= NETIF_F_GSO;
4917 netdev_initialize_kobject(dev);
4919 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
4920 ret = notifier_to_errno(ret);
4924 ret = netdev_register_kobject(dev);
4927 dev->reg_state = NETREG_REGISTERED;
4930 * Default initial state at registration is that the
4931 * device is present.
4934 set_bit(__LINK_STATE_PRESENT, &dev->state);
4936 dev_init_scheduler(dev);
4938 list_netdevice(dev);
4940 /* Notify protocols, that a new device appeared. */
4941 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
4942 ret = notifier_to_errno(ret);
4944 rollback_registered(dev);
4945 dev->reg_state = NETREG_UNREGISTERED;
4952 if (dev->netdev_ops->ndo_uninit)
4953 dev->netdev_ops->ndo_uninit(dev);
4956 EXPORT_SYMBOL(register_netdevice);
4959 * init_dummy_netdev - init a dummy network device for NAPI
4960 * @dev: device to init
4962 * This takes a network device structure and initializes the minimum
4963 * amount of fields so it can be used to schedule NAPI polls without
4964 * registering a full blown interface. This is to be used by drivers
4965 * that need to tie several hardware interfaces to a single NAPI
4966 * poll scheduler due to HW limitations.
4968 int init_dummy_netdev(struct net_device *dev)
4970 /* Clear everything. Note we don't initialize spinlocks
4971 * as they aren't supposed to be taken by any of the
4972 * NAPI code and this dummy netdev is supposed to be
4973 * only ever used for NAPI polls
4975 memset(dev, 0, sizeof(struct net_device));
4977 /* make sure we BUG if trying to hit standard
4978 * register/unregister code path
4980 dev->reg_state = NETREG_DUMMY;
4982 /* initialize the ref count */
4983 atomic_set(&dev->refcnt, 1);
4985 /* NAPI wants this */
4986 INIT_LIST_HEAD(&dev->napi_list);
4988 /* a dummy interface is started by default */
4989 set_bit(__LINK_STATE_PRESENT, &dev->state);
4990 set_bit(__LINK_STATE_START, &dev->state);
4994 EXPORT_SYMBOL_GPL(init_dummy_netdev);
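/*
 * Illustrative sketch of the use case init_dummy_netdev() exists for: one
 * piece of hardware with several RX channels tying its NAPI contexts to a
 * dummy netdev that is never registered. struct my_dummy_priv and
 * my_ring_poll() are hypothetical.
 */
struct my_dummy_priv {
	struct net_device dummy;	/* never registered, NAPI anchor only */
	struct napi_struct napi;
};

static int my_ring_poll(struct napi_struct *napi, int budget);	/* hypothetical */

static void my_hw_napi_init(struct my_dummy_priv *priv)
{
	init_dummy_netdev(&priv->dummy);
	netif_napi_add(&priv->dummy, &priv->napi, my_ring_poll, 64);
	napi_enable(&priv->napi);	/* clear the SCHED bit set by the add */
}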
4998 * register_netdev - register a network device
4999 * @dev: device to register
5001 * Take a completed network device structure and add it to the kernel
5002 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5003 * chain. 0 is returned on success. A negative errno code is returned
5004 * on a failure to set up the device, or if the name is a duplicate.
5006 * This is a wrapper around register_netdevice that takes the rtnl semaphore
5007 * and expands the device name if you passed a format string to alloc_netdev.
5010 int register_netdev(struct net_device *dev)
5017 * If the name is a format string the caller wants us to do a name allocation.
5020 if (strchr(dev->name, '%')) {
5021 err = dev_alloc_name(dev, dev->name);
5026 err = register_netdevice(dev);
5031 EXPORT_SYMBOL(register_netdev);
5034 * netdev_wait_allrefs - wait until all references are gone.
5036 * This is called when unregistering network devices.
5038 * Any protocol or device that holds a reference should register
5039 * for netdevice notification, and cleanup and put back the
5040 * reference if they receive an UNREGISTER event.
5041 * We can get stuck here if buggy protocols don't correctly call dev_put.
5044 static void netdev_wait_allrefs(struct net_device *dev)
5046 unsigned long rebroadcast_time, warning_time;
5048 rebroadcast_time = warning_time = jiffies;
5049 while (atomic_read(&dev->refcnt) != 0) {
5050 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
5053 /* Rebroadcast unregister notification */
5054 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5056 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5058 /* We must not have linkwatch events
5059 * pending on unregister. If this
5060 * happens, we simply run the queue
5061 * unscheduled, resulting in a noop
5064 linkwatch_run_queue();
5069 rebroadcast_time = jiffies;
5074 if (time_after(jiffies, warning_time + 10 * HZ)) {
5075 printk(KERN_EMERG "unregister_netdevice: "
5076 "waiting for %s to become free. Usage "
5078 dev->name, atomic_read(&dev->refcnt));
5079 warning_time = jiffies;
5088 * register_netdevice(x1);
5089 * register_netdevice(x2);
5091 * unregister_netdevice(y1);
5092 * unregister_netdevice(y2);
5098 * We are invoked by rtnl_unlock().
5099 * This allows us to deal with problems:
5100 * 1) We can delete sysfs objects which invoke hotplug
5101 * without deadlocking with linkwatch via keventd.
5102 * 2) Since we run with the RTNL semaphore not held, we can sleep
5103 * safely in order to wait for the netdev refcnt to drop to zero.
5105 * We must not return until all unregister events added during
5106 * the interval the lock was held have been completed.
void netdev_run_todo(void)
{
	struct list_head list;

	/* Snapshot list, allow later requests */
	list_replace_init(&net_todo_list, &list);

	__rtnl_unlock();

	while (!list_empty(&list)) {
		struct net_device *dev
			= list_entry(list.next, struct net_device, todo_list);
		list_del(&dev->todo_list);

		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
			printk(KERN_ERR "network todo '%s' but state %d\n",
			       dev->name, dev->reg_state);
			dump_stack();
			continue;
		}

		dev->reg_state = NETREG_UNREGISTERED;

		on_each_cpu(flush_backlog, dev, 1);

		netdev_wait_allrefs(dev);

		/* paranoia */
		BUG_ON(atomic_read(&dev->refcnt));
		WARN_ON(dev->ip_ptr);
		WARN_ON(dev->ip6_ptr);
		WARN_ON(dev->dn_ptr);

		if (dev->destructor)
			dev->destructor(dev);

		/* Free network device */
		kobject_put(&dev->dev.kobj);
	}
}

/**
 * dev_get_stats - get network device statistics
 * @dev: device to get statistics from
 *
 * Get network statistics from device. The device driver may provide
 * its own method by setting dev->netdev_ops->get_stats; otherwise
 * the internal statistics structure is used.
 */
const struct net_device_stats *dev_get_stats(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_get_stats)
		return ops->ndo_get_stats(dev);
	else {
		unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
		struct net_device_stats *stats = &dev->stats;
		unsigned int i;
		struct netdev_queue *txq;

		for (i = 0; i < dev->num_tx_queues; i++) {
			txq = netdev_get_tx_queue(dev, i);
			tx_bytes   += txq->tx_bytes;
			tx_packets += txq->tx_packets;
			tx_dropped += txq->tx_dropped;
		}
		if (tx_bytes || tx_packets || tx_dropped) {
			stats->tx_bytes   = tx_bytes;
			stats->tx_packets = tx_packets;
			stats->tx_dropped = tx_dropped;
		}
		return stats;
	}
}
EXPORT_SYMBOL(dev_get_stats);
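
/*
 * Illustrative sketch: a driver can override the per-queue fallback in
 * dev_get_stats() by supplying ndo_get_stats and returning counters it
 * maintains itself, e.g. read from hardware. The "bar_" names are
 * hypothetical.
 */
struct bar_priv {
	struct net_device_stats hw_stats;
};

static struct net_device_stats *bar_get_stats(struct net_device *dev)
{
	struct bar_priv *priv = netdev_priv(dev);

	/* a real driver would fold hardware counters in here */
	return &priv->hw_stats;
}

static const struct net_device_ops bar_netdev_ops = {
	.ndo_get_stats	= bar_get_stats,
};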

static void netdev_init_one_queue(struct net_device *dev,
				  struct netdev_queue *queue,
				  void *_unused)
{
	queue->dev = dev;
}

static void netdev_init_queues(struct net_device *dev)
{
	netdev_init_one_queue(dev, &dev->rx_queue, NULL);
	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
	spin_lock_init(&dev->tx_global_lock);
}

/**
 * alloc_netdev_mq - allocate network device
 * @sizeof_priv: size of private data to allocate space for
 * @name: device name format string
 * @setup: callback to initialize device
 * @queue_count: the number of subqueues to allocate
 *
 * Allocates a struct net_device with private data area for driver use
 * and performs basic initialization. Also allocates subqueue structs
 * for each queue on the device at the end of the netdevice.
 */
struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
		void (*setup)(struct net_device *), unsigned int queue_count)
{
	struct netdev_queue *tx;
	struct net_device *dev;
	size_t alloc_size;
	struct net_device *p;

	BUG_ON(strlen(name) >= sizeof(dev->name));

	alloc_size = sizeof(struct net_device);
	if (sizeof_priv) {
		/* ensure 32-byte alignment of private area */
		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct */
	alloc_size += NETDEV_ALIGN - 1;

	p = kzalloc(alloc_size, GFP_KERNEL);
	if (!p) {
		printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
		return NULL;
	}

	tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
	if (!tx) {
		printk(KERN_ERR "alloc_netdev: Unable to allocate "
		       "tx qdiscs.\n");
		goto free_p;
	}

	dev = PTR_ALIGN(p, NETDEV_ALIGN);
	dev->padded = (char *)dev - (char *)p;

	if (dev_addr_init(dev))
		goto free_tx;

	dev_unicast_init(dev);

	dev_net_set(dev, &init_net);

	dev->_tx = tx;
	dev->num_tx_queues = queue_count;
	dev->real_num_tx_queues = queue_count;

	dev->gso_max_size = GSO_MAX_SIZE;

	netdev_init_queues(dev);

	INIT_LIST_HEAD(&dev->napi_list);
	INIT_LIST_HEAD(&dev->unreg_list);
	dev->priv_flags = IFF_XMIT_DST_RELEASE;
	setup(dev);
	strcpy(dev->name, name);
	return dev;

free_tx:
	kfree(tx);

free_p:
	kfree(p);
	return NULL;
}
EXPORT_SYMBOL(alloc_netdev_mq);
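
/*
 * Illustrative sketch: allocating a four-queue Ethernet device. The
 * common alloc_netdev()/alloc_etherdev() helpers are thin wrappers
 * that end up here with queue_count == 1. The "qux_" names are
 * hypothetical.
 */
struct qux_priv {
	int id;
};

static void qux_setup(struct net_device *dev)
{
	ether_setup(dev);	/* sane Ethernet defaults */
}

static struct net_device *qux_alloc(void)
{
	/* four TX subqueues allocated after the net_device itself */
	return alloc_netdev_mq(sizeof(struct qux_priv), "qux%d",
			       qux_setup, 4);
}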

/**
 * free_netdev - free network device
 * @dev: device
 *
 * This function does the last stage of destroying an allocated device
 * interface. The reference to the device object is released.
 * If this is the last reference then it will be freed.
 */
void free_netdev(struct net_device *dev)
{
	struct napi_struct *p, *n;

	release_net(dev_net(dev));

	kfree(dev->_tx);

	/* Flush device addresses */
	dev_addr_flush(dev);

	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
		netif_napi_del(p);

	/* Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		kfree((char *)dev - dev->padded);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

	/* will free via device release */
	put_device(&dev->dev);
}
EXPORT_SYMBOL(free_netdev);

/**
 * synchronize_net - Synchronize with packet receive processing
 *
 * Wait for packets currently being received to be done.
 * Does not block later packets from starting.
 */
void synchronize_net(void)
{
	might_sleep();
	synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);
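
/*
 * Illustrative sketch: the usual pattern around synchronize_net().
 * Unpublish an object the receive path may be dereferencing under
 * rcu_read_lock(), wait out in-flight receivers, then free it. The
 * "my_handler" names are hypothetical.
 */
struct my_handler {
	struct list_head list;
};

static void my_handler_remove(struct my_handler *h)
{
	list_del_rcu(&h->list);	/* no new readers can find it */
	synchronize_net();	/* wait for current receivers to finish */
	kfree(h);		/* now safe to free */
}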

/**
 * unregister_netdevice_queue - remove device from the kernel
 * @dev: device
 * @head: list
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 * If head not NULL, device is queued to be unregistered later.
 *
 * Callers must hold the rtnl semaphore. You may want
 * unregister_netdev() instead of this.
 */
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
	ASSERT_RTNL();

	if (head) {
		list_move_tail(&dev->unreg_list, head);
	} else {
		rollback_registered(dev);
		/* Finish processing unregister after unlock */
		net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_queue);

/**
 * unregister_netdevice_many - unregister many devices
 * @head: list of devices
 */
void unregister_netdevice_many(struct list_head *head)
{
	struct net_device *dev;

	if (!list_empty(head)) {
		rollback_registered_many(head);
		list_for_each_entry(dev, head, unreg_list)
			net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_many);
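
/*
 * Illustrative sketch: batching teardown with the _queue/_many pair so
 * the expensive notifier and synchronization work inside
 * rollback_registered_many() is paid once per batch rather than once
 * per device. The caller holds the RTNL. "my_destroy_all" is
 * hypothetical.
 */
static void my_destroy_all(struct net_device **devs, int n)
{
	LIST_HEAD(kill_list);
	int i;

	ASSERT_RTNL();
	for (i = 0; i < n; i++)
		unregister_netdevice_queue(devs[i], &kill_list);
	unregister_netdevice_many(&kill_list);	/* one rollback for all */
}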

/**
 * unregister_netdev - remove device from the kernel
 * @dev: device
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 *
 * This is just a wrapper for unregister_netdevice that takes
 * the rtnl semaphore. In general you want to use this and not
 * unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);
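
/*
 * Illustrative sketch: the canonical teardown order in a driver's
 * remove or module-exit path. unregister_netdev() takes the RTNL and
 * waits until all references are gone; only then may free_netdev()
 * run. The "baz_" names are hypothetical.
 */
static struct net_device *baz_dev;

static void baz_remove(void)
{
	unregister_netdev(baz_dev);	/* blocks until refcnt drops */
	free_netdev(baz_dev);		/* releases the device object */
	baz_dev = NULL;
}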

/**
 * dev_change_net_namespace - move device to different network namespace
 * @dev: device
 * @net: network namespace
 * @pat: If not NULL name pattern to try if the current device name
 *       is already taken in the destination network namespace.
 *
 * This function shuts down a device interface and moves it
 * to a new network namespace. On success 0 is returned, on
 * a failure a negative errno code is returned.
 *
 * Callers must hold the rtnl semaphore.
 */
int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
	char buf[IFNAMSIZ];
	const char *destname;
	int err;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

	/* Don't allow real devices to be moved when sysfs
	 * is enabled.
	 */
	err = -EINVAL;
	if (dev->dev.parent)
		goto out;

	/* Ensure the device has been registered */
	err = -EINVAL;
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing to do */
	err = 0;
	if (net_eq(dev_net(dev), net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	destname = dev->name;
	if (__dev_get_by_name(net, destname)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		if (!dev_valid_name(pat))
			goto out;
		if (strchr(pat, '%')) {
			if (__dev_alloc_name(net, pat, buf) < 0)
				goto out;
			destname = buf;
		} else
			destname = pat;
		if (__dev_get_by_name(net, destname))
			goto out;
	}

	/*
	 * And now a mini version of register_netdevice unregister_netdevice.
	 */

	/* If device is running close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	err = -ENODEV;
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols, that we are about to destroy
	   this device. They should clean all the things.
	*/
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

	/*
	 * Flush the unicast and multicast chains
	 */
	dev_unicast_flush(dev);
	dev_addr_discard(dev);

	netdev_unregister_kobject(dev);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);

	/* Assign the new device name */
	if (destname != dev->name)
		strcpy(dev->name, destname);

	/* If there is an ifindex conflict assign a new one */
	if (__dev_get_by_index(net, dev->ifindex)) {
		int iflink = (dev->iflink == dev->ifindex);
		dev->ifindex = dev_new_index(net);
		if (iflink)
			dev->iflink = dev->ifindex;
	}

	/* Fixup kobjects */
	err = netdev_register_kobject(dev);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols, that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	synchronize_net();
	err = 0;
out:
	return err;
}
EXPORT_SYMBOL_GPL(dev_change_net_namespace);
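
/*
 * Illustrative sketch: moving a device into another namespace while
 * handling a name collision with a '%d' pattern, as supported above.
 * The caller holds the RTNL; "my_move_dev"/"target_net" are
 * hypothetical.
 */
static int my_move_dev(struct net_device *dev, struct net *target_net)
{
	ASSERT_RTNL();
	/* fall back to "eth%d" if dev->name is taken in target_net */
	return dev_change_net_namespace(dev, target_net, "eth%d");
}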

static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct Qdisc **list_net;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Find end of our output_queue. */
	list_net = &sd->output_queue;
	while (*list_net)
		list_net = &(*list_net)->next_sched;
	/* Append output queue from offline CPU. */
	*list_net = oldsd->output_queue;
	oldsd->output_queue = NULL;

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
		netif_rx(skb);

	return NOTIFY_OK;
}

/**
 * netdev_increment_features - increment feature set by one
 * @all: current feature set
 * @one: new feature set
 * @mask: mask feature set
 *
 * Computes a new feature set after adding a device with feature set
 * @one to the master device with current feature set @all. Will not
 * enable anything that is off in @mask. Returns the new feature set.
 */
unsigned long netdev_increment_features(unsigned long all, unsigned long one,
					unsigned long mask)
{
	/* If device needs checksumming, downgrade to it. */
	if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
		all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
	else if (mask & NETIF_F_ALL_CSUM) {
		/* If one device supports v4/v6 checksumming, set for all. */
		if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
		    !(all & NETIF_F_GEN_CSUM)) {
			all &= ~NETIF_F_ALL_CSUM;
			all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
		}

		/* If one device supports hw checksumming, set for all. */
		if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
			all &= ~NETIF_F_ALL_CSUM;
			all |= NETIF_F_HW_CSUM;
		}
	}

	one |= NETIF_F_ALL_CSUM;

	one |= all & NETIF_F_ONE_FOR_ALL;
	all &= one | NETIF_F_LLTX | NETIF_F_GSO | NETIF_F_UFO;
	all |= one & mask & NETIF_F_ONE_FOR_ALL;

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);
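
/*
 * Illustrative sketch: how a bonding-style master recomputes its
 * feature set over all slaves with netdev_increment_features(),
 * mirroring bond_compute_features(). "my_recompute_features" and the
 * slave array are hypothetical.
 */
static void my_recompute_features(struct net_device *master,
				  struct net_device **slaves, int n_slaves)
{
	unsigned long features = master->features & ~NETIF_F_ONE_FOR_ALL;
	int i;

	for (i = 0; i < n_slaves; i++)
		features = netdev_increment_features(features,
						     slaves[i]->features,
						     NETIF_F_ONE_FOR_ALL);
	master->features = features;
}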

static struct hlist_head *netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}

/**
 * netdev_drivername - network driver for the device
 * @dev: network device
 * @buffer: buffer for resulting name
 * @len: size of buffer
 *
 * Determine network driver for device.
 */
char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
{
	const struct device_driver *driver;
	const struct device *parent;

	if (len <= 0 || !buffer)
		return buffer;
	buffer[0] = 0;

	parent = dev->dev.parent;
	if (!parent)
		return buffer;

	driver = parent->driver;
	if (driver && driver->name)
		strlcpy(buffer, driver->name, len);
	return buffer;
}
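
/*
 * Illustrative sketch: the typical diagnostic use of
 * netdev_drivername(), blaming the responsible driver in a log line,
 * much as the transmit watchdog in sch_generic.c does.
 * "my_report_stall" is hypothetical.
 */
static void my_report_stall(struct net_device *dev)
{
	char drivername[64];

	printk(KERN_WARNING "NETDEV WATCHDOG: %s (%s): transmit stalled\n",
	       dev->name,
	       netdev_drivername(dev, drivername, sizeof(drivername)));
}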

static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};

static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace.
	 */
	rtnl_lock();
restart:
	for_each_netdev(net, dev) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmovable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Delete virtual devices */
		if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) {
			dev->rtnl_link_ops->dellink(dev, NULL);
			goto restart;
		}

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
			       __func__, dev->name, err);
			BUG();
		}
		goto restart;
	}
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
};

/*
 * Initialize the DEV module. At boot time this walks the device list and
 * unhooks any devices that fail to initialise (normally hardware not
 * present) and leaves us with a valid list of present and active devices.
 */

/*
 * This is called single threaded during boot, so no need
 * to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 * Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct softnet_data *queue;

		queue = &per_cpu(softnet_data, i);
		skb_queue_head_init(&queue->input_pkt_queue);
		queue->completion_queue = NULL;
		INIT_LIST_HEAD(&queue->poll_list);

		queue->backlog.poll = process_backlog;
		queue->backlog.weight = weight_p;
		queue->backlog.gro_list = NULL;
		queue->backlog.gro_count = 0;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too. Since we now dynamically allocate and free
	 * the loopback device, maintain this invariant by keeping the
	 * loopback device first on the list of network devices, so it
	 * is the first device that appears and the last network device
	 * that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	dev_mcast_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);

static int __init initialize_hashrnd(void)
{
	get_random_bytes(&skb_tx_hashrnd, sizeof(skb_tx_hashrnd));
	return 0;
}

late_initcall_sync(initialize_hashrnd);