2 * NET3 Protocol independent device support routines.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
9 * Derived from the non IP parts of dev.c 1.0.19
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
15 * Florian la Roche <rzsfl@rz.uni-sb.de>
16 * Alan Cox <gw4pts@gw4pts.ampr.org>
17 * David Hinds <dahinds@users.sourceforge.net>
18 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
19 * Adam Sulmicki <adam@cfar.umd.edu>
20 * Pekka Riikonen <priikone@poesidon.pspt.fi>
23 * D.J. Barrow : Fixed bug where dev->refcnt gets set
24 * to 2 if register_netdev gets called
25 * before net_dev_init & also removed a
26 * few lines of code in the process.
27 * Alan Cox : device private ioctl copies fields back.
28 * Alan Cox : Transmit queue code does relevant
29 * stunts to keep the queue safe.
30 * Alan Cox : Fixed double lock.
31 * Alan Cox : Fixed promisc NULL pointer trap
32 * ???????? : Support the full private ioctl range
33 * Alan Cox : Moved ioctl permission check into
34 * drivers
35 * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
36 * Alan Cox : 100 backlog just doesn't cut it when
37 * you start doing multicast video 8)
38 * Alan Cox : Rewrote net_bh and list manager.
39 * Alan Cox : Fix ETH_P_ALL echoback lengths.
40 * Alan Cox : Took out transmit every packet pass
41 * Saved a few bytes in the ioctl handler
42 * Alan Cox : Network driver sets packet type before
43 * calling netif_rx. Saves a function
44 * call.
45 * Alan Cox : Hashed net_bh()
46 * Richard Kooijman: Timestamp fixes.
47 * Alan Cox : Wrong field in SIOCGIFDSTADDR
48 * Alan Cox : Device lock protection.
49 * Alan Cox : Fixed nasty side effect of device close
50 * changes.
51 * Rudi Cilibrasi : Pass the right thing to
52 * set_mac_address()
53 * Dave Miller : 32bit quantity for the device lock to
54 * make it work out on a Sparc.
55 * Bjorn Ekwall : Added KERNELD hack.
56 * Alan Cox : Cleaned up the backlog initialise.
57 * Craig Metz : SIOCGIFCONF fix if space for under
58 * 1 device.
59 * Thomas Bogendoerfer : Return ENODEV for dev_open, if there
60 * is no device open function.
61 * Andi Kleen : Fix error reporting for SIOCGIFCONF
62 * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF
63 * Cyrus Durgin : Cleaned for KMOD
64 * Adam Sulmicki : Bug Fix : Network Device Unload
65 * A network device unload needs to purge
66 * the backlog queue.
67 * Paul Rusty Russell : SIOCSIFNAME
68 * Pekka Riikonen : Netdev boot-time settings code
69 * Andrew Morton : Make unregister_netdevice wait
70 * indefinitely on dev->refcnt
71 * J Hadi Salim : - Backlog queue sampling
72 * - netif_rx() feedback
75 #include <asm/uaccess.h>
76 #include <asm/system.h>
77 #include <linux/bitops.h>
78 #include <linux/capability.h>
79 #include <linux/cpu.h>
80 #include <linux/types.h>
81 #include <linux/kernel.h>
82 #include <linux/sched.h>
83 #include <linux/mutex.h>
84 #include <linux/string.h>
86 #include <linux/socket.h>
87 #include <linux/sockios.h>
88 #include <linux/errno.h>
89 #include <linux/interrupt.h>
90 #include <linux/if_ether.h>
91 #include <linux/netdevice.h>
92 #include <linux/etherdevice.h>
93 #include <linux/ethtool.h>
94 #include <linux/notifier.h>
95 #include <linux/skbuff.h>
96 #include <net/net_namespace.h>
98 #include <linux/rtnetlink.h>
99 #include <linux/proc_fs.h>
100 #include <linux/seq_file.h>
101 #include <linux/stat.h>
102 #include <linux/if_bridge.h>
103 #include <linux/if_macvlan.h>
105 #include <net/pkt_sched.h>
106 #include <net/checksum.h>
107 #include <linux/highmem.h>
108 #include <linux/init.h>
109 #include <linux/kmod.h>
110 #include <linux/module.h>
111 #include <linux/netpoll.h>
112 #include <linux/rcupdate.h>
113 #include <linux/delay.h>
114 #include <net/wext.h>
115 #include <net/iw_handler.h>
116 #include <asm/current.h>
117 #include <linux/audit.h>
118 #include <linux/dmaengine.h>
119 #include <linux/err.h>
120 #include <linux/ctype.h>
121 #include <linux/if_arp.h>
122 #include <linux/if_vlan.h>
123 #include <linux/ip.h>
125 #include <linux/ipv6.h>
126 #include <linux/in.h>
127 #include <linux/jhash.h>
128 #include <linux/random.h>
129 #include <trace/napi.h>
131 #include "net-sysfs.h"
133 /* Instead of increasing this, you should create a hash table. */
134 #define MAX_GRO_SKBS 8
136 /* This should be increased if a protocol with a bigger head is added. */
137 #define GRO_MAX_HEAD (MAX_HEADER + 128)
140 * The list of packet types we will receive (as opposed to discard)
141 * and the routines to invoke.
143 * Why 16? Because with 16 the only overlap we get on a hash of the
144 * low nibble of the protocol value is RARP/SNAP/X.25.
146 * NOTE: That is no longer true with the addition of VLAN tags. Not
147 * sure which should go first, but I bet it won't make much
148 * difference if we are running VLANs. The good news is that
149 * this protocol won't be in the list unless compiled in, so
150 * the average user (w/out VLANs) will not be adversely affected.
167 #define PTYPE_HASH_SIZE (16)
168 #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
170 static DEFINE_SPINLOCK(ptype_lock);
171 static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
172 static struct list_head ptype_all __read_mostly; /* Taps */
175 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
178 * Pure readers hold dev_base_lock for reading.
180 * Writers must hold the rtnl semaphore while they loop through the
181 * dev_base_head list, and hold dev_base_lock for writing when they do the
182 * actual updates. This allows pure readers to access the list even
183 * while a writer is preparing to update it.
185 * To put it another way, dev_base_lock is held for writing only to
186 * protect against pure readers; the rtnl semaphore provides the
187 * protection against other writers.
189 * See, for example usages, register_netdevice() and
190 * unregister_netdevice(), which must be called with the rtnl
191 * semaphore held.
193 DEFINE_RWLOCK(dev_base_lock);
195 EXPORT_SYMBOL(dev_base_lock);
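/*
 * Illustrative sketch (not part of the original file): the locking
 * pattern described above. A pure reader takes dev_base_lock for
 * reading; a writer takes the rtnl semaphore and additionally takes
 * dev_base_lock for writing around the actual list update.
 * do_something_quick() is a hypothetical non-sleeping helper.
 *
 *	read_lock(&dev_base_lock);
 *	for_each_netdev(net, dev)
 *		do_something_quick(dev);
 *	read_unlock(&dev_base_lock);
 *
 *	rtnl_lock();
 *	write_lock_bh(&dev_base_lock);
 *	list_add_tail(&dev->dev_list, &net->dev_base_head);
 *	write_unlock_bh(&dev_base_lock);
 *	rtnl_unlock();
 */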
197 #define NETDEV_HASHBITS 8
198 #define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)
200 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
202 unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
203 return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
206 static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
208 return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
211 /* Device list insertion */
212 static int list_netdevice(struct net_device *dev)
214 struct net *net = dev_net(dev);
218 write_lock_bh(&dev_base_lock);
219 list_add_tail(&dev->dev_list, &net->dev_base_head);
220 hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
221 hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
222 write_unlock_bh(&dev_base_lock);
226 /* Device list removal */
227 static void unlist_netdevice(struct net_device *dev)
231 /* Unlink dev from the device chain */
232 write_lock_bh(&dev_base_lock);
233 list_del(&dev->dev_list);
234 hlist_del(&dev->name_hlist);
235 hlist_del(&dev->index_hlist);
236 write_unlock_bh(&dev_base_lock);
243 static RAW_NOTIFIER_HEAD(netdev_chain);
246 * Device drivers call our routines to queue packets here. We empty the
247 * queue in the local softnet handler.
250 DEFINE_PER_CPU(struct softnet_data, softnet_data);
252 #ifdef CONFIG_LOCKDEP
254 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
255 * according to dev->type
257 static const unsigned short netdev_lock_type[] =
258 {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
259 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
260 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
261 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
262 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
263 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
264 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
265 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
266 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
267 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
268 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
269 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
270 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
271 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
272 ARPHRD_PHONET_PIPE, ARPHRD_VOID, ARPHRD_NONE};
274 static const char *netdev_lock_name[] =
275 {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
276 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
277 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
278 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
279 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
280 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
281 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
282 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
283 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
284 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
285 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
286 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
287 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
288 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
289 "_xmit_PHONET_PIPE", "_xmit_VOID", "_xmit_NONE"};
291 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
292 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
294 static inline unsigned short netdev_lock_pos(unsigned short dev_type)
298 for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
299 if (netdev_lock_type[i] == dev_type)
301 /* the last key is used by default */
302 return ARRAY_SIZE(netdev_lock_type) - 1;
305 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
306 unsigned short dev_type)
310 i = netdev_lock_pos(dev_type);
311 lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
312 netdev_lock_name[i]);
315 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
319 i = netdev_lock_pos(dev->type);
320 lockdep_set_class_and_name(&dev->addr_list_lock,
321 &netdev_addr_lock_key[i],
322 netdev_lock_name[i]);
325 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
326 unsigned short dev_type)
329 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
334 /*******************************************************************************
336 Protocol management and registration routines
338 *******************************************************************************/
341 * Add a protocol ID to the list. Now that the input handler is
342 * smarter we can dispense with all the messy stuff that used to be
343 * here.
345 * BEWARE!!! Protocol handlers that mangle input packets
346 * MUST BE last in the hash buckets, and checking protocol handlers
347 * MUST start from the promiscuous ptype_all chain in net_bh.
348 * This is true now; do not change it.
349 * Explanation follows: if a protocol handler that mangles packets
350 * were first in the list, it could not sense that the packet
351 * is cloned and should be copied-on-write, so it would
352 * change it and subsequent readers would get a broken packet.
357 * dev_add_pack - add packet handler
358 * @pt: packet type declaration
360 * Add a protocol handler to the networking stack. The passed &packet_type
361 * is linked into kernel lists and may not be freed until it has been
362 * removed from the kernel lists.
364 * This call does not sleep, therefore it cannot
365 * guarantee that all CPUs that are in the middle of receiving packets
366 * will see the new packet type (until the next received packet).
369 void dev_add_pack(struct packet_type *pt)
373 spin_lock_bh(&ptype_lock);
374 if (pt->type == htons(ETH_P_ALL))
375 list_add_rcu(&pt->list, &ptype_all);
377 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
378 list_add_rcu(&pt->list, &ptype_base[hash]);
380 spin_unlock_bh(&ptype_lock);
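/*
 * Usage sketch (illustrative, not part of the original file): a
 * minimal protocol handler. my_rcv, my_packet_type and MY_ETH_PROTO
 * are hypothetical names; a real handler would do more than free the
 * buffer.
 *
 *	static int my_rcv(struct sk_buff *skb, struct net_device *dev,
 *			  struct packet_type *pt, struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_packet_type __read_mostly = {
 *		.type	= __constant_htons(MY_ETH_PROTO),
 *		.func	= my_rcv,
 *	};
 *
 *	dev_add_pack(&my_packet_type);
 *	...
 *	dev_remove_pack(&my_packet_type);
 */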
384 * __dev_remove_pack - remove packet handler
385 * @pt: packet type declaration
387 * Remove a protocol handler that was previously added to the kernel
388 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
389 * from the kernel lists and can be freed or reused once this function
390 * returns.
392 * The packet type might still be in use by receivers
393 * and must not be freed until after all the CPUs have gone
394 * through a quiescent state.
396 void __dev_remove_pack(struct packet_type *pt)
398 struct list_head *head;
399 struct packet_type *pt1;
401 spin_lock_bh(&ptype_lock);
403 if (pt->type == htons(ETH_P_ALL))
406 head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
408 list_for_each_entry(pt1, head, list) {
410 list_del_rcu(&pt->list);
415 printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
417 spin_unlock_bh(&ptype_lock);
420 * dev_remove_pack - remove packet handler
421 * @pt: packet type declaration
423 * Remove a protocol handler that was previously added to the kernel
424 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
425 * from the kernel lists and can be freed or reused once this function
426 * returns.
428 * This call sleeps to guarantee that no CPU is looking at the packet
429 * type after return.
431 void dev_remove_pack(struct packet_type *pt)
433 __dev_remove_pack(pt);
438 /******************************************************************************
440 Device Boot-time Settings Routines
442 *******************************************************************************/
444 /* Boot time configuration table */
445 static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
448 * netdev_boot_setup_add - add new setup entry
449 * @name: name of the device
450 * @map: configured settings for the device
452 * Adds a new setup entry to the dev_boot_setup list. The function
453 * returns 0 on error and 1 on success. This is a generic routine to
454 * all netdevices.
456 static int netdev_boot_setup_add(char *name, struct ifmap *map)
458 struct netdev_boot_setup *s;
462 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
463 if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
464 memset(s[i].name, 0, sizeof(s[i].name));
465 strlcpy(s[i].name, name, IFNAMSIZ);
466 memcpy(&s[i].map, map, sizeof(s[i].map));
471 return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
475 * netdev_boot_setup_check - check boot time settings
476 * @dev: the netdevice
478 * Check boot time settings for the device.
479 * The found settings are set for the device to be used
480 * later in the device probing.
481 * Returns 0 if no settings are found, 1 if they are.
483 int netdev_boot_setup_check(struct net_device *dev)
485 struct netdev_boot_setup *s = dev_boot_setup;
488 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
489 if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
490 !strcmp(dev->name, s[i].name)) {
491 dev->irq = s[i].map.irq;
492 dev->base_addr = s[i].map.base_addr;
493 dev->mem_start = s[i].map.mem_start;
494 dev->mem_end = s[i].map.mem_end;
503 * netdev_boot_base - get address from boot time settings
504 * @prefix: prefix for network device
505 * @unit: id for network device
507 * Check boot time settings for the base address of device.
508 * The found settings are set for the device to be used
509 * later in the device probing.
510 * Returns 0 if no settings are found.
512 unsigned long netdev_boot_base(const char *prefix, int unit)
514 const struct netdev_boot_setup *s = dev_boot_setup;
518 sprintf(name, "%s%d", prefix, unit);
521 * If the device is already registered, return a base of 1
522 * to indicate not to probe for this interface
524 if (__dev_get_by_name(&init_net, name))
527 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
528 if (!strcmp(name, s[i].name))
529 return s[i].map.base_addr;
534 * Saves the boot-time configured settings for any netdevice.
536 int __init netdev_boot_setup(char *str)
541 str = get_options(str, ARRAY_SIZE(ints), ints);
546 memset(&map, 0, sizeof(map));
550 map.base_addr = ints[2];
552 map.mem_start = ints[3];
554 map.mem_end = ints[4];
556 /* Add new entry to the list */
557 return netdev_boot_setup_add(str, &map);
560 __setup("netdev=", netdev_boot_setup);
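/*
 * Boot-line example (matching the parsing above): the accepted format
 * is netdev=<irq>,<base_addr>,<mem_start>,<mem_end>,<name>, e.g.
 *
 *	netdev=9,0x300,0,0,eth0
 *
 * which records irq 9 and I/O base 0x300 for eth0 in dev_boot_setup,
 * to be picked up later by netdev_boot_setup_check() during probing.
 */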
562 /*******************************************************************************
564 Device Interface Subroutines
566 *******************************************************************************/
569 * __dev_get_by_name - find a device by its name
570 * @net: the applicable net namespace
571 * @name: name to find
573 * Find an interface by name. Must be called under RTNL semaphore
574 * or @dev_base_lock. If the name is found a pointer to the device
575 * is returned. If the name is not found then %NULL is returned. The
576 * reference counters are not incremented so the caller must be
577 * careful with locks.
580 struct net_device *__dev_get_by_name(struct net *net, const char *name)
582 struct hlist_node *p;
584 hlist_for_each(p, dev_name_hash(net, name)) {
585 struct net_device *dev
586 = hlist_entry(p, struct net_device, name_hlist);
587 if (!strncmp(dev->name, name, IFNAMSIZ))
594 * dev_get_by_name - find a device by its name
595 * @net: the applicable net namespace
596 * @name: name to find
598 * Find an interface by name. This can be called from any
599 * context and does its own locking. The returned handle has
600 * the usage count incremented and the caller must use dev_put() to
601 * release it when it is no longer needed. %NULL is returned if no
602 * matching device is found.
605 struct net_device *dev_get_by_name(struct net *net, const char *name)
607 struct net_device *dev;
609 read_lock(&dev_base_lock);
610 dev = __dev_get_by_name(net, name);
613 read_unlock(&dev_base_lock);
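/*
 * Usage sketch (illustrative): the returned reference must be dropped
 * with dev_put() once the caller is done with the device.
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *
 *	if (dev) {
 *		... use dev ...
 *		dev_put(dev);
 *	}
 */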
618 * __dev_get_by_index - find a device by its ifindex
619 * @net: the applicable net namespace
620 * @ifindex: index of device
622 * Search for an interface by index. Returns a pointer to the device,
623 * or %NULL if the device is not found. The device has not
624 * had its reference counter increased so the caller must be careful
625 * about locking. The caller must hold either the RTNL semaphore
626 * or @dev_base_lock.
629 struct net_device *__dev_get_by_index(struct net *net, int ifindex)
631 struct hlist_node *p;
633 hlist_for_each(p, dev_index_hash(net, ifindex)) {
634 struct net_device *dev
635 = hlist_entry(p, struct net_device, index_hlist);
636 if (dev->ifindex == ifindex)
644 * dev_get_by_index - find a device by its ifindex
645 * @net: the applicable net namespace
646 * @ifindex: index of device
648 * Search for an interface by index. Returns a pointer to the device,
649 * or %NULL if the device is not found. The device returned has
650 * had a reference added and the pointer is safe until the user calls
651 * dev_put to indicate they have finished with it.
654 struct net_device *dev_get_by_index(struct net *net, int ifindex)
656 struct net_device *dev;
658 read_lock(&dev_base_lock);
659 dev = __dev_get_by_index(net, ifindex);
662 read_unlock(&dev_base_lock);
667 * dev_getbyhwaddr - find a device by its hardware address
668 * @net: the applicable net namespace
669 * @type: media type of device
670 * @ha: hardware address
672 * Search for an interface by MAC address. Returns a pointer to the
673 * device, or %NULL if it is not found. The caller must hold the
674 * rtnl semaphore. The returned device has not had its ref count increased
675 * and the caller must therefore be careful about locking.
678 * If the API were consistent this would be __dev_get_by_hwaddr
681 struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
683 struct net_device *dev;
687 for_each_netdev(net, dev)
688 if (dev->type == type &&
689 !memcmp(dev->dev_addr, ha, dev->addr_len))
695 EXPORT_SYMBOL(dev_getbyhwaddr);
697 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
699 struct net_device *dev;
702 for_each_netdev(net, dev)
703 if (dev->type == type)
709 EXPORT_SYMBOL(__dev_getfirstbyhwtype);
711 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
713 struct net_device *dev;
716 dev = __dev_getfirstbyhwtype(net, type);
723 EXPORT_SYMBOL(dev_getfirstbyhwtype);
726 * dev_get_by_flags - find any device with given flags
727 * @net: the applicable net namespace
728 * @if_flags: IFF_* values
729 * @mask: bitmask of bits in if_flags to check
731 * Search for any interface with the given flags. Returns a pointer to
732 * the device, or %NULL if no matching device is found. The device returned has
733 * had a reference added and the pointer is safe until the user calls
734 * dev_put to indicate they have finished with it.
737 struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask)
739 struct net_device *dev, *ret;
742 read_lock(&dev_base_lock);
743 for_each_netdev(net, dev) {
744 if (((dev->flags ^ if_flags) & mask) == 0) {
750 read_unlock(&dev_base_lock);
755 * dev_valid_name - check if name is okay for network device
756 * @name: name string
758 * Network device names need to be valid file names
759 * to allow sysfs to work. We also disallow any kind of
760 * whitespace.
762 int dev_valid_name(const char *name)
766 if (strlen(name) >= IFNAMSIZ)
768 if (!strcmp(name, ".") || !strcmp(name, ".."))
772 if (*name == '/' || isspace(*name))
780 * __dev_alloc_name - allocate a name for a device
781 * @net: network namespace to allocate the device name in
782 * @name: name format string
783 * @buf: scratch buffer and result name string
785 * Passed a format string, e.g. "lt%d", it will try to find a suitable
786 * id. It scans the list of devices to build up a free map, then chooses
787 * the first empty slot. The caller must hold the dev_base or rtnl lock
788 * while allocating the name and adding the device in order to avoid
789 * duplicates.
790 * Limited to bits_per_byte * page size devices (i.e. 32K on most platforms).
791 * Returns the number of the unit assigned or a negative errno code.
794 static int __dev_alloc_name(struct net *net, const char *name, char *buf)
798 const int max_netdevices = 8*PAGE_SIZE;
799 unsigned long *inuse;
800 struct net_device *d;
802 p = strnchr(name, IFNAMSIZ-1, '%');
805 * Verify the string as this thing may have come from
806 * the user. There must be exactly one "%d" and no other "%"
807 * characters.
809 if (p[1] != 'd' || strchr(p + 2, '%'))
812 /* Use one page as a bit array of possible slots */
813 inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
817 for_each_netdev(net, d) {
818 if (!sscanf(d->name, name, &i))
820 if (i < 0 || i >= max_netdevices)
823 /* avoid cases where sscanf is not exact inverse of printf */
824 snprintf(buf, IFNAMSIZ, name, i);
825 if (!strncmp(buf, d->name, IFNAMSIZ))
829 i = find_first_zero_bit(inuse, max_netdevices);
830 free_page((unsigned long) inuse);
833 snprintf(buf, IFNAMSIZ, name, i);
834 if (!__dev_get_by_name(net, buf))
837 /* It is possible to run out of possible slots
838 * when the name is long and there isn't enough space left
839 * for the digits, or if all bits are used.
845 * dev_alloc_name - allocate a name for a device
846 * @dev: device
847 * @name: name format string
849 * Passed a format string, e.g. "lt%d", it will try to find a suitable
850 * id. It scans the list of devices to build up a free map, then chooses
851 * the first empty slot. The caller must hold the dev_base or rtnl lock
852 * while allocating the name and adding the device in order to avoid
853 * duplicates.
854 * Limited to bits_per_byte * page size devices (i.e. 32K on most platforms).
855 * Returns the number of the unit assigned or a negative errno code.
858 int dev_alloc_name(struct net_device *dev, const char *name)
864 BUG_ON(!dev_net(dev));
866 ret = __dev_alloc_name(net, name, buf);
868 strlcpy(dev->name, buf, IFNAMSIZ);
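/*
 * Usage sketch (illustrative): typically called on an unregistered
 * device before register_netdevice(); on success dev->name holds the
 * expanded name, e.g. "dummy0" for the template "dummy%d".
 * out_free is a hypothetical error label.
 *
 *	err = dev_alloc_name(dev, "dummy%d");
 *	if (err < 0)
 *		goto out_free;
 */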
874 * dev_change_name - change name of a device
875 * @dev: device
876 * @newname: name (or format string) must be at least IFNAMSIZ
878 * Change the name of a device; a format string such as "eth%d"
879 * may be passed for wildcard naming.
881 int dev_change_name(struct net_device *dev, const char *newname)
883 char oldname[IFNAMSIZ];
889 BUG_ON(!dev_net(dev));
892 if (dev->flags & IFF_UP)
895 if (!dev_valid_name(newname))
898 if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
901 memcpy(oldname, dev->name, IFNAMSIZ);
903 if (strchr(newname, '%')) {
904 err = dev_alloc_name(dev, newname);
908 else if (__dev_get_by_name(net, newname))
911 strlcpy(dev->name, newname, IFNAMSIZ);
914 /* For now only devices in the initial network namespace
915 * are in sysfs.
916 */
917 if (net == &init_net) {
918 ret = device_rename(&dev->dev, dev->name);
920 memcpy(dev->name, oldname, IFNAMSIZ);
925 write_lock_bh(&dev_base_lock);
926 hlist_del(&dev->name_hlist);
927 hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
928 write_unlock_bh(&dev_base_lock);
930 ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
931 ret = notifier_to_errno(ret);
936 "%s: name change rollback failed: %d.\n",
940 memcpy(dev->name, oldname, IFNAMSIZ);
949 * dev_set_alias - change ifalias of a device
950 * @dev: device
951 * @alias: name up to IFALIASZ
952 * @len: limit of bytes to copy from info
954 * Set the ifalias for a device.
956 int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
971 dev->ifalias = krealloc(dev->ifalias, len+1, GFP_KERNEL);
975 strlcpy(dev->ifalias, alias, len+1);
981 * netdev_features_change - device changes features
982 * @dev: device to cause notification
984 * Called to indicate a device has changed features.
986 void netdev_features_change(struct net_device *dev)
988 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
990 EXPORT_SYMBOL(netdev_features_change);
993 * netdev_state_change - device changes state
994 * @dev: device to cause notification
996 * Called to indicate a device has changed state. This function calls
997 * the notifier chains for netdev_chain and sends a NEWLINK message
998 * to the routing socket.
1000 void netdev_state_change(struct net_device *dev)
1002 if (dev->flags & IFF_UP) {
1003 call_netdevice_notifiers(NETDEV_CHANGE, dev);
1004 rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
1008 void netdev_bonding_change(struct net_device *dev)
1010 call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, dev);
1012 EXPORT_SYMBOL(netdev_bonding_change);
1015 * dev_load - load a network module
1016 * @net: the applicable net namespace
1017 * @name: name of interface
1019 * If a network interface is not present and the process has suitable
1020 * privileges this function loads the module. If module loading is not
1021 * available in this kernel then it becomes a nop.
1024 void dev_load(struct net *net, const char *name)
1026 struct net_device *dev;
1028 read_lock(&dev_base_lock);
1029 dev = __dev_get_by_name(net, name);
1030 read_unlock(&dev_base_lock);
1032 if (!dev && capable(CAP_SYS_MODULE))
1033 request_module("%s", name);
1037 * dev_open - prepare an interface for use.
1038 * @dev: device to open
1040 * Takes a device from down to up state. The device's private open
1041 * function is invoked and then the multicast lists are loaded. Finally
1042 * the device is moved into the up state and a %NETDEV_UP message is
1043 * sent to the netdev notifier chain.
1045 * Calling this function on an active interface is a nop. On a failure
1046 * a negative errno code is returned.
1048 int dev_open(struct net_device *dev)
1050 const struct net_device_ops *ops = dev->netdev_ops;
1059 if (dev->flags & IFF_UP)
1063 * Is it even present?
1065 if (!netif_device_present(dev))
1068 ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1069 ret = notifier_to_errno(ret);
1074 * Call device private open method
1076 set_bit(__LINK_STATE_START, &dev->state);
1078 if (ops->ndo_validate_addr)
1079 ret = ops->ndo_validate_addr(dev);
1081 if (!ret && ops->ndo_open)
1082 ret = ops->ndo_open(dev);
1085 * If it went open OK then:
1089 clear_bit(__LINK_STATE_START, &dev->state);
1094 dev->flags |= IFF_UP;
1099 net_dmaengine_get();
1102 * Initialize multicasting status
1104 dev_set_rx_mode(dev);
1107 * Wakeup transmit queue engine
1112 * ... and announce new interface.
1114 call_netdevice_notifiers(NETDEV_UP, dev);
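/*
 * Usage sketch (illustrative): like the other device state changers,
 * dev_open() is called under the rtnl semaphore.
 *
 *	rtnl_lock();
 *	err = dev_open(dev);
 *	rtnl_unlock();
 */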
1121 * dev_close - shutdown an interface.
1122 * @dev: device to shutdown
1124 * This function moves an active device into down state. A
1125 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1126 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1129 int dev_close(struct net_device *dev)
1131 const struct net_device_ops *ops = dev->netdev_ops;
1136 if (!(dev->flags & IFF_UP))
1140 * Tell people we are going down, so that they can
1141 * prepare for death while the device is still operating.
1143 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1145 clear_bit(__LINK_STATE_START, &dev->state);
1147 /* Synchronize to the scheduled poll. We cannot touch the poll list;
1148 * it can even be on a different CPU. So just clear netif_running().
1150 * dev->stop() will invoke napi_disable() on all of its
1151 * napi_struct instances on this device.
1153 smp_mb__after_clear_bit(); /* Commit netif_running(). */
1155 dev_deactivate(dev);
1158 * Call the device-specific close. This cannot fail.
1159 * Only called if the device is UP.
1161 * We allow it to be called even after a DETACH hot-plug
1162 * event.
1168 * Device is now down.
1171 dev->flags &= ~IFF_UP;
1174 * Tell people we are down
1176 call_netdevice_notifiers(NETDEV_DOWN, dev);
1181 net_dmaengine_put();
1188 * dev_disable_lro - disable Large Receive Offload on a device
1189 * @dev: device
1191 * Disable Large Receive Offload (LRO) on a net device. Must be
1192 * called under RTNL. This is needed if received packets may be
1193 * forwarded to another interface.
1195 void dev_disable_lro(struct net_device *dev)
1197 if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
1198 dev->ethtool_ops->set_flags) {
1199 u32 flags = dev->ethtool_ops->get_flags(dev);
1200 if (flags & ETH_FLAG_LRO) {
1201 flags &= ~ETH_FLAG_LRO;
1202 dev->ethtool_ops->set_flags(dev, flags);
1205 WARN_ON(dev->features & NETIF_F_LRO);
1207 EXPORT_SYMBOL(dev_disable_lro);
1210 static int dev_boot_phase = 1;
1213 * Device change register/unregister. These are not inline or static
1214 * as we export them to the world.
1218 * register_netdevice_notifier - register a network notifier block
1219 * @nb: notifier
1221 * Register a notifier to be called when network device events occur.
1222 * The notifier passed is linked into the kernel structures and must
1223 * not be reused until it has been unregistered. A negative errno code
1224 * is returned on a failure.
1226 * When registered, all registration and up events are replayed
1227 * to the new notifier to allow the caller to have a race-free
1228 * view of the network device list.
1231 int register_netdevice_notifier(struct notifier_block *nb)
1233 struct net_device *dev;
1234 struct net_device *last;
1239 err = raw_notifier_chain_register(&netdev_chain, nb);
1245 for_each_netdev(net, dev) {
1246 err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
1247 err = notifier_to_errno(err);
1251 if (!(dev->flags & IFF_UP))
1254 nb->notifier_call(nb, NETDEV_UP, dev);
1265 for_each_netdev(net, dev) {
1269 if (dev->flags & IFF_UP) {
1270 nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1271 nb->notifier_call(nb, NETDEV_DOWN, dev);
1273 nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
1277 raw_notifier_chain_unregister(&netdev_chain, nb);
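/*
 * Usage sketch (illustrative, hypothetical names): a subscriber sees
 * existing devices replayed as NETDEV_REGISTER/NETDEV_UP, as described
 * above, and then new events as they happen.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			printk(KERN_INFO "%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_netdev_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_netdev_nb);
 */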
1282 * unregister_netdevice_notifier - unregister a network notifier block
1283 * @nb: notifier
1285 * Unregister a notifier previously registered by
1286 * register_netdevice_notifier(). The notifier is unlinked from the
1287 * kernel structures and may then be reused.
1288 * is returned on a failure.
1291 int unregister_netdevice_notifier(struct notifier_block *nb)
1296 err = raw_notifier_chain_unregister(&netdev_chain, nb);
1302 * call_netdevice_notifiers - call all network notifier blocks
1303 * @val: value passed unmodified to notifier function
1304 * @dev: net_device pointer passed unmodified to notifier function
1306 * Call all network notifier blocks. Parameters and return value
1307 * are as for raw_notifier_call_chain().
1310 int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1312 return raw_notifier_call_chain(&netdev_chain, val, dev);
1315 /* When > 0 there are consumers of rx skb time stamps */
1316 static atomic_t netstamp_needed = ATOMIC_INIT(0);
1318 void net_enable_timestamp(void)
1320 atomic_inc(&netstamp_needed);
1323 void net_disable_timestamp(void)
1325 atomic_dec(&netstamp_needed);
1328 static inline void net_timestamp(struct sk_buff *skb)
1330 if (atomic_read(&netstamp_needed))
1331 __net_timestamp(skb);
1333 skb->tstamp.tv64 = 0;
1337 * Support routine. Sends outgoing frames to any network
1338 * taps currently in use.
1341 static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1343 struct packet_type *ptype;
1345 #ifdef CONFIG_NET_CLS_ACT
1346 if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
1353 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1354 /* Never send packets back to the socket
1355 * they originated from - MvS (miquels@drinkel.ow.org)
1357 if ((ptype->dev == dev || !ptype->dev) &&
1358 (ptype->af_packet_priv == NULL ||
1359 (struct sock *)ptype->af_packet_priv != skb->sk)) {
1360 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1364 /* skb->nh should be correctly
1365 set by sender, so that the second statement is
1366 just protection against buggy protocols.
1368 skb_reset_mac_header(skb2);
1370 if (skb_network_header(skb2) < skb2->data ||
1371 skb2->network_header > skb2->tail) {
1372 if (net_ratelimit())
1373 printk(KERN_CRIT "protocol %04x is "
1375 skb2->protocol, dev->name);
1376 skb_reset_network_header(skb2);
1379 skb2->transport_header = skb2->network_header;
1380 skb2->pkt_type = PACKET_OUTGOING;
1381 ptype->func(skb2, skb->dev, ptype, skb->dev);
1388 static inline void __netif_reschedule(struct Qdisc *q)
1390 struct softnet_data *sd;
1391 unsigned long flags;
1393 local_irq_save(flags);
1394 sd = &__get_cpu_var(softnet_data);
1395 q->next_sched = sd->output_queue;
1396 sd->output_queue = q;
1397 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1398 local_irq_restore(flags);
1401 void __netif_schedule(struct Qdisc *q)
1403 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
1404 __netif_reschedule(q);
1406 EXPORT_SYMBOL(__netif_schedule);
1408 void dev_kfree_skb_irq(struct sk_buff *skb)
1410 if (atomic_dec_and_test(&skb->users)) {
1411 struct softnet_data *sd;
1412 unsigned long flags;
1414 local_irq_save(flags);
1415 sd = &__get_cpu_var(softnet_data);
1416 skb->next = sd->completion_queue;
1417 sd->completion_queue = skb;
1418 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1419 local_irq_restore(flags);
1422 EXPORT_SYMBOL(dev_kfree_skb_irq);
1424 void dev_kfree_skb_any(struct sk_buff *skb)
1426 if (in_irq() || irqs_disabled())
1427 dev_kfree_skb_irq(skb);
1431 EXPORT_SYMBOL(dev_kfree_skb_any);
1435 * netif_device_detach - mark device as removed
1436 * @dev: network device
1438 * Mark device as removed from system and therefore no longer available.
1440 void netif_device_detach(struct net_device *dev)
1442 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
1443 netif_running(dev)) {
1444 netif_tx_stop_all_queues(dev);
1447 EXPORT_SYMBOL(netif_device_detach);
1450 * netif_device_attach - mark device as attached
1451 * @dev: network device
1453 * Mark device as attached to the system and restart it if needed.
1455 void netif_device_attach(struct net_device *dev)
1457 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
1458 netif_running(dev)) {
1459 netif_tx_wake_all_queues(dev);
1460 __netdev_watchdog_up(dev);
1463 EXPORT_SYMBOL(netif_device_attach);
1465 static bool can_checksum_protocol(unsigned long features, __be16 protocol)
1467 return ((features & NETIF_F_GEN_CSUM) ||
1468 ((features & NETIF_F_IP_CSUM) &&
1469 protocol == htons(ETH_P_IP)) ||
1470 ((features & NETIF_F_IPV6_CSUM) &&
1471 protocol == htons(ETH_P_IPV6)) ||
1472 ((features & NETIF_F_FCOE_CRC) &&
1473 protocol == htons(ETH_P_FCOE)));
1476 static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
1478 if (can_checksum_protocol(dev->features, skb->protocol))
1481 if (skb->protocol == htons(ETH_P_8021Q)) {
1482 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
1483 if (can_checksum_protocol(dev->features & dev->vlan_features,
1484 veh->h_vlan_encapsulated_proto))
1492 * Invalidate hardware checksum when packet is to be mangled, and
1493 * complete checksum manually on outgoing path.
1495 int skb_checksum_help(struct sk_buff *skb)
1498 int ret = 0, offset;
1500 if (skb->ip_summed == CHECKSUM_COMPLETE)
1501 goto out_set_summed;
1503 if (unlikely(skb_shinfo(skb)->gso_size)) {
1504 /* Let GSO fix up the checksum. */
1505 goto out_set_summed;
1508 offset = skb->csum_start - skb_headroom(skb);
1509 BUG_ON(offset >= skb_headlen(skb));
1510 csum = skb_checksum(skb, offset, skb->len - offset, 0);
1512 offset += skb->csum_offset;
1513 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1515 if (skb_cloned(skb) &&
1516 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
1517 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1522 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
1524 skb->ip_summed = CHECKSUM_NONE;
1530 * skb_gso_segment - Perform segmentation on skb.
1531 * @skb: buffer to segment
1532 * @features: features for the output path (see dev->features)
1534 * This function segments the given skb and returns a list of segments.
1536 * It may return NULL if the skb requires no segmentation. This is
1537 * only possible when GSO is used for verifying header integrity.
1539 struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
1541 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1542 struct packet_type *ptype;
1543 __be16 type = skb->protocol;
1546 skb_reset_mac_header(skb);
1547 skb->mac_len = skb->network_header - skb->mac_header;
1548 __skb_pull(skb, skb->mac_len);
1550 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1551 struct net_device *dev = skb->dev;
1552 struct ethtool_drvinfo info = {};
1554 if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
1555 dev->ethtool_ops->get_drvinfo(dev, &info);
1557 WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
1559 info.driver, dev ? dev->features : 0L,
1560 skb->sk ? skb->sk->sk_route_caps : 0L,
1561 skb->len, skb->data_len, skb->ip_summed);
1563 if (skb_header_cloned(skb) &&
1564 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1565 return ERR_PTR(err);
1569 list_for_each_entry_rcu(ptype,
1570 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
1571 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
1572 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1573 err = ptype->gso_send_check(skb);
1574 segs = ERR_PTR(err);
1575 if (err || skb_gso_ok(skb, features))
1577 __skb_push(skb, (skb->data -
1578 skb_network_header(skb)));
1580 segs = ptype->gso_segment(skb, features);
1586 __skb_push(skb, skb->data - skb_mac_header(skb));
1591 EXPORT_SYMBOL(skb_gso_segment);
1593 /* Take action when hardware reception checksum errors are detected. */
1595 void netdev_rx_csum_fault(struct net_device *dev)
1597 if (net_ratelimit()) {
1598 printk(KERN_ERR "%s: hw csum failure.\n",
1599 dev ? dev->name : "<unknown>");
1603 EXPORT_SYMBOL(netdev_rx_csum_fault);
1606 /* Actually, we should eliminate this check as soon as we know that:
1607 * 1. An IOMMU is present and allows mapping all of the memory.
1608 * 2. No high memory really exists on this machine.
1611 static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1613 #ifdef CONFIG_HIGHMEM
1616 if (dev->features & NETIF_F_HIGHDMA)
1619 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1620 if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1627 struct dev_gso_cb {
1628 void (*destructor)(struct sk_buff *skb);
1629 };
1631 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
1633 static void dev_gso_skb_destructor(struct sk_buff *skb)
1635 struct dev_gso_cb *cb;
1638 struct sk_buff *nskb = skb->next;
1640 skb->next = nskb->next;
1643 } while (skb->next);
1645 cb = DEV_GSO_CB(skb);
1647 cb->destructor(skb);
1651 * dev_gso_segment - Perform emulated hardware segmentation on skb.
1652 * @skb: buffer to segment
1654 * This function segments the given skb and stores the list of segments
1655 * in skb->next.
1657 static int dev_gso_segment(struct sk_buff *skb)
1659 struct net_device *dev = skb->dev;
1660 struct sk_buff *segs;
1661 int features = dev->features & ~(illegal_highdma(dev, skb) ?
1664 segs = skb_gso_segment(skb, features);
1666 /* Verifying header integrity only. */
1671 return PTR_ERR(segs);
1674 DEV_GSO_CB(skb)->destructor = skb->destructor;
1675 skb->destructor = dev_gso_skb_destructor;
1680 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1681 struct netdev_queue *txq)
1683 const struct net_device_ops *ops = dev->netdev_ops;
1686 if (likely(!skb->next)) {
1687 if (!list_empty(&ptype_all))
1688 dev_queue_xmit_nit(skb, dev);
1690 if (netif_needs_gso(dev, skb)) {
1691 if (unlikely(dev_gso_segment(skb)))
1698 * If the device doesn't need skb->dst, release it right now while
1699 * it's hot in this CPU's cache
1701 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
1704 rc = ops->ndo_start_xmit(skb, dev);
1706 txq_trans_update(txq);
1708 * TODO: if skb_orphan() was called by
1709 * dev->hard_start_xmit() (for example, the unmodified
1710 * igb driver does that; bnx2 doesn't), then
1711 * skb_tx_software_timestamp() will be unable to send
1712 * back the time stamp.
1714 * How can this be prevented? Always create another
1715 * reference to the socket before calling
1716 * dev->hard_start_xmit()? Prevent that skb_orphan()
1717 * does anything in dev->hard_start_xmit() by clearing
1718 * the skb destructor before the call and restoring it
1719 * afterwards, then doing the skb_orphan() ourselves?
1726 struct sk_buff *nskb = skb->next;
1728 skb->next = nskb->next;
1730 rc = ops->ndo_start_xmit(nskb, dev);
1732 nskb->next = skb->next;
1736 txq_trans_update(txq);
1737 if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
1738 return NETDEV_TX_BUSY;
1739 } while (skb->next);
1741 skb->destructor = DEV_GSO_CB(skb)->destructor;
1748 static u32 skb_tx_hashrnd;
1750 u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
1754 if (skb_rx_queue_recorded(skb)) {
1755 hash = skb_get_rx_queue(skb);
1756 while (unlikely(hash >= dev->real_num_tx_queues))
1757 hash -= dev->real_num_tx_queues;
1761 if (skb->sk && skb->sk->sk_hash)
1762 hash = skb->sk->sk_hash;
1764 hash = skb->protocol;
1766 hash = jhash_1word(hash, skb_tx_hashrnd);
1768 return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
1770 EXPORT_SYMBOL(skb_tx_hash);
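/*
 * Worked example (illustrative): the closing multiply-and-shift maps
 * a 32-bit hash uniformly onto [0, real_num_tx_queues) without a
 * modulo. For hash = 0x80000000 and 4 real tx queues:
 *
 *	((u64)0x80000000 * 4) >> 32 == 2
 */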
1772 static struct netdev_queue *dev_pick_tx(struct net_device *dev,
1773 struct sk_buff *skb)
1775 const struct net_device_ops *ops = dev->netdev_ops;
1776 u16 queue_index = 0;
1778 if (ops->ndo_select_queue)
1779 queue_index = ops->ndo_select_queue(dev, skb);
1780 else if (dev->real_num_tx_queues > 1)
1781 queue_index = skb_tx_hash(dev, skb);
1783 skb_set_queue_mapping(skb, queue_index);
1784 return netdev_get_tx_queue(dev, queue_index);
1788 * dev_queue_xmit - transmit a buffer
1789 * @skb: buffer to transmit
1791 * Queue a buffer for transmission to a network device. The caller must
1792 * have set the device and priority and built the buffer before calling
1793 * this function. The function can be called from an interrupt.
1795 * A negative errno code is returned on a failure. A success does not
1796 * guarantee the frame will be transmitted as it may be dropped due
1797 * to congestion or traffic shaping.
1799 * -----------------------------------------------------------------------------------
1800 * I notice this method can also return errors from the queue disciplines,
1801 * including NET_XMIT_DROP, which is a positive value. So, errors can also
1802 * be positive.
1804 * Regardless of the return value, the skb is consumed, so it is currently
1805 * difficult to retry a send to this method. (You can bump the ref count
1806 * before sending to hold a reference for retry if you are careful.)
1808 * When calling this method, interrupts MUST be enabled. This is because
1809 * the BH enable code must have IRQs enabled so that it will not deadlock.
1812 int dev_queue_xmit(struct sk_buff *skb)
1814 struct net_device *dev = skb->dev;
1815 struct netdev_queue *txq;
1819 /* GSO will handle the following emulations directly. */
1820 if (netif_needs_gso(dev, skb))
1823 if (skb_has_frags(skb) &&
1824 !(dev->features & NETIF_F_FRAGLIST) &&
1825 __skb_linearize(skb))
1828 /* Fragmented skb is linearized if the device does not support SG,
1829 * or if at least one of the fragments is in highmem and the device
1830 * does not support DMA from it.
1832 if (skb_shinfo(skb)->nr_frags &&
1833 (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
1834 __skb_linearize(skb))
1837 /* If packet is not checksummed and device does not support
1838 * checksumming for this protocol, complete checksumming here.
1840 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1841 skb_set_transport_header(skb, skb->csum_start -
1843 if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
1848 /* Disable soft irqs for various locks below. Also
1849 * stops preemption for RCU.
1853 txq = dev_pick_tx(dev, skb);
1854 q = rcu_dereference(txq->qdisc);
1856 #ifdef CONFIG_NET_CLS_ACT
1857 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
1860 spinlock_t *root_lock = qdisc_lock(q);
1862 spin_lock(root_lock);
1864 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
1868 rc = qdisc_enqueue_root(skb, q);
1871 spin_unlock(root_lock);
1876 /* The device has no queue. Common case for software devices:
1877 loopback, all sorts of tunnels...
1879 Really, it is unlikely that netif_tx_lock protection is necessary
1880 here. (e.g. loopback and IP tunnels are clean, ignoring statistics
1881 counters.)
1882 However, it is possible that they rely on protection
1883 made by us here.
1885 Check this and shoot the lock. It is not prone to deadlocks.
1886 Or shoot the noqueue qdisc; it is even simpler 8)
1888 if (dev->flags & IFF_UP) {
1889 int cpu = smp_processor_id(); /* ok because BHs are off */
1891 if (txq->xmit_lock_owner != cpu) {
1893 HARD_TX_LOCK(dev, txq, cpu);
1895 if (!netif_tx_queue_stopped(txq)) {
1897 if (!dev_hard_start_xmit(skb, dev, txq)) {
1898 HARD_TX_UNLOCK(dev, txq);
1902 HARD_TX_UNLOCK(dev, txq);
1903 if (net_ratelimit())
1904 printk(KERN_CRIT "Virtual device %s asks to "
1905 "queue packet!\n", dev->name);
1907 /* Recursion is detected! It is possible,
1908 * unfortunately. */
1909 if (net_ratelimit())
1910 printk(KERN_CRIT "Dead loop on virtual device "
1911 "%s, fix it urgently!\n", dev->name);
1916 rcu_read_unlock_bh();
1922 rcu_read_unlock_bh();
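/*
 * Usage sketch (illustrative): the caller builds the buffer and sets
 * skb->dev before the call; whatever the return value, the skb has
 * been consumed and must not be touched afterwards.
 *
 *	skb->dev = dev;
 *	err = dev_queue_xmit(skb);
 *	if (err)
 *		... the skb is already gone; only account the error ...
 */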
1927 /*=======================================================================
1929 =======================================================================*/
1931 int netdev_max_backlog __read_mostly = 1000;
1932 int netdev_budget __read_mostly = 300;
1933 int weight_p __read_mostly = 64; /* old backlog weight */
1935 DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
1939 * netif_rx - post buffer to the network code
1940 * @skb: buffer to post
1942 * This function receives a packet from a device driver and queues it for
1943 * the upper (protocol) levels to process. It always succeeds. The buffer
1944 * may be dropped during processing for congestion control or by the
1945 * protocol layers.
1947 * return values:
1948 * NET_RX_SUCCESS (no congestion)
1949 * NET_RX_DROP (packet was dropped)
1953 int netif_rx(struct sk_buff *skb)
1955 struct softnet_data *queue;
1956 unsigned long flags;
1958 /* if netpoll wants it, pretend we never saw it */
1959 if (netpoll_rx(skb))
1962 if (!skb->tstamp.tv64)
1966 * The code is rearranged so that the path is the shortest
1967 * when the CPU is congested but still operating.
1969 local_irq_save(flags);
1970 queue = &__get_cpu_var(softnet_data);
1972 __get_cpu_var(netdev_rx_stat).total++;
1973 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
1974 if (queue->input_pkt_queue.qlen) {
1976 __skb_queue_tail(&queue->input_pkt_queue, skb);
1977 local_irq_restore(flags);
1978 return NET_RX_SUCCESS;
1981 napi_schedule(&queue->backlog);
1985 __get_cpu_var(netdev_rx_stat).dropped++;
1986 local_irq_restore(flags);
1992 int netif_rx_ni(struct sk_buff *skb)
1997 err = netif_rx(skb);
1998 if (local_softirq_pending())
2005 EXPORT_SYMBOL(netif_rx_ni);
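/*
 * Usage sketch (illustrative): a non-NAPI driver's interrupt handler
 * hands a received frame to the stack. eth_type_trans() fills in
 * skb->protocol and skb->pkt_type before the buffer is queued.
 *
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */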
2007 static void net_tx_action(struct softirq_action *h)
2009 struct softnet_data *sd = &__get_cpu_var(softnet_data);
2011 if (sd->completion_queue) {
2012 struct sk_buff *clist;
2014 local_irq_disable();
2015 clist = sd->completion_queue;
2016 sd->completion_queue = NULL;
2020 struct sk_buff *skb = clist;
2021 clist = clist->next;
2023 WARN_ON(atomic_read(&skb->users));
2028 if (sd->output_queue) {
2031 local_irq_disable();
2032 head = sd->output_queue;
2033 sd->output_queue = NULL;
2037 struct Qdisc *q = head;
2038 spinlock_t *root_lock;
2040 head = head->next_sched;
2042 root_lock = qdisc_lock(q);
2043 if (spin_trylock(root_lock)) {
2044 smp_mb__before_clear_bit();
2045 clear_bit(__QDISC_STATE_SCHED,
2048 spin_unlock(root_lock);
2050 if (!test_bit(__QDISC_STATE_DEACTIVATED,
2052 __netif_reschedule(q);
2054 smp_mb__before_clear_bit();
2055 clear_bit(__QDISC_STATE_SCHED,
2063 static inline int deliver_skb(struct sk_buff *skb,
2064 struct packet_type *pt_prev,
2065 struct net_device *orig_dev)
2067 atomic_inc(&skb->users);
2068 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2071 #if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
2072 /* These hooks defined here for ATM */
2074 struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
2075 unsigned char *addr);
2076 void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;
2079 * If the bridge module is loaded, call the bridging hook.
2080 * Returns NULL if the packet was consumed.
2082 struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
2083 struct sk_buff *skb) __read_mostly;
2084 static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
2085 struct packet_type **pt_prev, int *ret,
2086 struct net_device *orig_dev)
2088 struct net_bridge_port *port;
2090 if (skb->pkt_type == PACKET_LOOPBACK ||
2091 (port = rcu_dereference(skb->dev->br_port)) == NULL)
2095 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2099 return br_handle_frame_hook(port, skb);
2102 #define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
2105 #if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
2106 struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
2107 EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
2109 static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
2110 struct packet_type **pt_prev,
2112 struct net_device *orig_dev)
2114 if (skb->dev->macvlan_port == NULL)
2118 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2121 return macvlan_handle_frame_hook(skb);
2124 #define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
2127 #ifdef CONFIG_NET_CLS_ACT
2128 /* TODO: Maybe we should just force sch_ingress to be compiled in
2129 * when CONFIG_NET_CLS_ACT is? Otherwise we pay some useless instructions:
2130 * a compare and 2 extra stores right now if we don't have it on
2131 * but do have CONFIG_NET_CLS_ACT.
2132 * NOTE: This doesn't stop any functionality; if you don't have
2133 * the ingress scheduler, you just can't add policies on ingress.
2136 static int ing_filter(struct sk_buff *skb)
2138 struct net_device *dev = skb->dev;
2139 u32 ttl = G_TC_RTTL(skb->tc_verd);
2140 struct netdev_queue *rxq;
2141 int result = TC_ACT_OK;
2144 if (MAX_RED_LOOP < ttl++) {
2146 "Redir loop detected Dropping packet (%d->%d)\n",
2147 skb->iif, dev->ifindex);
2151 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
2152 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
2154 rxq = &dev->rx_queue;
2157 if (q != &noop_qdisc) {
2158 spin_lock(qdisc_lock(q));
2159 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
2160 result = qdisc_enqueue_root(skb, q);
2161 spin_unlock(qdisc_lock(q));
2167 static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2168 struct packet_type **pt_prev,
2169 int *ret, struct net_device *orig_dev)
2171 if (skb->dev->rx_queue.qdisc == &noop_qdisc)
2175 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2178 /* Huh? Why does turning on AF_PACKET affect this? */
2179 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
2182 switch (ing_filter(skb)) {
2196 * netif_nit_deliver - deliver received packets to network taps
2197 * @skb: buffer
2199 * This function is used to deliver incoming packets to network
2200 * taps. It should be used when the normal netif_receive_skb path
2201 * is bypassed, for example because of VLAN acceleration.
2203 void netif_nit_deliver(struct sk_buff *skb)
2205 struct packet_type *ptype;
2207 if (list_empty(&ptype_all))
2210 skb_reset_network_header(skb);
2211 skb_reset_transport_header(skb);
2212 skb->mac_len = skb->network_header - skb->mac_header;
2215 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2216 if (!ptype->dev || ptype->dev == skb->dev)
2217 deliver_skb(skb, ptype, skb->dev);
2223 * netif_receive_skb - process receive buffer from network
2224 * @skb: buffer to process
2226 * netif_receive_skb() is the main receive data processing function.
2227 * It always succeeds. The buffer may be dropped during processing
2228 * for congestion control or by the protocol layers.
2230 * This function may only be called from softirq context and interrupts
2231 * should be enabled.
2233 * Return values (usually ignored):
2234 * NET_RX_SUCCESS: no congestion
2235 * NET_RX_DROP: packet was dropped
2237 int netif_receive_skb(struct sk_buff *skb)
2239 struct packet_type *ptype, *pt_prev;
2240 struct net_device *orig_dev;
2241 struct net_device *null_or_orig;
2242 int ret = NET_RX_DROP;
2245 if (skb->vlan_tci && vlan_hwaccel_do_receive(skb))
2246 return NET_RX_SUCCESS;
2248 /* if we've gotten here through NAPI, check netpoll */
2249 if (netpoll_receive_skb(skb))
2252 if (!skb->tstamp.tv64)
2256 skb->iif = skb->dev->ifindex;
2258 null_or_orig = NULL;
2259 orig_dev = skb->dev;
2260 if (orig_dev->master) {
2261 if (skb_bond_should_drop(skb))
2262 null_or_orig = orig_dev; /* deliver only exact match */
2264 skb->dev = orig_dev->master;
2267 __get_cpu_var(netdev_rx_stat).total++;
2269 skb_reset_network_header(skb);
2270 skb_reset_transport_header(skb);
2271 skb->mac_len = skb->network_header - skb->mac_header;
2277 #ifdef CONFIG_NET_CLS_ACT
2278 if (skb->tc_verd & TC_NCLS) {
2279 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2284 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2285 if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2286 ptype->dev == orig_dev) {
2288 ret = deliver_skb(skb, pt_prev, orig_dev);
2293 #ifdef CONFIG_NET_CLS_ACT
2294 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2300 skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
2303 skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
2309 type = skb->protocol;
2310 list_for_each_entry_rcu(ptype,
2311 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
2312 if (ptype->type == type &&
2313 (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2314 ptype->dev == orig_dev)) {
2316 ret = deliver_skb(skb, pt_prev, orig_dev);
2322 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2325 /* Jamal, now you will not be able to escape explaining
2326 * to me how you were going to use this. :-)
2336 /* Network device is going away, flush any packets still pending */
2337 static void flush_backlog(void *arg)
2339 struct net_device *dev = arg;
2340 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2341 struct sk_buff *skb, *tmp;
2343 skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
2344 if (skb->dev == dev) {
2345 __skb_unlink(skb, &queue->input_pkt_queue);
2350 static int napi_gro_complete(struct sk_buff *skb)
2352 struct packet_type *ptype;
2353 __be16 type = skb->protocol;
2354 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
2357 if (NAPI_GRO_CB(skb)->count == 1) {
2358 skb_shinfo(skb)->gso_size = 0;
2363 list_for_each_entry_rcu(ptype, head, list) {
2364 if (ptype->type != type || ptype->dev || !ptype->gro_complete)
2367 err = ptype->gro_complete(skb);
2373 WARN_ON(&ptype->list == head);
2375 return NET_RX_SUCCESS;
2379 return netif_receive_skb(skb);
2382 void napi_gro_flush(struct napi_struct *napi)
2384 struct sk_buff *skb, *next;
2386 for (skb = napi->gro_list; skb; skb = next) {
2389 napi_gro_complete(skb);
2392 napi->gro_count = 0;
2393 napi->gro_list = NULL;
2395 EXPORT_SYMBOL(napi_gro_flush);
2397 int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2399 struct sk_buff **pp = NULL;
2400 struct packet_type *ptype;
2401 __be16 type = skb->protocol;
2402 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
2407 if (!(skb->dev->features & NETIF_F_GRO))
2410 if (skb_is_gso(skb) || skb_has_frags(skb))
2414 list_for_each_entry_rcu(ptype, head, list) {
2415 if (ptype->type != type || ptype->dev || !ptype->gro_receive)
2418 skb_set_network_header(skb, skb_gro_offset(skb));
2419 mac_len = skb->network_header - skb->mac_header;
2420 skb->mac_len = mac_len;
2421 NAPI_GRO_CB(skb)->same_flow = 0;
2422 NAPI_GRO_CB(skb)->flush = 0;
2423 NAPI_GRO_CB(skb)->free = 0;
2425 pp = ptype->gro_receive(&napi->gro_list, skb);
2430 if (&ptype->list == head)
2433 same_flow = NAPI_GRO_CB(skb)->same_flow;
2434 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
2437 struct sk_buff *nskb = *pp;
2441 napi_gro_complete(nskb);
2448 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
2452 NAPI_GRO_CB(skb)->count = 1;
2453 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
2454 skb->next = napi->gro_list;
2455 napi->gro_list = skb;
2459 if (skb_headlen(skb) < skb_gro_offset(skb)) {
2460 int grow = skb_gro_offset(skb) - skb_headlen(skb);
2462 BUG_ON(skb->end - skb->tail < grow);
2464 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
2467 skb->data_len -= grow;
2469 skb_shinfo(skb)->frags[0].page_offset += grow;
2470 skb_shinfo(skb)->frags[0].size -= grow;
2472 if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
2473 put_page(skb_shinfo(skb)->frags[0].page);
2474 memmove(skb_shinfo(skb)->frags,
2475 skb_shinfo(skb)->frags + 1,
2476 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
2487 EXPORT_SYMBOL(dev_gro_receive);
2489 static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2493 if (netpoll_rx_on(skb))
2496 for (p = napi->gro_list; p; p = p->next) {
2497 NAPI_GRO_CB(p)->same_flow = (p->dev == skb->dev)
2498 && !compare_ether_header(skb_mac_header(p),
2499 skb_gro_mac_header(skb));
2500 NAPI_GRO_CB(p)->flush = 0;
2503 return dev_gro_receive(napi, skb);
2506 int napi_skb_finish(int ret, struct sk_buff *skb)
2508 int err = NET_RX_SUCCESS;
2512 return netif_receive_skb(skb);
2518 case GRO_MERGED_FREE:
2525 EXPORT_SYMBOL(napi_skb_finish);
2527 void skb_gro_reset_offset(struct sk_buff *skb)
2529 NAPI_GRO_CB(skb)->data_offset = 0;
2530 NAPI_GRO_CB(skb)->frag0 = NULL;
2531 NAPI_GRO_CB(skb)->frag0_len = 0;
2533 if (skb->mac_header == skb->tail &&
2534 !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
2535 NAPI_GRO_CB(skb)->frag0 =
2536 page_address(skb_shinfo(skb)->frags[0].page) +
2537 skb_shinfo(skb)->frags[0].page_offset;
2538 NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
2541 EXPORT_SYMBOL(skb_gro_reset_offset);
2543 int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2545 skb_gro_reset_offset(skb);
2547 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
2549 EXPORT_SYMBOL(napi_gro_receive);
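/*
 * Illustrative sketch (not part of the original file): a NAPI poll
 * routine typically feeds received frames to GRO as below. foo_priv,
 * foo_rx_next_skb() and the netdev field are hypothetical driver names.
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
 *		struct sk_buff *skb;
 *		int work = 0;
 *
 *		while (work < budget && (skb = foo_rx_next_skb(priv))) {
 *			skb->protocol = eth_type_trans(skb, priv->netdev);
 *			napi_gro_receive(napi, skb);	// may merge into napi->gro_list
 *			work++;
 *		}
 *		if (work < budget)
 *			napi_complete(napi);		// also flushes the gro_list
 *		return work;
 *	}
 */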
2551 void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
2553 __skb_pull(skb, skb_headlen(skb));
2554 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
2558 EXPORT_SYMBOL(napi_reuse_skb);
2560 struct sk_buff *napi_get_frags(struct napi_struct *napi)
2562 struct net_device *dev = napi->dev;
2563 struct sk_buff *skb = napi->skb;
2566 skb = netdev_alloc_skb(dev, GRO_MAX_HEAD + NET_IP_ALIGN);
2570 skb_reserve(skb, NET_IP_ALIGN);
2578 EXPORT_SYMBOL(napi_get_frags);
2580 int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret)
2582 int err = NET_RX_SUCCESS;
2587 skb->protocol = eth_type_trans(skb, napi->dev);
2589 if (ret == GRO_NORMAL)
2590 return netif_receive_skb(skb);
2592 skb_gro_pull(skb, -ETH_HLEN);
2599 case GRO_MERGED_FREE:
2600 napi_reuse_skb(napi, skb);
2606 EXPORT_SYMBOL(napi_frags_finish);
2608 struct sk_buff *napi_frags_skb(struct napi_struct *napi)
2610 struct sk_buff *skb = napi->skb;
2617 skb_reset_mac_header(skb);
2618 skb_gro_reset_offset(skb);
2620 off = skb_gro_offset(skb);
2621 hlen = off + sizeof(*eth);
2622 eth = skb_gro_header_fast(skb, off);
2623 if (skb_gro_header_hard(skb, hlen)) {
2624 eth = skb_gro_header_slow(skb, hlen, off);
2625 if (unlikely(!eth)) {
2626 napi_reuse_skb(napi, skb);
2632 skb_gro_pull(skb, sizeof(*eth));
2635 * This works because the only protocols we care about don't require
2636 * special handling. We'll fix it up properly at the end.
2638 skb->protocol = eth->h_proto;
2643 EXPORT_SYMBOL(napi_frags_skb);
2645 int napi_gro_frags(struct napi_struct *napi)
2647 struct sk_buff *skb = napi_frags_skb(napi);
2652 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
2654 EXPORT_SYMBOL(napi_gro_frags);
2656 static int process_backlog(struct napi_struct *napi, int quota)
2659 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2660 unsigned long start_time = jiffies;
2662 napi->weight = weight_p;
2664 struct sk_buff *skb;
2666 local_irq_disable();
2667 skb = __skb_dequeue(&queue->input_pkt_queue);
2669 __napi_complete(napi);
2675 netif_receive_skb(skb);
2676 } while (++work < quota && jiffies == start_time);
2682 * __napi_schedule - schedule for receive
2683 * @n: entry to schedule
2685 * The entry's receive function will be scheduled to run
2687 void __napi_schedule(struct napi_struct *n)
2689 unsigned long flags;
2691 local_irq_save(flags);
2692 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
2693 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2694 local_irq_restore(flags);
2696 EXPORT_SYMBOL(__napi_schedule);
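/*
 * Illustrative sketch (not part of the original file): interrupt handlers
 * normally pair napi_schedule_prep() with __napi_schedule() (or use the
 * combined napi_schedule() wrapper) so only one CPU ever owns the poll.
 * foo_irq, foo_priv and foo_mask_rx_irq() are hypothetical names.
 *
 *	static irqreturn_t foo_irq(int irq, void *dev_id)
 *	{
 *		struct foo_priv *priv = dev_id;
 *
 *		if (napi_schedule_prep(&priv->napi)) {
 *			foo_mask_rx_irq(priv);		// quiesce RX interrupts
 *			__napi_schedule(&priv->napi);	// run the poll in softirq
 *		}
 *		return IRQ_HANDLED;
 *	}
 */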
2698 void __napi_complete(struct napi_struct *n)
2700 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
2701 BUG_ON(n->gro_list);
2703 list_del(&n->poll_list);
2704 smp_mb__before_clear_bit();
2705 clear_bit(NAPI_STATE_SCHED, &n->state);
2707 EXPORT_SYMBOL(__napi_complete);
2709 void napi_complete(struct napi_struct *n)
2711 unsigned long flags;
2714 * don't let napi dequeue from the cpu poll list
2715 * just in case it's running on a different cpu
2717 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
2721 local_irq_save(flags);
2723 local_irq_restore(flags);
2725 EXPORT_SYMBOL(napi_complete);
2727 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2728 int (*poll)(struct napi_struct *, int), int weight)
2730 INIT_LIST_HEAD(&napi->poll_list);
2731 napi->gro_count = 0;
2732 napi->gro_list = NULL;
2735 napi->weight = weight;
2736 list_add(&napi->dev_list, &dev->napi_list);
2738 #ifdef CONFIG_NETPOLL
2739 spin_lock_init(&napi->poll_lock);
2740 napi->poll_owner = -1;
2742 set_bit(NAPI_STATE_SCHED, &napi->state);
2744 EXPORT_SYMBOL(netif_napi_add);
2746 void netif_napi_del(struct napi_struct *napi)
2748 struct sk_buff *skb, *next;
2750 list_del_init(&napi->dev_list);
2751 napi_free_frags(napi);
2753 for (skb = napi->gro_list; skb; skb = next) {
2759 napi->gro_list = NULL;
2760 napi->gro_count = 0;
2762 EXPORT_SYMBOL(netif_napi_del);
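/*
 * Illustrative sketch (not part of the original file): the usual pairing
 * of netif_napi_add() at probe time with netif_napi_del() at teardown.
 * foo_device, foo_priv and foo_poll are hypothetical.
 *
 *	static int foo_probe(struct foo_device *fdev)
 *	{
 *		struct foo_priv *priv = netdev_priv(fdev->netdev);
 *
 *		netif_napi_add(fdev->netdev, &priv->napi, foo_poll, 64);
 *		return 0;
 *	}
 *
 *	static void foo_remove(struct foo_device *fdev)
 *	{
 *		struct foo_priv *priv = netdev_priv(fdev->netdev);
 *
 *		netif_napi_del(&priv->napi);	// frees any skbs left on gro_list
 *	}
 */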
2765 static void net_rx_action(struct softirq_action *h)
2767 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
2768 unsigned long time_limit = jiffies + 2;
2769 int budget = netdev_budget;
2772 local_irq_disable();
2774 while (!list_empty(list)) {
2775 struct napi_struct *n;
2778 /* If softirq window is exhausted then punt.
2779 * Allow this to run for 2 jiffies, which allows
2780 * an average latency of 1.5/HZ.
2782 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
2787 /* Even though interrupts have been re-enabled, this
2788 * access is safe because interrupts can only add new
2789 * entries to the tail of this list, and only ->poll()
2790 * calls can remove this head entry from the list.
2792 n = list_entry(list->next, struct napi_struct, poll_list);
2794 have = netpoll_poll_lock(n);
2798 /* This NAPI_STATE_SCHED test is for avoiding a race
2799 * with netpoll's poll_napi(). Only the entity which
2800 * obtains the lock and sees NAPI_STATE_SCHED set will
2801 * actually make the ->poll() call. Therefore we avoid
2802 * accidentally calling ->poll() when NAPI is not scheduled.
2805 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
2806 work = n->poll(n, weight);
2810 WARN_ON_ONCE(work > weight);
2814 local_irq_disable();
2816 /* Drivers must not modify the NAPI state if they
2817 * consume the entire weight. In such cases this code
2818 * still "owns" the NAPI instance and therefore can
2819 * move the instance around on the list at-will.
2821 if (unlikely(work == weight)) {
2822 if (unlikely(napi_disable_pending(n)))
2825 list_move_tail(&n->poll_list, list);
2828 netpoll_poll_unlock(have);
2833 #ifdef CONFIG_NET_DMA
2835 * There may not be any more sk_buffs coming right now, so push
2836 * any pending DMA copies to hardware
2838 dma_issue_pending_all();
2844 __get_cpu_var(netdev_rx_stat).time_squeeze++;
2845 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2849 static gifconf_func_t * gifconf_list [NPROTO];
2852 * register_gifconf - register a SIOCGIF handler
2853 * @family: Address family
2854 * @gifconf: Function handler
2856 * Register protocol dependent address dumping routines. The handler
2857 * that is passed must not be freed or reused until it has been replaced
2858 * by another handler.
2860 int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
2862 if (family >= NPROTO)
2864 gifconf_list[family] = gifconf;
2870 * Map an interface index to its name (SIOCGIFNAME)
2874 * We need this ioctl for efficient implementation of the
2875 * if_indextoname() function required by the IPv6 API. Without
2876 * it, we would have to search all the interfaces to find a match.
2880 static int dev_ifname(struct net *net, struct ifreq __user *arg)
2882 struct net_device *dev;
2886 * Fetch the caller's info block.
2889 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2892 read_lock(&dev_base_lock);
2893 dev = __dev_get_by_index(net, ifr.ifr_ifindex);
2895 read_unlock(&dev_base_lock);
2899 strcpy(ifr.ifr_name, dev->name);
2900 read_unlock(&dev_base_lock);
2902 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
2908 * Perform a SIOCGIFCONF call. This structure will change
2909 * size eventually, and there is nothing I can do about it.
2910 * Thus we will need a 'compatibility mode'.
2913 static int dev_ifconf(struct net *net, char __user *arg)
2916 struct net_device *dev;
2923 * Fetch the caller's info block.
2926 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
2933 * Loop over the interfaces, and write an info block for each.
2937 for_each_netdev(net, dev) {
2938 for (i = 0; i < NPROTO; i++) {
2939 if (gifconf_list[i]) {
2942 done = gifconf_list[i](dev, NULL, 0);
2944 done = gifconf_list[i](dev, pos + total,
2954 * All done. Write the updated control block back to the caller.
2956 ifc.ifc_len = total;
2959 * Both BSD and Solaris return 0 here, so we do too.
2961 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
2964 #ifdef CONFIG_PROC_FS
2966 * This is invoked by the /proc filesystem handler to display a device in detail.
2969 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
2970 __acquires(dev_base_lock)
2972 struct net *net = seq_file_net(seq);
2974 struct net_device *dev;
2976 read_lock(&dev_base_lock);
2978 return SEQ_START_TOKEN;
2981 for_each_netdev(net, dev)
2988 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2990 struct net *net = seq_file_net(seq);
2992 return v == SEQ_START_TOKEN ?
2993 first_net_device(net) : next_net_device((struct net_device *)v);
2996 void dev_seq_stop(struct seq_file *seq, void *v)
2997 __releases(dev_base_lock)
2999 read_unlock(&dev_base_lock);
3002 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
3004 const struct net_device_stats *stats = dev_get_stats(dev);
3006 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
3007 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
3008 dev->name, stats->rx_bytes, stats->rx_packets,
3010 stats->rx_dropped + stats->rx_missed_errors,
3011 stats->rx_fifo_errors,
3012 stats->rx_length_errors + stats->rx_over_errors +
3013 stats->rx_crc_errors + stats->rx_frame_errors,
3014 stats->rx_compressed, stats->multicast,
3015 stats->tx_bytes, stats->tx_packets,
3016 stats->tx_errors, stats->tx_dropped,
3017 stats->tx_fifo_errors, stats->collisions,
3018 stats->tx_carrier_errors +
3019 stats->tx_aborted_errors +
3020 stats->tx_window_errors +
3021 stats->tx_heartbeat_errors,
3022 stats->tx_compressed);
3026 * Called from the PROCfs module. This now uses the new arbitrary sized
3027 * /proc/net interface to create /proc/net/dev
3029 static int dev_seq_show(struct seq_file *seq, void *v)
3031 if (v == SEQ_START_TOKEN)
3032 seq_puts(seq, "Inter-| Receive "
3034 " face |bytes packets errs drop fifo frame "
3035 "compressed multicast|bytes packets errs "
3036 "drop fifo colls carrier compressed\n");
3038 dev_seq_printf_stats(seq, v);
3042 static struct netif_rx_stats *softnet_get_online(loff_t *pos)
3044 struct netif_rx_stats *rc = NULL;
3046 while (*pos < nr_cpu_ids)
3047 if (cpu_online(*pos)) {
3048 rc = &per_cpu(netdev_rx_stat, *pos);
3055 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
3057 return softnet_get_online(pos);
3060 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3063 return softnet_get_online(pos);
3066 static void softnet_seq_stop(struct seq_file *seq, void *v)
3070 static int softnet_seq_show(struct seq_file *seq, void *v)
3072 struct netif_rx_stats *s = v;
3074 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
3075 s->total, s->dropped, s->time_squeeze, 0,
3076 0, 0, 0, 0, /* was fastroute */
3081 static const struct seq_operations dev_seq_ops = {
3082 .start = dev_seq_start,
3083 .next = dev_seq_next,
3084 .stop = dev_seq_stop,
3085 .show = dev_seq_show,
3088 static int dev_seq_open(struct inode *inode, struct file *file)
3090 return seq_open_net(inode, file, &dev_seq_ops,
3091 sizeof(struct seq_net_private));
3094 static const struct file_operations dev_seq_fops = {
3095 .owner = THIS_MODULE,
3096 .open = dev_seq_open,
3098 .llseek = seq_lseek,
3099 .release = seq_release_net,
3102 static const struct seq_operations softnet_seq_ops = {
3103 .start = softnet_seq_start,
3104 .next = softnet_seq_next,
3105 .stop = softnet_seq_stop,
3106 .show = softnet_seq_show,
3109 static int softnet_seq_open(struct inode *inode, struct file *file)
3111 return seq_open(file, &softnet_seq_ops);
3114 static const struct file_operations softnet_seq_fops = {
3115 .owner = THIS_MODULE,
3116 .open = softnet_seq_open,
3118 .llseek = seq_lseek,
3119 .release = seq_release,
3122 static void *ptype_get_idx(loff_t pos)
3124 struct packet_type *pt = NULL;
3128 list_for_each_entry_rcu(pt, &ptype_all, list) {
3134 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
3135 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
3144 static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
3148 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
3151 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3153 struct packet_type *pt;
3154 struct list_head *nxt;
3158 if (v == SEQ_START_TOKEN)
3159 return ptype_get_idx(0);
3162 nxt = pt->list.next;
3163 if (pt->type == htons(ETH_P_ALL)) {
3164 if (nxt != &ptype_all)
3167 nxt = ptype_base[0].next;
3169 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
3171 while (nxt == &ptype_base[hash]) {
3172 if (++hash >= PTYPE_HASH_SIZE)
3174 nxt = ptype_base[hash].next;
3177 return list_entry(nxt, struct packet_type, list);
3180 static void ptype_seq_stop(struct seq_file *seq, void *v)
3186 static int ptype_seq_show(struct seq_file *seq, void *v)
3188 struct packet_type *pt = v;
3190 if (v == SEQ_START_TOKEN)
3191 seq_puts(seq, "Type Device Function\n");
3192 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
3193 if (pt->type == htons(ETH_P_ALL))
3194 seq_puts(seq, "ALL ");
3196 seq_printf(seq, "%04x", ntohs(pt->type));
3198 seq_printf(seq, " %-8s %pF\n",
3199 pt->dev ? pt->dev->name : "", pt->func);
3205 static const struct seq_operations ptype_seq_ops = {
3206 .start = ptype_seq_start,
3207 .next = ptype_seq_next,
3208 .stop = ptype_seq_stop,
3209 .show = ptype_seq_show,
3212 static int ptype_seq_open(struct inode *inode, struct file *file)
3214 return seq_open_net(inode, file, &ptype_seq_ops,
3215 sizeof(struct seq_net_private));
3218 static const struct file_operations ptype_seq_fops = {
3219 .owner = THIS_MODULE,
3220 .open = ptype_seq_open,
3222 .llseek = seq_lseek,
3223 .release = seq_release_net,
3227 static int __net_init dev_proc_net_init(struct net *net)
3231 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
3233 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
3235 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
3238 if (wext_proc_init(net))
3244 proc_net_remove(net, "ptype");
3246 proc_net_remove(net, "softnet_stat");
3248 proc_net_remove(net, "dev");
3252 static void __net_exit dev_proc_net_exit(struct net *net)
3254 wext_proc_exit(net);
3256 proc_net_remove(net, "ptype");
3257 proc_net_remove(net, "softnet_stat");
3258 proc_net_remove(net, "dev");
3261 static struct pernet_operations __net_initdata dev_proc_ops = {
3262 .init = dev_proc_net_init,
3263 .exit = dev_proc_net_exit,
3266 static int __init dev_proc_init(void)
3268 return register_pernet_subsys(&dev_proc_ops);
3271 #define dev_proc_init() 0
3272 #endif /* CONFIG_PROC_FS */
3276 * netdev_set_master - set up master/slave pair
3277 * @slave: slave device
3278 * @master: new master device
3280 * Changes the master device of the slave. Pass %NULL to break the
3281 * bonding. The caller must hold the RTNL semaphore. On a failure
3282 * a negative errno code is returned. On success the reference counts
3283 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
3284 * function returns zero.
3286 int netdev_set_master(struct net_device *slave, struct net_device *master)
3288 struct net_device *old = slave->master;
3298 slave->master = master;
3306 slave->flags |= IFF_SLAVE;
3308 slave->flags &= ~IFF_SLAVE;
3310 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
3314 static void dev_change_rx_flags(struct net_device *dev, int flags)
3316 const struct net_device_ops *ops = dev->netdev_ops;
3318 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
3319 ops->ndo_change_rx_flags(dev, flags);
3322 static int __dev_set_promiscuity(struct net_device *dev, int inc)
3324 unsigned short old_flags = dev->flags;
3330 dev->flags |= IFF_PROMISC;
3331 dev->promiscuity += inc;
3332 if (dev->promiscuity == 0) {
3335 * If inc causes overflow, untouch promisc and return error.
3338 dev->flags &= ~IFF_PROMISC;
3340 dev->promiscuity -= inc;
3341 printk(KERN_WARNING "%s: promiscuity counter overflowed, "
3342 "could not update promiscuity; the promiscuity feature "
3343 "of the device may be broken.\n", dev->name);
3347 if (dev->flags != old_flags) {
3348 printk(KERN_INFO "device %s %s promiscuous mode\n",
3349 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
3351 if (audit_enabled) {
3352 current_uid_gid(&uid, &gid);
3353 audit_log(current->audit_context, GFP_ATOMIC,
3354 AUDIT_ANOM_PROMISCUOUS,
3355 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
3356 dev->name, (dev->flags & IFF_PROMISC),
3357 (old_flags & IFF_PROMISC),
3358 audit_get_loginuid(current),
3360 audit_get_sessionid(current));
3363 dev_change_rx_flags(dev, IFF_PROMISC);
3369 * dev_set_promiscuity - update promiscuity count on a device
3373 * Add or remove promiscuity from a device. While the count in the device
3374 * remains above zero the interface remains promiscuous. Once it hits zero
3375 * the device reverts to normal filtering operation. A negative inc
3376 * value is used to drop promiscuity on the device.
3377 * Return 0 if successful or a negative errno code on error.
3379 int dev_set_promiscuity(struct net_device *dev, int inc)
3381 unsigned short old_flags = dev->flags;
3384 err = __dev_set_promiscuity(dev, inc);
3387 if (dev->flags != old_flags)
3388 dev_set_rx_mode(dev);
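/*
 * Illustrative sketch (not part of the original file): a capture-style
 * user takes one promiscuity reference while listening and drops it on
 * shutdown; the counter makes nested users safe. Callers typically hold
 * rtnl_lock(); the helper names below are hypothetical.
 *
 *	static int foo_start_capture(struct net_device *dev)
 *	{
 *		return dev_set_promiscuity(dev, 1);	// take one reference
 *	}
 *
 *	static void foo_stop_capture(struct net_device *dev)
 *	{
 *		dev_set_promiscuity(dev, -1);		// and drop it again
 *	}
 */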
3393 * dev_set_allmulti - update allmulti count on a device
3397 * Add or remove reception of all multicast frames on a device. While the
3398 * count in the device remains above zero the interface keeps receiving
3399 * all multicast frames. Once it hits zero the device reverts to normal
3400 * filtering operation. A negative @inc value is used to drop the counter
3401 * when releasing a resource needing all multicasts.
3402 * Return 0 if successful or a negative errno code on error.
3405 int dev_set_allmulti(struct net_device *dev, int inc)
3407 unsigned short old_flags = dev->flags;
3411 dev->flags |= IFF_ALLMULTI;
3412 dev->allmulti += inc;
3413 if (dev->allmulti == 0) {
3416 * If inc causes overflow, untouch allmulti and return error.
3419 dev->flags &= ~IFF_ALLMULTI;
3421 dev->allmulti -= inc;
3422 printk(KERN_WARNING "%s: allmulti counter overflowed, "
3423 "could not update allmulti; the allmulti feature of "
3424 "the device may be broken.\n", dev->name);
3428 if (dev->flags ^ old_flags) {
3429 dev_change_rx_flags(dev, IFF_ALLMULTI);
3430 dev_set_rx_mode(dev);
3436 * Upload unicast and multicast address lists to device and
3437 * configure RX filtering. When the device doesn't support unicast
3438 * filtering it is put in promiscuous mode while unicast addresses are present.
3441 void __dev_set_rx_mode(struct net_device *dev)
3443 const struct net_device_ops *ops = dev->netdev_ops;
3445 /* dev_open will call this function so the list will stay sane. */
3446 if (!(dev->flags&IFF_UP))
3449 if (!netif_device_present(dev))
3452 if (ops->ndo_set_rx_mode)
3453 ops->ndo_set_rx_mode(dev);
3455 /* Unicast addresses changes may only happen under the rtnl,
3456 * therefore calling __dev_set_promiscuity here is safe.
3458 if (dev->uc_count > 0 && !dev->uc_promisc) {
3459 __dev_set_promiscuity(dev, 1);
3460 dev->uc_promisc = 1;
3461 } else if (dev->uc_count == 0 && dev->uc_promisc) {
3462 __dev_set_promiscuity(dev, -1);
3463 dev->uc_promisc = 0;
3466 if (ops->ndo_set_multicast_list)
3467 ops->ndo_set_multicast_list(dev);
3471 void dev_set_rx_mode(struct net_device *dev)
3473 netif_addr_lock_bh(dev);
3474 __dev_set_rx_mode(dev);
3475 netif_addr_unlock_bh(dev);
3478 /* hw address list handling functions */
3480 static int __hw_addr_add(struct list_head *list, int *delta,
3481 unsigned char *addr, int addr_len,
3482 unsigned char addr_type)
3484 struct netdev_hw_addr *ha;
3487 if (addr_len > MAX_ADDR_LEN)
3490 list_for_each_entry(ha, list, list) {
3491 if (!memcmp(ha->addr, addr, addr_len) &&
3492 ha->type == addr_type) {
3499 alloc_size = sizeof(*ha);
3500 if (alloc_size < L1_CACHE_BYTES)
3501 alloc_size = L1_CACHE_BYTES;
3502 ha = kmalloc(alloc_size, GFP_ATOMIC);
3505 memcpy(ha->addr, addr, addr_len);
3506 ha->type = addr_type;
3509 list_add_tail_rcu(&ha->list, list);
3515 static void ha_rcu_free(struct rcu_head *head)
3517 struct netdev_hw_addr *ha;
3519 ha = container_of(head, struct netdev_hw_addr, rcu_head);
3523 static int __hw_addr_del(struct list_head *list, int *delta,
3524 unsigned char *addr, int addr_len,
3525 unsigned char addr_type)
3527 struct netdev_hw_addr *ha;
3529 list_for_each_entry(ha, list, list) {
3530 if (!memcmp(ha->addr, addr, addr_len) &&
3531 (ha->type == addr_type || !addr_type)) {
3534 list_del_rcu(&ha->list);
3535 call_rcu(&ha->rcu_head, ha_rcu_free);
3544 static int __hw_addr_add_multiple(struct list_head *to_list, int *to_delta,
3545 struct list_head *from_list, int addr_len,
3546 unsigned char addr_type)
3549 struct netdev_hw_addr *ha, *ha2;
3552 list_for_each_entry(ha, from_list, list) {
3553 type = addr_type ? addr_type : ha->type;
3554 err = __hw_addr_add(to_list, to_delta, ha->addr,
3562 list_for_each_entry(ha2, from_list, list) {
3565 type = addr_type ? addr_type : ha2->type;
3566 __hw_addr_del(to_list, to_delta, ha2->addr,
3572 static void __hw_addr_del_multiple(struct list_head *to_list, int *to_delta,
3573 struct list_head *from_list, int addr_len,
3574 unsigned char addr_type)
3576 struct netdev_hw_addr *ha;
3579 list_for_each_entry(ha, from_list, list) {
3580 type = addr_type ? addr_type : ha->type;
3581 __hw_addr_del(to_list, to_delta, ha->addr,
3582 addr_len, addr_type);
3586 static int __hw_addr_sync(struct list_head *to_list, int *to_delta,
3587 struct list_head *from_list, int *from_delta,
3591 struct netdev_hw_addr *ha, *tmp;
3593 list_for_each_entry_safe(ha, tmp, from_list, list) {
3595 err = __hw_addr_add(to_list, to_delta, ha->addr,
3596 addr_len, ha->type);
3601 } else if (ha->refcount == 1) {
3602 __hw_addr_del(to_list, to_delta, ha->addr,
3603 addr_len, ha->type);
3604 __hw_addr_del(from_list, from_delta, ha->addr,
3605 addr_len, ha->type);
3611 static void __hw_addr_unsync(struct list_head *to_list, int *to_delta,
3612 struct list_head *from_list, int *from_delta,
3615 struct netdev_hw_addr *ha, *tmp;
3617 list_for_each_entry_safe(ha, tmp, from_list, list) {
3619 __hw_addr_del(to_list, to_delta, ha->addr,
3620 addr_len, ha->type);
3622 __hw_addr_del(from_list, from_delta, ha->addr,
3623 addr_len, ha->type);
3629 static void __hw_addr_flush(struct list_head *list)
3631 struct netdev_hw_addr *ha, *tmp;
3633 list_for_each_entry_safe(ha, tmp, list, list) {
3634 list_del_rcu(&ha->list);
3635 call_rcu(&ha->rcu_head, ha_rcu_free);
3639 /* Device address handling functions */
3641 static void dev_addr_flush(struct net_device *dev)
3643 /* rtnl_mutex must be held here */
3645 __hw_addr_flush(&dev->dev_addr_list);
3646 dev->dev_addr = NULL;
3649 static int dev_addr_init(struct net_device *dev)
3651 unsigned char addr[MAX_ADDR_LEN];
3652 struct netdev_hw_addr *ha;
3655 /* rtnl_mutex must be held here */
3657 INIT_LIST_HEAD(&dev->dev_addr_list);
3658 memset(addr, 0, sizeof(addr));
3659 err = __hw_addr_add(&dev->dev_addr_list, NULL, addr, sizeof(addr),
3660 NETDEV_HW_ADDR_T_LAN);
3663 * Get the first (previously created) address from the list
3664 * and set dev_addr pointer to this location.
3666 ha = list_first_entry(&dev->dev_addr_list,
3667 struct netdev_hw_addr, list);
3668 dev->dev_addr = ha->addr;
3674 * dev_addr_add - Add a device address
3676 * @addr: address to add
3677 * @addr_type: address type
3679 * Add a device address to the device or increase the reference count if
3680 * it already exists.
3682 * The caller must hold the rtnl_mutex.
3684 int dev_addr_add(struct net_device *dev, unsigned char *addr,
3685 unsigned char addr_type)
3691 err = __hw_addr_add(&dev->dev_addr_list, NULL, addr, dev->addr_len,
3694 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3697 EXPORT_SYMBOL(dev_addr_add);
3700 * dev_addr_del - Release a device address.
3702 * @addr: address to delete
3703 * @addr_type: address type
3705 * Release reference to a device address and remove it from the device
3706 * if the reference count drops to zero.
3708 * The caller must hold the rtnl_mutex.
3710 int dev_addr_del(struct net_device *dev, unsigned char *addr,
3711 unsigned char addr_type)
3714 struct netdev_hw_addr *ha;
3719 * We cannot remove the first address from the list because
3720 * dev->dev_addr points to that.
3722 ha = list_first_entry(&dev->dev_addr_list, struct netdev_hw_addr, list);
3723 if (ha->addr == dev->dev_addr && ha->refcount == 1)
3726 err = __hw_addr_del(&dev->dev_addr_list, NULL, addr, dev->addr_len,
3729 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3732 EXPORT_SYMBOL(dev_addr_del);
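/*
 * Illustrative sketch (not part of the original file): dev_addr_add() and
 * dev_addr_del() are refcounted and must balance, and both run under
 * rtnl_lock(). "new_addr" is a hypothetical locally administered MAC.
 *
 *	unsigned char new_addr[ETH_ALEN] = {0x02, 0, 0, 0, 0, 0x01};
 *	int err;
 *
 *	rtnl_lock();
 *	err = dev_addr_add(dev, new_addr, NETDEV_HW_ADDR_T_LAN);
 *	if (!err)
 *		err = dev_addr_del(dev, new_addr, NETDEV_HW_ADDR_T_LAN);
 *	rtnl_unlock();
 */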
3735 * dev_addr_add_multiple - Add device addresses from another device
3736 * @to_dev: device to which addresses will be added
3737 * @from_dev: device from which addresses will be added
3738 * @addr_type: address type - 0 means type will be used from from_dev
3740 * Add the device addresses of one device to another.
3742 * The caller must hold the rtnl_mutex.
3744 int dev_addr_add_multiple(struct net_device *to_dev,
3745 struct net_device *from_dev,
3746 unsigned char addr_type)
3752 if (from_dev->addr_len != to_dev->addr_len)
3754 err = __hw_addr_add_multiple(&to_dev->dev_addr_list, NULL,
3755 &from_dev->dev_addr_list,
3756 to_dev->addr_len, addr_type);
3758 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3761 EXPORT_SYMBOL(dev_addr_add_multiple);
3764 * dev_addr_del_multiple - Delete device addresses by another device
3765 * @to_dev: device where the addresses will be deleted
3766 * @from_dev: device by which addresses the addresses will be deleted
3767 * @addr_type: address type - 0 means type will be used from from_dev
3769 * Deletes the addresses in @to_dev that are listed in @from_dev.
3771 * The caller must hold the rtnl_mutex.
3773 int dev_addr_del_multiple(struct net_device *to_dev,
3774 struct net_device *from_dev,
3775 unsigned char addr_type)
3779 if (from_dev->addr_len != to_dev->addr_len)
3781 __hw_addr_del_multiple(&to_dev->dev_addr_list, NULL,
3782 &from_dev->dev_addr_list,
3783 to_dev->addr_len, addr_type);
3784 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3787 EXPORT_SYMBOL(dev_addr_del_multiple);
3789 /* unicast and multicast address handling functions */
3791 int __dev_addr_delete(struct dev_addr_list **list, int *count,
3792 void *addr, int alen, int glbl)
3794 struct dev_addr_list *da;
3796 for (; (da = *list) != NULL; list = &da->next) {
3797 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3798 alen == da->da_addrlen) {
3800 int old_glbl = da->da_gusers;
3817 int __dev_addr_add(struct dev_addr_list **list, int *count,
3818 void *addr, int alen, int glbl)
3820 struct dev_addr_list *da;
3822 for (da = *list; da != NULL; da = da->next) {
3823 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3824 da->da_addrlen == alen) {
3826 int old_glbl = da->da_gusers;
3836 da = kzalloc(sizeof(*da), GFP_ATOMIC);
3839 memcpy(da->da_addr, addr, alen);
3840 da->da_addrlen = alen;
3842 da->da_gusers = glbl ? 1 : 0;
3850 * dev_unicast_delete - Release secondary unicast address.
3852 * @addr: address to delete
3854 * Release reference to a secondary unicast address and remove it
3855 * from the device if the reference count drops to zero.
3857 * The caller must hold the rtnl_mutex.
3859 int dev_unicast_delete(struct net_device *dev, void *addr)
3865 err = __hw_addr_del(&dev->uc_list, &dev->uc_count, addr,
3866 dev->addr_len, NETDEV_HW_ADDR_T_UNICAST);
3868 __dev_set_rx_mode(dev);
3871 EXPORT_SYMBOL(dev_unicast_delete);
3874 * dev_unicast_add - add a secondary unicast address
3876 * @addr: address to add
3878 * Add a secondary unicast address to the device or increase
3879 * the reference count if it already exists.
3881 * The caller must hold the rtnl_mutex.
3883 int dev_unicast_add(struct net_device *dev, void *addr)
3889 err = __hw_addr_add(&dev->uc_list, &dev->uc_count, addr,
3890 dev->addr_len, NETDEV_HW_ADDR_T_UNICAST);
3892 __dev_set_rx_mode(dev);
3895 EXPORT_SYMBOL(dev_unicast_add);
3897 int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
3898 struct dev_addr_list **from, int *from_count)
3900 struct dev_addr_list *da, *next;
3904 while (da != NULL) {
3906 if (!da->da_synced) {
3907 err = __dev_addr_add(to, to_count,
3908 da->da_addr, da->da_addrlen, 0);
3913 } else if (da->da_users == 1) {
3914 __dev_addr_delete(to, to_count,
3915 da->da_addr, da->da_addrlen, 0);
3916 __dev_addr_delete(from, from_count,
3917 da->da_addr, da->da_addrlen, 0);
3924 void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
3925 struct dev_addr_list **from, int *from_count)
3927 struct dev_addr_list *da, *next;
3930 while (da != NULL) {
3932 if (da->da_synced) {
3933 __dev_addr_delete(to, to_count,
3934 da->da_addr, da->da_addrlen, 0);
3936 __dev_addr_delete(from, from_count,
3937 da->da_addr, da->da_addrlen, 0);
3944 * dev_unicast_sync - Synchronize device's unicast list to another device
3945 * @to: destination device
3946 * @from: source device
3948 * Add newly added addresses to the destination device and release
3949 * addresses that have no users left.
3951 * This function is intended to be called from the dev->set_rx_mode
3952 * function of layered software devices.
3954 int dev_unicast_sync(struct net_device *to, struct net_device *from)
3960 if (to->addr_len != from->addr_len)
3963 err = __hw_addr_sync(&to->uc_list, &to->uc_count,
3964 &from->uc_list, &from->uc_count, to->addr_len);
3966 __dev_set_rx_mode(to);
3969 EXPORT_SYMBOL(dev_unicast_sync);
3972 * dev_unicast_unsync - Remove synchronized addresses from the destination device
3973 * @to: destination device
3974 * @from: source device
3976 * Remove all addresses that were added to the destination device by
3977 * dev_unicast_sync(). This function is intended to be called from the
3978 * dev->stop function of layered software devices.
3980 void dev_unicast_unsync(struct net_device *to, struct net_device *from)
3984 if (to->addr_len != from->addr_len)
3987 __hw_addr_unsync(&to->uc_list, &to->uc_count,
3988 &from->uc_list, &from->uc_count, to->addr_len);
3989 __dev_set_rx_mode(to);
3991 EXPORT_SYMBOL(dev_unicast_unsync);
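/*
 * Illustrative sketch (not part of the original file): a layered device
 * (e.g. a VLAN) propagates its unicast list to the lower device from its
 * rx_mode handler and undoes that on stop. vdev_priv and the lowerdev
 * field are hypothetical.
 *
 *	static void vdev_set_rx_mode(struct net_device *dev)
 *	{
 *		struct vdev_priv *priv = netdev_priv(dev);
 *
 *		dev_unicast_sync(priv->lowerdev, dev);
 *	}
 *
 *	static int vdev_stop(struct net_device *dev)
 *	{
 *		struct vdev_priv *priv = netdev_priv(dev);
 *
 *		dev_unicast_unsync(priv->lowerdev, dev);
 *		return 0;
 *	}
 */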
3993 static void dev_unicast_flush(struct net_device *dev)
3995 /* rtnl_mutex must be held here */
3997 __hw_addr_flush(&dev->uc_list);
4001 static void dev_unicast_init(struct net_device *dev)
4003 /* rtnl_mutex must be held here */
4005 INIT_LIST_HEAD(&dev->uc_list);
4009 static void __dev_addr_discard(struct dev_addr_list **list)
4011 struct dev_addr_list *tmp;
4013 while (*list != NULL) {
4016 if (tmp->da_users > tmp->da_gusers)
4017 printk("__dev_addr_discard: address leakage! "
4018 "da_users=%d\n", tmp->da_users);
4023 static void dev_addr_discard(struct net_device *dev)
4025 netif_addr_lock_bh(dev);
4027 __dev_addr_discard(&dev->mc_list);
4030 netif_addr_unlock_bh(dev);
4034 * dev_get_flags - get flags reported to userspace
4037 * Get the combination of flag bits exported through APIs to userspace.
4039 unsigned dev_get_flags(const struct net_device *dev)
4043 flags = (dev->flags & ~(IFF_PROMISC |
4048 (dev->gflags & (IFF_PROMISC |
4051 if (netif_running(dev)) {
4052 if (netif_oper_up(dev))
4053 flags |= IFF_RUNNING;
4054 if (netif_carrier_ok(dev))
4055 flags |= IFF_LOWER_UP;
4056 if (netif_dormant(dev))
4057 flags |= IFF_DORMANT;
4064 * dev_change_flags - change device settings
4066 * @flags: device state flags
4068 * Change settings on a device based on state flags. The flags are
4069 * in the userspace exported format.
4071 int dev_change_flags(struct net_device *dev, unsigned flags)
4074 int old_flags = dev->flags;
4079 * Set the flags on our device.
4082 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4083 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4085 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4089 * Load in the correct multicast list now that the flags have changed.
4092 if ((old_flags ^ flags) & IFF_MULTICAST)
4093 dev_change_rx_flags(dev, IFF_MULTICAST);
4095 dev_set_rx_mode(dev);
4098 * Have we downed the interface? We handle IFF_UP ourselves
4099 * according to user attempts to set it, rather than blindly setting it.
4104 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
4105 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
4108 dev_set_rx_mode(dev);
4111 if (dev->flags & IFF_UP &&
4112 ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
4114 call_netdevice_notifiers(NETDEV_CHANGE, dev);
4116 if ((flags ^ dev->gflags) & IFF_PROMISC) {
4117 int inc = (flags & IFF_PROMISC) ? +1 : -1;
4118 dev->gflags ^= IFF_PROMISC;
4119 dev_set_promiscuity(dev, inc);
4122 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4123 is important. Some (broken) drivers set IFF_PROMISC when
4124 IFF_ALLMULTI is requested, without asking us and without reporting it.
4126 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
4127 int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
4128 dev->gflags ^= IFF_ALLMULTI;
4129 dev_set_allmulti(dev, inc);
4132 /* Exclude state transition flags, already notified */
4133 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
4135 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
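/*
 * Illustrative sketch (not part of the original file): bringing an
 * interface administratively up from kernel code, using the userspace
 * flag format this function expects; rtnl must be held. The helper name
 * is hypothetical.
 *
 *	static int foo_bring_up(struct net_device *dev)
 *	{
 *		int err;
 *
 *		rtnl_lock();
 *		err = dev_change_flags(dev, dev->flags | IFF_UP);
 *		rtnl_unlock();
 *		return err;
 *	}
 */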
4141 * dev_set_mtu - Change maximum transfer unit
4143 * @new_mtu: new transfer unit
4145 * Change the maximum transfer size of the network device.
4147 int dev_set_mtu(struct net_device *dev, int new_mtu)
4149 const struct net_device_ops *ops = dev->netdev_ops;
4152 if (new_mtu == dev->mtu)
4155 /* MTU must be positive. */
4159 if (!netif_device_present(dev))
4163 if (ops->ndo_change_mtu)
4164 err = ops->ndo_change_mtu(dev, new_mtu);
4168 if (!err && dev->flags & IFF_UP)
4169 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
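/*
 * Illustrative sketch (not part of the original file): requesting jumbo
 * frames; the driver's ndo_change_mtu() may refuse values outside the
 * hardware range, so the return value matters. The helper name is
 * hypothetical.
 *
 *	static int foo_enable_jumbo(struct net_device *dev)
 *	{
 *		int err;
 *
 *		rtnl_lock();
 *		err = dev_set_mtu(dev, 9000);
 *		rtnl_unlock();
 *		return err;
 *	}
 */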
4174 * dev_set_mac_address - Change Media Access Control Address
4178 * Change the hardware (MAC) address of the device
4180 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4182 const struct net_device_ops *ops = dev->netdev_ops;
4185 if (!ops->ndo_set_mac_address)
4187 if (sa->sa_family != dev->type)
4189 if (!netif_device_present(dev))
4191 err = ops->ndo_set_mac_address(dev, sa);
4193 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
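/*
 * Illustrative sketch (not part of the original file): the new address is
 * carried in a struct sockaddr whose family must equal dev->type
 * (ARPHRD_ETHER for Ethernet), and the caller holds rtnl. "new_mac" is a
 * hypothetical ETH_ALEN-byte buffer.
 *
 *	static int foo_set_mac(struct net_device *dev, const u8 *new_mac)
 *	{
 *		struct sockaddr sa;
 *
 *		sa.sa_family = dev->type;
 *		memcpy(sa.sa_data, new_mac, dev->addr_len);
 *		return dev_set_mac_address(dev, &sa);
 *	}
 */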
4198 * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
4200 static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
4203 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
4209 case SIOCGIFFLAGS: /* Get interface flags */
4210 ifr->ifr_flags = dev_get_flags(dev);
4213 case SIOCGIFMETRIC: /* Get the metric on the interface
4214 (currently unused) */
4215 ifr->ifr_metric = 0;
4218 case SIOCGIFMTU: /* Get the MTU of a device */
4219 ifr->ifr_mtu = dev->mtu;
4224 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4226 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4227 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4228 ifr->ifr_hwaddr.sa_family = dev->type;
4236 ifr->ifr_map.mem_start = dev->mem_start;
4237 ifr->ifr_map.mem_end = dev->mem_end;
4238 ifr->ifr_map.base_addr = dev->base_addr;
4239 ifr->ifr_map.irq = dev->irq;
4240 ifr->ifr_map.dma = dev->dma;
4241 ifr->ifr_map.port = dev->if_port;
4245 ifr->ifr_ifindex = dev->ifindex;
4249 ifr->ifr_qlen = dev->tx_queue_len;
4253 /* dev_ioctl() should ensure this case is never reached */
4265 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
4267 static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4270 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
4271 const struct net_device_ops *ops;
4276 ops = dev->netdev_ops;
4279 case SIOCSIFFLAGS: /* Set interface flags */
4280 return dev_change_flags(dev, ifr->ifr_flags);
4282 case SIOCSIFMETRIC: /* Set the metric on the interface
4283 (currently unused) */
4286 case SIOCSIFMTU: /* Set the MTU of a device */
4287 return dev_set_mtu(dev, ifr->ifr_mtu);
4290 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
4292 case SIOCSIFHWBROADCAST:
4293 if (ifr->ifr_hwaddr.sa_family != dev->type)
4295 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4296 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4297 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4301 if (ops->ndo_set_config) {
4302 if (!netif_device_present(dev))
4304 return ops->ndo_set_config(dev, &ifr->ifr_map);
4309 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4310 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4312 if (!netif_device_present(dev))
4314 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
4318 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4319 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4321 if (!netif_device_present(dev))
4323 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
4327 if (ifr->ifr_qlen < 0)
4329 dev->tx_queue_len = ifr->ifr_qlen;
4333 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4334 return dev_change_name(dev, ifr->ifr_newname);
4337 * Unknown or private ioctl
4341 if ((cmd >= SIOCDEVPRIVATE &&
4342 cmd <= SIOCDEVPRIVATE + 15) ||
4343 cmd == SIOCBONDENSLAVE ||
4344 cmd == SIOCBONDRELEASE ||
4345 cmd == SIOCBONDSETHWADDR ||
4346 cmd == SIOCBONDSLAVEINFOQUERY ||
4347 cmd == SIOCBONDINFOQUERY ||
4348 cmd == SIOCBONDCHANGEACTIVE ||
4349 cmd == SIOCGMIIPHY ||
4350 cmd == SIOCGMIIREG ||
4351 cmd == SIOCSMIIREG ||
4352 cmd == SIOCBRADDIF ||
4353 cmd == SIOCBRDELIF ||
4354 cmd == SIOCSHWTSTAMP ||
4355 cmd == SIOCWANDEV) {
4357 if (ops->ndo_do_ioctl) {
4358 if (netif_device_present(dev))
4359 err = ops->ndo_do_ioctl(dev, ifr, cmd);
4371 * This function handles all "interface"-type I/O control requests. The actual
4372 * 'doing' part of this is dev_ifsioc above.
4376 * dev_ioctl - network device ioctl
4377 * @net: the applicable net namespace
4378 * @cmd: command to issue
4379 * @arg: pointer to a struct ifreq in user space
4381 * Issue ioctl functions to devices. This is normally called by the
4382 * user space syscall interfaces but can sometimes be useful for
4383 * other purposes. The return value is the return from the syscall if
4384 * positive or a negative errno code on error.
4387 int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
4393 /* One special case: SIOCGIFCONF takes ifconf argument
4394 and requires shared lock, because it sleeps writing to user space.
4398 if (cmd == SIOCGIFCONF) {
4400 ret = dev_ifconf(net, (char __user *) arg);
4404 if (cmd == SIOCGIFNAME)
4405 return dev_ifname(net, (struct ifreq __user *)arg);
4407 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4410 ifr.ifr_name[IFNAMSIZ-1] = 0;
4412 colon = strchr(ifr.ifr_name, ':');
4417 * See which interface the caller is talking about.
4422 * These ioctl calls:
4423 * - can be done by all.
4424 * - atomic and do not require locking.
4435 dev_load(net, ifr.ifr_name);
4436 read_lock(&dev_base_lock);
4437 ret = dev_ifsioc_locked(net, &ifr, cmd);
4438 read_unlock(&dev_base_lock);
4442 if (copy_to_user(arg, &ifr,
4443 sizeof(struct ifreq)))
4449 dev_load(net, ifr.ifr_name);
4451 ret = dev_ethtool(net, &ifr);
4456 if (copy_to_user(arg, &ifr,
4457 sizeof(struct ifreq)))
4463 * These ioctl calls:
4464 * - require superuser power.
4465 * - require strict serialization.
4471 if (!capable(CAP_NET_ADMIN))
4473 dev_load(net, ifr.ifr_name);
4475 ret = dev_ifsioc(net, &ifr, cmd);
4480 if (copy_to_user(arg, &ifr,
4481 sizeof(struct ifreq)))
4487 * These ioctl calls:
4488 * - require superuser power.
4489 * - require strict serialization.
4490 * - do not return a value
4500 case SIOCSIFHWBROADCAST:
4503 case SIOCBONDENSLAVE:
4504 case SIOCBONDRELEASE:
4505 case SIOCBONDSETHWADDR:
4506 case SIOCBONDCHANGEACTIVE:
4510 if (!capable(CAP_NET_ADMIN))
4513 case SIOCBONDSLAVEINFOQUERY:
4514 case SIOCBONDINFOQUERY:
4515 dev_load(net, ifr.ifr_name);
4517 ret = dev_ifsioc(net, &ifr, cmd);
4522 /* Get the per device memory space. We can add this but
4523 * currently do not support it */
4525 /* Set the per device memory buffer space.
4526 * Not applicable in our case */
4531 * Unknown or private ioctl.
4534 if (cmd == SIOCWANDEV ||
4535 (cmd >= SIOCDEVPRIVATE &&
4536 cmd <= SIOCDEVPRIVATE + 15)) {
4537 dev_load(net, ifr.ifr_name);
4539 ret = dev_ifsioc(net, &ifr, cmd);
4541 if (!ret && copy_to_user(arg, &ifr,
4542 sizeof(struct ifreq)))
4546 /* Take care of Wireless Extensions */
4547 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
4548 return wext_handle_ioctl(net, &ifr, cmd, arg);
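/*
 * Illustrative sketch (not part of the original file): the user-space
 * side of the SIOCGIFNAME path handled above; any socket fd will do.
 * Needs <sys/socket.h>, <sys/ioctl.h>, <net/if.h> and <stdio.h>.
 *
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	ifr.ifr_ifindex = 1;
 *	if (ioctl(fd, SIOCGIFNAME, &ifr) == 0)
 *		printf("ifindex 1 is %s\n", ifr.ifr_name);
 *	close(fd);
 */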
4555 * dev_new_index - allocate an ifindex
4556 * @net: the applicable net namespace
4558 * Returns a suitable unique value for a new device interface
4559 * number. The caller must hold the rtnl semaphore or the
4560 * dev_base_lock to be sure it remains unique.
4562 static int dev_new_index(struct net *net)
4568 if (!__dev_get_by_index(net, ifindex))
4573 /* Delayed registration/unregistration */
4574 static LIST_HEAD(net_todo_list);
4576 static void net_set_todo(struct net_device *dev)
4578 list_add_tail(&dev->todo_list, &net_todo_list);
4581 static void rollback_registered(struct net_device *dev)
4583 BUG_ON(dev_boot_phase);
4586 /* Some devices call without registering for initialization unwind. */
4587 if (dev->reg_state == NETREG_UNINITIALIZED) {
4588 printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
4589 "was registered\n", dev->name, dev);
4595 BUG_ON(dev->reg_state != NETREG_REGISTERED);
4597 /* If device is running, close it first. */
4600 /* And unlink it from device chain. */
4601 unlist_netdevice(dev);
4603 dev->reg_state = NETREG_UNREGISTERING;
4607 /* Shutdown queueing discipline. */
4611 /* Notify protocols that we are about to destroy
4612 this device. They should clean up all of their state.
4614 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4617 * Flush the unicast and multicast chains
4619 dev_unicast_flush(dev);
4620 dev_addr_discard(dev);
4622 if (dev->netdev_ops->ndo_uninit)
4623 dev->netdev_ops->ndo_uninit(dev);
4625 /* Notifier chain MUST detach us from master device. */
4626 WARN_ON(dev->master);
4628 /* Remove entries from kobject tree */
4629 netdev_unregister_kobject(dev);
4636 static void __netdev_init_queue_locks_one(struct net_device *dev,
4637 struct netdev_queue *dev_queue,
4640 spin_lock_init(&dev_queue->_xmit_lock);
4641 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
4642 dev_queue->xmit_lock_owner = -1;
4645 static void netdev_init_queue_locks(struct net_device *dev)
4647 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
4648 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
4651 unsigned long netdev_fix_features(unsigned long features, const char *name)
4653 /* Fix illegal SG+CSUM combinations. */
4654 if ((features & NETIF_F_SG) &&
4655 !(features & NETIF_F_ALL_CSUM)) {
4657 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
4658 "checksum feature.\n", name);
4659 features &= ~NETIF_F_SG;
4662 /* TSO requires that SG is present as well. */
4663 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
4665 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
4666 "SG feature.\n", name);
4667 features &= ~NETIF_F_TSO;
4670 if (features & NETIF_F_UFO) {
4671 if (!(features & NETIF_F_GEN_CSUM)) {
4673 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4674 "since no NETIF_F_HW_CSUM feature.\n",
4676 features &= ~NETIF_F_UFO;
4679 if (!(features & NETIF_F_SG)) {
4681 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4682 "since no NETIF_F_SG feature.\n", name);
4683 features &= ~NETIF_F_UFO;
4689 EXPORT_SYMBOL(netdev_fix_features);
4692 * register_netdevice - register a network device
4693 * @dev: device to register
4695 * Take a completed network device structure and add it to the kernel
4696 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4697 * chain. 0 is returned on success. A negative errno code is returned
4698 * on a failure to set up the device, or if the name is a duplicate.
4700 * Callers must hold the rtnl semaphore. You may want
4701 * register_netdev() instead of this.
4704 * The locking appears insufficient to guarantee two parallel registers
4705 * will not get the same name.
4708 int register_netdevice(struct net_device *dev)
4710 struct hlist_head *head;
4711 struct hlist_node *p;
4713 struct net *net = dev_net(dev);
4715 BUG_ON(dev_boot_phase);
4720 /* When net_device's are persistent, this will be fatal. */
4721 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
4724 spin_lock_init(&dev->addr_list_lock);
4725 netdev_set_addr_lockdep_class(dev);
4726 netdev_init_queue_locks(dev);
4730 /* Init, if this function is available */
4731 if (dev->netdev_ops->ndo_init) {
4732 ret = dev->netdev_ops->ndo_init(dev);
4740 if (!dev_valid_name(dev->name)) {
4745 dev->ifindex = dev_new_index(net);
4746 if (dev->iflink == -1)
4747 dev->iflink = dev->ifindex;
4749 /* Check for existence of name */
4750 head = dev_name_hash(net, dev->name);
4751 hlist_for_each(p, head) {
4752 struct net_device *d
4753 = hlist_entry(p, struct net_device, name_hlist);
4754 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
4760 /* Fix illegal checksum combinations */
4761 if ((dev->features & NETIF_F_HW_CSUM) &&
4762 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4763 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
4765 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4768 if ((dev->features & NETIF_F_NO_CSUM) &&
4769 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4770 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
4772 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
4775 dev->features = netdev_fix_features(dev->features, dev->name);
4777 /* Enable software GSO if SG is supported. */
4778 if (dev->features & NETIF_F_SG)
4779 dev->features |= NETIF_F_GSO;
4781 netdev_initialize_kobject(dev);
4782 ret = netdev_register_kobject(dev);
4785 dev->reg_state = NETREG_REGISTERED;
4788 * Default initial state at registry is that the
4789 * device is present.
4792 set_bit(__LINK_STATE_PRESENT, &dev->state);
4794 dev_init_scheduler(dev);
4796 list_netdevice(dev);
4798 /* Notify protocols that a new device appeared. */
4799 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
4800 ret = notifier_to_errno(ret);
4802 rollback_registered(dev);
4803 dev->reg_state = NETREG_UNREGISTERED;
4810 if (dev->netdev_ops->ndo_uninit)
4811 dev->netdev_ops->ndo_uninit(dev);
4816 * init_dummy_netdev - init a dummy network device for NAPI
4817 * @dev: device to init
4819 * This takes a network device structure and initializes the minimum
4820 * number of fields so it can be used to schedule NAPI polls without
4821 * registering a full blown interface. This is to be used by drivers
4822 * that need to tie several hardware interfaces to a single NAPI
4823 * poll scheduler due to HW limitations.
4825 int init_dummy_netdev(struct net_device *dev)
4827 /* Clear everything. Note we don't initialize spinlocks
4828 * as they aren't supposed to be taken by any of the
4829 * NAPI code and this dummy netdev is supposed to be
4830 * only ever used for NAPI polls
4832 memset(dev, 0, sizeof(struct net_device));
4834 /* make sure we BUG if trying to hit standard
4835 * register/unregister code path
4837 dev->reg_state = NETREG_DUMMY;
4839 /* initialize the ref count */
4840 atomic_set(&dev->refcnt, 1);
4842 /* NAPI wants this */
4843 INIT_LIST_HEAD(&dev->napi_list);
4845 /* a dummy interface is started by default */
4846 set_bit(__LINK_STATE_PRESENT, &dev->state);
4847 set_bit(__LINK_STATE_START, &dev->state);
4851 EXPORT_SYMBOL_GPL(init_dummy_netdev);
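/*
 * Illustrative sketch (not part of the original file): a driver whose
 * hardware exposes several MACs behind one PCI function can host its
 * NAPI context on an embedded dummy netdev. foo_device, its dummy_dev
 * and napi members, and foo_poll are hypothetical.
 *
 *	static int foo_probe(struct foo_device *fdev)
 *	{
 *		init_dummy_netdev(&fdev->dummy_dev);
 *		netif_napi_add(&fdev->dummy_dev, &fdev->napi, foo_poll, 64);
 *		return 0;
 *	}
 */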
4855 * register_netdev - register a network device
4856 * @dev: device to register
4858 * Take a completed network device structure and add it to the kernel
4859 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4860 * chain. 0 is returned on success. A negative errno code is returned
4861 * on a failure to set up the device, or if the name is a duplicate.
4863 * This is a wrapper around register_netdevice that takes the rtnl semaphore
4864 * and expands the device name if you passed a format string to alloc_netdev.
4867 int register_netdev(struct net_device *dev)
4874 * If the name is a format string the caller wants us to do a name allocation.
4877 if (strchr(dev->name, '%')) {
4878 err = dev_alloc_name(dev, dev->name);
4883 err = register_netdevice(dev);
4888 EXPORT_SYMBOL(register_netdev);
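/*
 * Illustrative sketch (not part of the original file): typical probe-time
 * use, relying on the '%d' name expansion described above. foo_priv and
 * foo_netdev_ops are hypothetical.
 *
 *	struct net_device *dev;
 *	int err;
 *
 *	dev = alloc_etherdev(sizeof(struct foo_priv));
 *	if (!dev)
 *		return -ENOMEM;
 *	dev->netdev_ops = &foo_netdev_ops;
 *	strcpy(dev->name, "foo%d");	// expanded to foo0, foo1, ...
 *	err = register_netdev(dev);
 *	if (err)
 *		free_netdev(dev);
 */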
4891 * netdev_wait_allrefs - wait until all references are gone.
4893 * This is called when unregistering network devices.
4895 * Any protocol or device that holds a reference should register
4896 * for netdevice notification, and cleanup and put back the
4897 * reference if they receive an UNREGISTER event.
4898 * We can get stuck here if buggy protocols don't correctly call dev_put().
4901 static void netdev_wait_allrefs(struct net_device *dev)
4903 unsigned long rebroadcast_time, warning_time;
4905 rebroadcast_time = warning_time = jiffies;
4906 while (atomic_read(&dev->refcnt) != 0) {
4907 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
4910 /* Rebroadcast unregister notification */
4911 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4913 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
4915 /* We must not have linkwatch events
4916 * pending on unregister. If this
4917 * happens, we simply run the queue
4918 * unscheduled, resulting in a noop
4921 linkwatch_run_queue();
4926 rebroadcast_time = jiffies;
4931 if (time_after(jiffies, warning_time + 10 * HZ)) {
4932 printk(KERN_EMERG "unregister_netdevice: "
4933 "waiting for %s to become free. Usage "
4935 dev->name, atomic_read(&dev->refcnt));
4936 warning_time = jiffies;
4945 * register_netdevice(x1);
4946 * register_netdevice(x2);
4948 * unregister_netdevice(y1);
4949 * unregister_netdevice(y2);
4955 * We are invoked by rtnl_unlock().
4956 * This allows us to deal with problems:
4957 * 1) We can delete sysfs objects which invoke hotplug
4958 * without deadlocking with linkwatch via keventd.
4959 * 2) Since we run with the RTNL semaphore not held, we can sleep
4960 * safely in order to wait for the netdev refcnt to drop to zero.
4962 * We must not return until all unregister events added during
4963 * the interval the lock was held have been completed.
4965 void netdev_run_todo(void)
4967 struct list_head list;
4969 /* Snapshot list, allow later requests */
4970 list_replace_init(&net_todo_list, &list);
4974 while (!list_empty(&list)) {
4975 struct net_device *dev
4976 = list_entry(list.next, struct net_device, todo_list);
4977 list_del(&dev->todo_list);
4979 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
4980 printk(KERN_ERR "network todo '%s' but state %d\n",
4981 dev->name, dev->reg_state);
4986 dev->reg_state = NETREG_UNREGISTERED;
4988 on_each_cpu(flush_backlog, dev, 1);
4990 netdev_wait_allrefs(dev);
4993 BUG_ON(atomic_read(&dev->refcnt));
4994 WARN_ON(dev->ip_ptr);
4995 WARN_ON(dev->ip6_ptr);
4996 WARN_ON(dev->dn_ptr);
4998 if (dev->destructor)
4999 dev->destructor(dev);
5001 /* Free network device */
5002 kobject_put(&dev->dev.kobj);
5007 * dev_get_stats - get network device statistics
5008 * @dev: device to get statistics from
5010 * Get network statistics from device. The device driver may provide
5011 * its own method by setting dev->netdev_ops->ndo_get_stats; otherwise
5012 * the internal statistics structure is used.
5014 const struct net_device_stats *dev_get_stats(struct net_device *dev)
5016 const struct net_device_ops *ops = dev->netdev_ops;
5018 if (ops->ndo_get_stats)
5019 return ops->ndo_get_stats(dev);
5021 unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
5022 struct net_device_stats *stats = &dev->stats;
5024 struct netdev_queue *txq;
5026 for (i = 0; i < dev->num_tx_queues; i++) {
5027 txq = netdev_get_tx_queue(dev, i);
5028 tx_bytes += txq->tx_bytes;
5029 tx_packets += txq->tx_packets;
5030 tx_dropped += txq->tx_dropped;
5032 if (tx_bytes || tx_packets || tx_dropped) {
5033 stats->tx_bytes = tx_bytes;
5034 stats->tx_packets = tx_packets;
5035 stats->tx_dropped = tx_dropped;
5040 EXPORT_SYMBOL(dev_get_stats);
5042 static void netdev_init_one_queue(struct net_device *dev,
5043 struct netdev_queue *queue,
5049 static void netdev_init_queues(struct net_device *dev)
5051 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
5052 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
5053 spin_lock_init(&dev->tx_global_lock);
5057 * alloc_netdev_mq - allocate network device
5058 * @sizeof_priv: size of private data to allocate space for
5059 * @name: device name format string
5060 * @setup: callback to initialize device
5061 * @queue_count: the number of subqueues to allocate
5063 * Allocates a struct net_device with private data area for driver use
5064 * and performs basic initialization. Also allocates subqueue structs
5065 * for each queue on the device at the end of the netdevice.
5067 struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5068 void (*setup)(struct net_device *), unsigned int queue_count)
5070 struct netdev_queue *tx;
5071 struct net_device *dev;
5073 struct net_device *p;
5075 BUG_ON(strlen(name) >= sizeof(dev->name));
5077 alloc_size = sizeof(struct net_device);
5079 /* ensure 32-byte alignment of private area */
5080 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
5081 alloc_size += sizeof_priv;
5083 /* ensure 32-byte alignment of whole construct */
5084 alloc_size += NETDEV_ALIGN - 1;
5086 p = kzalloc(alloc_size, GFP_KERNEL);
5088 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
5092 tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
5094 printk(KERN_ERR "alloc_netdev: Unable to allocate "
5099 dev = PTR_ALIGN(p, NETDEV_ALIGN);
5100 dev->padded = (char *)dev - (char *)p;
5102 if (dev_addr_init(dev))
5105 dev_unicast_init(dev);
5107 dev_net_set(dev, &init_net);
5110 dev->num_tx_queues = queue_count;
5111 dev->real_num_tx_queues = queue_count;
5113 dev->gso_max_size = GSO_MAX_SIZE;
5115 netdev_init_queues(dev);
5117 INIT_LIST_HEAD(&dev->napi_list);
5118 dev->priv_flags = IFF_XMIT_DST_RELEASE;
5120 strcpy(dev->name, name);
5130 EXPORT_SYMBOL(alloc_netdev_mq);
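/*
 * Illustrative sketch (not part of the original file): a multiqueue
 * Ethernet driver allocating one TX queue per hardware ring.
 * FOO_NUM_TX_RINGS and foo_priv are hypothetical.
 *
 *	struct net_device *dev;
 *
 *	dev = alloc_netdev_mq(sizeof(struct foo_priv), "eth%d",
 *			      ether_setup, FOO_NUM_TX_RINGS);
 *	if (!dev)
 *		return -ENOMEM;
 */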
/**
 *	free_netdev - free network device
 *	@dev: device
 *
 *	This function does the last stage of destroying an allocated device
 *	interface. The reference to the device object is released.
 *	If this is the last reference then it will be freed.
 */
void free_netdev(struct net_device *dev)
{
	struct napi_struct *p, *n;

	release_net(dev_net(dev));

	kfree(dev->_tx);

	/* Flush device addresses */
	dev_addr_flush(dev);

	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
		netif_napi_del(p);

	/* Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		kfree((char *)dev - dev->padded);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

	/* will free via device release */
	put_device(&dev->dev);
}
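
/*
 * Example (sketch): the NETREG_UNINITIALIZED branch above is what lets a
 * probe routine undo a failed registration with a bare free_netdev():
 *
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 *
 * Since registration never completed, reg_state is still
 * NETREG_UNINITIALIZED and the memory is freed immediately.
 */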
/**
 *	synchronize_net - Synchronize with packet receive processing
 *
 *	Wait for packets currently being received to be done.
 *	Does not block later packets from starting.
 */
void synchronize_net(void)
{
	might_sleep();
	synchronize_rcu();
}
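
/*
 * Example (sketch): a protocol that unhooks a packet handler waits for
 * in-flight receivers before freeing the handler's state.
 * "my_packet_type" and "my_state" are hypothetical:
 *
 *	__dev_remove_pack(&my_packet_type);
 *	synchronize_net();
 *	kfree(my_state);
 *
 * After synchronize_net() returns, no CPU can still be executing
 * my_packet_type.func, so the state may be freed safely.
 */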
/**
 *	unregister_netdevice - remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *
 *	Callers must hold the rtnl semaphore. You may want
 *	unregister_netdev() instead of this.
 */
void unregister_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	rollback_registered(dev);

	/* Finish processing unregister after unlock */
	net_set_todo(dev);
}
/**
 *	unregister_netdev - remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *
 *	This is just a wrapper for unregister_netdevice that takes
 *	the rtnl semaphore. In general you want to use this and not
 *	unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);
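
/*
 * Example (sketch): the usual driver teardown pairs this with
 * free_netdev(), in that order:
 *
 *	unregister_netdev(my_dev);
 *	free_netdev(my_dev);
 *
 * unregister_netdev() takes and drops the rtnl lock itself;
 * free_netdev() then releases the last reference to the device.
 */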
/**
 *	dev_change_net_namespace - move device to a different network namespace
 *	@dev: device
 *	@net: network namespace
 *	@pat: If not NULL name pattern to try if the current device name
 *	      is already taken in the destination network namespace.
 *
 *	This function shuts down a device interface and moves it
 *	to a new network namespace. On success 0 is returned, on
 *	a failure a negative errno code is returned.
 *
 *	Callers must hold the rtnl semaphore.
 */
int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
	char buf[IFNAMSIZ];
	const char *destname;
	int err;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

#ifdef CONFIG_SYSFS
	/* Don't allow real devices to be moved when sysfs
	 * is enabled.
	 */
	err = -EINVAL;
	if (dev->dev.parent)
		goto out;
#endif

	/* Ensure the device has been registered */
	err = -EINVAL;
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing to do */
	err = 0;
	if (net_eq(dev_net(dev), net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	destname = dev->name;
	if (__dev_get_by_name(net, destname)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		if (!dev_valid_name(pat))
			goto out;
		if (strchr(pat, '%')) {
			if (__dev_alloc_name(net, pat, buf) < 0)
				goto out;
			destname = buf;
		} else
			destname = pat;
		if (__dev_get_by_name(net, destname))
			goto out;
	}

	/*
	 * And now a mini version of register_netdevice unregister_netdevice.
	 */

	/* If device is running close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	err = -ENODEV;
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols, that we are about to destroy
	   this device. They should clean all the things.
	*/
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

	/*
	 *	Flush the unicast and multicast chains
	 */
	dev_unicast_flush(dev);
	dev_addr_discard(dev);

	netdev_unregister_kobject(dev);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);

	/* Assign the new device name */
	if (destname != dev->name)
		strcpy(dev->name, destname);

	/* If there is an ifindex conflict assign a new one */
	if (__dev_get_by_index(net, dev->ifindex)) {
		int iflink = (dev->iflink == dev->ifindex);
		dev->ifindex = dev_new_index(net);
		if (iflink)
			dev->iflink = dev->ifindex;
	}

	/* Fixup kobjects */
	err = netdev_register_kobject(dev);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols, that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	synchronize_net();
	err = 0;
out:
	return err;
}
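
/*
 * Example (sketch): moving a device into another namespace under the
 * rtnl lock, with an "eth%d" fallback pattern in case its name is
 * already taken there.  "other_net" is a hypothetical struct net:
 *
 *	int err;
 *
 *	rtnl_lock();
 *	err = dev_change_net_namespace(dev, other_net, "eth%d");
 *	rtnl_unlock();
 *	if (err)
 *		printk(KERN_WARNING "move failed: %d\n", err);
 */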
static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct Qdisc **list_net;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Find end of our output_queue. */
	list_net = &sd->output_queue;
	while (*list_net)
		list_net = &(*list_net)->next_sched;
	/* Append output queue from offline CPU. */
	*list_net = oldsd->output_queue;
	oldsd->output_queue = NULL;

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
		netif_rx(skb);

	return NOTIFY_OK;
}
/**
 *	netdev_increment_features - increment feature set by one
 *	@all: current feature set
 *	@one: new feature set
 *	@mask: mask feature set
 *
 *	Computes a new feature set after adding a device with feature set
 *	@one to the master device with current feature set @all.  Will not
 *	enable anything that is off in @mask. Returns the new feature set.
 */
unsigned long netdev_increment_features(unsigned long all, unsigned long one,
					unsigned long mask)
{
	/* If device needs checksumming, downgrade to it. */
	if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
		all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
	else if (mask & NETIF_F_ALL_CSUM) {
		/* If one device supports v4/v6 checksumming, set for all. */
		if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
		    !(all & NETIF_F_GEN_CSUM)) {
			all &= ~NETIF_F_ALL_CSUM;
			all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
		}

		/* If one device supports hw checksumming, set for all. */
		if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
			all &= ~NETIF_F_ALL_CSUM;
			all |= NETIF_F_HW_CSUM;
		}
	}

	one |= NETIF_F_ALL_CSUM;

	one |= all & NETIF_F_ONE_FOR_ALL;
	all &= one | NETIF_F_LLTX | NETIF_F_GSO;
	all |= one & mask & NETIF_F_ONE_FOR_ALL;

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);
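
/*
 * Example (sketch, loosely after the bridge's feature recomputation):
 * a master device folds each port's feature set into its own, starting
 * from its full mask with the one-for-all bits cleared:
 *
 *	unsigned long mask = master->feature_mask;
 *	unsigned long features = mask & ~NETIF_F_ONE_FOR_ALL;
 *
 *	list_for_each_entry(p, &master->port_list, list)
 *		features = netdev_increment_features(features,
 *						     p->dev->features, mask);
 *
 * "master", "feature_mask" and "port_list" are hypothetical names here,
 * standing in for whatever slave list the master device keeps.
 */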
static struct hlist_head *netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}
/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}
/**
 *	netdev_drivername - network driver for the device
 *	@dev: network device
 *	@buffer: buffer for resulting name
 *	@len: size of buffer
 *
 *	Determine network driver for device.
 */
char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
{
	const struct device_driver *driver;
	const struct device *parent;

	if (len <= 0 || !buffer)
		return buffer;
	buffer[0] = 0;

	parent = dev->dev.parent;

	if (!parent)
		return buffer;

	driver = parent->driver;
	if (driver && driver->name)
		strlcpy(buffer, driver->name, len);
	return buffer;
}
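
/*
 * Example (sketch): a watchdog path can use this helper to name the
 * offending driver in its log message:
 *
 *	char drivername[64];
 *
 *	printk(KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit timed out\n",
 *	       dev->name, netdev_drivername(dev, drivername, 64));
 */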
static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};
static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
restart:
	for_each_netdev(net, dev) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmoveable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Delete virtual devices */
		if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) {
			dev->rtnl_link_ops->dellink(dev);
			goto restart;
		}

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
			       __func__, dev->name, err);
			BUG();
		}
		goto restart;
	}
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
};
/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 *	This is called single threaded during boot, so no need
 *	to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 *	Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct softnet_data *queue;

		queue = &per_cpu(softnet_data, i);
		skb_queue_head_init(&queue->input_pkt_queue);
		queue->completion_queue = NULL;
		INIT_LIST_HEAD(&queue->poll_list);

		queue->backlog.poll = process_backlog;
		queue->backlog.weight = weight_p;
		queue->backlog.gro_list = NULL;
		queue->backlog.gro_count = 0;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too. Since we now dynamically allocate and free
	 * the loopback device, ensure this invariant is maintained by
	 * keeping the loopback device as the first device on the list
	 * of network devices, so that it is the first device that
	 * appears and the last network device that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	dev_mcast_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);

static int __init initialize_hashrnd(void)
{
	/* Seed the hash used to spread transmits across queues */
	get_random_bytes(&skb_tx_hashrnd, sizeof(skb_tx_hashrnd));
	return 0;
}

late_initcall_sync(initialize_hashrnd);
EXPORT_SYMBOL(__dev_get_by_index);
EXPORT_SYMBOL(__dev_get_by_name);
EXPORT_SYMBOL(__dev_remove_pack);
EXPORT_SYMBOL(dev_valid_name);
EXPORT_SYMBOL(dev_add_pack);
EXPORT_SYMBOL(dev_alloc_name);
EXPORT_SYMBOL(dev_close);
EXPORT_SYMBOL(dev_get_by_flags);
EXPORT_SYMBOL(dev_get_by_index);
EXPORT_SYMBOL(dev_get_by_name);
EXPORT_SYMBOL(dev_open);
EXPORT_SYMBOL(dev_queue_xmit);
EXPORT_SYMBOL(dev_remove_pack);
EXPORT_SYMBOL(dev_set_allmulti);
EXPORT_SYMBOL(dev_set_promiscuity);
EXPORT_SYMBOL(dev_change_flags);
EXPORT_SYMBOL(dev_set_mtu);
EXPORT_SYMBOL(dev_set_mac_address);
EXPORT_SYMBOL(free_netdev);
EXPORT_SYMBOL(netdev_boot_setup_check);
EXPORT_SYMBOL(netdev_set_master);
EXPORT_SYMBOL(netdev_state_change);
EXPORT_SYMBOL(netif_receive_skb);
EXPORT_SYMBOL(netif_rx);
EXPORT_SYMBOL(register_gifconf);
EXPORT_SYMBOL(register_netdevice);
EXPORT_SYMBOL(register_netdevice_notifier);
EXPORT_SYMBOL(skb_checksum_help);
EXPORT_SYMBOL(synchronize_net);
EXPORT_SYMBOL(unregister_netdevice);
EXPORT_SYMBOL(unregister_netdevice_notifier);
EXPORT_SYMBOL(net_enable_timestamp);
EXPORT_SYMBOL(net_disable_timestamp);
EXPORT_SYMBOL(dev_get_flags);
#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
EXPORT_SYMBOL(br_handle_frame_hook);
EXPORT_SYMBOL(br_fdb_get_hook);
EXPORT_SYMBOL(br_fdb_put_hook);
#endif

EXPORT_SYMBOL(dev_load);

EXPORT_PER_CPU_SYMBOL(softnet_data);