/*
 * net-sysfs.c - network device class and attributes
 *
 * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/nsproxy.h>
#include <net/sock.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/pm_runtime.h>

#include "net-sysfs.h"
#ifdef CONFIG_SYSFS
static const char fmt_hex[] = "%#x\n";
static const char fmt_long_hex[] = "%#lx\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_udec[] = "%u\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";
static inline int dev_isalive(const struct net_device *dev)
{
	return dev->reg_state <= NETREG_REGISTERED;
}
/* use same locking rules as GIF* ioctl's */
static ssize_t netdev_show(const struct device *dev,
			   struct device_attribute *attr, char *buf,
			   ssize_t (*format)(const struct net_device *, char *))
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(ndev))
		ret = (*format)(ndev, buf);
	read_unlock(&dev_base_lock);

	return ret;
}
/* generate a show function for simple field */
#define NETDEVICE_SHOW(field, format_string) \
static ssize_t format_##field(const struct net_device *dev, char *buf) \
{ \
	return sprintf(buf, format_string, dev->field); \
} \
static ssize_t field##_show(struct device *dev, \
			    struct device_attribute *attr, char *buf) \
{ \
	return netdev_show(dev, attr, buf, format_##field); \
}

#define NETDEVICE_SHOW_RO(field, format_string) \
NETDEVICE_SHOW(field, format_string); \
static DEVICE_ATTR_RO(field)

#define NETDEVICE_SHOW_RW(field, format_string) \
NETDEVICE_SHOW(field, format_string); \
static DEVICE_ATTR_RW(field)
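/*
 * Example (illustrative): NETDEVICE_SHOW_RO(ifindex, fmt_dec) expands to
 * format_ifindex()/ifindex_show() plus a read-only dev_attr_ifindex, which
 * the net class then exposes as /sys/class/net/<iface>/ifindex.
 */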
/* use same locking and permission rules as SIF* ioctl's */
static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len,
			    int (*set)(struct net_device *, unsigned long))
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	unsigned long new;
	int ret = -EINVAL;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtoul(buf, 0, &new);
	if (ret)
		goto err;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		if ((ret = (*set)(netdev, new)) == 0)
			ret = len;
	}
	rtnl_unlock();
 err:
	return ret;
}
NETDEVICE_SHOW_RO(dev_id, fmt_hex);
NETDEVICE_SHOW_RO(dev_port, fmt_dec);
NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec);
NETDEVICE_SHOW_RO(addr_len, fmt_dec);
NETDEVICE_SHOW_RO(iflink, fmt_dec);
NETDEVICE_SHOW_RO(ifindex, fmt_dec);
NETDEVICE_SHOW_RO(type, fmt_dec);
NETDEVICE_SHOW_RO(link_mode, fmt_dec);
static ssize_t format_name_assign_type(const struct net_device *dev, char *buf)
{
	return sprintf(buf, fmt_dec, dev->name_assign_type);
}

static ssize_t name_assign_type_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (ndev->name_assign_type != NET_NAME_UNKNOWN)
		ret = netdev_show(dev, attr, buf, format_name_assign_type);

	return ret;
}
static DEVICE_ATTR_RO(name_assign_type);
/* use same locking rules as GIFHWADDR ioctl's */
static ssize_t address_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(ndev))
		ret = sysfs_format_mac(buf, ndev->dev_addr, ndev->addr_len);
	read_unlock(&dev_base_lock);

	return ret;
}
static DEVICE_ATTR_RO(address);
static ssize_t broadcast_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = to_net_dev(dev);

	if (dev_isalive(ndev))
		return sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len);
	return -EINVAL;
}
static DEVICE_ATTR_RO(broadcast);
static int change_carrier(struct net_device *dev, unsigned long new_carrier)
{
	if (!netif_running(dev))
		return -EINVAL;
	return dev_change_carrier(dev, (bool)new_carrier);
}

static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_carrier);
}

static ssize_t carrier_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RW(carrier);
static ssize_t speed_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_cmd cmd;

		if (!__ethtool_get_settings(netdev, &cmd))
			ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd));
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(speed);
static ssize_t duplex_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_cmd cmd;

		if (!__ethtool_get_settings(netdev, &cmd)) {
			const char *duplex;

			switch (cmd.duplex) {
			case DUPLEX_HALF: duplex = "half"; break;
			case DUPLEX_FULL: duplex = "full"; break;
			default: duplex = "unknown"; break;
			}
			ret = sprintf(buf, "%s\n", duplex);
		}
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(duplex);
static ssize_t dormant_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_dormant(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RO(dormant);
static const char *const operstates[] = {
	"unknown",
	"notpresent", /* currently unused */
	"down",
	"lowerlayerdown",
	"testing", /* currently unused */
	"dormant",
	"up"
};
static ssize_t operstate_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	unsigned char operstate;

	read_lock(&dev_base_lock);
	operstate = netdev->operstate;
	if (!netif_running(netdev))
		operstate = IF_OPER_DOWN;
	read_unlock(&dev_base_lock);

	if (operstate >= ARRAY_SIZE(operstates))
		return -EINVAL; /* should not happen */

	return sprintf(buf, "%s\n", operstates[operstate]);
}
static DEVICE_ATTR_RO(operstate);
static ssize_t carrier_changes_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sprintf(buf, fmt_dec,
		       atomic_read(&netdev->carrier_changes));
}
static DEVICE_ATTR_RO(carrier_changes);
/* read-write attributes */

static int change_mtu(struct net_device *dev, unsigned long new_mtu)
{
	return dev_set_mtu(dev, (int)new_mtu);
}

static ssize_t mtu_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_mtu);
}
NETDEVICE_SHOW_RW(mtu, fmt_dec);
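/*
 * Illustrative usage from userspace (interface name and values are examples):
 *   $ cat /sys/class/net/eth0/mtu
 *   1500
 *   # echo 1400 > /sys/class/net/eth0/mtu
 * The write path is mtu_store() -> netdev_store() -> change_mtu() -> dev_set_mtu().
 */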
static int change_flags(struct net_device *dev, unsigned long new_flags)
{
	return dev_change_flags(dev, (unsigned int)new_flags);
}

static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_flags);
}
NETDEVICE_SHOW_RW(flags, fmt_hex);
static int change_tx_queue_len(struct net_device *dev, unsigned long new_len)
{
	dev->tx_queue_len = new_len;
	return 0;
}

static ssize_t tx_queue_len_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, change_tx_queue_len);
}
NETDEVICE_SHOW_RW(tx_queue_len, fmt_ulong);
static int change_gro_flush_timeout(struct net_device *dev, unsigned long val)
{
	dev->gro_flush_timeout = val;
	return 0;
}

static ssize_t gro_flush_timeout_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, change_gro_flush_timeout);
}
NETDEVICE_SHOW_RW(gro_flush_timeout, fmt_ulong);
static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	size_t count = len;
	ssize_t ret;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	/* ignore trailing newline */
	if (len > 0 && buf[len - 1] == '\n')
		--count;

	if (!rtnl_trylock())
		return restart_syscall();
	ret = dev_set_alias(netdev, buf, count);
	rtnl_unlock();

	return ret < 0 ? ret : len;
}

static ssize_t ifalias_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = 0;

	if (!rtnl_trylock())
		return restart_syscall();
	if (netdev->ifalias)
		ret = sprintf(buf, "%s\n", netdev->ifalias);
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RW(ifalias);
static int change_group(struct net_device *dev, unsigned long new_group)
{
	dev_set_group(dev, (int)new_group);
	return 0;
}

static ssize_t group_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_group);
}
NETDEVICE_SHOW(group, fmt_dec);
static DEVICE_ATTR(netdev_group, S_IRUGO | S_IWUSR, group_show, group_store);
static ssize_t phys_port_id_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		struct netdev_phys_port_id ppid;

		ret = dev_get_phys_port_id(netdev, &ppid);
		if (!ret)
			ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_port_id);
static struct attribute *net_class_attrs[] = {
	&dev_attr_netdev_group.attr,
	&dev_attr_type.attr,
	&dev_attr_dev_id.attr,
	&dev_attr_dev_port.attr,
	&dev_attr_iflink.attr,
	&dev_attr_ifindex.attr,
	&dev_attr_name_assign_type.attr,
	&dev_attr_addr_assign_type.attr,
	&dev_attr_addr_len.attr,
	&dev_attr_link_mode.attr,
	&dev_attr_address.attr,
	&dev_attr_broadcast.attr,
	&dev_attr_speed.attr,
	&dev_attr_duplex.attr,
	&dev_attr_dormant.attr,
	&dev_attr_operstate.attr,
	&dev_attr_carrier_changes.attr,
	&dev_attr_ifalias.attr,
	&dev_attr_carrier.attr,
	&dev_attr_mtu.attr,
	&dev_attr_flags.attr,
	&dev_attr_tx_queue_len.attr,
	&dev_attr_gro_flush_timeout.attr,
	&dev_attr_phys_port_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(net_class);
/* Show a given attribute in the statistics group */
static ssize_t netstat_show(const struct device *d,
			    struct device_attribute *attr, char *buf,
			    unsigned long offset)
{
	struct net_device *dev = to_net_dev(d);
	ssize_t ret = -EINVAL;

	WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
		offset % sizeof(u64) != 0);

	read_lock(&dev_base_lock);
	if (dev_isalive(dev)) {
		struct rtnl_link_stats64 temp;
		const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

		ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *)stats) + offset));
	}
	read_unlock(&dev_base_lock);

	return ret;
}
/* generate a read-only statistics attribute */
#define NETSTAT_ENTRY(name) \
static ssize_t name##_show(struct device *d, \
			   struct device_attribute *attr, char *buf) \
{ \
	return netstat_show(d, attr, buf, \
			    offsetof(struct rtnl_link_stats64, name)); \
} \
static DEVICE_ATTR_RO(name)
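/*
 * Each NETSTAT_ENTRY() below becomes a read-only file in the "statistics"
 * attribute group, e.g. /sys/class/net/<iface>/statistics/rx_bytes
 * (illustrative path), backed by the matching rtnl_link_stats64 field.
 */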
NETSTAT_ENTRY(rx_packets);
NETSTAT_ENTRY(tx_packets);
NETSTAT_ENTRY(rx_bytes);
NETSTAT_ENTRY(tx_bytes);
NETSTAT_ENTRY(rx_errors);
NETSTAT_ENTRY(tx_errors);
NETSTAT_ENTRY(rx_dropped);
NETSTAT_ENTRY(tx_dropped);
NETSTAT_ENTRY(multicast);
NETSTAT_ENTRY(collisions);
NETSTAT_ENTRY(rx_length_errors);
NETSTAT_ENTRY(rx_over_errors);
NETSTAT_ENTRY(rx_crc_errors);
NETSTAT_ENTRY(rx_frame_errors);
NETSTAT_ENTRY(rx_fifo_errors);
NETSTAT_ENTRY(rx_missed_errors);
NETSTAT_ENTRY(tx_aborted_errors);
NETSTAT_ENTRY(tx_carrier_errors);
NETSTAT_ENTRY(tx_fifo_errors);
NETSTAT_ENTRY(tx_heartbeat_errors);
NETSTAT_ENTRY(tx_window_errors);
NETSTAT_ENTRY(rx_compressed);
NETSTAT_ENTRY(tx_compressed);
static struct attribute *netstat_attrs[] = {
	&dev_attr_rx_packets.attr,
	&dev_attr_tx_packets.attr,
	&dev_attr_rx_bytes.attr,
	&dev_attr_tx_bytes.attr,
	&dev_attr_rx_errors.attr,
	&dev_attr_tx_errors.attr,
	&dev_attr_rx_dropped.attr,
	&dev_attr_tx_dropped.attr,
	&dev_attr_multicast.attr,
	&dev_attr_collisions.attr,
	&dev_attr_rx_length_errors.attr,
	&dev_attr_rx_over_errors.attr,
	&dev_attr_rx_crc_errors.attr,
	&dev_attr_rx_frame_errors.attr,
	&dev_attr_rx_fifo_errors.attr,
	&dev_attr_rx_missed_errors.attr,
	&dev_attr_tx_aborted_errors.attr,
	&dev_attr_tx_carrier_errors.attr,
	&dev_attr_tx_fifo_errors.attr,
	&dev_attr_tx_heartbeat_errors.attr,
	&dev_attr_tx_window_errors.attr,
	&dev_attr_rx_compressed.attr,
	&dev_attr_tx_compressed.attr,
	NULL
};

static struct attribute_group netstat_group = {
	.name  = "statistics",
	.attrs = netstat_attrs,
};
#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
static struct attribute *wireless_attrs[] = {
	NULL
};

static struct attribute_group wireless_group = {
	.name = "wireless",
	.attrs = wireless_attrs,
};
#endif

#else /* CONFIG_SYSFS */
#define net_class_groups	NULL
#endif /* CONFIG_SYSFS */
#ifdef CONFIG_SYSFS
#define to_rx_queue_attr(_attr) container_of(_attr, \
	struct rx_queue_attribute, attr)

#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)

static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
				  char *buf)
{
	struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, attribute, buf);
}

static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
				   const char *buf, size_t count)
{
	struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, attribute, buf, count);
}

static const struct sysfs_ops rx_queue_sysfs_ops = {
	.show = rx_queue_attr_show,
	.store = rx_queue_attr_store,
};
#ifdef CONFIG_RPS
static ssize_t show_rps_map(struct netdev_rx_queue *queue,
			    struct rx_queue_attribute *attribute, char *buf)
{
	struct rps_map *map;
	cpumask_var_t mask;
	size_t len = 0;
	int i;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	rcu_read_lock();
	map = rcu_dereference(queue->rps_map);
	if (map)
		for (i = 0; i < map->len; i++)
			cpumask_set_cpu(map->cpus[i], mask);

	len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
	if (PAGE_SIZE - len < 3) {
		rcu_read_unlock();
		free_cpumask_var(mask);
		return -EINVAL;
	}
	rcu_read_unlock();

	free_cpumask_var(mask);
	len += sprintf(buf + len, "\n");
	return len;
}
static ssize_t store_rps_map(struct netdev_rx_queue *queue,
			     struct rx_queue_attribute *attribute,
			     const char *buf, size_t len)
{
	struct rps_map *old_map, *map;
	cpumask_var_t mask;
	int err, cpu, i;
	static DEFINE_SPINLOCK(rps_map_lock);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	map = kzalloc(max_t(unsigned int,
			    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
		      GFP_KERNEL);
	if (!map) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}

	i = 0;
	for_each_cpu_and(cpu, mask, cpu_online_mask)
		map->cpus[i++] = cpu;

	if (i)
		map->len = i;
	else {
		kfree(map);
		map = NULL;
	}

	spin_lock(&rps_map_lock);
	old_map = rcu_dereference_protected(queue->rps_map,
					    lockdep_is_held(&rps_map_lock));
	rcu_assign_pointer(queue->rps_map, map);
	spin_unlock(&rps_map_lock);

	if (map)
		static_key_slow_inc(&rps_needed);
	if (old_map) {
		kfree_rcu(old_map, rcu);
		static_key_slow_dec(&rps_needed);
	}
	free_cpumask_var(mask);
	return len;
}
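/*
 * Illustrative usage: writing a hex CPU mask to a queue's rps_cpus file
 * selects the CPUs eligible to run RPS for that RX queue, e.g.
 *   # echo f > /sys/class/net/eth0/queues/rx-0/rps_cpus
 * An empty or zero mask drops the map (map is set to NULL above), which
 * disables RPS for the queue.
 */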
static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					   struct rx_queue_attribute *attr,
					   char *buf)
{
	struct rps_dev_flow_table *flow_table;
	unsigned long val = 0;

	rcu_read_lock();
	flow_table = rcu_dereference(queue->rps_flow_table);
	if (flow_table)
		val = (unsigned long)flow_table->mask + 1;
	rcu_read_unlock();

	return sprintf(buf, "%lu\n", val);
}
static void rps_dev_flow_table_release(struct rcu_head *rcu)
{
	struct rps_dev_flow_table *table = container_of(rcu,
	    struct rps_dev_flow_table, rcu);

	vfree(table);
}
static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					    struct rx_queue_attribute *attr,
					    const char *buf, size_t len)
{
	unsigned long mask, count;
	struct rps_dev_flow_table *table, *old_table;
	static DEFINE_SPINLOCK(rps_dev_flow_lock);
	int rc;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	rc = kstrtoul(buf, 0, &count);
	if (rc < 0)
		return rc;

	if (count) {
		mask = count - 1;
		/* mask = roundup_pow_of_two(count) - 1;
		 * without overflows...
		 */
		while ((mask | (mask >> 1)) != mask)
			mask |= (mask >> 1);
		/* On 64 bit arches, must check mask fits in table->mask (u32),
		 * and on 32bit arches, must check
		 * RPS_DEV_FLOW_TABLE_SIZE(mask + 1) doesn't overflow.
		 */
#if BITS_PER_LONG > 32
		if (mask > (unsigned long)(u32)mask)
			return -EINVAL;
#else
		if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
				/ sizeof(struct rps_dev_flow)) {
			/* Enforce a limit to prevent overflow */
			return -EINVAL;
		}
#endif
		table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
		if (!table)
			return -ENOMEM;

		table->mask = mask;
		for (count = 0; count <= mask; count++)
			table->flows[count].cpu = RPS_NO_CPU;
	} else
		table = NULL;

	spin_lock(&rps_dev_flow_lock);
	old_table = rcu_dereference_protected(queue->rps_flow_table,
					      lockdep_is_held(&rps_dev_flow_lock));
	rcu_assign_pointer(queue->rps_flow_table, table);
	spin_unlock(&rps_dev_flow_lock);

	if (old_table)
		call_rcu(&old_table->rcu, rps_dev_flow_table_release);

	return len;
}
static struct rx_queue_attribute rps_cpus_attribute =
	__ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map);

static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute =
	__ATTR(rps_flow_cnt, S_IRUGO | S_IWUSR,
	       show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);
#endif /* CONFIG_RPS */
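/*
 * rps_flow_cnt (illustrative): writing a flow count allocates a per-queue
 * flow table of that many entries, rounded up to a power of two, e.g.
 *   # echo 4096 > /sys/class/net/eth0/queues/rx-0/rps_flow_cnt
 * Writing 0 releases the table.
 */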
static struct attribute *rx_queue_default_attrs[] = {
#ifdef CONFIG_RPS
	&rps_cpus_attribute.attr,
	&rps_dev_flow_table_cnt_attribute.attr,
#endif
	NULL
};
static void rx_queue_release(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
#ifdef CONFIG_RPS
	struct rps_map *map;
	struct rps_dev_flow_table *flow_table;

	map = rcu_dereference_protected(queue->rps_map, 1);
	if (map) {
		RCU_INIT_POINTER(queue->rps_map, NULL);
		kfree_rcu(map, rcu);
	}

	flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
	if (flow_table) {
		RCU_INIT_POINTER(queue->rps_flow_table, NULL);
		call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
	}
#endif

	memset(kobj, 0, sizeof(*kobj));
	dev_put(queue->dev);
}
static const void *rx_queue_namespace(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
	struct device *dev = &queue->dev->dev;
	const void *ns = NULL;

	if (dev->class && dev->class->ns_type)
		ns = dev->class->namespace(dev);

	return ns;
}
static struct kobj_type rx_queue_ktype = {
	.sysfs_ops = &rx_queue_sysfs_ops,
	.release = rx_queue_release,
	.default_attrs = rx_queue_default_attrs,
	.namespace = rx_queue_namespace
};
static int rx_queue_add_kobject(struct net_device *dev, int index)
{
	struct netdev_rx_queue *queue = dev->_rx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	kobj->kset = dev->queues_kset;
	error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
				     "rx-%u", index);
	if (error)
		goto exit;

	if (dev->sysfs_rx_queue_group) {
		error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
		if (error)
			goto exit;
	}

	kobject_uevent(kobj, KOBJ_ADD);
	dev_hold(queue->dev);

	return error;
exit:
	kobject_put(kobj);
	return error;
}
#endif /* CONFIG_SYSFS */
int
net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
	int i;
	int error = 0;

#ifndef CONFIG_RPS
	if (!dev->sysfs_rx_queue_group)
		return 0;
#endif
	for (i = old_num; i < new_num; i++) {
		error = rx_queue_add_kobject(dev, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num) {
		if (dev->sysfs_rx_queue_group)
			sysfs_remove_group(&dev->_rx[i].kobj,
					   dev->sysfs_rx_queue_group);
		kobject_put(&dev->_rx[i].kobj);
	}

	return error;
#else
	return 0;
#endif
}
#ifdef CONFIG_SYSFS
/*
 * netdev_queue sysfs structures and functions.
 */
struct netdev_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_queue *queue,
			struct netdev_queue_attribute *attr, char *buf);
	ssize_t (*store)(struct netdev_queue *queue,
			 struct netdev_queue_attribute *attr,
			 const char *buf, size_t len);
};
#define to_netdev_queue_attr(_attr) container_of(_attr, \
	struct netdev_queue_attribute, attr)

#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)
static ssize_t netdev_queue_attr_show(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, attribute, buf);
}

static ssize_t netdev_queue_attr_store(struct kobject *kobj,
				       struct attribute *attr,
				       const char *buf, size_t count)
{
	struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, attribute, buf, count);
}

static const struct sysfs_ops netdev_queue_sysfs_ops = {
	.show = netdev_queue_attr_show,
	.store = netdev_queue_attr_store,
};
static ssize_t show_trans_timeout(struct netdev_queue *queue,
				  struct netdev_queue_attribute *attribute,
				  char *buf)
{
	unsigned long trans_timeout;

	spin_lock_irq(&queue->_xmit_lock);
	trans_timeout = queue->trans_timeout;
	spin_unlock_irq(&queue->_xmit_lock);

	return sprintf(buf, "%lu", trans_timeout);
}

static struct netdev_queue_attribute queue_trans_timeout =
	__ATTR(tx_timeout, S_IRUGO, show_trans_timeout, NULL);
#ifdef CONFIG_BQL
/*
 * Byte queue limits sysfs structures and functions.
 */
static ssize_t bql_show(char *buf, unsigned int value)
{
	return sprintf(buf, "%u\n", value);
}

static ssize_t bql_set(const char *buf, const size_t count,
		       unsigned int *pvalue)
{
	unsigned int value;
	int err;

	if (!strcmp(buf, "max") || !strcmp(buf, "max\n"))
		value = DQL_MAX_LIMIT;
	else {
		err = kstrtouint(buf, 10, &value);
		if (err < 0)
			return err;
		if (value > DQL_MAX_LIMIT)
			return -EINVAL;
	}

	*pvalue = value;

	return count;
}
static ssize_t bql_show_hold_time(struct netdev_queue *queue,
				  struct netdev_queue_attribute *attr,
				  char *buf)
{
	struct dql *dql = &queue->dql;

	return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time));
}

static ssize_t bql_set_hold_time(struct netdev_queue *queue,
				 struct netdev_queue_attribute *attribute,
				 const char *buf, size_t len)
{
	struct dql *dql = &queue->dql;
	unsigned int value;
	int err;

	err = kstrtouint(buf, 10, &value);
	if (err < 0)
		return err;

	dql->slack_hold_time = msecs_to_jiffies(value);

	return len;
}

static struct netdev_queue_attribute bql_hold_time_attribute =
	__ATTR(hold_time, S_IRUGO | S_IWUSR, bql_show_hold_time,
	       bql_set_hold_time);
static ssize_t bql_show_inflight(struct netdev_queue *queue,
				 struct netdev_queue_attribute *attr,
				 char *buf)
{
	struct dql *dql = &queue->dql;

	return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed);
}

static struct netdev_queue_attribute bql_inflight_attribute =
	__ATTR(inflight, S_IRUGO, bql_show_inflight, NULL);
#define BQL_ATTR(NAME, FIELD) \
static ssize_t bql_show_ ## NAME(struct netdev_queue *queue, \
				 struct netdev_queue_attribute *attr, \
				 char *buf) \
{ \
	return bql_show(buf, queue->dql.FIELD); \
} \
 \
static ssize_t bql_set_ ## NAME(struct netdev_queue *queue, \
				struct netdev_queue_attribute *attr, \
				const char *buf, size_t len) \
{ \
	return bql_set(buf, len, &queue->dql.FIELD); \
} \
 \
static struct netdev_queue_attribute bql_ ## NAME ## _attribute = \
	__ATTR(NAME, S_IRUGO | S_IWUSR, \
	       bql_show_ ## NAME, bql_set_ ## NAME)

BQL_ATTR(limit, limit)
BQL_ATTR(limit_max, max_limit)
BQL_ATTR(limit_min, min_limit)
static struct attribute *dql_attrs[] = {
	&bql_limit_attribute.attr,
	&bql_limit_max_attribute.attr,
	&bql_limit_min_attribute.attr,
	&bql_hold_time_attribute.attr,
	&bql_inflight_attribute.attr,
	NULL
};

static struct attribute_group dql_group = {
	.name  = "byte_queue_limits",
	.attrs = dql_attrs,
};
#endif /* CONFIG_BQL */
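/*
 * The dql_group above shows up per TX queue as a "byte_queue_limits"
 * directory, e.g. /sys/class/net/eth0/queues/tx-0/byte_queue_limits/
 * (illustrative path) containing limit, limit_max, limit_min, hold_time
 * and inflight; it is attached in netdev_queue_add_kobject() below.
 */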
#ifdef CONFIG_XPS
static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
{
	struct net_device *dev = queue->dev;
	unsigned int i;

	i = queue - dev->_tx;
	BUG_ON(i >= dev->num_tx_queues);

	return i;
}
static ssize_t show_xps_map(struct netdev_queue *queue,
			    struct netdev_queue_attribute *attribute, char *buf)
{
	struct net_device *dev = queue->dev;
	struct xps_dev_maps *dev_maps;
	cpumask_var_t mask;
	unsigned long index;
	size_t len = 0;
	int i;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	index = get_netdev_queue_index(queue);

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		for_each_possible_cpu(i) {
			struct xps_map *map =
			    rcu_dereference(dev_maps->cpu_map[i]);
			if (map) {
				int j;

				for (j = 0; j < map->len; j++) {
					if (map->queues[j] == index) {
						cpumask_set_cpu(i, mask);
						break;
					}
				}
			}
		}
	}
	rcu_read_unlock();

	len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
	if (PAGE_SIZE - len < 3) {
		free_cpumask_var(mask);
		return -EINVAL;
	}

	free_cpumask_var(mask);
	len += sprintf(buf + len, "\n");
	return len;
}
static ssize_t store_xps_map(struct netdev_queue *queue,
			     struct netdev_queue_attribute *attribute,
			     const char *buf, size_t len)
{
	struct net_device *dev = queue->dev;
	unsigned long index;
	cpumask_var_t mask;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	index = get_netdev_queue_index(queue);

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	err = netif_set_xps_queue(dev, mask, index);

	free_cpumask_var(mask);

	return err ? : len;
}

static struct netdev_queue_attribute xps_cpus_attribute =
	__ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map);
#endif /* CONFIG_XPS */
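/*
 * Illustrative usage: xps_cpus maps a TX queue to the CPUs allowed to use
 * it for transmit, e.g. pin TX queue 1 to CPUs 2-3:
 *   # echo c > /sys/class/net/eth0/queues/tx-1/xps_cpus
 * The parsed mask is handed to netif_set_xps_queue() above.
 */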
static struct attribute *netdev_queue_default_attrs[] = {
	&queue_trans_timeout.attr,
#ifdef CONFIG_XPS
	&xps_cpus_attribute.attr,
#endif
	NULL
};
static void netdev_queue_release(struct kobject *kobj)
{
	struct netdev_queue *queue = to_netdev_queue(kobj);

	memset(kobj, 0, sizeof(*kobj));
	dev_put(queue->dev);
}
static const void *netdev_queue_namespace(struct kobject *kobj)
{
	struct netdev_queue *queue = to_netdev_queue(kobj);
	struct device *dev = &queue->dev->dev;
	const void *ns = NULL;

	if (dev->class && dev->class->ns_type)
		ns = dev->class->namespace(dev);

	return ns;
}
static struct kobj_type netdev_queue_ktype = {
	.sysfs_ops = &netdev_queue_sysfs_ops,
	.release = netdev_queue_release,
	.default_attrs = netdev_queue_default_attrs,
	.namespace = netdev_queue_namespace,
};
static int netdev_queue_add_kobject(struct net_device *dev, int index)
{
	struct netdev_queue *queue = dev->_tx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	kobj->kset = dev->queues_kset;
	error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
				     "tx-%u", index);
	if (error)
		goto exit;

#ifdef CONFIG_BQL
	error = sysfs_create_group(kobj, &dql_group);
	if (error)
		goto exit;
#endif

	kobject_uevent(kobj, KOBJ_ADD);
	dev_hold(queue->dev);

	return 0;
exit:
	kobject_put(kobj);
	return error;
}
#endif /* CONFIG_SYSFS */
int
netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
	int i;
	int error = 0;

	for (i = old_num; i < new_num; i++) {
		error = netdev_queue_add_kobject(dev, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num) {
		struct netdev_queue *queue = dev->_tx + i;

#ifdef CONFIG_BQL
		sysfs_remove_group(&queue->kobj, &dql_group);
#endif
		kobject_put(&queue->kobj);
	}

	return error;
#else
	return 0;
#endif /* CONFIG_SYSFS */
}
static int register_queue_kobjects(struct net_device *dev)
{
	int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	dev->queues_kset = kset_create_and_add("queues",
					       NULL, &dev->dev.kobj);
	if (!dev->queues_kset)
		return -ENOMEM;
	real_rx = dev->real_num_rx_queues;
#endif
	real_tx = dev->real_num_tx_queues;

	error = net_rx_queue_update_kobjects(dev, 0, real_rx);
	if (error)
		goto error;
	rxq = real_rx;

	error = netdev_queue_update_kobjects(dev, 0, real_tx);
	if (error)
		goto error;
	txq = real_tx;

	return 0;

error:
	netdev_queue_update_kobjects(dev, txq, 0);
	net_rx_queue_update_kobjects(dev, rxq, 0);
	return error;
}
static void remove_queue_kobjects(struct net_device *dev)
{
	int real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	real_rx = dev->real_num_rx_queues;
#endif
	real_tx = dev->real_num_tx_queues;

	net_rx_queue_update_kobjects(dev, real_rx, 0);
	netdev_queue_update_kobjects(dev, real_tx, 0);
#ifdef CONFIG_SYSFS
	kset_unregister(dev->queues_kset);
#endif
}
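/*
 * Layout note: register_queue_kobjects() above creates the per-device
 * "queues" kset, so each device ends up with rx-<n> and tx-<n> kobject
 * directories (e.g. /sys/class/net/eth0/queues/rx-0, illustrative path)
 * whose attributes are the per-queue files defined earlier in this file.
 */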
static bool net_current_may_mount(void)
{
	struct net *net = current->nsproxy->net_ns;

	return ns_capable(net->user_ns, CAP_SYS_ADMIN);
}
static void *net_grab_current_ns(void)
{
	struct net *ns = current->nsproxy->net_ns;
#ifdef CONFIG_NET_NS
	if (ns)
		atomic_inc(&ns->passive);
#endif
	return ns;
}
static const void *net_initial_ns(void)
{
	return &init_net;
}

static const void *net_netlink_ns(struct sock *sk)
{
	return sock_net(sk);
}
struct kobj_ns_type_operations net_ns_type_operations = {
	.type = KOBJ_NS_TYPE_NET,
	.current_may_mount = net_current_may_mount,
	.grab_current_ns = net_grab_current_ns,
	.netlink_ns = net_netlink_ns,
	.initial_ns = net_initial_ns,
	.drop_ns = net_drop_ns,
};
EXPORT_SYMBOL_GPL(net_ns_type_operations);
static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
{
	struct net_device *dev = to_net_dev(d);
	int retval;

	/* pass interface to uevent. */
	retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
	if (retval)
		goto exit;

	/* pass ifindex to uevent.
	 * ifindex is useful as it won't change (interface name may change)
	 * and is what rtnetlink uses natively.
	 */
	retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);

exit:
	return retval;
}
/*
 *	netdev_release -- destroy and free a dead device.
 *	Called when last reference to device kobject is gone.
 */
static void netdev_release(struct device *d)
{
	struct net_device *dev = to_net_dev(d);

	BUG_ON(dev->reg_state != NETREG_RELEASED);

	kfree(dev->ifalias);
	netdev_freemem(dev);
}
static const void *net_namespace(struct device *d)
{
	struct net_device *dev;

	dev = container_of(d, struct net_device, dev);
	return dev_net(dev);
}
static struct class net_class = {
	.name = "net",
	.dev_release = netdev_release,
	.dev_groups = net_class_groups,
	.dev_uevent = netdev_uevent,
	.ns_type = &net_ns_type_operations,
	.namespace = net_namespace,
};
/* Delete sysfs entries but hold kobject reference until after all
 * netdev references are gone.
 */
void netdev_unregister_kobject(struct net_device *ndev)
{
	struct device *dev = &(ndev->dev);

	kobject_get(&dev->kobj);

	remove_queue_kobjects(ndev);

	pm_runtime_set_memalloc_noio(dev, false);

	device_del(dev);
}
/* Create sysfs entries for network device. */
int netdev_register_kobject(struct net_device *ndev)
{
	struct device *dev = &(ndev->dev);
	const struct attribute_group **groups = ndev->sysfs_groups;
	int error = 0;

	device_initialize(dev);
	dev->class = &net_class;
	dev->platform_data = ndev;
	dev->groups = groups;

	dev_set_name(dev, "%s", ndev->name);

#ifdef CONFIG_SYSFS
	/* Allow for a device specific group */
	if (*groups)
		groups++;

	*groups++ = &netstat_group;

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
	if (ndev->ieee80211_ptr)
		*groups++ = &wireless_group;
#if IS_ENABLED(CONFIG_WIRELESS_EXT)
	else if (ndev->wireless_handlers)
		*groups++ = &wireless_group;
#endif
#endif
#endif /* CONFIG_SYSFS */

	error = device_add(dev);
	if (error)
		return error;

	error = register_queue_kobjects(ndev);
	if (error) {
		device_del(dev);
		return error;
	}

	pm_runtime_set_memalloc_noio(dev, true);

	return error;
}
int netdev_class_create_file_ns(struct class_attribute *class_attr,
				const void *ns)
{
	return class_create_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_create_file_ns);

void netdev_class_remove_file_ns(struct class_attribute *class_attr,
				 const void *ns)
{
	class_remove_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_remove_file_ns);
int __init netdev_kobject_init(void)
{
	kobj_ns_type_register(&net_ns_type_operations);
	return class_register(&net_class);
}