/**
- * struct vlan_rx_stats - VLAN percpu rx stats
+ * struct vlan_pcpu_stats - VLAN percpu rx/tx stats
* @rx_packets: number of received packets
* @rx_bytes: number of received bytes
* @rx_multicast: number of received multicast packets
+ * @tx_packets: number of transmitted packets
+ * @tx_bytes: number of transmitted bytes
* @syncp: synchronization point for 64bit counters
- * @rx_errors: number of errors
+ * @rx_errors: number of rx errors
+ * @tx_dropped: number of tx drops
*/
-struct vlan_rx_stats {
+struct vlan_pcpu_stats {
u64 rx_packets;
u64 rx_bytes;
u64 rx_multicast;
+ u64 tx_packets;
+ u64 tx_bytes;
struct u64_stats_sync syncp;
- unsigned long rx_errors;
+ u32 rx_errors;
+ u32 tx_dropped;
};
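
The syncp field matters only on 32-bit hosts, where a u64 cannot be read atomically: u64_stats_sync is a seqcount there and compiles away entirely on 64-bit. rx_errors and tx_dropped sit outside its protection because a u32 is a single word everywhere, so torn reads are impossible. A minimal sketch of the writer side of that protocol (the helper name is hypothetical; the pattern matches the hunks below):

	static void vlan_account_rx(struct vlan_pcpu_stats *stats,
				    unsigned int len)
	{
		u64_stats_update_begin(&stats->syncp);	/* seqcount bump on 32-bit */
		stats->rx_packets++;
		stats->rx_bytes += len;
		u64_stats_update_end(&stats->syncp);	/* second bump; readers retry */
	}
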
/**
* @real_dev: underlying netdevice
* @real_dev_addr: address of underlying netdevice
* @dent: proc dir entry
- * @vlan_rx_stats: ptr to percpu rx stats
+ * @vlan_pcpu_stats: ptr to percpu rx/tx stats
*/
struct vlan_dev_info {
unsigned int nr_ingress_mappings;
unsigned char real_dev_addr[ETH_ALEN];
struct proc_dir_entry *dent;
- struct vlan_rx_stats __percpu *vlan_rx_stats;
+ struct vlan_pcpu_stats __percpu *vlan_pcpu_stats;
};
static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev)
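
The accessor above presumably just wraps netdev_priv(): priv_size is set to sizeof(struct vlan_dev_info) in the rtnl_link_ops block at the end of this patch, so the device's private area is the vlan_dev_info itself. A sketch of the body elided here:

	static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev)
	{
		return netdev_priv(dev);
	}
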
struct packet_type *ptype, struct net_device *orig_dev)
{
struct vlan_hdr *vhdr;
- struct vlan_rx_stats *rx_stats;
+ struct vlan_pcpu_stats *rx_stats;
struct net_device *vlan_dev;
u16 vlan_id;
u16 vlan_tci;
} else {
skb->dev = vlan_dev;
- rx_stats = this_cpu_ptr(vlan_dev_info(skb->dev)->vlan_rx_stats);
+ rx_stats = this_cpu_ptr(vlan_dev_info(skb->dev)->vlan_pcpu_stats);
u64_stats_update_begin(&rx_stats->syncp);
rx_stats->rx_packets++;
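
The receive path runs in softirq context with preemption disabled, so this_cpu_ptr() is safe here. The hunk is truncated after the first counter; assuming it follows the pattern of the struct above, the update bracket presumably closes along these lines (a sketch, not patch content):

		rx_stats->rx_bytes += skb->len;
		if (skb->pkt_type == PACKET_MULTICAST)
			rx_stats->rx_multicast++;
		u64_stats_update_end(&rx_stats->syncp);
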
static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- int i = skb_get_queue_mapping(skb);
- struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
unsigned int len;
int ret;
ret = dev_queue_xmit(skb);
if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
- txq->tx_packets++;
- txq->tx_bytes += len;
- } else
- txq->tx_dropped++;
+ struct vlan_pcpu_stats *stats;
+
+ stats = this_cpu_ptr(vlan_dev_info(dev)->vlan_pcpu_stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->tx_packets++;
+ stats->tx_bytes += len;
+ u64_stats_update_end(&stats->syncp);
+ } else {
+ this_cpu_inc(vlan_dev_info(dev)->vlan_pcpu_stats->tx_dropped);
+ }
return ret;
}
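
Note the asymmetry between the two branches: the success path updates two u64 counters that must be snapshotted together, so it takes the full syncp bracket, while the drop path bumps a lone u32 and can use the compact this_cpu_inc() idiom, which needs no bracket. Written out long-hand, the drop accounting is equivalent to the following (safe here because ndo_start_xmit runs with BHs disabled):

	struct vlan_pcpu_stats *stats;

	stats = this_cpu_ptr(vlan_dev_info(dev)->vlan_pcpu_stats);
	stats->tx_dropped++;	/* u32: no syncp bracket required */
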
(1<<__LINK_STATE_PRESENT);
dev->features |= real_dev->features & real_dev->vlan_features;
+ dev->features |= NETIF_F_LLTX;
dev->gso_max_size = real_dev->gso_max_size;
/* ipv6 shared card related stuff */
vlan_dev_set_lockdep_class(dev, subclass);
- vlan_dev_info(dev)->vlan_rx_stats = alloc_percpu(struct vlan_rx_stats);
- if (!vlan_dev_info(dev)->vlan_rx_stats)
+ vlan_dev_info(dev)->vlan_pcpu_stats = alloc_percpu(struct vlan_pcpu_stats);
+ if (!vlan_dev_info(dev)->vlan_pcpu_stats)
return -ENOMEM;
return 0;
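
alloc_percpu() returns zeroed storage, which is all the initialization the plain seqcount inside syncp needed at the time of this patch. Later kernels gained u64_stats_init() for lockdep-annotated seqcounts; on those, each per-cpu syncp wants an explicit init. A hedged sketch of what that would look like here:

	int i;

	vlan_dev_info(dev)->vlan_pcpu_stats = alloc_percpu(struct vlan_pcpu_stats);
	if (!vlan_dev_info(dev)->vlan_pcpu_stats)
		return -ENOMEM;
	for_each_possible_cpu(i)
		u64_stats_init(&per_cpu_ptr(vlan_dev_info(dev)->vlan_pcpu_stats,
					    i)->syncp);
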
struct vlan_dev_info *vlan = vlan_dev_info(dev);
int i;
- free_percpu(vlan->vlan_rx_stats);
- vlan->vlan_rx_stats = NULL;
+ free_percpu(vlan->vlan_pcpu_stats);
+ vlan->vlan_pcpu_stats = NULL;
for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
while ((pm = vlan->egress_priority_map[i]) != NULL) {
vlan->egress_priority_map[i] = pm->next;
static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
- dev_txq_stats_fold(dev, stats);
- if (vlan_dev_info(dev)->vlan_rx_stats) {
- struct vlan_rx_stats *p, accum = {0};
+ if (vlan_dev_info(dev)->vlan_pcpu_stats) {
+ struct vlan_pcpu_stats *p;
+ u32 rx_errors = 0, tx_dropped = 0;
int i;
for_each_possible_cpu(i) {
- u64 rxpackets, rxbytes, rxmulticast;
+ u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes;
unsigned int start;
- p = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats, i);
+ p = per_cpu_ptr(vlan_dev_info(dev)->vlan_pcpu_stats, i);
do {
start = u64_stats_fetch_begin_bh(&p->syncp);
rxpackets = p->rx_packets;
rxbytes = p->rx_bytes;
rxmulticast = p->rx_multicast;
+ txpackets = p->tx_packets;
+ txbytes = p->tx_bytes;
} while (u64_stats_fetch_retry_bh(&p->syncp, start));
- accum.rx_packets += rxpackets;
- accum.rx_bytes += rxbytes;
- accum.rx_multicast += rxmulticast;
- /* rx_errors is ulong, not protected by syncp */
- accum.rx_errors += p->rx_errors;
+
+ stats->rx_packets += rxpackets;
+ stats->rx_bytes += rxbytes;
+ stats->multicast += rxmulticast;
+ stats->tx_packets += txpackets;
+ stats->tx_bytes += txbytes;
+ /* rx_errors & tx_dropped are u32 */
+ rx_errors += p->rx_errors;
+ tx_dropped += p->tx_dropped;
}
- stats->rx_packets = accum.rx_packets;
- stats->rx_bytes = accum.rx_bytes;
- stats->rx_errors = accum.rx_errors;
- stats->multicast = accum.rx_multicast;
+ stats->rx_errors = rx_errors;
+ stats->tx_dropped = tx_dropped;
}
return stats;
}
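
With dev_txq_stats_fold() gone, every counter now comes from the per-cpu structs, and the += accumulation is safe because the core zeroes the rtnl_link_stats64 before invoking ndo_get_stats64(). For reference, a typical caller (the device variable is hypothetical):

	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *stats;

	/* dev_get_stats() memsets temp to zero, then calls the driver's
	 * ndo_get_stats64(), i.e. vlan_dev_get_stats64() above.
	 */
	stats = dev_get_stats(vlan_dev, &temp);
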
return 0;
}
-static int vlan_get_tx_queues(struct net *net,
- struct nlattr *tb[],
- unsigned int *num_tx_queues,
- unsigned int *real_num_tx_queues)
-{
- struct net_device *real_dev;
-
- if (!tb[IFLA_LINK])
- return -EINVAL;
-
- real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
- if (!real_dev)
- return -ENODEV;
-
- *num_tx_queues = real_dev->num_tx_queues;
- *real_num_tx_queues = real_dev->real_num_tx_queues;
- return 0;
-}
-
static int vlan_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
.maxtype = IFLA_VLAN_MAX,
.policy = vlan_policy,
.priv_size = sizeof(struct vlan_dev_info),
- .get_tx_queues = vlan_get_tx_queues,
.setup = vlan_setup,
.validate = vlan_validate,
.newlink = vlan_newlink,