 	struct netdev_queue	rx_queue;
 	struct netdev_queue	tx_queue ____cacheline_aligned_in_smp;
-
-	struct Qdisc		*qdisc_ingress;
-
 	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
 	/* Partially transmitted GSO packet. */

 	rxq = &dev->rx_queue;
 	spin_lock(&rxq->lock);
-	if ((q = dev->qdisc_ingress) != NULL)
+	if ((q = rxq->qdisc) != NULL)
 		result = q->enqueue(skb, q);
 	spin_unlock(&rxq->lock);

 					 struct packet_type **pt_prev,
 					 int *ret, struct net_device *orig_dev)
 {
-	if (!skb->dev->qdisc_ingress)
+	if (!skb->dev->rx_queue.qdisc)
 		goto out;
 	if (*pt_prev) {

 	qdisc_lock_tree(dev);
 	if (qdisc && qdisc->flags&TCQ_F_INGRESS) {
-		oqdisc = dev->qdisc_ingress;
+		dev_queue = &dev->rx_queue;
+		oqdisc = dev_queue->qdisc;
 		/* Prune old scheduler */
 		if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1) {
 			/* delete */
 			qdisc_reset(oqdisc);
-			dev->qdisc_ingress = NULL;
+			dev_queue->qdisc = NULL;
 		} else { /* new */
-			dev->qdisc_ingress = qdisc;
+			dev_queue->qdisc = qdisc;
 		}
 	} else {

 					return -ENOENT;
 				q = qdisc_leaf(p, clid);
 			} else { /* ingress */
-				q = dev->qdisc_ingress;
+				q = dev->rx_queue.qdisc;
 			}
 		} else {
 			struct netdev_queue *dev_queue = &dev->tx_queue;

 					return -ENOENT;
 				q = qdisc_leaf(p, clid);
 			} else { /*ingress */
-				q = dev->qdisc_ingress;
+				q = dev->rx_queue.qdisc;
 			}
 		} else {
 			struct netdev_queue *dev_queue = &dev->tx_queue;
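
The net effect of these hunks is that there is no longer a dedicated dev->qdisc_ingress pointer: the ingress qdisc is simply whatever hangs off dev->rx_queue.qdisc, protected by rx_queue.lock. A minimal sketch of the resulting access pattern, for illustration only (dev_ingress_qdisc() is a hypothetical helper, not part of this patch):

/* Hypothetical helper, assuming the post-patch layout: the ingress qdisc
 * lives in dev->rx_queue.qdisc (NULL when none is attached) and enqueue on
 * it happens under dev->rx_queue.lock, as in ing_filter() above.
 */
static inline struct Qdisc *dev_ingress_qdisc(struct net_device *dev)
{
	return dev->rx_queue.qdisc;
}

With that view, the fast-path NULL check in handle_ing() and the graft/prune logic in dev_graft_qdisc() both reduce to reads and writes of this one per-device rx_queue field.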