diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 92417825d38..b5f40d7ef72 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -26,6 +26,14 @@ struct qdisc_rate_table
 enum qdisc_state_t
 {
 	__QDISC_STATE_RUNNING,
+	__QDISC_STATE_SCHED,
+};
+
+struct qdisc_size_table {
+	struct list_head	list;
+	struct tc_sizespec	szopts;
+	int			refcnt;
+	u16			data[];
 };
 
 struct Qdisc
@@ -38,6 +46,7 @@ struct Qdisc
 #define TCQ_F_INGRESS	4
 	int			padded;
 	struct Qdisc_ops	*ops;
+	struct qdisc_size_table	*stab;
 	u32			handle;
 	u32			parent;
 	atomic_t		refcnt;
@@ -45,6 +54,7 @@ struct Qdisc
 	struct sk_buff		*gso_skb;
 	struct sk_buff_head	q;
 	struct netdev_queue	*dev_queue;
+	struct Qdisc		*next_sched;
 	struct list_head	list;
 
 	struct gnet_stats_basic	bstats;
@@ -54,6 +64,8 @@ struct Qdisc
 	int			(*reshape_fail)(struct sk_buff *skb,
 					struct Qdisc *q);
 
+	void			*u32_node;
+
 	/* This field is deprecated, but it is still used by CBQ
 	 * and it will live until better solution will be invented.
 	 */
@@ -161,6 +173,21 @@ struct tcf_proto
 	struct tcf_proto_ops	*ops;
 };
 
+struct qdisc_skb_cb {
+	unsigned int		pkt_len;
+	char			data[];
+};
+
+static inline struct qdisc_skb_cb *qdisc_skb_cb(struct sk_buff *skb)
+{
+	return (struct qdisc_skb_cb *)skb->cb;
+}
+
+static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
+{
+	return &qdisc->q.lock;
+}
+
 static inline struct Qdisc *qdisc_root(struct Qdisc *qdisc)
 {
 	return qdisc->dev_queue->qdisc;
@@ -170,7 +197,7 @@ static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc)
 {
 	struct Qdisc *root = qdisc_root(qdisc);
 
-	return &root->dev_queue->lock;
+	return qdisc_lock(root);
 }
 
 static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
@@ -178,13 +205,18 @@ static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
 	return qdisc->dev_queue->dev;
 }
 
-extern void qdisc_lock_tree(struct net_device *dev);
-extern void qdisc_unlock_tree(struct net_device *dev);
+static inline void sch_tree_lock(struct Qdisc *q)
+{
+	spin_lock_bh(qdisc_root_lock(q));
+}
 
-#define sch_tree_lock(q)	qdisc_lock_tree(qdisc_dev(q))
-#define sch_tree_unlock(q)	qdisc_unlock_tree(qdisc_dev(q))
-#define tcf_tree_lock(tp)	qdisc_lock_tree(qdisc_dev((tp)->q))
-#define tcf_tree_unlock(tp)	qdisc_unlock_tree(qdisc_dev((tp)->q))
+static inline void sch_tree_unlock(struct Qdisc *q)
+{
+	spin_unlock_bh(qdisc_root_lock(q));
+}
+
+#define tcf_tree_lock(tp)	sch_tree_lock((tp)->q)
+#define tcf_tree_unlock(tp)	sch_tree_unlock((tp)->q)
 
 extern struct Qdisc noop_qdisc;
 extern struct Qdisc_ops noop_qdisc_ops;
@@ -243,6 +275,8 @@ extern struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 extern struct Qdisc *qdisc_create_dflt(struct net_device *dev,
 				       struct netdev_queue *dev_queue,
 				       struct Qdisc_ops *ops, u32 parentid);
+extern void qdisc_calculate_pkt_len(struct sk_buff *skb,
+				    struct qdisc_size_table *stab);
 extern void tcf_destroy(struct tcf_proto *tp);
 extern void tcf_destroy_chain(struct tcf_proto **fl);
 
@@ -292,12 +326,32 @@ static inline bool qdisc_tx_is_noop(const struct net_device *dev)
 	return true;
 }
 
+static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
+{
+	return qdisc_skb_cb(skb)->pkt_len;
+}
+
+static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+#ifdef CONFIG_NET_SCHED
+	if (sch->stab)
+		qdisc_calculate_pkt_len(skb, sch->stab);
+#endif
+	return sch->enqueue(skb, sch);
+}
+
+static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
+{
+	qdisc_skb_cb(skb)->pkt_len = skb->len;
+	return qdisc_enqueue(skb, sch);
+}
+
 static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
 				       struct sk_buff_head *list)
 {
 	__skb_queue_tail(list, skb);
-	sch->qstats.backlog += skb->len;
-	sch->bstats.bytes += skb->len;
+	sch->qstats.backlog += qdisc_pkt_len(skb);
+	sch->bstats.bytes += qdisc_pkt_len(skb);
 	sch->bstats.packets++;
 
 	return NET_XMIT_SUCCESS;
@@ -314,7 +368,7 @@ static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
 	struct sk_buff *skb = __skb_dequeue(list);
 
 	if (likely(skb != NULL))
-		sch->qstats.backlog -= skb->len;
+		sch->qstats.backlog -= qdisc_pkt_len(skb);
 
 	return skb;
 }
@@ -330,7 +384,7 @@ static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
 	struct sk_buff *skb = __skb_dequeue_tail(list);
 
 	if (likely(skb != NULL))
-		sch->qstats.backlog -= skb->len;
+		sch->qstats.backlog -= qdisc_pkt_len(skb);
 
 	return skb;
 }
@@ -344,7 +398,7 @@ static inline int __qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch,
 				  struct sk_buff_head *list)
 {
 	__skb_queue_head(list, skb);
-	sch->qstats.backlog += skb->len;
+	sch->qstats.backlog += qdisc_pkt_len(skb);
 	sch->qstats.requeues++;
 
 	return NET_XMIT_SUCCESS;
@@ -362,7 +416,7 @@ static inline void __qdisc_reset_queue(struct Qdisc *sch,
 	 * We do not know the backlog in bytes of this list, it
 	 * is up to the caller to correct it
 	 */
-	skb_queue_purge(list);
+	__skb_queue_purge(list);
 }
 
 static inline void qdisc_reset_queue(struct Qdisc *sch)
@@ -377,7 +431,7 @@ static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
 	struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);
 
 	if (likely(skb != NULL)) {
-		unsigned int len = skb->len;
+		unsigned int len = qdisc_pkt_len(skb);
 		kfree_skb(skb);
 		return len;
 	}
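
A note on the intended call order of the new enqueue helpers: qdisc_enqueue_root() seeds qdisc_skb_cb(skb)->pkt_len from skb->len before qdisc_enqueue() gives an attached size table a chance to rescale it, so the root entry point is what the core transmit path must use. A minimal caller sketch under the per-qdisc root lock introduced above (the function name and locking context are illustrative, not part of this patch):

/* Hypothetical caller: shows only the expected ordering of the
 * helpers added above, not the real core transmit path. */
static int example_xmit(struct sk_buff *skb, struct Qdisc *q)
{
	spinlock_t *root_lock = qdisc_root_lock(q);
	int ret;

	spin_lock(root_lock);
	/* Seeds pkt_len from skb->len, then calls qdisc_enqueue(),
	 * which may rescale pkt_len via sch->stab. */
	ret = qdisc_enqueue_root(skb, q);
	spin_unlock(root_lock);

	return ret;
}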
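For qdisc implementations, the skb->len to qdisc_pkt_len() conversion means all byte accounting (qstats.backlog, bstats.bytes, drop lengths) now reflects the size-table adjusted length. A sketch of a trivial byte-limited enqueue built on the converted helpers (the qdisc and its limit are hypothetical):

/* Hypothetical FIFO-style enqueue: __qdisc_enqueue_tail() now charges
 * qdisc_pkt_len(skb), i.e. the size-table adjusted length, against
 * backlog and byte counters instead of raw skb->len. */
static int example_fifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	unsigned int limit = 10000;	/* illustrative byte limit */

	if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= limit))
		return __qdisc_enqueue_tail(skb, sch, &sch->q);

	/* Over limit: drop and account the failure. */
	kfree_skb(skb);
	sch->qstats.drops++;
	return NET_XMIT_DROP;
}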
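Finally, the char data[] flexible member in struct qdisc_skb_cb is what lets an individual qdisc keep private per-packet state in skb->cb behind the shared pkt_len field. A sketch of the expected layering (the wrapper struct and its field are hypothetical):

/* Hypothetical private cb layout layered on qdisc_skb_cb. Embedding
 * the shared struct first keeps pkt_len at the offset the generic
 * helpers expect; private fields live in the data[] tail. */
struct example_skb_cb {
	struct qdisc_skb_cb	base;	/* pkt_len must stay first */
	u32			tstamp;	/* hypothetical private field */
};

static inline struct example_skb_cb *example_skb_cb(struct sk_buff *skb)
{
	return (struct example_skb_cb *)qdisc_skb_cb(skb);
}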