/* topology */
int level; /* our level (see above) */
struct htb_class *parent; /* parent class */
- struct list_head hlist; /* classid hash list item */
+ struct hlist_node hlist; /* classid hash list item */
struct list_head sibling; /* sibling list item */
struct list_head children; /* children list */
struct htb_sched {
struct list_head root; /* root classes list */
- struct list_head hash[HTB_HSIZE]; /* hashed by classid */
- struct list_head drops[TC_HTB_NUMPRIO]; /* active leaves (for drops) */
+ struct hlist_head hash[HTB_HSIZE]; /* hashed by classid */
+ struct list_head drops[TC_HTB_NUMPRIO];/* active leaves (for drops) */
/* self list - roots of self generating tree */
struct rb_root row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
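The payoff of the conversion is visible in these two hunks: a struct hlist_head is a single pointer where a struct list_head is two, so the HTB_HSIZE bucket array shrinks by half. The trade-off is losing O(1) tail access, which a hash bucket never needs. A minimal user-space sketch of the size difference (type shapes mirrored from <linux/list.h>; the byte counts in the comment assume a 64-bit build):

	#include <stdio.h>

	/* Shapes mirrored from <linux/list.h> */
	struct list_head { struct list_head *next, *prev; };
	struct hlist_node { struct hlist_node *next, **pprev; };
	struct hlist_head { struct hlist_node *first; };

	#define HTB_HSIZE 16

	int main(void)
	{
		/* On a 64-bit build: 256 vs 128 bytes for the bucket array */
		printf("list_head  buckets: %zu bytes\n",
		       sizeof(struct list_head[HTB_HSIZE]));
		printf("hlist_head buckets: %zu bytes\n",
		       sizeof(struct hlist_head[HTB_HSIZE]));
		return 0;
	}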
static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
struct htb_sched *q = qdisc_priv(sch);
- struct list_head *p;
+ struct hlist_node *p;
+ struct htb_class *cl;
+
if (TC_H_MAJ(handle) != sch->handle)
return NULL;
- list_for_each(p, q->hash + htb_hash(handle)) {
- struct htb_class *cl = list_entry(p, struct htb_class, hlist);
+ hlist_for_each_entry(cl, p, q->hash + htb_hash(handle), hlist) {
if (cl->classid == handle)
return cl;
}
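Note the iterator signature of this era: hlist_for_each_entry() takes both a typed cursor (cl) and a raw struct hlist_node cursor (p); kernels from 3.9 onward dropped the raw cursor. A sketch of the macro's shape, using container_of in place of the kernel's hlist_entry():

	#include <stddef.h>

	struct hlist_node { struct hlist_node *next, **pprev; };
	struct hlist_head { struct hlist_node *first; };

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	/* Four-argument form used above: iteration ends when the raw
	 * cursor pos reaches NULL, setting the typed cursor tpos on the
	 * way (comma expressions stand in for the kernel's ({ ... })). */
	#define hlist_for_each_entry(tpos, pos, head, member)		\
		for (pos = (head)->first;				\
		     pos && (tpos = container_of(pos, typeof(*tpos),	\
						 member), 1);		\
		     pos = pos->next)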
{
struct Qdisc *sch = (struct Qdisc *)arg;
struct htb_sched *q = qdisc_priv(sch);
- struct list_head *p;
+ struct hlist_node *p;
+ struct htb_class *cl;
+
/* lock queue so that we can muck with it */
spin_lock_bh(&sch->dev->queue_lock);
/* scan and recompute one bucket at time */
if (++q->recmp_bucket >= HTB_HSIZE)
q->recmp_bucket = 0;
- list_for_each(p, q->hash + q->recmp_bucket) {
- struct htb_class *cl = list_entry(p, struct htb_class, hlist);
+ hlist_for_each_entry(cl, p, q->hash + q->recmp_bucket, hlist) {
RT_GEN(cl->sum_bytes, cl->rate_bytes);
RT_GEN(cl->sum_packets, cl->rate_packets);
}
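The plain iterator is enough here because the loop only rewrites per-class rate counters and never unlinks cl from its bucket; a walk that removes entries would need the _safe variant, which in this era carried one more cursor caching the next node (a sketch, reusing the container_of definition above):

	#define hlist_for_each_entry_safe(tpos, pos, n, head, member)	\
		for (pos = (head)->first;				\
		     pos && (n = pos->next, 1) &&			\
			(tpos = container_of(pos, typeof(*tpos),	\
					     member), 1);		\
		     pos = n)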
int i;
for (i = 0; i < HTB_HSIZE; i++) {
- struct list_head *p;
- list_for_each(p, q->hash + i) {
- struct htb_class *cl =
- list_entry(p, struct htb_class, hlist);
+ struct hlist_node *p;
+ struct htb_class *cl;
+
+ hlist_for_each_entry(cl, p, q->hash + i, hlist) {
if (cl->level)
memset(&cl->un.inner, 0, sizeof(cl->un.inner));
else {
INIT_LIST_HEAD(&q->root);
for (i = 0; i < HTB_HSIZE; i++)
- INIT_LIST_HEAD(q->hash + i);
+ INIT_HLIST_HEAD(q->hash + i);
for (i = 0; i < TC_HTB_NUMPRIO; i++)
INIT_LIST_HEAD(q->drops + i);
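Initialization is also cheaper on the hlist side: an empty head is just a NULL first pointer, so a zeroed allocation is already a valid empty table, whereas INIT_LIST_HEAD must make each head point at itself. The era's helpers from <linux/list.h>:

	#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)

	static inline void INIT_HLIST_NODE(struct hlist_node *h)
	{
		h->next = NULL;
		h->pprev = NULL; /* NULL pprev is what hlist_unhashed() tests */
	}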
struct htb_class, sibling));
/* note: this delete may happen twice (see htb_delete) */
- list_del(&cl->hlist);
+ hlist_del_init(&cl->hlist);
list_del(&cl->sibling);
if (cl->prio_activity)
sch_tree_lock(sch);
/* delete from hash and active; remainder in destroy_class */
- list_del_init(&cl->hlist);
+ hlist_del_init(&cl->hlist);
+
if (cl->prio_activity)
htb_deactivate(q, cl);
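Both delete sites above use hlist_del_init() rather than a bare hlist_del() guarded by hlist_unhashed(). The guard would not survive the double delete the earlier comment warns about: hlist_del() poisons next and pprev, leaving pprev non-NULL, so hlist_unhashed() would report the node as still hashed and the second removal would walk poisoned pointers. hlist_del_init() re-initializes the node, turning a repeat call into a no-op. From the era's <linux/list.h>:

	static inline void hlist_del(struct hlist_node *n)
	{
		__hlist_del(n);
		n->next = LIST_POISON1;
		n->pprev = LIST_POISON2; /* non-NULL: still looks hashed */
	}

	static inline void hlist_del_init(struct hlist_node *n)
	{
		if (!hlist_unhashed(n)) { /* no-op after a prior delete */
			__hlist_del(n);
			INIT_HLIST_NODE(n); /* pprev = NULL again */
		}
	}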
cl->refcnt = 1;
INIT_LIST_HEAD(&cl->sibling);
- INIT_LIST_HEAD(&cl->hlist);
+ INIT_HLIST_NODE(&cl->hlist);
INIT_LIST_HEAD(&cl->children);
INIT_LIST_HEAD(&cl->un.leaf.drop_list);
cl->cmode = HTB_CAN_SEND;
/* attach to the hash list and parent's family */
- list_add_tail(&cl->hlist, q->hash + htb_hash(classid));
+ hlist_add_head(&cl->hlist, q->hash + htb_hash(classid));
list_add_tail(&cl->sibling,
parent ? &parent->children : &q->root);
} else
return;
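One behavioral nit in the attach hunk: list_add_tail() appended new classes to their bucket, while hlist_add_head() prepends. An hlist keeps no tail pointer, so head insertion is its only O(1) insert, and order within a hash bucket is irrelevant to lookup. The helper, from the era's <linux/list.h>:

	static inline void hlist_add_head(struct hlist_node *n,
					  struct hlist_head *h)
	{
		struct hlist_node *first = h->first;

		n->next = first;
		if (first)
			first->pprev = &n->next;
		h->first = n;
		n->pprev = &h->first; /* pprev points at the head's slot */
	}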
for (i = 0; i < HTB_HSIZE; i++) {
- struct list_head *p;
- list_for_each(p, q->hash + i) {
- struct htb_class *cl =
- list_entry(p, struct htb_class, hlist);
+ struct hlist_node *p;
+ struct htb_class *cl;
+
+ hlist_for_each_entry(cl, p, q->hash + i, hlist) {
if (arg->count < arg->skip) {
arg->count++;
continue;