1 /* Copyright (c) 2007 Coraid, Inc. See COPYING for GPL terms. */
4 /* AoE device utility functions; maintains device list. */
7 #include <linux/hdreg.h>
8 #include <linux/blkdev.h>
9 #include <linux/netdevice.h>
10 #include <linux/delay.h>
11 #include <linux/slab.h>
/* Forward declarations for file-local helpers defined later in this file. */
14 static void dummy_timer(ulong);
15 static void aoedev_freedev(struct aoedev *);
16 static void freetgt(struct aoedev *d, struct aoetgt *t);
17 static void skbpoolfree(struct aoedev *d);
/*
 * Singly linked list of all known AoE devices (chained via d->next).
 * Traversals and mutations below take devlist_lock with IRQs disabled.
 */
19 static struct aoedev *devlist;
20 static DEFINE_SPINLOCK(devlist_lock);
/*
 * Find a device on devlist by its AoE address (major/minor pair).
 * Scans the global list under devlist_lock with IRQs saved.
 * NOTE(review): this excerpt elides lines from the full source (return
 * type, braces, locals, the loop break/return of the match) — confirm
 * against the complete file before relying on details not shown here.
 */
23 aoedev_by_aoeaddr(int maj, int min)
28 spin_lock_irqsave(&devlist_lock, flags);
/* linear walk of the global device list looking for a matching address */
30 for (d=devlist; d; d=d->next)
31 if (d->aoemajor == maj && d->aoeminor == min)
34 spin_unlock_irqrestore(&devlist_lock, flags);
/*
 * Body fragment of dummy_timer() (signature elided in this excerpt).
 * Timer callback: recovers the device from the opaque timer argument,
 * bails out if the device is marked for teardown (DEVFL_TKILL), and
 * otherwise re-arms itself one HZ (one second) in the future.
 * NOTE(review): the early-return body of the TKILL check and the
 * add_timer() re-arm call are elided here — verify in the full source.
 */
43 d = (struct aoedev *)vp;
44 if (d->flags & DEVFL_TKILL)
46 d->timer.expires = jiffies + HZ;
/*
 * Take a device down: reclaim every in-flight frame and queued buffer,
 * then clear the "up" state.
 * NOTE(review): many interior lines (locals, frame-pointer setup,
 * error completion of bios, braces) are elided in this excerpt.
 */
51 aoedev_downdev(struct aoedev *d)
53 struct aoetgt **t, **te;
/* For each target: walk its frame array, resetting every frame to the
 * free state (tag = FREETAG, buf = NULL). */
60 for (; t < te && *t; t++) {
62 e = f + (*t)->nframes;
63 for (; f < e; f->tag = FREETAG, f->buf = NULL, f++) {
/* already-free frames need no buffer accounting */
64 if (f->tag == FREETAG || f->buf == NULL)
/* last outstanding frame for this buf: release it to the mempool,
 * unless it is the buffer currently being assembled (d->inprocess) */
68 if (--buf->nframesout == 0
69 && buf != d->inprocess) {
70 mempool_free(buf, d->bufpool);
/* restore the target's transmit window to its full frame count */
74 (*t)->maxout = (*t)->nframes;
80 mempool_free(buf, d->bufpool);
/* drain the device's pending-buffer queue, freeing each entry */
86 while (!list_empty(&d->bufq)) {
87 buf = container_of(d->bufq.next, struct buf, bufs);
88 list_del(d->bufq.next);
90 mempool_free(buf, d->bufpool);
/* report zero capacity to the block layer and mark the device down */
95 set_capacity(d->gd, 0);
97 d->flags &= ~DEVFL_UP;
/*
 * Release all resources owned by a device: per-target state, the
 * buffer mempool, and the block request queue.
 * NOTE(review): this excerpt elides the freetgt() call in the loop
 * body, gendisk release, and the final kfree — see the full source.
 */
101 aoedev_freedev(struct aoedev *d)
103 struct aoetgt **t, **e;
/* free each allocated target slot */
112 for (; t < e && *t; t++)
115 mempool_destroy(d->bufpool);
117 blk_cleanup_queue(d->blkq);
/*
 * User-triggered flush of the device list.  Copies a command string
 * from userspace; "all" removes every device, otherwise only devices
 * that are down.  Devices that are up (unless "all"), or that have
 * gendisk allocation / resize work pending, are skipped.
 * NOTE(review): locals, the list-unlink logic that builds the rmd
 * removal chain, error returns, and braces are elided in this excerpt.
 */
122 aoedev_flush(const char __user *str, size_t cnt)
125 struct aoedev *d, **dd;
126 struct aoedev *rmd = NULL;
/* bound and copy the userspace command string */
131 if (cnt > sizeof buf)
133 if (copy_from_user(buf, str, cnt))
135 all = !strncmp(buf, "all", 3);
/* make sure no deferred work is still touching devices */
138 flush_scheduled_work();
139 spin_lock_irqsave(&devlist_lock, flags);
/* skip devices still in use or with allocation/resize in flight */
143 if ((!all && (d->flags & DEVFL_UP))
144 || (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE))
146 spin_unlock(&d->lock);
/* mark the victim so its timer callback stops re-arming */
152 d->flags |= DEVFL_TKILL;
153 spin_unlock(&d->lock);
157 spin_unlock_irqrestore(&devlist_lock, flags);
/* teardown proper happens outside the spinlocks: it can sleep */
160 del_timer_sync(&d->timer);
161 aoedev_freedev(d); /* must be able to sleep */
166 /* I'm not really sure that this is a realistic problem, but if the
167 network driver goes gonzo let's just leak memory after complaining. */
/*
 * Free an skb once the network layer has dropped its reference.
 * Polls dataref (sleeping between checks — the sleep/msleep call is
 * elided in this excerpt) for up to Tms milliseconds in Sms steps;
 * if the driver never lets go, complain and deliberately leak rather
 * than free memory still in use.
 * NOTE(review): the printk() wrapper around the message lines and the
 * final kfree_skb path are elided here — confirm in the full source.
 */
169 skbfree(struct sk_buff *skb)
171 enum { Sms = 100, Tms = 3*1000};
/* wait until we hold the only reference, or the timeout budget runs out */
176 while (atomic_read(&skb_shinfo(skb)->dataref) != 1 && i-- > 0)
180 "aoe: %s holds ref: %s\n",
181 skb->dev ? skb->dev->name : "netif",
182 "cannot free skb -- memory leaked.");
/* strip fragment bookkeeping before the skb is released */
185 skb_shinfo(skb)->nr_frags = skb->data_len = 0;
/*
 * Drain and reinitialize a device's private skb pool, releasing each
 * skb via skbfree() (the call inside the walk is elided in this
 * excerpt).  Safe-walk variant is used because entries are removed
 * while iterating.
 */
191 skbpoolfree(struct aoedev *d)
193 struct sk_buff *skb, *tmp;
195 skb_queue_walk_safe(&d->skbpool, skb, tmp)
198 __skb_queue_head_init(&d->skbpool);
201 /* find it or malloc it */
/*
 * Look up a device by system minor number, allocating and initializing
 * a new one (and linking it onto devlist) if none exists.  Runs under
 * devlist_lock with IRQs saved, hence the GFP_ATOMIC allocation.
 * NOTE(review): the allocation-failure branch, the devlist insertion,
 * return statement, and braces are elided in this excerpt.
 */
203 aoedev_by_sysminor_m(ulong sysminor)
208 spin_lock_irqsave(&devlist_lock, flags);
/* first, try to find an existing device with this minor */
210 for (d=devlist; d; d=d->next)
211 if (d->sysminor == sysminor)
/* not found: allocate a zeroed device under the lock (atomic context) */
215 d = kcalloc(1, sizeof *d, GFP_ATOMIC);
218 INIT_WORK(&d->work, aoecmd_sleepwork);
219 spin_lock_init(&d->lock);
220 skb_queue_head_init(&d->sendq);
221 skb_queue_head_init(&d->skbpool);
/* arm the keepalive timer to fire one second from now */
222 init_timer(&d->timer);
223 d->timer.data = (ulong) d;
224 d->timer.function = dummy_timer;
225 d->timer.expires = jiffies + HZ;
226 add_timer(&d->timer);
227 d->bufpool = NULL; /* defer to aoeblk_gdalloc */
229 INIT_LIST_HEAD(&d->bufq);
/* derive the AoE shelf/slot address from the system minor */
230 d->sysminor = sysminor;
231 d->aoemajor = AOEMAJOR(sysminor);
232 d->aoeminor = AOEMINOR(sysminor);
233 d->mintimer = MINTIMER;
237 spin_unlock_irqrestore(&devlist_lock, flags);
/*
 * Release one target's resources (its frames and associated skbs).
 * NOTE(review): the entire body of freetgt() is elided in this
 * excerpt; only the signature line is visible — see the full source.
 */
242 freetgt(struct aoedev *d, struct aoetgt *t)
/*
 * Module-teardown fragment (the enclosing function's header is elided
 * in this excerpt — presumably aoedev_exit(); confirm in full source).
 * Flushes pending work, then repeatedly unlinks the head of devlist,
 * marks it TKILL under its lock so the timer stops re-arming, and
 * synchronously cancels the timer.  The devlist-unlink statement and
 * the aoedev_freedev() call are elided here.
 */
260 flush_scheduled_work();
262 while ((d = devlist)) {
265 spin_lock_irqsave(&d->lock, flags);
267 d->flags |= DEVFL_TKILL;
268 spin_unlock_irqrestore(&d->lock, flags);
270 del_timer_sync(&d->timer);