/* Copyright (c) 2012 Coraid, Inc. See COPYING for GPL terms. */
/*
 * Filesystem request handling methods
 */

#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/genhd.h>
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <net/net_namespace.h>
#include <asm/unaligned.h>
#include <linux/uio.h>
#include "aoe.h"

#define MAXIOC (8192)	/* default meant to avoid most soft lockups */

static void ktcomplete(struct frame *, struct sk_buff *);

static struct buf *nextbuf(struct aoedev *);

static int aoe_deadsecs = 60 * 3;
module_param(aoe_deadsecs, int, 0644);
MODULE_PARM_DESC(aoe_deadsecs, "After aoe_deadsecs seconds, give up and fail dev.");

static int aoe_maxout = 16;
module_param(aoe_maxout, int, 0644);
MODULE_PARM_DESC(aoe_maxout,
	"Only aoe_maxout outstanding packets for every MAC on eX.Y.");

static wait_queue_head_t ktiowq;
static struct ktstate kts;

/* io completion queue */
static struct {
	struct list_head head;
	spinlock_t lock;
} iocq;

static struct sk_buff *
new_skb(ulong len)
{
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_ATOMIC);
	if (skb) {
		skb_reset_mac_header(skb);
		skb_reset_network_header(skb);
		skb->protocol = __constant_htons(ETH_P_AOE);
		skb_checksum_none_assert(skb);
	}
	return skb;
}

getframe(struct aoedev *d, u32 tag)
	struct list_head *head, *pos, *nx;

	head = &d->factive[n];
	list_for_each_safe(pos, nx, head) {
		f = list_entry(pos, struct frame, head);

/*
 * Leave the top bit clear so we have tagspace for userland.
 * The bottom 16 bits are the xmit tick for rexmit/rttavg processing.
 * This driver reserves tag -1 to mean "unused frame."
 */
static u32
newtag(struct aoedev *d)
{
	register ulong n;

	n = jiffies & 0xffff;
	return n |= (++d->lasttag & 0x7fff) << 16;
}
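
/*
 * Decoding sketch for the tag layout above: bit 31 stays clear for
 * userland, bits 16-30 carry the 15-bit sequence counter from d->lasttag,
 * and bits 0-15 carry the jiffies snapshot taken at transmit time:
 *
 *	xmit_tick = tag & 0xffff;
 *	seq = (tag >> 16) & 0x7fff;
 *
 * tsince() relies on the low half to age a frame against the current
 * jiffies count.
 */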

static u32
aoehdr_atainit(struct aoedev *d, struct aoetgt *t, struct aoe_hdr *h)
{
	u32 host_tag = newtag(d);

	memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);
	memcpy(h->dst, t->addr, sizeof h->dst);
	h->type = __constant_cpu_to_be16(ETH_P_AOE);
	h->major = cpu_to_be16(d->aoemajor);
	h->minor = d->aoeminor;
	h->tag = cpu_to_be32(host_tag);

	return host_tag;
}

static void
put_lba(struct aoe_atahdr *ah, sector_t lba)
{
	ah->lba0 = lba;
	ah->lba1 = lba >>= 8;
	ah->lba2 = lba >>= 8;
	ah->lba3 = lba >>= 8;
	ah->lba4 = lba >>= 8;
	ah->lba5 = lba >>= 8;
}
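
/*
 * Worked example: for lba == 0x123456789abc the assignments above store
 * lba0=0xbc, lba1=0x9a, lba2=0x78, lba3=0x56, lba4=0x34, lba5=0x12,
 * i.e. the 48-bit LBA goes out least-significant byte first.
 */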

static struct aoeif *
ifrotate(struct aoetgt *t)
	if (ifp >= &t->ifs[NAOEIFS] || ifp->nd == NULL)

static void
skb_pool_put(struct aoedev *d, struct sk_buff *skb)
{
	__skb_queue_tail(&d->skbpool, skb);
}

static struct sk_buff *
skb_pool_get(struct aoedev *d)
{
	struct sk_buff *skb = skb_peek(&d->skbpool);

	if (skb && atomic_read(&skb_shinfo(skb)->dataref) == 1) {
		__skb_unlink(skb, &d->skbpool);
		return skb;
	}
	if (skb_queue_len(&d->skbpool) < NSKBPOOLMAX &&
	    (skb = new_skb(ETH_ZLEN)))
		return skb;

	return NULL;
}

aoe_freetframe(struct frame *f)
	list_add(&f->head, &t->ffree);

static struct frame *
newtframe(struct aoedev *d, struct aoetgt *t)
	struct list_head *pos;

	if (list_empty(&t->ffree)) {
		if (t->falloc >= NSKBPOOLMAX*2)
		f = kcalloc(1, sizeof(*f), GFP_ATOMIC);
		f = list_entry(pos, struct frame, head);

	f->skb = skb = new_skb(ETH_ZLEN);
bail:	aoe_freetframe(f);

	if (atomic_read(&skb_shinfo(skb)->dataref) != 1) {
		skb = skb_pool_get(d);
		skb_pool_put(d, f->skb);

	skb->truesize -= skb->data_len;
	skb_shinfo(skb)->nr_frags = skb->data_len = 0;

static struct frame *
newframe(struct aoedev *d)
	struct aoetgt *t, **tt;

	if (d->targets[0] == NULL) {	/* shouldn't happen, but I'm paranoid */
		printk(KERN_ERR "aoe: NULL TARGETS!\n");
	tt = d->tgt;	/* last used target */
	if (tt >= &d->targets[NTARGETS] || !*tt)
	if (t->nout < t->maxout
	if (tt == d->tgt)	/* we've looped and found nada */
	d->flags |= DEVFL_KICKME;

skb_fillup(struct sk_buff *skb, struct bio_vec *bv, ulong off, ulong cnt)
	fcnt = bv->bv_len - (off - bv->bv_offset);
	skb_fill_page_desc(skb, frag++, bv->bv_page, off, fcnt);

static void
fhash(struct frame *f)
{
	struct aoedev *d = f->t->d;
	u32 n;

	n = f->tag % NFACTIVE;
	list_add_tail(&f->head, &d->factive[n]);
}

static int
aoecmd_ata_rw(struct aoedev *d)
	struct aoe_atahdr *ah;
	struct sk_buff_head queue;
	char writebit, extbit;

	if (bcnt > buf->resid)
		bcnt = buf->resid;
	f->bv_off = f->bv->bv_offset + (f->bv->bv_len - buf->bv_resid);
	do {
		if (fbcnt < buf->bv_resid) {
			buf->bv_resid -= fbcnt;
			buf->resid -= fbcnt;
			break;
		}
		fbcnt -= buf->bv_resid;
		buf->resid -= buf->bv_resid;
		if (buf->resid == 0) {
			d->ip.buf = NULL;
			break;
		}
		buf->bv++;
		buf->bv_resid = buf->bv->bv_len;
		WARN_ON(buf->bv_resid == 0);
	} while (fbcnt);

	/* initialize the headers & frame */
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ah = (struct aoe_atahdr *) (h+1);
	skb_put(skb, sizeof *h + sizeof *ah);
	memset(h, 0, skb->len);
	f->tag = aoehdr_atainit(d, t, h);
	f->lba = buf->sector;

	/* set up ata header */
	ah->scnt = bcnt >> 9;
	put_lba(ah, buf->sector);
	if (d->flags & DEVFL_EXT) {
		ah->aflags |= AOEAFL_EXT;
	} else {
		extbit = 0;
		ah->lba3 &= 0x0f;
		ah->lba3 |= 0xe0;	/* LBA bit + obsolete 0xa0 */
	}
	if (bio_data_dir(buf->bio) == WRITE) {
		skb_fillup(skb, f->bv, f->bv_off, bcnt);
		ah->aflags |= AOEAFL_WRITE;
		skb->data_len = bcnt;
		skb->truesize += bcnt;
	} else {
		writebit = 0;
	}
	ah->cmdstat = ATA_CMD_PIO_READ | writebit | extbit;

	/* mark all tracking fields and load out */
	buf->nframesout += 1;
	buf->sector += bcnt >> 9;

	skb->dev = t->ifp->nd;
	skb = skb_clone(skb, GFP_ATOMIC);

	__skb_queue_head_init(&queue);
	__skb_queue_tail(&queue, skb);

/* Some callers cannot sleep; they can call this function to build the
 * packets now and transmit them later, when interrupts are on.
 */
aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *queue)
	struct aoe_cfghdr *ch;
	struct net_device *ifp;

	for_each_netdev_rcu(&init_net, ifp) {
		if (!is_aoe_netif(ifp))
		skb = new_skb(sizeof *h + sizeof *ch);
			printk(KERN_INFO "aoe: skb alloc failure\n");
		skb_put(skb, sizeof *h + sizeof *ch);
		__skb_queue_tail(queue, skb);
		h = (struct aoe_hdr *) skb_mac_header(skb);
		memset(h, 0, sizeof *h + sizeof *ch);
		memset(h->dst, 0xff, sizeof h->dst);
		memcpy(h->src, ifp->dev_addr, sizeof h->src);
		h->type = __constant_cpu_to_be16(ETH_P_AOE);
		h->major = cpu_to_be16(aoemajor);
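
/*
 * Usage sketch (this is the pattern aoecmd_cfg() below actually uses):
 * build the queue in atomic context, then transmit when it is safe:
 *
 *	struct sk_buff_head queue;
 *
 *	__skb_queue_head_init(&queue);
 *	aoecmd_cfg_pkts(aoemajor, aoeminor, &queue);
 *	aoenet_xmit(&queue);
 */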

static void
resend(struct aoedev *d, struct frame *f)
	struct sk_buff_head queue;
	struct aoe_atahdr *ah;

	if (ifrotate(t) == NULL) {
		/* probably can't happen, but set it up to fail anyway */
		pr_info("aoe: resend: no interfaces to rotate to.\n");
		ktcomplete(f, NULL);
		return;
	}
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ah = (struct aoe_atahdr *) (h+1);

	snprintf(buf, sizeof buf,
		"%15s e%ld.%d oldtag=%08x@%08lx newtag=%08x s=%pm d=%pm nout=%d\n",
		"retransmit", d->aoemajor, d->aoeminor, f->tag, jiffies, n,
		h->src, h->dst, t->nout);

	h->tag = cpu_to_be32(n);
	memcpy(h->dst, t->addr, sizeof h->dst);
	memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);

	skb->dev = t->ifp->nd;
	skb = skb_clone(skb, GFP_ATOMIC);

	__skb_queue_head_init(&queue);
	__skb_queue_tail(&queue, skb);

static int
tsince(u32 tag)
{
	int n;

	n = jiffies & 0xffff;
	n -= tag & 0xffff;
	if (n < 0)
		n += 1<<16;
	return n;
}

static struct aoeif *
getif(struct aoetgt *t, struct net_device *nd)

ejectif(struct aoetgt *t, struct aoeif *ifp)
	struct net_device *nd;

	e = t->ifs + NAOEIFS - 1;
	n = (e - ifp) * sizeof *ifp;
	memmove(ifp, ifp+1, n);

sthtith(struct aoedev *d)
	struct frame *f, *nf;
	struct list_head *nx, *pos, *head;
	struct aoetgt *ht = d->htgt;

	for (i = 0; i < NFACTIVE; i++) {
		head = &d->factive[i];
		list_for_each_safe(pos, nx, head) {
			f = list_entry(pos, struct frame, head);
			/* remove frame from active list */

			/* reassign all pertinent bits to new outbound frame */
			nf->bv_off = f->bv_off;

	/* We've cleaned up the outstanding frames, so take away this
	 * target's interfaces so it won't be used. It should be removed
	 * from the target array here, but cleaning up a target is
	 * involved, so just punt for now.
	 */
	memset(ht->ifs, 0, sizeof ht->ifs);

static inline unsigned char
ata_scnt(unsigned char *packet) {
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;

	h = (struct aoe_hdr *) packet;
	ah = (struct aoe_atahdr *) (h+1);
	return ah->scnt;
}

rexmit_timer(ulong vp)
	struct aoetgt *t, **tt, **te;
	struct list_head *head, *pos, *nx;
	register long timeout;

	d = (struct aoedev *) vp;

	/* timeout is always ~150% of the moving average */
	timeout = d->rttavg;
	timeout += timeout >> 1;
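	/*
	 * Example: with a smoothed RTT of 1000 ticks, timeout becomes
	 * 1000 + (1000 >> 1) = 1500 ticks, i.e. 150% of the average.
	 */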

	spin_lock_irqsave(&d->lock, flags);

	if (d->flags & DEVFL_TKILL) {
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}

	/* collect all frames to rexmit into flist */
	for (i = 0; i < NFACTIVE; i++) {
		head = &d->factive[i];
		list_for_each_safe(pos, nx, head) {
			f = list_entry(pos, struct frame, head);
			if (tsince(f->tag) < timeout)
				break;	/* end of expired frames */
			/* move to flist for later processing */
			list_move_tail(pos, &flist);
		}
	}

	tt = d->targets;
	te = tt + d->ntargets;
	for (; tt < te && (t = *tt); tt++) {
		if (t->nout == t->maxout
		    && t->maxout < t->nframes
		    && (jiffies - t->lastwadj)/HZ > 10) {
			t->maxout++;
			t->lastwadj = jiffies;
		}
	}

	if (!list_empty(&flist)) {	/* retransmissions necessary */
			d->rttavg = MAXTIMER;

	/* process expired frames */
	while (!list_empty(&flist)) {
		pos = flist.next;
		f = list_entry(pos, struct frame, head);
		n = f->waited += timeout;
		n /= HZ;
		if (n > aoe_deadsecs) {
			/* Waited too long. Device failure.
			 * Hang all frames on first hash bucket for downdev
			 * to clean up.
			 */
			list_splice(&flist, &d->factive[0]);

		if (n > aoe_deadsecs/2)
			d->htgt = t;	/* see if another target can help */

		if (t->nout == t->maxout) {
			t->lastwadj = jiffies;

		ifp = getif(t, f->skb->dev);
		if (ifp && ++ifp->lost > (t->nframes << 1)
		    && (ifp != t->ifs || t->ifs[1].nd)) {

	if ((d->flags & DEVFL_KICKME || d->htgt) && d->blkq) {
		d->flags &= ~DEVFL_KICKME;
		d->blkq->request_fn(d->blkq);
	}

	d->timer.expires = jiffies + TIMERTICK;
	add_timer(&d->timer);

	spin_unlock_irqrestore(&d->lock, flags);

static int
rqbiocnt(struct request *r)
{
	struct bio *bio;
	int n = 0;

	__rq_for_each_bio(bio, r)
		n++;
	return n;
}

/* This can be removed if we are certain that no users of the block
 * layer will ever use zero-count pages in bios. Otherwise we have to
 * protect against the put_page sometimes done by the network layer.
 *
 * See http://oss.sgi.com/archives/xfs/2007-01/msg00594.html for
 * details.
 *
 * We cannot use get_page in the workaround, because it insists on a
 * positive page count as a precondition. So we use _count directly.
 */

bio_pageinc(struct bio *bio)
	bio_for_each_segment(bv, bio, i) {
		/* Non-zero page count for non-head members of
		 * compound pages is no longer allowed by the kernel,
		 * but this has never been seen here.
		 */
		if (unlikely(PageCompound(page)))
			if (compound_trans_head(page) != page) {
				pr_crit("page tail used for block I/O\n");
		atomic_inc(&page->_count);

bio_pagedec(struct bio *bio)
	bio_for_each_segment(bv, bio, i)
		atomic_dec(&bv->bv_page->_count);
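
/*
 * Pairing note: bufinit() below takes the extra reference on every page of
 * a bio via bio_pageinc(), and aoe_end_buf() drops it via bio_pagedec(),
 * so a put_page() done by the network layer on a cloned skb cannot free a
 * page the block layer still owns.
 */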

static void
bufinit(struct buf *buf, struct request *rq, struct bio *bio)
{
	struct bio_vec *bv;

	memset(buf, 0, sizeof(*buf));
	buf->rq = rq;
	buf->bio = bio;
	buf->resid = bio->bi_size;
	buf->sector = bio->bi_sector;
	bio_pageinc(bio);
	buf->bv = bv = &bio->bi_io_vec[bio->bi_idx];
	buf->bv_resid = bv->bv_len;
	WARN_ON(buf->bv_resid == 0);
}

nextbuf(struct aoedev *d)
	struct request_queue *q;

		return NULL;	/* initializing */
	rq = blk_peek_request(q);
		blk_start_request(rq);
		d->ip.nxbio = rq->bio;
		rq->special = (void *) rqbiocnt(rq);
	buf = mempool_alloc(d->bufpool, GFP_ATOMIC);
		pr_err("aoe: nextbuf: unable to mempool_alloc!\n");
	bufinit(buf, rq, bio);
	return d->ip.buf = buf;

/* enters with d->lock held */
void
aoecmd_work(struct aoedev *d)
{
	if (d->htgt && !sthtith(d))
		return;
	while (aoecmd_ata_rw(d))
		;
}

/* this function performs work that has been deferred until sleeping is OK
 */
aoecmd_sleepwork(struct work_struct *work)
	struct aoedev *d = container_of(work, struct aoedev, work);
	struct block_device *bd;

	if (d->flags & DEVFL_GDALLOC)
	if (d->flags & DEVFL_NEWSIZE) {
		ssize = get_capacity(d->gd);
		bd = bdget_disk(d->gd, 0);
			mutex_lock(&bd->bd_inode->i_mutex);
			i_size_write(bd->bd_inode, (loff_t)ssize<<9);
			mutex_unlock(&bd->bd_inode->i_mutex);

	spin_lock_irq(&d->lock);
	d->flags |= DEVFL_UP;
	d->flags &= ~DEVFL_NEWSIZE;
	spin_unlock_irq(&d->lock);

static void
ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
	/* word 83: command set supported */
	n = get_unaligned_le16(&id[83 << 1]);

	/* word 86: command set/feature enabled */
	n |= get_unaligned_le16(&id[86 << 1]);

	if (n & (1<<10)) {	/* bit 10: LBA 48 */
		d->flags |= DEVFL_EXT;

		/* word 100: number lba48 sectors */
		ssize = get_unaligned_le64(&id[100 << 1]);

		/* set as in ide-disk.c:init_idedisk_capacity */
		d->geo.cylinders = ssize;
		d->geo.cylinders /= (255 * 63);
		d->geo.heads = 255;
		d->geo.sectors = 63;
	} else {
		d->flags &= ~DEVFL_EXT;

		/* number lba28 sectors */
		ssize = get_unaligned_le32(&id[60 << 1]);

		/* NOTE: obsolete in ATA 6 */
		d->geo.cylinders = get_unaligned_le16(&id[54 << 1]);
		d->geo.heads = get_unaligned_le16(&id[55 << 1]);
		d->geo.sectors = get_unaligned_le16(&id[56 << 1]);
	}

	if (d->ssize != ssize)
		printk(KERN_INFO
			"aoe: %pm e%ld.%d v%04x has %llu sectors\n",
			t->addr,
			d->aoemajor, d->aoeminor,
			d->fw_ver, (long long) ssize);

	if (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE))
		return;
	if (d->gd != NULL) {
		set_capacity(d->gd, ssize);
		d->flags |= DEVFL_NEWSIZE;
	} else
		d->flags |= DEVFL_GDALLOC;
	schedule_work(&d->work);
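
/*
 * Worked example (illustrative numbers only): a disk reporting 488397168
 * LBA48 sectors (~250 GB) gets the synthetic geometry heads=255,
 * sectors=63, cylinders=488397168/(255*63) = 30401, matching the
 * ide-disk convention referenced above.
 */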

static void
calc_rttavg(struct aoedev *d, int rtt)
{
	register long n;

	n = rtt;
	if (n < 0) {
		n = -rtt;
		if (n < MINTIMER)
			n = MINTIMER;
		else if (n > MAXTIMER)
			n = MAXTIMER;
		d->mintimer += (n - d->mintimer) >> 1;
	} else if (n < d->mintimer)
		n = d->mintimer;
	else if (n > MAXTIMER)
		n = MAXTIMER;

	/* g == .25; cf. Congestion Avoidance and Control, Jacobson & Karels; 1988 */
	n -= d->rttavg;
	d->rttavg += n >> 2;
}
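
/*
 * The update above is an exponentially weighted moving average with gain
 * g = 1/4: each sample moves the average a quarter of the way toward it,
 *
 *	avg' = avg + (sample - avg) / 4
 *
 * e.g. avg = 800, sample = 1200 gives avg' = 800 + 400/4 = 900.
 */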

static struct aoetgt *
gettgt(struct aoedev *d, char *addr)
{
	struct aoetgt **t, **e;

	t = d->targets;
	e = t + NTARGETS;
	for (; t < e && *t; t++)
		if (memcmp((*t)->addr, addr, sizeof((*t)->addr)) == 0)
			return *t;
	return NULL;
}

bvcpy(struct bio_vec *bv, ulong off, struct sk_buff *skb, long cnt)
	fcnt = bv->bv_len - (off - bv->bv_offset);
	p = page_address(bv->bv_page) + off;
	skb_copy_bits(skb, soff, p, fcnt);

aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
	struct request_queue *q;

	do {
		bok = !fastfail && test_bit(BIO_UPTODATE, &bio->bi_flags);
	} while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_size));

	/* cf. http://lkml.org/lkml/2006/10/31/28 */

static void
aoe_end_buf(struct aoedev *d, struct buf *buf)
{
	struct request *rq;
	unsigned long n;

	if (buf == d->ip.buf)
		d->ip.buf = NULL;
	rq = buf->rq;
	bio_pagedec(buf->bio);
	mempool_free(buf, d->bufpool);
	n = (unsigned long) rq->special;
	rq->special = (void *) --n;
	if (n == 0)
		aoe_end_request(d, rq, 0);
}

static void
ktiocomplete(struct frame *f)
{
	struct aoe_hdr *hin, *hout;
	struct aoe_atahdr *ahin, *ahout;
	struct buf *buf;
	struct sk_buff *skb;
	struct aoetgt *t;
	struct aoeif *ifp;
	struct aoedev *d;
	long n;

	if (f == NULL)
		return;

	t = f->t;
	d = t->d;

	hout = (struct aoe_hdr *) skb_mac_header(f->skb);
	ahout = (struct aoe_atahdr *) (hout+1);
	buf = f->buf;
	skb = f->r_skb;
	if (skb == NULL)
		goto noskb;	/* just fail the buf. */

	hin = (struct aoe_hdr *) skb->data;
	skb_pull(skb, sizeof(*hin));
	ahin = (struct aoe_atahdr *) skb->data;
	skb_pull(skb, sizeof(*ahin));
	if (ahin->cmdstat & 0xa9) {	/* these bits cleared on success */
		pr_err("aoe: ata error cmd=%2.2Xh stat=%2.2Xh from e%ld.%d\n",
			ahout->cmdstat, ahin->cmdstat,
			d->aoemajor, d->aoeminor);
noskb:		if (buf)
			clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
		goto badrsp;
	}

	n = ahout->scnt << 9;
	switch (ahout->cmdstat) {
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
		if (skb->len < n) {
			pr_err("aoe: runt data size in read. skb->len=%d need=%ld\n",
				skb->len, n);
			clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
			break;
		}
		bvcpy(f->bv, f->bv_off, skb, n);
		/* fall through */
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
		spin_lock_irq(&d->lock);
		ifp = getif(t, skb->dev);
		if (ifp)
			ifp->lost = 0;
		if (d->htgt == t)	/* I'll help myself, thank you. */
			d->htgt = NULL;
		spin_unlock_irq(&d->lock);
		break;
	case ATA_CMD_ID_ATA:
		if (skb->len < 512) {
			pr_info("aoe: runt data size in ataid. skb->len=%d\n",
				skb->len);
			break;
		}
		if (skb_linearize(skb))
			break;
		spin_lock_irq(&d->lock);
		ataid_complete(d, t, skb->data);
		spin_unlock_irq(&d->lock);
		break;
	default:
		pr_info("aoe: unrecognized ata command %2.2Xh for %d.%d\n",
			ahout->cmdstat,
			be16_to_cpu(get_unaligned(&hin->major)),
			hin->minor);
	}
badrsp:
	spin_lock_irq(&d->lock);

	aoe_freetframe(f);

	if (buf && --buf->nframesout == 0 && buf->resid == 0)
		aoe_end_buf(d, buf);

	spin_unlock_irq(&d->lock);

/* Enters with iocq.lock held.
 * Returns true iff responses needing processing remain.
 */
static int
ktio(void)
{
	struct frame *f;
	struct list_head *pos;
	int i;

	for (i = 0; ; ++i) {
		if (i == MAXIOC)
			return 1;
		if (list_empty(&iocq.head))
			return 0;
		pos = iocq.head.next;
		list_del(pos);
		spin_unlock_irq(&iocq.lock);
		f = list_entry(pos, struct frame, head);
		ktiocomplete(f);
		spin_lock_irq(&iocq.lock);
	}
}

static int
kthread(void *vp)
{
	struct ktstate *k = vp;
	DECLARE_WAITQUEUE(wait, current);

	current->flags |= PF_NOFREEZE;
	set_user_nice(current, -10);
	complete(&k->rendez);	/* tell spawner we're running */
	do {
		spin_lock_irq(k->lock);
		if (!k->fn()) {	/* no work to do, so sleep */
			add_wait_queue(k->waitq, &wait);
			__set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(k->lock);
			schedule();
			remove_wait_queue(k->waitq, &wait);
		} else
			spin_unlock_irq(k->lock);
	} while (!kthread_should_stop());
	complete(&k->rendez);	/* tell spawner we're stopping */
	return 0;
}

void
aoe_ktstop(struct ktstate *k)
{
	kthread_stop(k->task);
	wait_for_completion(&k->rendez);
}

int
aoe_ktstart(struct ktstate *k)
{
	struct task_struct *task;

	init_completion(&k->rendez);
	task = kthread_run(kthread, k, k->name);
	if (task == NULL || IS_ERR(task))
		return -EFAULT;
	k->task = task;
	wait_for_completion(&k->rendez);	/* allow kthread to start */
	init_completion(&k->rendez);	/* for waiting for exit later */
	return 0;
}
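
/*
 * Note on the double init_completion() above: one completion serves two
 * rendezvous in turn. aoe_ktstart() waits for the kthread to signal that
 * it is running, then re-arms the completion so aoe_ktstop() can wait on
 * it again for the "stopping" signal when the thread exits.
 */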

/* pass it off to kthreads for processing */
static void
ktcomplete(struct frame *f, struct sk_buff *skb)
{
	ulong flags;

	f->r_skb = skb;
	spin_lock_irqsave(&iocq.lock, flags);
	list_add_tail(&f->head, &iocq.head);
	spin_unlock_irqrestore(&iocq.lock, flags);
	wake_up(&ktiowq);
}

void
aoecmd_ata_rsp(struct sk_buff *skb)
	h = (struct aoe_hdr *) skb->data;
	aoemajor = be16_to_cpu(get_unaligned(&h->major));
	d = aoedev_by_aoeaddr(aoemajor, h->minor, 0);
	if (d == NULL) {
		snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response "
			"for unknown device %d.%d\n",
			aoemajor, h->minor);

	spin_lock_irqsave(&d->lock, flags);

	n = be32_to_cpu(get_unaligned(&h->tag));
	f = getframe(d, n);
	if (f == NULL) {
		calc_rttavg(d, -tsince(n));
		spin_unlock_irqrestore(&d->lock, flags);
		snprintf(ebuf, sizeof ebuf,
			"%15s e%d.%d tag=%08x@%08lx\n",
			"unexpected rsp",
			get_unaligned_be16(&h->major),
			h->minor,
			get_unaligned_be32(&h->tag),
			jiffies);

	calc_rttavg(d, tsince(f->tag));

	spin_unlock_irqrestore(&d->lock, flags);

	/*
	 * Note here that we do not perform an aoedev_put, as we are
	 * leaving this reference for the ktio to release.
	 */

void
aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
{
	struct sk_buff_head queue;

	__skb_queue_head_init(&queue);
	aoecmd_cfg_pkts(aoemajor, aoeminor, &queue);
	aoenet_xmit(&queue);
}

aoecmd_ata_id(struct aoedev *d)
	struct aoe_atahdr *ah;
	struct sk_buff *skb;

	/* initialize the headers & frame */
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ah = (struct aoe_atahdr *) (h+1);
	skb_put(skb, sizeof *h + sizeof *ah);
	memset(h, 0, skb->len);
	f->tag = aoehdr_atainit(d, t, h);

	/* set up ata header */
	ah->cmdstat = ATA_CMD_ID_ATA;

	skb->dev = t->ifp->nd;

	d->rttavg = MAXTIMER;
	d->timer.function = rexmit_timer;

	return skb_clone(skb, GFP_ATOMIC);
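
/*
 * Design note (grounded in the skb_clone() calls here, in resend(), and
 * in aoecmd_ata_rw()): the driver keeps the original skb with its frame
 * for possible retransmission and hands the network layer a clone, so a
 * timeout can resend the identical frame without rebuilding headers.
 */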

static struct aoetgt *
addtgt(struct aoedev *d, char *addr, ulong nframes)
	struct aoetgt *t, **tt, **te;

	for (; tt < te && *tt; tt++)
		;
	if (tt == te) {
		printk(KERN_INFO
			"aoe: device addtgt failure; too many targets\n");
		return NULL;
	}
	t = kzalloc(sizeof(*t), GFP_ATOMIC);
	if (!t) {
		printk(KERN_INFO "aoe: cannot allocate memory to add target\n");
		return NULL;
	}

	t->nframes = nframes;
	memcpy(t->addr, addr, sizeof t->addr);
	t->maxout = t->nframes;
	INIT_LIST_HEAD(&t->ffree);

static void
setdbcnt(struct aoedev *d)
	struct aoetgt **t, **e;

	for (; t < e && *t; t++)
		if (bcnt == 0 || bcnt > (*t)->minbcnt)
			bcnt = (*t)->minbcnt;
	if (bcnt != d->maxbcnt) {
		d->maxbcnt = bcnt;
		pr_info("aoe: e%ld.%d: setting %d byte data frames\n",
			d->aoemajor, d->aoeminor, bcnt);
	}

static void
setifbcnt(struct aoetgt *t, struct net_device *nd, int bcnt)
	struct aoeif *p, *e;

	for (; p < e; p++) {
		if (p->nd == NULL)
			break;	/* end of the valid interfaces */
		if (p->nd == nd) {
			p->bcnt = bcnt;	/* we're updating */
			nd = NULL;
		} else if (minbcnt > p->bcnt)
			minbcnt = p->bcnt;	/* find the min interface */
	}

		pr_err("aoe: device setifbcnt failure; too many interfaces.\n");

	t->minbcnt = minbcnt;

void
aoecmd_cfg_rsp(struct sk_buff *skb)
	struct aoe_cfghdr *ch;
	ulong flags, aoemajor;
	struct sk_buff_head queue;

	h = (struct aoe_hdr *) skb_mac_header(skb);
	ch = (struct aoe_cfghdr *) (h+1);

	/*
	 * Enough people have their dip switches set backwards to
	 * warrant a loud message for this special case.
	 */
	aoemajor = get_unaligned_be16(&h->major);
	if (aoemajor == 0xfff) {
		printk(KERN_ERR "aoe: Warning: shelf address is all ones. "
			"Check shelf dip switches.\n");
		return;
	}
	if (aoemajor == 0xffff) {
		pr_info("aoe: e%ld.%d: broadcast shelf number invalid\n",
			aoemajor, (int) h->minor);
		return;
	}
	if (h->minor == 0xff) {
		pr_info("aoe: e%ld.%d: broadcast slot number invalid\n",
			aoemajor, (int) h->minor);
		return;
	}

	n = be16_to_cpu(ch->bufcnt);
	if (n > aoe_maxout)	/* keep it reasonable */
		n = aoe_maxout;

	d = aoedev_by_aoeaddr(aoemajor, h->minor, 1);
	if (d == NULL) {
		pr_info("aoe: device allocation failure\n");
		return;
	}

	spin_lock_irqsave(&d->lock, flags);

	t = gettgt(d, h->src);
	if (t == NULL) {
		t = addtgt(d, h->src, n);

	n -= sizeof(struct aoe_hdr) + sizeof(struct aoe_atahdr);

	n = n ? n * 512 : DEFAULTBCNT;
	setifbcnt(t, skb->dev, n);

	/* don't change users' perspective */
	if (d->nopen == 0) {
		d->fw_ver = be16_to_cpu(ch->fwver);
		sl = aoecmd_ata_id(d);

	spin_unlock_irqrestore(&d->lock, flags);

	__skb_queue_head_init(&queue);
	__skb_queue_tail(&queue, sl);
	aoenet_xmit(&queue);

aoecmd_cleanslate(struct aoedev *d)
	struct aoetgt **t, **te;

	d->mintimer = MINTIMER;

	for (; t < te && *t; t++)
		(*t)->maxout = (*t)->nframes;

void
aoe_failbuf(struct aoedev *d, struct buf *buf)
{
	if (buf == NULL)
		return;
	buf->resid = 0;
	clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
	if (buf->nframesout == 0)
		aoe_end_buf(d, buf);
}

void
aoe_flush_iocq(void)
	struct list_head *pos;
	struct sk_buff *skb;

	spin_lock_irqsave(&iocq.lock, flags);
	list_splice_init(&iocq.head, &flist);
	spin_unlock_irqrestore(&iocq.lock, flags);
	while (!list_empty(&flist)) {
		pos = flist.next;
		list_del(pos);
		f = list_entry(pos, struct frame, head);
		spin_lock_irqsave(&d->lock, flags);
		if (f->buf) {
			f->buf->nframesout--;
			aoe_failbuf(d, f->buf);
		}
		spin_unlock_irqrestore(&d->lock, flags);
	}

int __init
aoecmd_init(void)
{
	INIT_LIST_HEAD(&iocq.head);
	spin_lock_init(&iocq.lock);
	init_waitqueue_head(&ktiowq);
	kts.name = "aoe_ktio";
	kts.fn = ktio;
	kts.waitq = &ktiowq;
	kts.lock = &iocq.lock;
	return aoe_ktstart(&kts);
}