/* Copyright (c) 2013 Coraid, Inc. See COPYING for GPL terms. */
/*
 * Filesystem request handling methods
 */
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/genhd.h>
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <net/net_namespace.h>
#include <asm/unaligned.h>
#include <linux/uio.h>
#include "aoe.h"
#define MAXIOC (8192)	/* default meant to avoid most soft lockups */

static void ktcomplete(struct frame *, struct sk_buff *);
static int count_targets(struct aoedev *d, int *untainted);
static struct buf *nextbuf(struct aoedev *);

static int aoe_deadsecs = 60 * 3;
module_param(aoe_deadsecs, int, 0644);
MODULE_PARM_DESC(aoe_deadsecs, "After aoe_deadsecs seconds, give up and fail dev.");

static int aoe_maxout = 64;
module_param(aoe_maxout, int, 0644);
MODULE_PARM_DESC(aoe_maxout,
	"Only aoe_maxout outstanding packets for every MAC on eX.Y.");
/* The number of online cpus during module initialization gives us a
 * convenient heuristic cap on the parallelism used for ktio threads
 * doing I/O completion. It is not important that the cap equal the
 * actual number of running CPUs at any given time, but because of CPU
 * hotplug, we take care to use ncpus instead of using
 * num_online_cpus() after module initialization.
 */
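
/* Assumed declaration, restored because ncpus is referenced
 * throughout this file; it is set once from num_online_cpus()
 * during module init below. */
static int ncpus;
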
/* mutex lock used for synchronization while thread spawning */
static DEFINE_MUTEX(ktio_spawn_lock);

static wait_queue_head_t *ktiowq;
static struct ktstate *kts;

/* io completion queue */
struct iocq_ktio {
	struct list_head head;
	spinlock_t lock;
};

static struct iocq_ktio *iocq;

static struct page *empty_page;
static struct sk_buff *
new_skb(ulong len)
{
	struct sk_buff *skb = alloc_skb(len + MAX_HEADER, GFP_ATOMIC);

	if (skb) {
		skb_reserve(skb, MAX_HEADER);
		skb_reset_mac_header(skb);
		skb_reset_network_header(skb);
		skb->protocol = __constant_htons(ETH_P_AOE);
		skb_checksum_none_assert(skb);
	}
	return skb;
}
getframe_deferred(struct aoedev *d, u32 tag)
	struct list_head *head, *pos, *nx;

	list_for_each_safe(pos, nx, head) {
		f = list_entry(pos, struct frame, head);

getframe(struct aoedev *d, u32 tag)
	struct list_head *head, *pos, *nx;

	head = &d->factive[n];
	list_for_each_safe(pos, nx, head) {
		f = list_entry(pos, struct frame, head);

/*
 * Leave the top bit clear so we have tagspace for userland.
 * The bottom 16 bits are the xmit tick for rexmit/rttavg processing.
 * This driver reserves tag -1 to mean "unused frame."
 */
newtag(struct aoedev *d)
	n = jiffies & 0xffff;
	return n |= (++d->lasttag & 0x7fff) << 16;
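
/* Illustrative only, not driver code: how a tag built by newtag()
 * above decomposes, assuming the layout described in the comment --
 * bit 31 clear for userland, bits 30..16 a sequence number from
 * d->lasttag, and bits 15..0 the jiffies tick at transmit time.
 */
static inline u16 tag_xmit_tick(u32 tag)
{
	return tag & 0xffff;		/* low 16 bits: xmit tick */
}

static inline u16 tag_seq(u32 tag)
{
	return (tag >> 16) & 0x7fff;	/* bits 30..16: sequence */
}
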
aoehdr_atainit(struct aoedev *d, struct aoetgt *t, struct aoe_hdr *h)
	u32 host_tag = newtag(d);

	memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);
	memcpy(h->dst, t->addr, sizeof h->dst);
	h->type = __constant_cpu_to_be16(ETH_P_AOE);
	h->major = cpu_to_be16(d->aoemajor);
	h->minor = d->aoeminor;
	h->tag = cpu_to_be32(host_tag);
put_lba(struct aoe_atahdr *ah, sector_t lba)
	ah->lba0 = lba;
	ah->lba1 = lba >>= 8;
	ah->lba2 = lba >>= 8;
	ah->lba3 = lba >>= 8;
	ah->lba4 = lba >>= 8;
	ah->lba5 = lba >>= 8;
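
/* A sketch of the inverse, assuming the lba0..lba5 byte fields of
 * struct aoe_atahdr; the driver itself never needs this. Note that
 * for LBA28 commands, ata_rw_frameinit() below ORs flag bits into
 * the top nibble of lba3, which this sketch ignores.
 */
static inline u64 get_lba48(struct aoe_atahdr *ah)
{
	return (u64) ah->lba0 |
	       (u64) ah->lba1 << 8 |
	       (u64) ah->lba2 << 16 |
	       (u64) ah->lba3 << 24 |
	       (u64) ah->lba4 << 32 |
	       (u64) ah->lba5 << 40;
}
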
static struct aoeif *
ifrotate(struct aoetgt *t)
	if (ifp >= &t->ifs[NAOEIFS] || ifp->nd == NULL)

skb_pool_put(struct aoedev *d, struct sk_buff *skb)
	__skb_queue_tail(&d->skbpool, skb);

static struct sk_buff *
skb_pool_get(struct aoedev *d)
	struct sk_buff *skb = skb_peek(&d->skbpool);

	if (skb && atomic_read(&skb_shinfo(skb)->dataref) == 1) {
		__skb_unlink(skb, &d->skbpool);
	if (skb_queue_len(&d->skbpool) < NSKBPOOLMAX &&
	    (skb = new_skb(ETH_ZLEN)))

aoe_freetframe(struct frame *f)
	list_add(&f->head, &t->ffree);

static struct frame *
newtframe(struct aoedev *d, struct aoetgt *t)
	struct list_head *pos;

	if (list_empty(&t->ffree)) {
		if (t->falloc >= NSKBPOOLMAX*2)
		f = kcalloc(1, sizeof(*f), GFP_ATOMIC);
	f = list_entry(pos, struct frame, head);
	f->skb = skb = new_skb(ETH_ZLEN);
bail:	aoe_freetframe(f);
	if (atomic_read(&skb_shinfo(skb)->dataref) != 1) {
		skb = skb_pool_get(d);
		skb_pool_put(d, f->skb);
	skb->truesize -= skb->data_len;
	skb_shinfo(skb)->nr_frags = skb->data_len = 0;

static struct frame *
newframe(struct aoedev *d)
	struct aoetgt *t, **tt;

	if (!d->targets || !d->targets[0]) {
		printk(KERN_ERR "aoe: NULL TARGETS!\n");
	tt = d->tgt;	/* last used target */
	for (use_tainted = 0, has_untainted = 0;;) {
		if (tt >= &d->targets[d->ntargets] || !*tt)
		if (t->nout < t->maxout
		 && (use_tainted || !t->taint)
		if (tt == d->tgt) {	/* we've looped and found nada */
			if (!use_tainted && !has_untainted)
	d->flags |= DEVFL_KICKME;
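
/* skb_fillup: attach cnt bytes of bio data to the skb as page
 * fragments, starting at offset off within the current bio_vec
 * and walking the bio_vec array as each fragment is consumed. */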
skb_fillup(struct sk_buff *skb, struct bio_vec *bv, ulong off, ulong cnt)
	fcnt = bv->bv_len - (off - bv->bv_offset);
	skb_fill_page_desc(skb, frag++, bv->bv_page, off, fcnt);
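
/* fhash: active frames are hashed by tag into one of the NFACTIVE
 * factive buckets, so getframe() can locate the frame matching a
 * response without scanning every outstanding frame. */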
fhash(struct frame *f)
	struct aoedev *d = f->t->d;

	n = f->tag % NFACTIVE;
	list_add_tail(&f->head, &d->factive[n]);
ata_rw_frameinit(struct frame *f)
	struct aoe_atahdr *ah;
	char writebit, extbit;

	h = (struct aoe_hdr *) skb_mac_header(skb);
	ah = (struct aoe_atahdr *) (h + 1);
	skb_put(skb, sizeof(*h) + sizeof(*ah));
	memset(h, 0, skb->len);

	f->tag = aoehdr_atainit(t->d, t, h);
	f->lba = f->buf->sector;

	/* set up ata header */
	ah->scnt = f->bcnt >> 9;
	if (t->d->flags & DEVFL_EXT) {
		ah->aflags |= AOEAFL_EXT;
	} else {
		ah->lba3 |= 0xe0;	/* LBA bit + obsolete 0xa0 */
	}

	if (f->buf && bio_data_dir(f->buf->bio) == WRITE) {
		skb_fillup(skb, f->bv, f->bv_off, f->bcnt);
		ah->aflags |= AOEAFL_WRITE;
		skb->data_len = f->bcnt;
		skb->truesize += f->bcnt;
	}

	ah->cmdstat = ATA_CMD_PIO_READ | writebit | extbit;
	skb->dev = t->ifp->nd;
aoecmd_ata_rw(struct aoedev *d)
	struct sk_buff_head queue;

	if (bcnt > buf->resid)
	f->bv_off = f->bv->bv_offset + (f->bv->bv_len - buf->bv_resid);
	if (fbcnt < buf->bv_resid) {
		buf->bv_resid -= fbcnt;
	fbcnt -= buf->bv_resid;
	buf->resid -= buf->bv_resid;
	if (buf->resid == 0) {
	buf->bv_resid = buf->bv->bv_len;
	WARN_ON(buf->bv_resid == 0);

	/* initialize the headers & frame */

	/* mark all tracking fields and load out */
	buf->nframesout += 1;
	buf->sector += bcnt >> 9;

	skb = skb_clone(f->skb, GFP_ATOMIC);
	do_gettimeofday(&f->sent);
	f->sent_jiffs = (u32) jiffies;
	__skb_queue_head_init(&queue);
	__skb_queue_tail(&queue, skb);
/* Some callers cannot sleep; they can still call this function,
 * then transmit the queued packets later, once interrupts are on.
 */
aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *queue)
	struct aoe_cfghdr *ch;
	struct net_device *ifp;

	for_each_netdev_rcu(&init_net, ifp) {
		if (!is_aoe_netif(ifp))
		skb = new_skb(sizeof *h + sizeof *ch);
			printk(KERN_INFO "aoe: skb alloc failure\n");
		skb_put(skb, sizeof *h + sizeof *ch);
		__skb_queue_tail(queue, skb);
		h = (struct aoe_hdr *) skb_mac_header(skb);
		memset(h, 0, sizeof *h + sizeof *ch);
		memset(h->dst, 0xff, sizeof h->dst);
		memcpy(h->src, ifp->dev_addr, sizeof h->src);
		h->type = __constant_cpu_to_be16(ETH_P_AOE);
		h->major = cpu_to_be16(aoemajor);
resend(struct aoedev *d, struct frame *f)
	struct sk_buff_head queue;
	struct aoe_atahdr *ah;

	if (ifrotate(t) == NULL) {
		/* probably can't happen, but set it up to fail anyway */
		pr_info("aoe: resend: no interfaces to rotate to.\n");
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ah = (struct aoe_atahdr *) (h+1);

	if (!(f->flags & FFL_PROBE)) {
		snprintf(buf, sizeof(buf),
			"%15s e%ld.%d oldtag=%08x@%08lx newtag=%08x s=%pm d=%pm nout=%d\n",
			"retransmit", d->aoemajor, d->aoeminor,
			h->src, h->dst, t->nout);

	h->tag = cpu_to_be32(n);
	memcpy(h->dst, t->addr, sizeof h->dst);
	memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);
	skb->dev = t->ifp->nd;
	skb = skb_clone(skb, GFP_ATOMIC);
	do_gettimeofday(&f->sent);
	f->sent_jiffs = (u32) jiffies;
	__skb_queue_head_init(&queue);
	__skb_queue_tail(&queue, skb);
tsince_hr(struct frame *f)
	do_gettimeofday(&now);
	n = now.tv_usec - f->sent.tv_usec;
	n += (now.tv_sec - f->sent.tv_sec) * USEC_PER_SEC;

	/* For relatively long periods, use jiffies to avoid
	 * discrepancies caused by updates to the system time.
	 *
	 * On a system with HZ of 1000, 32 bits is over 49 days'
	 * worth of jiffies, or over 71 minutes' worth of usecs.
	 *
	 * Jiffies overflow is handled by subtraction of unsigned ints:
	 * (gdb) print (unsigned) 2 - (unsigned) 0xfffffffe
	 * $1 = 4
	 */
	if (n > USEC_PER_SEC / 4) {
		n = ((u32) jiffies) - f->sent_jiffs;
		n *= USEC_PER_SEC / HZ;
	}

tsince(u32 tag)
	n = jiffies & 0xffff;
	return jiffies_to_usecs(n + 1);
static struct aoeif *
getif(struct aoetgt *t, struct net_device *nd)

ejectif(struct aoetgt *t, struct aoeif *ifp)
	struct net_device *nd;

	e = t->ifs + NAOEIFS - 1;
	n = (e - ifp) * sizeof *ifp;
	memmove(ifp, ifp+1, n);

static struct frame *
reassign_frame(struct frame *f)
	nf = newframe(f->t->d);
	nf->bv_off = f->bv_off;
	nf->waited_total = f->waited_total;
	nf->sent_jiffs = f->sent_jiffs;
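
/* probe: build a throwaway frame, padded out to full size with
 * fragments of empty_page, so that a tainted target's path can be
 * re-checked at full (maxbcnt) frame size. */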
probe(struct aoetgt *t)
	struct sk_buff_head queue;

	pr_err("%s %pm for e%ld.%d: %s\n",
		"aoe: cannot probe remote address",
		t->addr,
		(long) d->aoemajor, d->aoeminor,
		"no frame available");
	f->flags |= FFL_PROBE;
	f->bcnt = t->d->maxbcnt ? t->d->maxbcnt : DEFAULTBCNT;
	for (frag = 0, n = f->bcnt; n > 0; ++frag, n -= m) {
		skb_fill_page_desc(skb, frag, empty_page, 0, m);
	}
	skb->data_len = f->bcnt;
	skb->truesize += f->bcnt;

	skb = skb_clone(f->skb, GFP_ATOMIC);
	do_gettimeofday(&f->sent);
	f->sent_jiffs = (u32) jiffies;
	__skb_queue_head_init(&queue);
	__skb_queue_tail(&queue, skb);
rto(struct aoedev *d)
	t = 2 * d->rttavg >> RTTSCALE;
	t += 8 * d->rttdev >> RTTDSCALE;
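
/* Reading the fixed-point arithmetic above: rttavg and rttdev are
 * kept scaled up by 2^RTTSCALE and 2^RTTDSCALE respectively, so the
 * timeout is roughly 2*mean(RTT) + 8*(mean deviation) -- the same
 * shape as the TCP retransmit timer from the Jacobson/Karels paper
 * cited at calc_rttavg() below. */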
rexmit_deferred(struct aoedev *d)
	struct list_head *pos, *nx, *head;

	count_targets(d, &untainted);

	list_for_each_safe(pos, nx, head) {
		f = list_entry(pos, struct frame, head);
		if (!(f->flags & FFL_PROBE)) {
			nf = reassign_frame(f);
			if (t->nout_probes == 0
			list_replace(&f->head, &nf->head);
		} else if (untainted < 1) {
			/* don't probe w/o other untainted aoetgts */
			goto stop_probe;
		} else if (tsince_hr(f) < t->taint * rto(d)) {
			/* reprobe slowly when taint is high */
		} else if (f->flags & FFL_PROBE) {
stop_probe:		/* don't probe untainted aoetgts */
			/* leaving d->kicked, because this is routine */
			f->t->d->flags |= DEVFL_KICKME;
		if (t->nout >= t->maxout)
		if (f->flags & FFL_PROBE)
		since = tsince_hr(f);
		f->waited_total += since;
/* An aoetgt accumulates demerits quickly, and successful
 * probing redeems the aoetgt slowly.
 */
scorn(struct aoetgt *t)
	t->taint += t->taint * 2 + 1;
	if (t->taint > MAX_TAINT)
		t->taint = MAX_TAINT;
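
/* Worked example of the update above: taint grows 0 -> 1 -> 4 ->
 * 13 -> 40 (roughly tripling per demerit) until clamped at
 * MAX_TAINT, and rexmit_deferred() waits taint * rto(d) between
 * probes, so a few consecutive failures back off quickly. */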
count_targets(struct aoedev *d, int *untainted)
	for (i = good = 0; i < d->ntargets && d->targets[i]; ++i)
		if (d->targets[i]->taint == 0)
rexmit_timer(ulong vp)
	struct list_head *head, *pos, *nx;
	register long timeout;
	int utgts;	/* number of aoetgt descriptors (not slots) */

	d = (struct aoedev *) vp;

	spin_lock_irqsave(&d->lock, flags);

	/* timeout based on observed timings and variations */
	utgts = count_targets(d, NULL);

	if (d->flags & DEVFL_TKILL) {
		spin_unlock_irqrestore(&d->lock, flags);

	/* collect all frames to rexmit into flist */
	for (i = 0; i < NFACTIVE; i++) {
		head = &d->factive[i];
		list_for_each_safe(pos, nx, head) {
			f = list_entry(pos, struct frame, head);
			if (tsince_hr(f) < timeout)
				break;	/* end of expired frames */
			/* move to flist for later processing */
			list_move_tail(pos, &flist);

	/* process expired frames */
	while (!list_empty(&flist)) {
		f = list_entry(pos, struct frame, head);
		since = tsince_hr(f);
		n = f->waited_total + since;
		 && !(f->flags & FFL_PROBE)) {
			/* Waited too long. Device failure.
			 * Hang all frames on first hash bucket for downdev
			 * to clean up.
			 */
			list_splice(&flist, &d->factive[0]);

		n = f->waited + since;
		if (aoe_deadsecs && utgts > 0
		 && (n > aoe_deadsecs / utgts || n > HARD_SCORN_SECS))
			scorn(t);	/* avoid this target */

		if (t->maxout != 1) {
			t->ssthresh = t->maxout / 2;
			t->maxout = 1;
		}

		if (f->flags & FFL_PROBE) {
		ifp = getif(t, f->skb->dev);
		if (ifp && ++ifp->lost > (t->nframes << 1)
		 && (ifp != t->ifs || t->ifs[1].nd)) {

		list_move_tail(pos, &d->rexmitq);

	if ((d->flags & DEVFL_KICKME) && d->blkq) {
		d->flags &= ~DEVFL_KICKME;
		d->blkq->request_fn(d->blkq);
	}

	d->timer.expires = jiffies + TIMERTICK;
	add_timer(&d->timer);

	spin_unlock_irqrestore(&d->lock, flags);
rqbiocnt(struct request *r)
	__rq_for_each_bio(bio, r)

/* This can be removed if we are certain that no users of the block
 * layer will ever use zero-count pages in bios. Otherwise we have to
 * protect against the put_page sometimes done by the network layer.
 *
 * See http://oss.sgi.com/archives/xfs/2007-01/msg00594.html for
 * more information.
 *
 * We cannot use get_page in the workaround, because it insists on a
 * positive page count as a precondition. So we use _count directly.
 */
bio_pageinc(struct bio *bio)
	bio_for_each_segment(bv, bio, i) {
		/* Non-zero page count for non-head members of
		 * compound pages is no longer allowed by the kernel,
		 * but this has never been seen here.
		 */
		if (unlikely(PageCompound(page)))
			if (compound_trans_head(page) != page) {
				pr_crit("page tail used for block I/O\n");
		atomic_inc(&page->_count);

bio_pagedec(struct bio *bio)
	bio_for_each_segment(bv, bio, i)
		atomic_dec(&bv->bv_page->_count);
bufinit(struct buf *buf, struct request *rq, struct bio *bio)
	memset(buf, 0, sizeof(*buf));
	buf->resid = bio->bi_size;
	buf->sector = bio->bi_sector;
	buf->bv = bio_iovec(bio);
	buf->bv_resid = buf->bv->bv_len;
	WARN_ON(buf->bv_resid == 0);

nextbuf(struct aoedev *d)
	struct request_queue *q;

		return NULL;	/* initializing */
	rq = blk_peek_request(q);
	if (rq == NULL)
		return NULL;
	blk_start_request(rq);
	d->ip.nxbio = rq->bio;
	rq->special = (void *) rqbiocnt(rq);
	buf = mempool_alloc(d->bufpool, GFP_ATOMIC);
	if (buf == NULL) {
		pr_err("aoe: nextbuf: unable to mempool_alloc!\n");
	bufinit(buf, rq, bio);
	return d->ip.buf = buf;
/* enters with d->lock held */
aoecmd_work(struct aoedev *d)
	while (aoecmd_ata_rw(d))

/* this function performs work that has been deferred until sleeping is OK
 */
aoecmd_sleepwork(struct work_struct *work)
	struct aoedev *d = container_of(work, struct aoedev, work);
	struct block_device *bd;

	if (d->flags & DEVFL_GDALLOC)

	if (d->flags & DEVFL_NEWSIZE) {
		ssize = get_capacity(d->gd);
		bd = bdget_disk(d->gd, 0);
		mutex_lock(&bd->bd_inode->i_mutex);
		i_size_write(bd->bd_inode, (loff_t)ssize<<9);
		mutex_unlock(&bd->bd_inode->i_mutex);

	spin_lock_irq(&d->lock);
	d->flags |= DEVFL_UP;
	d->flags &= ~DEVFL_NEWSIZE;
	spin_unlock_irq(&d->lock);
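
/* ATA IDENTIFY strings arrive as 16-bit words with the two ASCII
 * characters swapped; exchange the bytes of each word in place so
 * the strings read normally. */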
ata_ident_fixstring(u16 *id, int ns)
	*id++ = s >> 8 | s << 8;
ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
	/* word 83: command set supported */
	n = get_unaligned_le16(&id[83 << 1]);

	/* word 86: command set/feature enabled */
	n |= get_unaligned_le16(&id[86 << 1]);

	if (n & (1<<10)) {	/* bit 10: LBA 48 */
		d->flags |= DEVFL_EXT;

		/* word 100: number lba48 sectors */
		ssize = get_unaligned_le64(&id[100 << 1]);

		/* set as in ide-disk.c:init_idedisk_capacity */
		d->geo.cylinders = ssize;
		d->geo.cylinders /= (255 * 63);
		d->geo.sectors = 63;
	} else {
		d->flags &= ~DEVFL_EXT;

		/* number lba28 sectors */
		ssize = get_unaligned_le32(&id[60 << 1]);

		/* NOTE: obsolete in ATA 6 */
		d->geo.cylinders = get_unaligned_le16(&id[54 << 1]);
		d->geo.heads = get_unaligned_le16(&id[55 << 1]);
		d->geo.sectors = get_unaligned_le16(&id[56 << 1]);
	}

	ata_ident_fixstring((u16 *) &id[10<<1], 10);	/* serial */
	ata_ident_fixstring((u16 *) &id[23<<1], 4);	/* firmware */
	ata_ident_fixstring((u16 *) &id[27<<1], 20);	/* model */
	memcpy(d->ident, id, sizeof(d->ident));

	if (d->ssize != ssize)
		printk(KERN_INFO
			"aoe: %pm e%ld.%d v%04x has %llu sectors\n",
			t->addr,
			d->aoemajor, d->aoeminor,
			d->fw_ver, (long long)ssize);

	if (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE))
		return;
	if (d->gd != NULL) {
		set_capacity(d->gd, ssize);
		d->flags |= DEVFL_NEWSIZE;
	} else
		d->flags |= DEVFL_GDALLOC;
	schedule_work(&d->work);
calc_rttavg(struct aoedev *d, struct aoetgt *t, int rtt)
	/* cf. Congestion Avoidance and Control, Jacobson & Karels, 1988 */
	n -= d->rttavg >> RTTSCALE;
	n -= d->rttdev >> RTTDSCALE;

	if (!t || t->maxout >= t->nframes)
	if (t->maxout < t->ssthresh)
	else if (t->nout == t->maxout && t->next_cwnd-- == 0) {
		t->next_cwnd = t->maxout;
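
/* The window handling above mirrors TCP congestion control: below
 * ssthresh, maxout (the per-target cap on outstanding frames) grows
 * quickly per sample (slow start); at or above it, by roughly one
 * per window of completions (congestion avoidance). rexmit_timer()
 * supplies the decrease, halving maxout into ssthresh on timeout. */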
static struct aoetgt *
gettgt(struct aoedev *d, char *addr)
	struct aoetgt **t, **e;

	e = t + d->ntargets;
	for (; t < e && *t; t++)
		if (memcmp((*t)->addr, addr, sizeof((*t)->addr)) == 0)
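
/* bvcpy: copy cnt bytes of response payload out of the skb into
 * the bio_vec pages, advancing to the next bio_vec (fcnt bytes at
 * a time) as each one fills. */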
bvcpy(struct bio_vec *bv, ulong off, struct sk_buff *skb, long cnt)
	fcnt = bv->bv_len - (off - bv->bv_offset);
	p = page_address(bv->bv_page) + off;
	skb_copy_bits(skb, soff, p, fcnt);
	off = bv->bv_offset;
aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
	struct request_queue *q;

		bok = !fastfail && test_bit(BIO_UPTODATE, &bio->bi_flags);
	} while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_size));

	/* cf. http://lkml.org/lkml/2006/10/31/28 */
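
/* aoe_end_buf below: one buf (one bio) is finished. rq->special
 * carries the count of bios still outstanding for the request (set
 * from rqbiocnt() in nextbuf()); the request itself is ended only
 * when that count reaches zero. */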
aoe_end_buf(struct aoedev *d, struct buf *buf)
	if (buf == d->ip.buf)
		d->ip.buf = NULL;
	bio_pagedec(buf->bio);
	mempool_free(buf, d->bufpool);
	n = (unsigned long) rq->special;
	rq->special = (void *) --n;
	if (n == 0)
		aoe_end_request(d, rq, 0);
ktiocomplete(struct frame *f)
	struct aoe_hdr *hin, *hout;
	struct aoe_atahdr *ahin, *ahout;
	struct sk_buff *skb;

	if (f->flags & FFL_PROBE)
	if (!skb)	/* just fail the buf. */

	hout = (struct aoe_hdr *) skb_mac_header(f->skb);
	ahout = (struct aoe_atahdr *) (hout+1);
	hin = (struct aoe_hdr *) skb->data;
	skb_pull(skb, sizeof(*hin));
	ahin = (struct aoe_atahdr *) skb->data;
	skb_pull(skb, sizeof(*ahin));
	if (ahin->cmdstat & 0xa9) {	/* these bits cleared on success */
		pr_err("aoe: ata error cmd=%2.2Xh stat=%2.2Xh from e%ld.%d\n",
			ahout->cmdstat, ahin->cmdstat,
			d->aoemajor, d->aoeminor);
		clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);

	n = ahout->scnt << 9;
	switch (ahout->cmdstat) {
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
			pr_err("%s e%ld.%d. skb->len=%d need=%ld\n",
				"aoe: runt data size in read from",
				(long) d->aoemajor, d->aoeminor,
			clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
		bvcpy(f->bv, f->bv_off, skb, n);
		/* fall through */
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
		spin_lock_irq(&d->lock);
		ifp = getif(t, skb->dev);
		spin_unlock_irq(&d->lock);
	case ATA_CMD_ID_ATA:
		if (skb->len < 512) {
			pr_info("%s e%ld.%d. skb->len=%d need=512\n",
				"aoe: runt data size in ataid from",
				(long) d->aoemajor, d->aoeminor,
		if (skb_linearize(skb))
		spin_lock_irq(&d->lock);
		ataid_complete(d, t, skb->data);
		spin_unlock_irq(&d->lock);
		pr_info("aoe: unrecognized ata command %2.2Xh for %d.%d\n",
			be16_to_cpu(get_unaligned(&hin->major)),

	spin_lock_irq(&d->lock);
	 && t->nout_probes == 0) {
		count_targets(d, &untainted);
		if (untainted > 0) {

	if (buf && --buf->nframesout == 0 && buf->resid == 0)
		aoe_end_buf(d, buf);

	spin_unlock_irq(&d->lock);
/* Enters with iocq.lock held.
 * Returns true iff responses needing processing remain.
 */
	struct list_head *pos;

	for (i = 0; ; ++i) {
		if (list_empty(&iocq[id].head))
			return 0;
		pos = iocq[id].head.next;
		f = list_entry(pos, struct frame, head);
		spin_unlock_irq(&iocq[id].lock);

		/* Figure out if extra threads are required. */
		actual_id = f->t->d->aoeminor % ncpus;

		if (!kts[actual_id].active) {
			mutex_lock(&ktio_spawn_lock);
			if (!kts[actual_id].active
			 && aoe_ktstart(&kts[actual_id]) == 0)
				kts[actual_id].active = 1;
			mutex_unlock(&ktio_spawn_lock);
		}
		spin_lock_irq(&iocq[id].lock);
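
/* Sleep/wake protocol for the thread body below: the thread joins
 * the wait queue and marks itself TASK_INTERRUPTIBLE while still
 * holding the iocq lock, so a response queued by ktcomplete() after
 * the lock drops always finds the thread on the queue -- no wakeup
 * can be lost. */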
	DECLARE_WAITQUEUE(wait, current);

	current->flags |= PF_NOFREEZE;
	set_user_nice(current, -10);
	complete(&k->rendez);	/* tell spawner we're running */
	do {
		spin_lock_irq(k->lock);
		more = k->fn(k->id);
		if (!more) {
			add_wait_queue(k->waitq, &wait);
			__set_current_state(TASK_INTERRUPTIBLE);
		}
		spin_unlock_irq(k->lock);
		if (!more) {
			schedule();
			remove_wait_queue(k->waitq, &wait);
		}
	} while (!kthread_should_stop());
	complete(&k->rendez);	/* tell spawner we're stopping */
aoe_ktstop(struct ktstate *k)
	kthread_stop(k->task);
	wait_for_completion(&k->rendez);

aoe_ktstart(struct ktstate *k)
	struct task_struct *task;

	init_completion(&k->rendez);
	task = kthread_run(kthread, k, "%s", k->name);
	if (task == NULL || IS_ERR(task))
	wait_for_completion(&k->rendez);	/* allow kthread to start */
	init_completion(&k->rendez);	/* for waiting for exit later */
/* pass it off to kthreads for processing */
ktcomplete(struct frame *f, struct sk_buff *skb)
	id = f->t->d->aoeminor % ncpus;
	spin_lock_irqsave(&iocq[id].lock, flags);
	if (!kts[id].active) {
		spin_unlock_irqrestore(&iocq[id].lock, flags);
		/* The thread with id has not been spawned yet,
		 * so delegate the work to the main thread and
		 * try spawning a new thread.
		 */
		id = 0;
		spin_lock_irqsave(&iocq[id].lock, flags);
	}
	list_add_tail(&f->head, &iocq[id].head);
	spin_unlock_irqrestore(&iocq[id].lock, flags);
	wake_up(&ktiowq[id]);
aoecmd_ata_rsp(struct sk_buff *skb)
	h = (struct aoe_hdr *) skb->data;
	aoemajor = be16_to_cpu(get_unaligned(&h->major));
	d = aoedev_by_aoeaddr(aoemajor, h->minor, 0);
	if (d == NULL) {
		snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response "
			"for unknown device %d.%d\n",
			aoemajor, h->minor);

	spin_lock_irqsave(&d->lock, flags);

	n = be32_to_cpu(get_unaligned(&h->tag));
	f = getframe(d, n);
	if (f) {
		calc_rttavg(d, f->t, tsince_hr(f));
		if (f->flags & FFL_PROBE)
			f->t->nout_probes--;
	} else {
		f = getframe_deferred(d, n);
		if (f) {
			calc_rttavg(d, NULL, tsince_hr(f));
		} else {
			calc_rttavg(d, NULL, tsince(n));
			spin_unlock_irqrestore(&d->lock, flags);
			snprintf(ebuf, sizeof(ebuf),
				"%15s e%d.%d tag=%08x@%08lx s=%pm d=%pm\n",
				get_unaligned_be16(&h->major),
				get_unaligned_be32(&h->tag),

	spin_unlock_irqrestore(&d->lock, flags);

	/*
	 * Note here that we do not perform an aoedev_put, as we are
	 * leaving this reference for the ktio to release.
	 */
aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
	struct sk_buff_head queue;

	__skb_queue_head_init(&queue);
	aoecmd_cfg_pkts(aoemajor, aoeminor, &queue);
	aoenet_xmit(&queue);
aoecmd_ata_id(struct aoedev *d)
	struct aoe_atahdr *ah;
	struct sk_buff *skb;

	/* initialize the headers & frame */
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ah = (struct aoe_atahdr *) (h+1);
	skb_put(skb, sizeof *h + sizeof *ah);
	memset(h, 0, skb->len);
	f->tag = aoehdr_atainit(d, t, h);
	f->waited_total = 0;

	/* set up ata header */
	ah->cmdstat = ATA_CMD_ID_ATA;
	skb->dev = t->ifp->nd;

	d->rttavg = RTTAVG_INIT;
	d->rttdev = RTTDEV_INIT;
	d->timer.function = rexmit_timer;

	skb = skb_clone(skb, GFP_ATOMIC);
	do_gettimeofday(&f->sent);
	f->sent_jiffs = (u32) jiffies;
static struct aoetgt **
grow_targets(struct aoedev *d)
	tt = kcalloc(newn, sizeof(*d->targets), GFP_ATOMIC);
	memmove(tt, d->targets, sizeof(*d->targets) * oldn);
	d->tgt = tt + (d->tgt - d->targets);
	return &d->targets[oldn];
static struct aoetgt *
addtgt(struct aoedev *d, char *addr, ulong nframes)
	struct aoetgt *t, **tt, **te;

	te = tt + d->ntargets;
	for (; tt < te && *tt; tt++)
	tt = grow_targets(d);
	t = kzalloc(sizeof(*t), GFP_ATOMIC);
	t->nframes = nframes;
	memcpy(t->addr, addr, sizeof t->addr);
	t->maxout = t->nframes / 2;
	INIT_LIST_HEAD(&t->ffree);
	pr_info("aoe: cannot allocate memory to add target\n");
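
/* setdbcnt below: the device-wide data-frame size is the smallest
 * per-target minbcnt, so that any frame fits on any usable path to
 * the device. */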
setdbcnt(struct aoedev *d)
	struct aoetgt **t, **e;

	e = t + d->ntargets;
	for (; t < e && *t; t++)
		if (bcnt == 0 || bcnt > (*t)->minbcnt)
			bcnt = (*t)->minbcnt;
	if (bcnt != d->maxbcnt) {
		pr_info("aoe: e%ld.%d: setting %d byte data frames\n",
			d->aoemajor, d->aoeminor, bcnt);
setifbcnt(struct aoetgt *t, struct net_device *nd, int bcnt)
	struct aoeif *p, *e;

	for (; p < e; p++) {
		if (p->nd == NULL)
			break;	/* end of the valid interfaces */
		if (p->nd == nd) {
			p->bcnt = bcnt;	/* we're updating */
		} else if (minbcnt > p->bcnt)
			minbcnt = p->bcnt; /* find the min interface */
	}
	pr_err("aoe: device setifbcnt failure; too many interfaces.\n");
	t->minbcnt = minbcnt;
aoecmd_cfg_rsp(struct sk_buff *skb)
	struct aoe_cfghdr *ch;
	ulong flags, aoemajor;
	struct sk_buff_head queue;

	h = (struct aoe_hdr *) skb_mac_header(skb);
	ch = (struct aoe_cfghdr *) (h+1);

	/*
	 * Enough people have their dip switches set backwards to
	 * warrant a loud message for this special case.
	 */
	aoemajor = get_unaligned_be16(&h->major);
	if (aoemajor == 0xfff) {
		printk(KERN_ERR "aoe: Warning: shelf address is all ones. "
			"Check shelf dip switches.\n");
	if (aoemajor == 0xffff) {
		pr_info("aoe: e%ld.%d: broadcast shelf number invalid\n",
			aoemajor, (int) h->minor);
	if (h->minor == 0xff) {
		pr_info("aoe: e%ld.%d: broadcast slot number invalid\n",
			aoemajor, (int) h->minor);

	n = be16_to_cpu(ch->bufcnt);
	if (n > aoe_maxout)	/* keep it reasonable */
		n = aoe_maxout;

	d = aoedev_by_aoeaddr(aoemajor, h->minor, 1);
	if (d == NULL) {
		pr_info("aoe: device allocation failure\n");

	spin_lock_irqsave(&d->lock, flags);

	t = gettgt(d, h->src);
		t = addtgt(d, h->src, n);

	n -= sizeof(struct aoe_hdr) + sizeof(struct aoe_atahdr);
	n = n ? n * 512 : DEFAULTBCNT;
	setifbcnt(t, skb->dev, n);

	/* don't change users' perspective */
	if (d->nopen == 0) {
		d->fw_ver = be16_to_cpu(ch->fwver);
		sl = aoecmd_ata_id(d);
	}
	spin_unlock_irqrestore(&d->lock, flags);

	__skb_queue_head_init(&queue);
	__skb_queue_tail(&queue, sl);
	aoenet_xmit(&queue);
aoecmd_wreset(struct aoetgt *t)
	t->ssthresh = t->nframes / 2;
	t->next_cwnd = t->nframes;

aoecmd_cleanslate(struct aoedev *d)
	struct aoetgt **t, **te;

	d->rttavg = RTTAVG_INIT;
	d->rttdev = RTTDEV_INIT;

	te = t + d->ntargets;
	for (; t < te && *t; t++)

aoe_failbuf(struct aoedev *d, struct buf *buf)
	clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
	if (buf->nframesout == 0)
		aoe_end_buf(d, buf);
aoe_flush_iocq(void)
	for (i = 0; i < ncpus; i++) {
		if (kts[i].active)
			aoe_flush_iocq_by_index(i);
	}

aoe_flush_iocq_by_index(int id)
	struct list_head *pos;
	struct sk_buff *skb;

	spin_lock_irqsave(&iocq[id].lock, flags);
	list_splice_init(&iocq[id].head, &flist);
	spin_unlock_irqrestore(&iocq[id].lock, flags);
	while (!list_empty(&flist)) {
		f = list_entry(pos, struct frame, head);
		spin_lock_irqsave(&d->lock, flags);
		if (f->buf) {
			f->buf->nframesout--;
			aoe_failbuf(d, f->buf);
		}
		spin_unlock_irqrestore(&d->lock, flags);
	/* get_zeroed_page returns page with ref count 1 */
	p = (void *) get_zeroed_page(GFP_KERNEL | __GFP_REPEAT);
	empty_page = virt_to_page(p);

	ncpus = num_online_cpus();

	iocq = kcalloc(ncpus, sizeof(struct iocq_ktio), GFP_KERNEL);
	kts = kcalloc(ncpus, sizeof(struct ktstate), GFP_KERNEL);
	ktiowq = kcalloc(ncpus, sizeof(wait_queue_head_t), GFP_KERNEL);

	mutex_init(&ktio_spawn_lock);

	for (i = 0; i < ncpus; i++) {
		INIT_LIST_HEAD(&iocq[i].head);
		spin_lock_init(&iocq[i].lock);
		init_waitqueue_head(&ktiowq[i]);
		snprintf(kts[i].name, sizeof(kts[i].name), "aoe_ktio%d", i);
		kts[i].waitq = &ktiowq[i];
		kts[i].lock = &iocq[i].lock;
	}

	if (aoe_ktstart(&kts[0])) {

	for (i = 0; i < ncpus; i++)
		if (kts[i].active)
			aoe_ktstop(&kts[i]);
	/* Free up the iocq and thread specific configuration
	 * allocated during startup.
	 */
	free_page((unsigned long) page_address(empty_page));