3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
8 * Copyright(c) 2015 Intel Corporation.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
21 * Copyright(c) 2015 Intel Corporation.
23 * Redistribution and use in source and binary forms, with or without
24 * modification, are permitted provided that the following conditions
27 * - Redistributions of source code must retain the above copyright
28 * notice, this list of conditions and the following disclaimer.
29 * - Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in
31 * the documentation and/or other materials provided with the
33 * - Neither the name of Intel Corporation nor the names of its
34 * contributors may be used to endorse or promote products derived
35 * from this software without specific prior written permission.
37 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
38 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
39 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
40 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
41 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
42 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
43 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
44 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
45 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
46 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
47 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
51 #include <linux/types.h>
52 #include <linux/device.h>
53 #include <linux/dmapool.h>
54 #include <linux/slab.h>
55 #include <linux/list.h>
56 #include <linux/highmem.h>
58 #include <linux/uio.h>
59 #include <linux/rbtree.h>
60 #include <linux/spinlock.h>
61 #include <linux/delay.h>
62 #include <linux/kthread.h>
63 #include <linux/mmu_context.h>
64 #include <linux/module.h>
65 #include <linux/vmalloc.h>
69 #include "user_sdma.h"
71 #include "verbs.h" /* for the headers */
72 #include "common.h" /* for struct hfi1_tid_info */
75 static uint hfi1_sdma_comp_ring_size = 128;
76 module_param_named(sdma_comp_size, hfi1_sdma_comp_ring_size, uint, S_IRUGO);
77 MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 128");
79 /* The maximum number of data I/O vectors per message/request */
80 #define MAX_VECTORS_PER_REQ 8
82 * Maximum number of packets to send from each message/request
83 * before moving to the next one.
85 #define MAX_PKTS_PER_QUEUE 16
87 #define num_pages(x) (1 + ((((x) - 1) & PAGE_MASK) >> PAGE_SHIFT))
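/*
 * Illustrative note (not part of the original driver): num_pages() counts
 * the pages needed for a page-aligned buffer of x bytes. With a 4 KiB
 * PAGE_SIZE, num_pages(1) == 1, num_pages(4096) == 1 and num_pages(4097) == 2,
 * since ((x - 1) & PAGE_MASK) >> PAGE_SHIFT is simply (x - 1) / PAGE_SIZE.
 */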
89 #define req_opcode(x) \
90 (((x) >> HFI1_SDMA_REQ_OPCODE_SHIFT) & HFI1_SDMA_REQ_OPCODE_MASK)
91 #define req_version(x) \
92 (((x) >> HFI1_SDMA_REQ_VERSION_SHIFT) & HFI1_SDMA_REQ_OPCODE_MASK)
93 #define req_iovcnt(x) \
94 (((x) >> HFI1_SDMA_REQ_IOVCNT_SHIFT) & HFI1_SDMA_REQ_IOVCNT_MASK)
96 /* Number of BTH.PSN bits used for sequence number in expected rcvs */
97 #define BTH_SEQ_MASK 0x7ffull
100 * Define fields in the KDETH header so we can update the header template.
103 #define KDETH_OFFSET_SHIFT 0
104 #define KDETH_OFFSET_MASK 0x7fff
105 #define KDETH_OM_SHIFT 15
106 #define KDETH_OM_MASK 0x1
107 #define KDETH_TID_SHIFT 16
108 #define KDETH_TID_MASK 0x3ff
109 #define KDETH_TIDCTRL_SHIFT 26
110 #define KDETH_TIDCTRL_MASK 0x3
111 #define KDETH_INTR_SHIFT 28
112 #define KDETH_INTR_MASK 0x1
113 #define KDETH_SH_SHIFT 29
114 #define KDETH_SH_MASK 0x1
115 #define KDETH_HCRC_UPPER_SHIFT 16
116 #define KDETH_HCRC_UPPER_MASK 0xff
117 #define KDETH_HCRC_LOWER_SHIFT 24
118 #define KDETH_HCRC_LOWER_MASK 0xff
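/*
 * Illustrative layout (derived from the shifts/masks above, not from the
 * original sources): the KDETH ver_tid_offset dword is carved up as
 *   bits  0..14  OFFSET  (15 bits, in OM units)
 *   bit   15     OM      (offset multiplier select)
 *   bits 16..25  TID     (10 bits)
 *   bits 26..27  TIDCTRL
 *   bit   28     INTR
 *   bit   29     SH
 * so, for example, the KDETH_GET() helper below with the TID field
 * extracts bits 16..25 of the little-endian dword.
 */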
120 #define PBC2LRH(x) ((((x) & 0xfff) << 2) - 4)
121 #define LRH2PBC(x) ((((x) >> 2) + 1) & 0xfff)
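/*
 * Illustrative worked example (not part of the original driver): the two
 * macros above are inverses of each other. For an LRH byte length of
 * 0x100, LRH2PBC(0x100) = (0x100 >> 2) + 1 = 0x41 PBC length DWs, and
 * PBC2LRH(0x41) = (0x41 << 2) - 4 = 0x100 bytes again.
 */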
123 #define KDETH_GET(val, field) \
124 (((le32_to_cpu((val))) >> KDETH_##field##_SHIFT) & KDETH_##field##_MASK)
125 #define KDETH_SET(dw, field, val) do { \
126 u32 dwval = le32_to_cpu(dw); \
127 dwval &= ~(KDETH_##field##_MASK << KDETH_##field##_SHIFT); \
128 dwval |= (((val) & KDETH_##field##_MASK) << \
129 KDETH_##field##_SHIFT); \
130 dw = cpu_to_le32(dwval); \
133 #define AHG_HEADER_SET(arr, idx, dw, bit, width, value) \
135 if ((idx) < ARRAY_SIZE((arr))) \
136 (arr)[(idx++)] = sdma_build_ahg_descriptor( \
137 (__force u16)(value), (dw), (bit), \
143 /* KDETH OM multipliers and switch over point */
144 #define KDETH_OM_SMALL 4
145 #define KDETH_OM_LARGE 64
146 #define KDETH_OM_MAX_SIZE (1 << ((KDETH_OM_LARGE / KDETH_OM_SMALL) + 1))
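/*
 * Illustrative note (an interpretation, not from the original sources):
 * KDETH.OFFSET is a 15-bit field expressed in units of the offset
 * multiplier (OM). With the small 4-byte multiplier it can address up to
 * 32768 * 4 = 128 KiB, which is exactly KDETH_OM_MAX_SIZE (1 << 17 ==
 * 131072); offsets at or beyond that point need the large 64-byte
 * multiplier.
 */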
148 /* Last packet in the request */
149 #define TXREQ_FLAGS_REQ_LAST_PKT BIT(0)
150 #define TXREQ_FLAGS_IOVEC_LAST_PKT BIT(0)
152 #define SDMA_REQ_IN_USE 0
153 #define SDMA_REQ_FOR_THREAD 1
154 #define SDMA_REQ_SEND_DONE 2
155 #define SDMA_REQ_HAVE_AHG 3
156 #define SDMA_REQ_HAS_ERROR 4
157 #define SDMA_REQ_DONE_ERROR 5
159 #define SDMA_PKT_Q_INACTIVE BIT(0)
160 #define SDMA_PKT_Q_ACTIVE BIT(1)
161 #define SDMA_PKT_Q_DEFERRED BIT(2)
164 * Maximum retry attempts to submit a TX request
165 * before putting the process to sleep.
167 #define MAX_DEFER_RETRY_COUNT 1
169 static unsigned initial_pkt_count = 8;
171 #define SDMA_IOWAIT_TIMEOUT 1000 /* in milliseconds */
173 struct user_sdma_iovec {
175 /* number of pages in this vector */
177 /* array of pinned pages for this vector */
179 /* offset into the virtual address space of the vector at
180 * which we last left off. */
184 struct user_sdma_request {
185 struct sdma_req_info info;
186 struct hfi1_user_sdma_pkt_q *pq;
187 struct hfi1_user_sdma_comp_q *cq;
188 /* This is the original header from user space */
189 struct hfi1_pkt_header hdr;
191 * Pointer to the SDMA engine for this request.
192 * Since different requests could be on different VLs,
193 * each request will need its own engine pointer.
195 struct sdma_engine *sde;
199 * KDETH.Offset (Eager) field
200 * We need to remember the initial value so the headers
201 * can be updated properly.
205 * KDETH.OFFSET (TID) field
206 * The offset can cover multiple packets, depending on the
207 * size of the TID entry.
212 * Remember this because the header template always sets it to 0.
217 * pointer to the user's task_struct. We are going to
218 * get a reference to it so we can process io vectors at a later time.
221 struct task_struct *user_proc;
223 * pointer to the user's mm_struct. We are going to
224 * get a reference to it so it doesn't get freed
225 * since we might not be in process context when we
226 * are processing the iovs.
227 * Using this mm_struct, we can get vma based on the
228 * iov's address (find_vma()).
230 struct mm_struct *user_mm;
232 * We copy the iovs for this request (based on
233 * info.iovcnt). These are only the data vectors
236 /* total length of the data in the request */
238 /* progress index moving along the iovs array */
240 struct user_sdma_iovec iovs[MAX_VECTORS_PER_REQ];
241 /* number of elements copied to the tids array */
243 /* TID array values copied from the tid_iov vector */
248 spinlock_t list_lock;
249 struct list_head txps;
254 * A single txreq could span up to 3 physical pages when the MTU
255 * is sufficiently large (> 4K). Each of the IOV pointers also
256 * needs its own set of flags so that each vector can be handled
257 * independently of the others.
259 struct user_sdma_txreq {
260 /* Packet header for the txreq */
261 struct hfi1_pkt_header hdr;
262 struct sdma_txreq txreq;
263 struct user_sdma_request *req;
265 struct user_sdma_iovec *vec;
274 #define SDMA_DBG(req, fmt, ...) \
275 hfi1_cdbg(SDMA, "[%u:%u:%u:%u] " fmt, (req)->pq->dd->unit, \
276 (req)->pq->ctxt, (req)->pq->subctxt, (req)->info.comp_idx, \
278 #define SDMA_Q_DBG(pq, fmt, ...) \
279 hfi1_cdbg(SDMA, "[%u:%u:%u] " fmt, (pq)->dd->unit, (pq)->ctxt, \
280 (pq)->subctxt, ##__VA_ARGS__)
282 static int user_sdma_send_pkts(struct user_sdma_request *, unsigned);
283 static int num_user_pages(const struct iovec *);
284 static void user_sdma_txreq_cb(struct sdma_txreq *, int, int);
285 static void user_sdma_free_request(struct user_sdma_request *);
286 static int pin_vector_pages(struct user_sdma_request *,
287 struct user_sdma_iovec *);
288 static void unpin_vector_pages(struct user_sdma_iovec *);
289 static int check_header_template(struct user_sdma_request *,
290 struct hfi1_pkt_header *, u32, u32);
291 static int set_txreq_header(struct user_sdma_request *,
292 struct user_sdma_txreq *, u32);
293 static int set_txreq_header_ahg(struct user_sdma_request *,
294 struct user_sdma_txreq *, u32);
295 static inline void set_comp_state(struct user_sdma_request *,
296 enum hfi1_sdma_comp_state, int);
297 static inline u32 set_pkt_bth_psn(__be32, u8, u32);
298 static inline u32 get_lrh_len(struct hfi1_pkt_header, u32 len);
300 static int defer_packet_queue(
301 struct sdma_engine *,
305 static void activate_packet_queue(struct iowait *, int);
307 static int defer_packet_queue(
308 struct sdma_engine *sde,
310 struct sdma_txreq *txreq,
313 struct hfi1_user_sdma_pkt_q *pq =
314 container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
315 struct hfi1_ibdev *dev = &pq->dd->verbs_dev;
316 struct user_sdma_txreq *tx =
317 container_of(txreq, struct user_sdma_txreq, txreq);
319 if (sdma_progress(sde, seq, txreq)) {
320 if (tx->busycount++ < MAX_DEFER_RETRY_COUNT)
324 * We are assuming that if the list is enqueued somewhere, it
325 * is on the dmawait list since that is the only place where
326 * it is supposed to be enqueued.
328 xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
329 write_seqlock(&dev->iowait_lock);
330 if (list_empty(&pq->busy.list))
331 list_add_tail(&pq->busy.list, &sde->dmawait);
332 write_sequnlock(&dev->iowait_lock);
338 static void activate_packet_queue(struct iowait *wait, int reason)
340 struct hfi1_user_sdma_pkt_q *pq =
341 container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
342 xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
343 wake_up(&wait->wait_dma);
346 static void sdma_kmem_cache_ctor(void *obj)
348 struct user_sdma_txreq *tx = (struct user_sdma_txreq *)obj;
350 memset(tx, 0, sizeof(*tx));
353 int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp)
355 struct hfi1_filedata *fd;
359 struct hfi1_devdata *dd;
360 struct hfi1_user_sdma_comp_q *cq;
361 struct hfi1_user_sdma_pkt_q *pq;
369 fd = fp->private_data;
371 if (!hfi1_sdma_comp_ring_size) {
378 pq = kzalloc(sizeof(*pq), GFP_KERNEL);
382 memsize = sizeof(*pq->reqs) * hfi1_sdma_comp_ring_size;
383 pq->reqs = kmalloc(memsize, GFP_KERNEL);
387 INIT_LIST_HEAD(&pq->list);
389 pq->ctxt = uctxt->ctxt;
390 pq->subctxt = fd->subctxt;
391 pq->n_max_reqs = hfi1_sdma_comp_ring_size;
392 pq->state = SDMA_PKT_Q_INACTIVE;
393 atomic_set(&pq->n_reqs, 0);
395 iowait_init(&pq->busy, 0, NULL, defer_packet_queue,
396 activate_packet_queue);
398 snprintf(buf, 64, "txreq-kmem-cache-%u-%u-%u", dd->unit, uctxt->ctxt,
400 pq->txreq_cache = kmem_cache_create(buf,
401 sizeof(struct user_sdma_txreq),
404 sdma_kmem_cache_ctor);
405 if (!pq->txreq_cache) {
406 dd_dev_err(dd, "[%u] Failed to allocate TxReq cache\n",
411 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
415 memsize = ALIGN(sizeof(*cq->comps) * hfi1_sdma_comp_ring_size,
417 cq->comps = vmalloc_user(memsize);
421 cq->nentries = hfi1_sdma_comp_ring_size;
424 spin_lock_irqsave(&uctxt->sdma_qlock, flags);
425 list_add(&pq->list, &uctxt->sdma_queues);
426 spin_unlock_irqrestore(&uctxt->sdma_qlock, flags);
432 kmem_cache_destroy(pq->txreq_cache);
444 int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd)
446 struct hfi1_ctxtdata *uctxt = fd->uctxt;
447 struct hfi1_user_sdma_pkt_q *pq;
450 hfi1_cdbg(SDMA, "[%u:%u:%u] Freeing user SDMA queues", uctxt->dd->unit,
451 uctxt->ctxt, fd->subctxt);
456 spin_lock_irqsave(&uctxt->sdma_qlock, flags);
457 if (!list_empty(&pq->list))
458 list_del_init(&pq->list);
459 spin_unlock_irqrestore(&uctxt->sdma_qlock, flags);
460 iowait_sdma_drain(&pq->busy);
462 for (i = 0, j = 0; i < atomic_read(&pq->n_reqs) &&
463 j < pq->n_max_reqs; j++) {
464 struct user_sdma_request *req = &pq->reqs[j];
466 if (test_bit(SDMA_REQ_IN_USE, &req->flags)) {
467 set_comp_state(req, ERROR, -ECOMM);
468 user_sdma_free_request(req);
474 kmem_cache_destroy(pq->txreq_cache);
480 vfree(fd->cq->comps);
487 int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
488 unsigned long dim, unsigned long *count)
490 int ret = 0, i = 0, sent;
491 struct hfi1_filedata *fd = fp->private_data;
492 struct hfi1_ctxtdata *uctxt = fd->uctxt;
493 struct hfi1_user_sdma_pkt_q *pq = fd->pq;
494 struct hfi1_user_sdma_comp_q *cq = fd->cq;
495 struct hfi1_devdata *dd = pq->dd;
496 unsigned long idx = 0;
497 u8 pcount = initial_pkt_count;
498 struct sdma_req_info info;
499 struct user_sdma_request *req;
502 if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) {
505 "[%u:%u:%u] First vector not big enough for header %lu/%lu",
506 dd->unit, uctxt->ctxt, fd->subctxt,
507 iovec[idx].iov_len, sizeof(info) + sizeof(req->hdr));
511 ret = copy_from_user(&info, iovec[idx].iov_base, sizeof(info));
513 hfi1_cdbg(SDMA, "[%u:%u:%u] Failed to copy info QW (%d)",
514 dd->unit, uctxt->ctxt, fd->subctxt, ret);
518 trace_hfi1_sdma_user_reqinfo(dd, uctxt->ctxt, fd->subctxt,
520 if (cq->comps[info.comp_idx].status == QUEUED) {
521 hfi1_cdbg(SDMA, "[%u:%u:%u] Entry %u is in QUEUED state",
522 dd->unit, uctxt->ctxt, fd->subctxt,
527 if (!info.fragsize) {
529 "[%u:%u:%u:%u] Request does not specify fragsize",
530 dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx);
535 * We've done all the safety checks that we can up to this point,
536 * "allocate" the request entry.
538 hfi1_cdbg(SDMA, "[%u:%u:%u] Using req/comp entry %u\n", dd->unit,
539 uctxt->ctxt, fd->subctxt, info.comp_idx);
540 req = pq->reqs + info.comp_idx;
541 memset(req, 0, sizeof(*req));
542 /* Mark the request as IN_USE before we start filling it in. */
543 set_bit(SDMA_REQ_IN_USE, &req->flags);
544 req->data_iovs = req_iovcnt(info.ctrl) - 1;
547 INIT_LIST_HEAD(&req->txps);
548 spin_lock_init(&req->list_lock);
549 memcpy(&req->info, &info, sizeof(info));
551 if (req_opcode(info.ctrl) == EXPECTED)
554 if (!info.npkts || req->data_iovs > MAX_VECTORS_PER_REQ) {
555 SDMA_DBG(req, "Too many vectors (%u/%u)", req->data_iovs,
556 MAX_VECTORS_PER_REQ);
560 /* Copy the header from the user buffer */
561 ret = copy_from_user(&req->hdr, iovec[idx].iov_base + sizeof(info),
564 SDMA_DBG(req, "Failed to copy header template (%d)", ret);
569 /* If static rate control is not enabled, sanitize the header. */
570 if (!HFI1_CAP_IS_USET(STATIC_RATE_CTRL))
573 /* Validate the opcode. Do not trust packets from user space blindly. */
574 opcode = (be32_to_cpu(req->hdr.bth[0]) >> 24) & 0xff;
575 if ((opcode & USER_OPCODE_CHECK_MASK) !=
576 USER_OPCODE_CHECK_VAL) {
577 SDMA_DBG(req, "Invalid opcode (%d)", opcode);
582 * Validate the VL. Do not trust packets from user space blindly.
583 * VL comes from PBC, SC comes from LRH, and the VL needs to
584 * match the SC lookup.
586 vl = (le16_to_cpu(req->hdr.pbc[0]) >> 12) & 0xF;
587 sc = (((be16_to_cpu(req->hdr.lrh[0]) >> 12) & 0xF) |
588 (((le16_to_cpu(req->hdr.pbc[1]) >> 14) & 0x1) << 4));
589 if (vl >= dd->pport->vls_operational ||
590 vl != sc_to_vlt(dd, sc)) {
591 SDMA_DBG(req, "Invalid SC(%u)/VL(%u)", sc, vl);
597 * Also check the BTH.lnh. If it says the next header is GRH then
598 * the RXE parsing will be off and will land in the middle of the KDETH
599 * or miss it entirely.
601 if ((be16_to_cpu(req->hdr.lrh[0]) & 0x3) == HFI1_LRH_GRH) {
602 SDMA_DBG(req, "User tried to pass in a GRH");
607 req->koffset = le32_to_cpu(req->hdr.kdeth.swdata[6]);
608 /* Calculate the initial TID offset based on the values of
609 * KDETH.OFFSET and KDETH.OM that are passed in. */
610 req->tidoffset = KDETH_GET(req->hdr.kdeth.ver_tid_offset, OFFSET) *
611 (KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ?
612 KDETH_OM_LARGE : KDETH_OM_SMALL);
613 SDMA_DBG(req, "Initial TID offset %u", req->tidoffset);
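/*
 * Illustrative example (hypothetical values, not from the original
 * driver): if user space passed KDETH.OFFSET = 16 with KDETH.OM = 1
 * (large, 64-byte units), the initial byte offset computed above is
 * 16 * 64 = 1024.
 */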
616 /* Save all the IO vector structures */
617 while (i < req->data_iovs) {
618 memcpy(&req->iovs[i].iov, iovec + idx++, sizeof(struct iovec));
619 req->iovs[i].offset = 0;
620 req->data_len += req->iovs[i++].iov.iov_len;
622 SDMA_DBG(req, "total data length %u", req->data_len);
624 if (pcount > req->info.npkts)
625 pcount = req->info.npkts;
628 * User space will provide the TID info only when the
629 * request type is EXPECTED. This is true even if there is
630 * only one packet in the request and the header is already
631 * set up. The reason for the singular TID case is that the
632 * driver needs to perform safety checks.
634 if (req_opcode(req->info.ctrl) == EXPECTED) {
635 u16 ntids = iovec[idx].iov_len / sizeof(*req->tids);
637 if (!ntids || ntids > MAX_TID_PAIR_ENTRIES) {
641 req->tids = kcalloc(ntids, sizeof(*req->tids), GFP_KERNEL);
647 * We have to copy all of the tids because they may vary
648 * in size and, therefore, the TID count might not be
649 * equal to the pkt count. However, there is no way to
650 * tell at this point.
652 ret = copy_from_user(req->tids, iovec[idx].iov_base,
653 ntids * sizeof(*req->tids));
655 SDMA_DBG(req, "Failed to copy %d TIDs (%d)",
664 /* Have to select the engine */
665 req->sde = sdma_select_engine_vl(dd,
666 (u32)(uctxt->ctxt + fd->subctxt),
668 if (!req->sde || !sdma_running(req->sde)) {
673 /* We don't need an AHG entry if the request contains only one packet */
674 if (req->info.npkts > 1 && HFI1_CAP_IS_USET(SDMA_AHG)) {
675 int ahg = sdma_ahg_alloc(req->sde);
677 if (likely(ahg >= 0)) {
678 req->ahg_idx = (u8)ahg;
679 set_bit(SDMA_REQ_HAVE_AHG, &req->flags);
683 set_comp_state(req, QUEUED, 0);
684 /* Send the first N packets in the request to buy us some time */
685 sent = user_sdma_send_pkts(req, pcount);
686 if (unlikely(sent < 0)) {
687 if (sent != -EBUSY) {
693 atomic_inc(&pq->n_reqs);
695 if (sent < req->info.npkts) {
696 /* Take the references to the user's task and mm_struct */
697 get_task_struct(current);
698 req->user_proc = current;
701 * This is a somewhat blocking send implementation.
702 * The driver will block the caller until all packets of the
703 * request have been submitted to the SDMA engine. However, it
704 * will not wait for send completions.
706 while (!test_bit(SDMA_REQ_SEND_DONE, &req->flags)) {
707 ret = user_sdma_send_pkts(req, pcount);
711 wait_event_interruptible_timeout(
713 (pq->state == SDMA_PKT_Q_ACTIVE),
715 SDMA_IOWAIT_TIMEOUT));
724 set_comp_state(req, ERROR, ret);
726 user_sdma_free_request(req);
731 static inline u32 compute_data_length(struct user_sdma_request *req,
732 struct user_sdma_txreq *tx)
735 * Determine the proper size of the packet data.
736 * The size of the data of the first packet is in the header
737 * template. However, it includes the header and ICRC, which need to be subtracted.
739 * The size of the remaining packets is the minimum of the frag
740 * size (MTU) or remaining data in the request.
745 len = ((be16_to_cpu(req->hdr.lrh[2]) << 2) -
746 (sizeof(tx->hdr) - 4));
747 } else if (req_opcode(req->info.ctrl) == EXPECTED) {
748 u32 tidlen = EXP_TID_GET(req->tids[req->tididx], LEN) *
750 /* Get the data length based on the remaining space in the TID pair. */
752 len = min(tidlen - req->tidoffset, (u32)req->info.fragsize);
753 /* If we've filled up the TID pair, move to the next one. */
754 if (unlikely(!len) && ++req->tididx < req->n_tids &&
755 req->tids[req->tididx]) {
756 tidlen = EXP_TID_GET(req->tids[req->tididx],
759 len = min_t(u32, tidlen, req->info.fragsize);
761 /* Since the TID pairs map entire pages, make sure that we
762 * are not going to try to send more data than we have remaining. */
764 len = min(len, req->data_len - req->sent);
766 len = min(req->data_len - req->sent, (u32)req->info.fragsize);
767 SDMA_DBG(req, "Data Length = %u", len);
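/*
 * Illustrative example for the EXPECTED branch above (hypothetical values,
 * not from the original sources): a TID entry spanning 2 pages gives
 * tidlen = 2 * 4096 = 8192; with tidoffset = 4096 and fragsize = 8192 the
 * packet carries min(8192 - 4096, 8192) = 4096 bytes, capped further by
 * the data remaining in the request.
 */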
771 static inline u32 get_lrh_len(struct hfi1_pkt_header hdr, u32 len)
773 /* (Size of complete header - size of PBC) + 4B ICRC + data length */
774 return ((sizeof(hdr) - sizeof(hdr.pbc)) + 4 + len);
777 static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
781 struct user_sdma_txreq *tx = NULL;
782 struct hfi1_user_sdma_pkt_q *pq = NULL;
783 struct user_sdma_iovec *iovec = NULL;
793 * Check if we might have sent the entire request already
795 if (unlikely(req->seqnum == req->info.npkts)) {
796 if (!list_empty(&req->txps))
801 if (!maxpkts || maxpkts > req->info.npkts - req->seqnum)
802 maxpkts = req->info.npkts - req->seqnum;
804 while (npkts < maxpkts) {
805 u32 datalen = 0, queued = 0, data_sent = 0;
809 * Check whether any of the completions have come back
810 * with errors. If so, we are not going to process any
811 * more packets from this request.
813 if (test_bit(SDMA_REQ_HAS_ERROR, &req->flags)) {
814 set_bit(SDMA_REQ_DONE_ERROR, &req->flags);
819 tx = kmem_cache_alloc(pq->txreq_cache, GFP_KERNEL);
828 memset(tx->iovecs, 0, sizeof(tx->iovecs));
830 if (req->seqnum == req->info.npkts - 1)
831 tx->flags |= TXREQ_FLAGS_REQ_LAST_PKT;
834 * Calculate the payload size - this is the minimum of the fragment
835 * (MTU) size and the remaining bytes in the request, but only
836 * if we have payload data.
839 iovec = &req->iovs[req->iov_idx];
840 if (ACCESS_ONCE(iovec->offset) == iovec->iov.iov_len) {
841 if (++req->iov_idx == req->data_iovs) {
845 iovec = &req->iovs[req->iov_idx];
846 WARN_ON(iovec->offset);
850 * This request might include only a header and no user
851 * data, so pin pages only if there is data and the
852 * pages have not been pinned already.
854 if (unlikely(!iovec->pages && iovec->iov.iov_len)) {
855 ret = pin_vector_pages(req, iovec);
860 tx->iovecs[++tx->idx].vec = iovec;
861 datalen = compute_data_length(req, tx);
864 "Request has data but pkt len is 0");
870 if (test_bit(SDMA_REQ_HAVE_AHG, &req->flags)) {
872 u16 pbclen = le16_to_cpu(req->hdr.pbc[0]);
873 u32 lrhlen = get_lrh_len(req->hdr, datalen);
875 * Copy the request header into the tx header
876 * because the HW needs a cacheline-aligned address.
878 * This copy can be optimized out if the hdr
879 * member of user_sdma_request were also cacheline aligned.
882 memcpy(&tx->hdr, &req->hdr, sizeof(tx->hdr));
883 if (PBC2LRH(pbclen) != lrhlen) {
884 pbclen = (pbclen & 0xf000) |
886 tx->hdr.pbc[0] = cpu_to_le16(pbclen);
888 ret = sdma_txinit_ahg(&tx->txreq,
889 SDMA_TXREQ_F_AHG_COPY,
890 sizeof(tx->hdr) + datalen,
891 req->ahg_idx, 0, NULL, 0,
895 ret = sdma_txadd_kvaddr(pq->dd, &tx->txreq,
903 changes = set_txreq_header_ahg(req, tx,
907 sdma_txinit_ahg(&tx->txreq,
908 SDMA_TXREQ_F_USE_AHG,
909 datalen, req->ahg_idx, changes,
910 req->ahg, sizeof(req->hdr),
914 ret = sdma_txinit(&tx->txreq, 0, sizeof(req->hdr) +
915 datalen, user_sdma_txreq_cb);
919 * Modify the header for this packet. This only needs
920 * to be done if we are not going to use AHG. Otherwise,
921 * the HW will do it based on the changes we gave it
922 * during sdma_txinit_ahg().
924 ret = set_txreq_header(req, tx, datalen);
930 * If the request contains any data vectors, add up to
931 * fragsize bytes to the descriptor.
933 while (queued < datalen &&
934 (req->sent + data_sent) < req->data_len) {
935 unsigned long base, offset;
936 unsigned pageidx, len;
938 base = (unsigned long)iovec->iov.iov_base;
939 offset = ((base + iovec->offset + iov_offset) &
941 pageidx = (((iovec->offset + iov_offset +
942 base) - (base & PAGE_MASK)) >> PAGE_SHIFT);
943 len = offset + req->info.fragsize > PAGE_SIZE ?
944 PAGE_SIZE - offset : req->info.fragsize;
945 len = min((datalen - queued), len);
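/*
 * Illustrative example (hypothetical addresses, not from the original
 * sources): for iov_base = 0x7f0000000ffc with iovec->offset + iov_offset
 * == 0, the offset within the page is 0xffc, pageidx is 0, and with a
 * 4096-byte fragsize this descriptor gets PAGE_SIZE - 0xffc = 4 bytes
 * before the loop advances to the next page of the vector.
 */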
946 ret = sdma_txadd_page(pq->dd, &tx->txreq,
947 iovec->pages[pageidx],
953 "SDMA txreq add page failed %d\n",
955 /* Mark all assigned vectors as complete so they
956 * are unpinned in the callback. */
957 for (i = tx->idx; i >= 0; i--) {
958 tx->iovecs[i].flags |=
959 TXREQ_FLAGS_IOVEC_LAST_PKT;
966 if (unlikely(queued < datalen &&
967 pageidx == iovec->npages &&
968 req->iov_idx < req->data_iovs - 1 &&
969 tx->idx < ARRAY_SIZE(tx->iovecs))) {
970 iovec->offset += iov_offset;
971 tx->iovecs[tx->idx].flags |=
972 TXREQ_FLAGS_IOVEC_LAST_PKT;
973 iovec = &req->iovs[++req->iov_idx];
975 ret = pin_vector_pages(req, iovec);
980 tx->iovecs[++tx->idx].vec = iovec;
984 * The txreq was submitted successfully so we can update the counters.
987 req->koffset += datalen;
988 if (req_opcode(req->info.ctrl) == EXPECTED)
989 req->tidoffset += datalen;
990 req->sent += data_sent;
992 tx->iovecs[tx->idx].vec->offset += iov_offset;
993 /* If we've reached the end of the io vector, mark it
994 * so the callback can unpin the pages and free it. */
995 if (tx->iovecs[tx->idx].vec->offset ==
996 tx->iovecs[tx->idx].vec->iov.iov_len)
997 tx->iovecs[tx->idx].flags |=
998 TXREQ_FLAGS_IOVEC_LAST_PKT;
1002 * It is important to increment this here as it is used to
1003 * generate the BTH.PSN and, therefore, can't be bulk-updated
1004 * outside of the loop.
1006 tx->seqnum = req->seqnum++;
1007 list_add_tail(&tx->txreq.list, &req->txps);
1011 ret = sdma_send_txlist(req->sde, &pq->busy, &req->txps);
1012 if (list_empty(&req->txps))
1013 if (req->seqnum == req->info.npkts) {
1014 set_bit(SDMA_REQ_SEND_DONE, &req->flags);
1016 * The txreq has already been submitted to the HW queue
1017 * so we can free the AHG entry now. Corruption will not
1018 * happen due to the sequential manner in which
1019 * descriptors are processed.
1021 if (test_bit(SDMA_REQ_HAVE_AHG, &req->flags))
1022 sdma_ahg_free(req->sde, req->ahg_idx);
1026 sdma_txclean(pq->dd, &tx->txreq);
1028 kmem_cache_free(pq->txreq_cache, tx);
1034 * How many pages in this iovec element?
1036 static inline int num_user_pages(const struct iovec *iov)
1038 const unsigned long addr = (unsigned long) iov->iov_base;
1039 const unsigned long len = iov->iov_len;
1040 const unsigned long spage = addr & PAGE_MASK;
1041 const unsigned long epage = (addr + len - 1) & PAGE_MASK;
1043 return 1 + ((epage - spage) >> PAGE_SHIFT);
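/*
 * Illustrative example (hypothetical values, not from the original
 * sources): an 8-byte iovec starting 4 bytes before a page boundary, e.g.
 * iov_base = 0x1ffc and iov_len = 8, straddles two pages: spage = 0x1000,
 * epage = 0x2000, so the function returns 1 + (0x1000 >> 12) = 2.
 */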
1046 static int pin_vector_pages(struct user_sdma_request *req,
1047 struct user_sdma_iovec *iovec) {
1051 iovec->npages = num_user_pages(&iovec->iov);
1052 iovec->pages = kcalloc(iovec->npages, sizeof(*iovec->pages),
1054 if (!iovec->pages) {
1055 SDMA_DBG(req, "Failed page array alloc");
1059 /* If called by the kernel thread, use the user's mm */
1060 if (current->flags & PF_KTHREAD)
1061 use_mm(req->user_proc->mm);
1062 pinned = get_user_pages_fast(
1063 (unsigned long)iovec->iov.iov_base,
1064 iovec->npages, 0, iovec->pages);
1065 /* If called by the kernel thread, unuse the user's mm */
1066 if (current->flags & PF_KTHREAD)
1067 unuse_mm(req->user_proc->mm);
1068 if (pinned != iovec->npages) {
1069 SDMA_DBG(req, "Failed to pin pages (%u/%u)", pinned,
1076 unpin_vector_pages(iovec);
1081 static void unpin_vector_pages(struct user_sdma_iovec *iovec)
1085 if (ACCESS_ONCE(iovec->offset) != iovec->iov.iov_len) {
1087 "the complete vector has not been sent yet %llu %zu",
1088 iovec->offset, iovec->iov.iov_len);
1091 for (i = 0; i < iovec->npages; i++)
1092 if (iovec->pages[i])
1093 put_page(iovec->pages[i]);
1094 kfree(iovec->pages);
1095 iovec->pages = NULL;
1100 static int check_header_template(struct user_sdma_request *req,
1101 struct hfi1_pkt_header *hdr, u32 lrhlen,
1105 * Perform safety checks for any type of packet:
1106 * - transfer size is a multiple of 64 bytes
1107 * - packet length is a multiple of 4 bytes
1108 * - entire request length is a multiple of 4 bytes
1109 * - packet length is not larger than the MTU size
1111 * These checks are only done for the first packet of the
1112 * transfer since the header is "given" to us by user space.
1113 * For the remainder of the packets we compute the values.
1115 if (req->info.fragsize % PIO_BLOCK_SIZE ||
1116 lrhlen & 0x3 || req->data_len & 0x3 ||
1117 lrhlen > get_lrh_len(*hdr, req->info.fragsize))
1120 if (req_opcode(req->info.ctrl) == EXPECTED) {
1122 * The header is checked only on the first packet. Furthermore,
1123 * we ensure that at least one TID entry is copied when the
1124 * request is submitted. Therefore, we don't have to verify that
1125 * tididx points to something sane.
1127 u32 tidval = req->tids[req->tididx],
1128 tidlen = EXP_TID_GET(tidval, LEN) * PAGE_SIZE,
1129 tididx = EXP_TID_GET(tidval, IDX),
1130 tidctrl = EXP_TID_GET(tidval, CTRL),
1132 __le32 kval = hdr->kdeth.ver_tid_offset;
1134 tidoff = KDETH_GET(kval, OFFSET) *
1135 (KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ?
1136 KDETH_OM_LARGE : KDETH_OM_SMALL);
1138 * Expected receive packets have the following
1139 * additional checks:
1140 * - offset is not larger than the TID size
1141 * - TIDCtrl values match between header and TID array
1142 * - TID indexes match between header and TID array
1144 if ((tidoff + datalen > tidlen) ||
1145 KDETH_GET(kval, TIDCTRL) != tidctrl ||
1146 KDETH_GET(kval, TID) != tididx)
1153 * Correctly set the BTH.PSN field based on the type of
1154 * transfer - eager packets can just increment the PSN but
1155 * expected packets encode generation and sequence in the
1156 * BTH.PSN field so just incrementing will result in errors.
1158 static inline u32 set_pkt_bth_psn(__be32 bthpsn, u8 expct, u32 frags)
1160 u32 val = be32_to_cpu(bthpsn),
1161 mask = (HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffffull :
1165 psn = (psn & ~BTH_SEQ_MASK) | ((psn + frags) & BTH_SEQ_MASK);
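/*
 * Illustrative worked example (hypothetical values, not from the original
 * sources): for an expected transfer with psn = 0x122ffe and frags = 3,
 * the generation bits above BTH_SEQ_MASK are preserved and only the low
 * 11-bit sequence advances:
 * (0x122ffe & ~0x7ff) | ((0x7fe + 3) & 0x7ff) = 0x122801.
 * Eager transfers simply advance the PSN by frags.
 */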
1171 static int set_txreq_header(struct user_sdma_request *req,
1172 struct user_sdma_txreq *tx, u32 datalen)
1174 struct hfi1_user_sdma_pkt_q *pq = req->pq;
1175 struct hfi1_pkt_header *hdr = &tx->hdr;
1178 u32 tidval = 0, lrhlen = get_lrh_len(*hdr, datalen);
1180 /* Copy the header template from the request into the txreq before modification */
1181 memcpy(hdr, &req->hdr, sizeof(*hdr));
1184 * Check if the PBC and LRH lengths are mismatched. If so,
1185 * adjust both in the header.
1187 pbclen = le16_to_cpu(hdr->pbc[0]);
1188 if (PBC2LRH(pbclen) != lrhlen) {
1189 pbclen = (pbclen & 0xf000) | LRH2PBC(lrhlen);
1190 hdr->pbc[0] = cpu_to_le16(pbclen);
1191 hdr->lrh[2] = cpu_to_be16(lrhlen >> 2);
1194 * This is the first packet in the sequence that has
1195 * a "static" size that can be used for the rest of
1196 * the packets (besides the last one).
1198 if (unlikely(req->seqnum == 2)) {
1200 * From this point on the lengths in both the
1201 * PBC and LRH are the same until the last packet.
1203 * Adjust the template so we don't have to update every packet.
1206 req->hdr.pbc[0] = hdr->pbc[0];
1207 req->hdr.lrh[2] = hdr->lrh[2];
1211 * We only have to modify the header if this is not the
1212 * first packet in the request. Otherwise, we use the
1213 * header given to us.
1215 if (unlikely(!req->seqnum)) {
1216 ret = check_header_template(req, hdr, lrhlen, datalen);
1223 hdr->bth[2] = cpu_to_be32(
1224 set_pkt_bth_psn(hdr->bth[2],
1225 (req_opcode(req->info.ctrl) == EXPECTED),
1228 /* Set ACK request on last packet */
1229 if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT))
1230 hdr->bth[2] |= cpu_to_be32(1UL << 31);
1232 /* Set the new offset */
1233 hdr->kdeth.swdata[6] = cpu_to_le32(req->koffset);
1234 /* Expected packets have to fill in the new TID information */
1235 if (req_opcode(req->info.ctrl) == EXPECTED) {
1236 tidval = req->tids[req->tididx];
1238 * If the offset puts us at the end of the current TID,
1239 * advance everything.
1241 if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) *
1244 /* Since we don't copy all the TIDs all at once,
1245 * we have to check again. */
1246 if (++req->tididx > req->n_tids - 1 ||
1247 !req->tids[req->tididx]) {
1250 tidval = req->tids[req->tididx];
1252 req->omfactor = EXP_TID_GET(tidval, LEN) * PAGE_SIZE >=
1253 KDETH_OM_MAX_SIZE ? KDETH_OM_LARGE : KDETH_OM_SMALL;
1254 /* Set KDETH.TIDCtrl based on value for this TID. */
1255 KDETH_SET(hdr->kdeth.ver_tid_offset, TIDCTRL,
1256 EXP_TID_GET(tidval, CTRL));
1257 /* Set KDETH.TID based on value for this TID */
1258 KDETH_SET(hdr->kdeth.ver_tid_offset, TID,
1259 EXP_TID_GET(tidval, IDX));
1260 /* Clear KDETH.SH only on the last packet */
1261 if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT))
1262 KDETH_SET(hdr->kdeth.ver_tid_offset, SH, 0);
1264 * Set the KDETH.OFFSET and KDETH.OM based on the size of the TID entry.
1267 SDMA_DBG(req, "TID offset %ubytes %uunits om%u",
1268 req->tidoffset, req->tidoffset / req->omfactor,
1269 !!(req->omfactor - KDETH_OM_SMALL));
1270 KDETH_SET(hdr->kdeth.ver_tid_offset, OFFSET,
1271 req->tidoffset / req->omfactor);
1272 KDETH_SET(hdr->kdeth.ver_tid_offset, OM,
1273 !!(req->omfactor - KDETH_OM_SMALL));
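/*
 * Illustrative example (hypothetical values, not from the original
 * sources): a TID entry covering 64 pages (256 KiB >= KDETH_OM_MAX_SIZE)
 * selects omfactor = KDETH_OM_LARGE, so a tidoffset of 8192 bytes is
 * encoded above as OFFSET = 8192 / 64 = 128 with OM = 1.
 */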
1276 trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt,
1277 req->info.comp_idx, hdr, tidval);
1278 return sdma_txadd_kvaddr(pq->dd, &tx->txreq, hdr, sizeof(*hdr));
1281 static int set_txreq_header_ahg(struct user_sdma_request *req,
1282 struct user_sdma_txreq *tx, u32 len)
1285 struct hfi1_user_sdma_pkt_q *pq = req->pq;
1286 struct hfi1_pkt_header *hdr = &req->hdr;
1287 u16 pbclen = le16_to_cpu(hdr->pbc[0]);
1288 u32 val32, tidval = 0, lrhlen = get_lrh_len(*hdr, len);
1290 if (PBC2LRH(pbclen) != lrhlen) {
1291 /* PBC.PbcLengthDWs */
1292 AHG_HEADER_SET(req->ahg, diff, 0, 0, 12,
1293 cpu_to_le16(LRH2PBC(lrhlen)));
1294 /* LRH.PktLen (we need the full 16 bits due to byte swap) */
1295 AHG_HEADER_SET(req->ahg, diff, 3, 0, 16,
1296 cpu_to_be16(lrhlen >> 2));
1300 * Do the common updates
1302 /* BTH.PSN and BTH.A */
1303 val32 = (be32_to_cpu(hdr->bth[2]) + req->seqnum) &
1304 (HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffff : 0xffffff);
1305 if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT))
1307 AHG_HEADER_SET(req->ahg, diff, 6, 0, 16, cpu_to_be16(val32 >> 16));
1308 AHG_HEADER_SET(req->ahg, diff, 6, 16, 16, cpu_to_be16(val32 & 0xffff));
1310 AHG_HEADER_SET(req->ahg, diff, 15, 0, 16,
1311 cpu_to_le16(req->koffset & 0xffff));
1312 AHG_HEADER_SET(req->ahg, diff, 15, 16, 16,
1313 cpu_to_le16(req->koffset >> 16));
1314 if (req_opcode(req->info.ctrl) == EXPECTED) {
1317 tidval = req->tids[req->tididx];
1320 * If the offset puts us at the end of the current TID,
1321 * advance everything.
1323 if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) *
1326 /* Since we don't copy all the TIDs all at once,
1327 * we have to check again. */
1328 if (++req->tididx > req->n_tids - 1 ||
1329 !req->tids[req->tididx]) {
1332 tidval = req->tids[req->tididx];
1334 req->omfactor = ((EXP_TID_GET(tidval, LEN) *
1336 KDETH_OM_MAX_SIZE) ? KDETH_OM_LARGE :
1338 /* KDETH.OM and KDETH.OFFSET (TID) */
1339 AHG_HEADER_SET(req->ahg, diff, 7, 0, 16,
1340 ((!!(req->omfactor - KDETH_OM_SMALL)) << 15 |
1341 ((req->tidoffset / req->omfactor) & 0x7fff)));
1342 /* KDETH.TIDCtrl, KDETH.TID */
1343 val = cpu_to_le16(((EXP_TID_GET(tidval, CTRL) & 0x3) << 10) |
1344 (EXP_TID_GET(tidval, IDX) & 0x3ff));
1345 /* Clear KDETH.SH on last packet */
1346 if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT)) {
1347 val |= cpu_to_le16(KDETH_GET(hdr->kdeth.ver_tid_offset,
1349 val &= cpu_to_le16(~(1U << 13));
1350 AHG_HEADER_SET(req->ahg, diff, 7, 16, 14, val);
1352 AHG_HEADER_SET(req->ahg, diff, 7, 16, 12, val);
1355 trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt,
1356 req->info.comp_idx, req->sde->this_idx,
1357 req->ahg_idx, req->ahg, diff, tidval);
1361 static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status,
1364 struct user_sdma_txreq *tx =
1365 container_of(txreq, struct user_sdma_txreq, txreq);
1366 struct user_sdma_request *req = tx->req;
1367 struct hfi1_user_sdma_pkt_q *pq = req ? req->pq : NULL;
1370 if (unlikely(!req || !pq))
1373 /* If we have any io vectors associated with this txreq,
1374 * check whether they need to be 'freed'. */
1375 if (tx->idx != -1) {
1378 for (i = tx->idx; i >= 0; i--) {
1379 if (tx->iovecs[i].flags & TXREQ_FLAGS_IOVEC_LAST_PKT)
1380 unpin_vector_pages(tx->iovecs[i].vec);
1384 tx_seqnum = tx->seqnum;
1385 kmem_cache_free(pq->txreq_cache, tx);
1387 if (status != SDMA_TXREQ_S_OK) {
1388 dd_dev_err(pq->dd, "SDMA completion with error %d", status);
1389 set_comp_state(req, ERROR, status);
1390 set_bit(SDMA_REQ_HAS_ERROR, &req->flags);
1391 /* Do not free the request until the sender loop has ack'ed
1392 * the error and we've seen all txreqs. */
1393 if (tx_seqnum == ACCESS_ONCE(req->seqnum) &&
1394 test_bit(SDMA_REQ_DONE_ERROR, &req->flags)) {
1395 atomic_dec(&pq->n_reqs);
1396 user_sdma_free_request(req);
1399 if (tx_seqnum == req->info.npkts - 1) {
1400 /* We've sent and completed all packets in this
1401 * request. Signal completion to the user. */
1402 atomic_dec(&pq->n_reqs);
1403 set_comp_state(req, COMPLETE, 0);
1404 user_sdma_free_request(req);
1407 if (!atomic_read(&pq->n_reqs))
1408 xchg(&pq->state, SDMA_PKT_Q_INACTIVE);
1411 static void user_sdma_free_request(struct user_sdma_request *req)
1413 if (!list_empty(&req->txps)) {
1414 struct sdma_txreq *t, *p;
1416 list_for_each_entry_safe(t, p, &req->txps, list) {
1417 struct user_sdma_txreq *tx =
1418 container_of(t, struct user_sdma_txreq, txreq);
1419 list_del_init(&t->list);
1420 sdma_txclean(req->pq->dd, t);
1421 kmem_cache_free(req->pq->txreq_cache, tx);
1424 if (req->data_iovs) {
1427 for (i = 0; i < req->data_iovs; i++)
1428 if (req->iovs[i].npages && req->iovs[i].pages)
1429 unpin_vector_pages(&req->iovs[i]);
1432 put_task_struct(req->user_proc);
1434 clear_bit(SDMA_REQ_IN_USE, &req->flags);
1437 static inline void set_comp_state(struct user_sdma_request *req,
1438 enum hfi1_sdma_comp_state state,
1441 SDMA_DBG(req, "Setting completion status %u %d", state, ret);
1442 req->cq->comps[req->info.comp_idx].status = state;
1444 req->cq->comps[req->info.comp_idx].errcode = -ret;
1445 trace_hfi1_sdma_user_completion(req->pq->dd, req->pq->ctxt,
1446 req->pq->subctxt, req->info.comp_idx,