/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <asm/pgtable.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/cred.h>
#include <linux/uio.h>

#include "hfi.h"
#include "pio.h"
#include "device.h"
#include "common.h"
#include "trace.h"
#include "user_sdma.h"
#include "eprom.h"
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

#define SEND_CTXT_HALT_TIMEOUT 1000 /* msecs */
/*
 * File operation functions
 */
static int hfi1_file_open(struct inode *, struct file *);
static int hfi1_file_close(struct inode *, struct file *);
static ssize_t hfi1_file_write(struct file *, const char __user *,
                               size_t, loff_t *);
static ssize_t hfi1_write_iter(struct kiocb *, struct iov_iter *);
static unsigned int hfi1_poll(struct file *, struct poll_table_struct *);
static int hfi1_file_mmap(struct file *, struct vm_area_struct *);

static u64 kvirt_to_phys(void *);
static int assign_ctxt(struct file *, struct hfi1_user_info *);
static int init_subctxts(struct hfi1_ctxtdata *, const struct hfi1_user_info *);
static int user_init(struct file *);
static int get_ctxt_info(struct file *, void __user *, __u32);
static int get_base_info(struct file *, void __user *, __u32);
static int setup_ctxt(struct file *);
static int setup_subctxt(struct hfi1_ctxtdata *);
static int get_user_context(struct file *, struct hfi1_user_info *,
                            int, unsigned);
static int find_shared_ctxt(struct file *, const struct hfi1_user_info *);
static int allocate_ctxt(struct file *, struct hfi1_devdata *,
                         struct hfi1_user_info *);
static unsigned int poll_urgent(struct file *, struct poll_table_struct *);
static unsigned int poll_next(struct file *, struct poll_table_struct *);
static int user_event_ack(struct hfi1_ctxtdata *, int, unsigned long);
static int set_ctxt_pkey(struct hfi1_ctxtdata *, unsigned, u16);
static int manage_rcvq(struct hfi1_ctxtdata *, unsigned, int);
static int vma_fault(struct vm_area_struct *, struct vm_fault *);
static int exp_tid_setup(struct file *, struct hfi1_tid_info *);
static int exp_tid_free(struct file *, struct hfi1_tid_info *);
static void unlock_exp_tids(struct hfi1_ctxtdata *);
static const struct file_operations hfi1_file_ops = {
        .owner = THIS_MODULE,
        .write = hfi1_file_write,
        .write_iter = hfi1_write_iter,
        .open = hfi1_file_open,
        .release = hfi1_file_close,
        .poll = hfi1_poll,
        .mmap = hfi1_file_mmap,
        .llseek = noop_llseek,
};
static struct vm_operations_struct vm_ops = {
        .fault = vma_fault,
};
/*
 * Types of memories mapped into user processes' space
 */
enum mmap_types {
        PIO_BUFS = 1,
        PIO_BUFS_SOP,
        PIO_CRED,
        RCV_HDRQ,
        RCV_EGRBUF,
        UREGS,
        EVENTS,
        STATUS,
        RTAIL,
        SUBCTXT_UREGS,
        SUBCTXT_RCV_HDRQ,
        SUBCTXT_EGRBUF,
        SDMA_COMP
};
/*
 * Masks and offsets defining the mmap tokens
 */
#define HFI1_MMAP_OFFSET_MASK   0xfffULL
#define HFI1_MMAP_OFFSET_SHIFT  0
#define HFI1_MMAP_SUBCTXT_MASK  0xfULL
#define HFI1_MMAP_SUBCTXT_SHIFT 12
#define HFI1_MMAP_CTXT_MASK     0xffULL
#define HFI1_MMAP_CTXT_SHIFT    16
#define HFI1_MMAP_TYPE_MASK     0xfULL
#define HFI1_MMAP_TYPE_SHIFT    24
#define HFI1_MMAP_MAGIC_MASK    0xffffffffULL
#define HFI1_MMAP_MAGIC_SHIFT   32

#define HFI1_MMAP_MAGIC         0xdabbad00
#define HFI1_MMAP_TOKEN_SET(field, val) \
        (((val) & HFI1_MMAP_##field##_MASK) << HFI1_MMAP_##field##_SHIFT)
#define HFI1_MMAP_TOKEN_GET(field, token) \
        (((token) >> HFI1_MMAP_##field##_SHIFT) & HFI1_MMAP_##field##_MASK)
#define HFI1_MMAP_TOKEN(type, ctxt, subctxt, addr)     \
        (HFI1_MMAP_TOKEN_SET(MAGIC, HFI1_MMAP_MAGIC) | \
        HFI1_MMAP_TOKEN_SET(TYPE, type) |              \
        HFI1_MMAP_TOKEN_SET(CTXT, ctxt) |              \
        HFI1_MMAP_TOKEN_SET(SUBCTXT, subctxt) |        \
        HFI1_MMAP_TOKEN_SET(OFFSET, ((unsigned long)addr & ~PAGE_MASK)))
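/*
 * Worked example (illustrative values, not from the driver): for type = 4,
 * ctxt = 3, subctxt = 1 and a buffer offset of 0x100, the token packs as
 *
 *   (0xdabbad00ULL << 32) | (4 << 24) | (3 << 16) | (1 << 12) | 0x100
 *
 * and HFI1_MMAP_TOKEN_GET(CTXT, token) recovers 3 by shifting right by
 * HFI1_MMAP_CTXT_SHIFT (16) and masking with HFI1_MMAP_CTXT_MASK (0xff).
 */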
#define EXP_TID_SET(field, value)                       \
        (((value) & EXP_TID_TID##field##_MASK) <<       \
         EXP_TID_TID##field##_SHIFT)
#define EXP_TID_CLEAR(tid, field) {                     \
                (tid) &= ~(EXP_TID_TID##field##_MASK << \
                           EXP_TID_TID##field##_SHIFT); \
        }
#define EXP_TID_RESET(tid, field, value) do {           \
                EXP_TID_CLEAR(tid, field);              \
                (tid) |= EXP_TID_SET(field, value);     \
        } while (0)
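/*
 * Usage sketch (relying on the EXP_TID_TID{IDX,CTRL,LEN}_{MASK,SHIFT}
 * definitions from the chip header): EXP_TID_RESET(entry, LEN, 2) first
 * clears the LEN field of 'entry' via EXP_TID_CLEAR() and then ORs in
 * (2 & EXP_TID_TIDLEN_MASK) << EXP_TID_TIDLEN_SHIFT, leaving all other
 * fields untouched.
 */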
#define dbg(fmt, ...)                           \
        pr_info(fmt, ##__VA_ARGS__)
static inline int is_valid_mmap(u64 token)
{
        return (HFI1_MMAP_TOKEN_GET(MAGIC, token) == HFI1_MMAP_MAGIC);
}
static int hfi1_file_open(struct inode *inode, struct file *fp)
{
        /* The real work is performed later in assign_ctxt() */
        fp->private_data = kzalloc(sizeof(struct hfi1_filedata), GFP_KERNEL);
        if (fp->private_data) /* no cpu affinity by default */
                ((struct hfi1_filedata *)fp->private_data)->rec_cpu_num = -1;
        return fp->private_data ? 0 : -ENOMEM;
}
static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
                               size_t count, loff_t *offset)
{
        const struct hfi1_cmd __user *ucmd;
        struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
        struct hfi1_cmd cmd;
        struct hfi1_user_info uinfo;
        struct hfi1_tid_info tinfo;
        ssize_t consumed = 0, copy = 0, ret = 0;
        void *dest = NULL;
        __u64 user_val = 0;
        int uctxt_required = 1;
        int must_be_root = 0;

        if (count < sizeof(cmd)) {
                ret = -EINVAL;
                goto bail;
        }

        ucmd = (const struct hfi1_cmd __user *)data;
        if (copy_from_user(&cmd, ucmd, sizeof(cmd))) {
                ret = -EFAULT;
                goto bail;
        }

        consumed = sizeof(cmd);

        switch (cmd.type) {
        case HFI1_CMD_ASSIGN_CTXT:
                uctxt_required = 0;     /* assigned user context not required */
                copy = sizeof(uinfo);
                dest = &uinfo;
                break;
        case HFI1_CMD_SDMA_STATUS_UPD:
        case HFI1_CMD_CREDIT_UPD:
                copy = 0;
                break;
        case HFI1_CMD_TID_UPDATE:
        case HFI1_CMD_TID_FREE:
                copy = sizeof(tinfo);
                dest = &tinfo;
                break;
        case HFI1_CMD_USER_INFO:
        case HFI1_CMD_RECV_CTRL:
        case HFI1_CMD_POLL_TYPE:
        case HFI1_CMD_ACK_EVENT:
        case HFI1_CMD_CTXT_INFO:
        case HFI1_CMD_SET_PKEY:
        case HFI1_CMD_CTXT_RESET:
                copy = 0;
                user_val = cmd.addr;
                break;
        case HFI1_CMD_EP_INFO:
        case HFI1_CMD_EP_ERASE_CHIP:
        case HFI1_CMD_EP_ERASE_P0:
        case HFI1_CMD_EP_ERASE_P1:
        case HFI1_CMD_EP_READ_P0:
        case HFI1_CMD_EP_READ_P1:
        case HFI1_CMD_EP_WRITE_P0:
        case HFI1_CMD_EP_WRITE_P1:
                uctxt_required = 0;     /* assigned user context not required */
                must_be_root = 1;       /* validate user */
                copy = 0;
                break;
        default:
                ret = -EINVAL;
                goto bail;
        }

        /* If the command comes with user data, copy it. */
        if (copy) {
                if (copy_from_user(dest, (void __user *)cmd.addr, copy)) {
                        ret = -EFAULT;
                        goto bail;
                }
                consumed += copy;
        }

        /*
         * Make sure there is a uctxt when needed.
         */
        if (uctxt_required && !uctxt) {
                ret = -EINVAL;
                goto bail;
        }

        /* only root can do these operations */
        if (must_be_root && !capable(CAP_SYS_ADMIN)) {
                ret = -EPERM;
                goto bail;
        }

        switch (cmd.type) {
        case HFI1_CMD_ASSIGN_CTXT:
                ret = assign_ctxt(fp, &uinfo);
                if (ret < 0)
                        goto bail;
                ret = setup_ctxt(fp);
                if (ret)
                        goto bail;
                ret = user_init(fp);
                break;
        case HFI1_CMD_CTXT_INFO:
                ret = get_ctxt_info(fp, (void __user *)(unsigned long)
                                    user_val, cmd.len);
                break;
        case HFI1_CMD_USER_INFO:
                ret = get_base_info(fp, (void __user *)(unsigned long)
                                    user_val, cmd.len);
                break;
        case HFI1_CMD_SDMA_STATUS_UPD:
                break;
        case HFI1_CMD_CREDIT_UPD:
                if (uctxt && uctxt->sc)
                        sc_return_credits(uctxt->sc);
                break;
        case HFI1_CMD_TID_UPDATE:
                ret = exp_tid_setup(fp, &tinfo);
                if (!ret) {
                        unsigned long addr;
                        /*
                         * Copy the number of tidlist entries we used
                         * and the length of the buffer we registered.
                         * These fields are adjacent in the structure so
                         * we can copy them at the same time.
                         */
                        addr = (unsigned long)cmd.addr +
                                offsetof(struct hfi1_tid_info, tidcnt);
                        if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
                                         sizeof(tinfo.tidcnt) +
                                         sizeof(tinfo.length)))
                                ret = -EFAULT;
                }
                break;
        case HFI1_CMD_TID_FREE:
                ret = exp_tid_free(fp, &tinfo);
                break;
        case HFI1_CMD_RECV_CTRL:
                ret = manage_rcvq(uctxt, subctxt_fp(fp), (int)user_val);
                break;
        case HFI1_CMD_POLL_TYPE:
                uctxt->poll_type = (typeof(uctxt->poll_type))user_val;
                break;
        case HFI1_CMD_ACK_EVENT:
                ret = user_event_ack(uctxt, subctxt_fp(fp), user_val);
                break;
        case HFI1_CMD_SET_PKEY:
                if (HFI1_CAP_IS_USET(PKEY_CHECK))
                        ret = set_ctxt_pkey(uctxt, subctxt_fp(fp), user_val);
                else
                        ret = -EPERM;
                break;
        case HFI1_CMD_CTXT_RESET: {
                struct send_context *sc;
                struct hfi1_devdata *dd;

                if (!uctxt || !uctxt->dd || !uctxt->sc) {
                        ret = -EINVAL;
                        break;
                }
                /*
                 * There is no protection here. User level has to
                 * guarantee that no one will be writing to the send
                 * context while it is being re-initialized.
                 * If user level breaks that guarantee, it will break
                 * its own context and no one else's.
                 */
                dd = uctxt->dd;
                sc = uctxt->sc;
                /*
                 * Wait until the interrupt handler has marked the
                 * context as halted or frozen. Report error if we time
                 * out.
                 */
                wait_event_interruptible_timeout(
                        sc->halt_wait, (sc->flags & SCF_HALTED),
                        msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
                if (!(sc->flags & SCF_HALTED)) {
                        ret = -ENOLCK;
                        break;
                }
                /*
                 * If the send context was halted due to a Freeze,
                 * wait until the device has been "unfrozen" before
                 * resetting the context.
                 */
                if (sc->flags & SCF_FROZEN) {
                        wait_event_interruptible_timeout(
                                dd->event_queue,
                                !(ACCESS_ONCE(dd->flags) & HFI1_FROZEN),
                                msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
                        if (dd->flags & HFI1_FROZEN) {
                                ret = -ENOLCK;
                                break;
                        }
                        if (dd->flags & HFI1_FORCED_FREEZE) {
                                /*
                                 * Don't allow context reset if we are into
                                 * forced freeze
                                 */
                                ret = -ENODEV;
                                break;
                        }

                        hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB,
                                     uctxt->ctxt);
                }
                ret = sc_restart(sc);
                if (!ret)
                        sc_return_credits(sc);
                break;
        }
        case HFI1_CMD_EP_INFO:
        case HFI1_CMD_EP_ERASE_CHIP:
        case HFI1_CMD_EP_ERASE_P0:
        case HFI1_CMD_EP_ERASE_P1:
        case HFI1_CMD_EP_READ_P0:
        case HFI1_CMD_EP_READ_P1:
        case HFI1_CMD_EP_WRITE_P0:
        case HFI1_CMD_EP_WRITE_P1:
                ret = handle_eprom_command(&cmd);
                break;
        }

        if (ret >= 0)
                ret = consumed;
bail:
        return ret;
}
static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
{
        struct hfi1_user_sdma_pkt_q *pq;
        struct hfi1_user_sdma_comp_q *cq;
        int ret = 0, done = 0, reqs = 0;
        unsigned long dim = from->nr_segs;

        if (!user_sdma_comp_fp(kiocb->ki_filp) ||
            !user_sdma_pkt_fp(kiocb->ki_filp)) {
                ret = -EIO;
                goto done;
        }

        if (!iter_is_iovec(from) || !dim) {
                ret = -EINVAL;
                goto done;
        }

        hfi1_cdbg(SDMA, "SDMA request from %u:%u (%lu)",
                  ctxt_fp(kiocb->ki_filp)->ctxt, subctxt_fp(kiocb->ki_filp),
                  dim);
        pq = user_sdma_pkt_fp(kiocb->ki_filp);
        cq = user_sdma_comp_fp(kiocb->ki_filp);

        if (atomic_read(&pq->n_reqs) == pq->n_max_reqs) {
                ret = -ENOSPC;
                goto done;
        }

        while (dim) {
                unsigned long count = 0;

                ret = hfi1_user_sdma_process_request(
                        kiocb->ki_filp, (struct iovec *)(from->iov + done),
                        dim, &count);
                if (ret)
                        goto done;
                dim -= count;
                done += count;
                reqs++;
        }
done:
        return ret ? ret : reqs;
}
static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
{
        struct hfi1_ctxtdata *uctxt;
        struct hfi1_devdata *dd;
        unsigned long flags, pfn;
        u64 token = vma->vm_pgoff << PAGE_SHIFT,
                memaddr = 0;
        u8 subctxt, mapio = 0, vmf = 0, type;
        ssize_t memlen = 0;
        int ret = 0;
        u16 ctxt;

        uctxt = ctxt_fp(fp);
        if (!is_valid_mmap(token) || !uctxt ||
            !(vma->vm_flags & VM_SHARED)) {
                ret = -EINVAL;
                goto done;
        }
        dd = uctxt->dd;
        ctxt = HFI1_MMAP_TOKEN_GET(CTXT, token);
        subctxt = HFI1_MMAP_TOKEN_GET(SUBCTXT, token);
        type = HFI1_MMAP_TOKEN_GET(TYPE, token);
        if (ctxt != uctxt->ctxt || subctxt != subctxt_fp(fp)) {
                ret = -EINVAL;
                goto done;
        }

        flags = vma->vm_flags;

        switch (type) {
        case PIO_BUFS:
        case PIO_BUFS_SOP:
                memaddr = ((dd->physaddr + TXE_PIO_SEND) +
                                /* chip pio base */
                           (uctxt->sc->hw_context * (1 << 16))) +
                                /* 64K PIO space / ctxt */
                        (type == PIO_BUFS_SOP ?
                                (TXE_PIO_SIZE / 2) : 0); /* sop? */
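                /*
                 * Illustrative arithmetic (hypothetical numbers, not from
                 * the driver): with a 64K PIO region per context,
                 * hw_context 2 starts at physaddr + TXE_PIO_SEND + 2 * 65536,
                 * and a PIO_BUFS_SOP token additionally lands in the upper
                 * half of the PIO space (TXE_PIO_SIZE / 2).
                 */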
                /*
                 * Map only the amount allocated to the context, not the
                 * entire available context's PIO space.
                 */
                memlen = ALIGN(uctxt->sc->credits * PIO_BLOCK_SIZE,
                               PAGE_SIZE);
                flags &= ~VM_MAYREAD;
                flags |= VM_DONTCOPY | VM_DONTEXPAND;
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
                mapio = 1;
                break;
        case PIO_CRED:
                if (flags & VM_WRITE) {
                        ret = -EPERM;
                        goto done;
                }
                /*
                 * The credit return location for this context could be on the
                 * second or third page allocated for credit returns (if number
                 * of enabled contexts > 64 and 128 respectively).
                 */
                memaddr = dd->cr_base[uctxt->numa_id].pa +
                        (((u64)uctxt->sc->hw_free -
                          (u64)dd->cr_base[uctxt->numa_id].va) & PAGE_MASK);
                memlen = PAGE_SIZE;
                flags &= ~VM_MAYWRITE;
                flags |= VM_DONTCOPY | VM_DONTEXPAND;
                /*
                 * The driver has already allocated memory for credit
                 * returns and programmed it into the chip. Has that
                 * memory been flagged as non-cached?
                 */
                /* vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); */
                mapio = 1;
                break;
        case RCV_HDRQ:
                memaddr = uctxt->rcvhdrq_phys;
                memlen = uctxt->rcvhdrq_size;
                break;
        case RCV_EGRBUF: {
                unsigned long addr;
                int i;
                /*
                 * The RcvEgr buffer needs to be handled differently
                 * as multiple non-contiguous pages need to be mapped
                 * into the user process.
                 */
                memlen = uctxt->egrbufs.size;
                if ((vma->vm_end - vma->vm_start) != memlen) {
                        dd_dev_err(dd, "Eager buffer map size invalid (%lu != %lu)\n",
                                   (vma->vm_end - vma->vm_start), memlen);
                        ret = -EINVAL;
                        goto done;
                }
                if (vma->vm_flags & VM_WRITE) {
                        ret = -EPERM;
                        goto done;
                }
                vma->vm_flags &= ~VM_MAYWRITE;
                addr = vma->vm_start;
                for (i = 0 ; i < uctxt->egrbufs.numbufs; i++) {
                        ret = remap_pfn_range(
                                vma, addr,
                                uctxt->egrbufs.buffers[i].phys >> PAGE_SHIFT,
                                uctxt->egrbufs.buffers[i].len,
                                vma->vm_page_prot);
                        if (ret < 0)
                                goto done;
                        addr += uctxt->egrbufs.buffers[i].len;
                }
                ret = 0;
                goto done;
        }
        case UREGS:
                /*
                 * Map only the page that contains this context's user
                 * registers.
                 */
                memaddr = (unsigned long)
                        (dd->physaddr + RXE_PER_CONTEXT_USER)
                        + (uctxt->ctxt * RXE_PER_CONTEXT_SIZE);
                /*
                 * TidFlow table is on the same page as the rest of the
                 * user registers.
                 */
                memlen = PAGE_SIZE;
                flags |= VM_DONTCOPY | VM_DONTEXPAND;
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
                mapio = 1;
                break;
        case EVENTS:
                /*
                 * Use the page where this context's flags are. User level
                 * knows where its own bitmap is within the page.
                 */
                memaddr = ((unsigned long)dd->events +
                           ((uctxt->ctxt - dd->first_user_ctxt) *
                            HFI1_MAX_SHARED_CTXTS)) & PAGE_MASK;
                memlen = PAGE_SIZE;
                /*
                 * v3.7 removes VM_RESERVED but the effect is kept by
                 * using VM_IO.
                 */
                flags |= VM_IO | VM_DONTEXPAND;
                vmf = 1;
                break;
        case STATUS:
                memaddr = kvirt_to_phys((void *)dd->status);
                memlen = PAGE_SIZE;
                flags |= VM_IO | VM_DONTEXPAND;
                break;
        case RTAIL:
                if (!HFI1_CAP_IS_USET(DMA_RTAIL)) {
                        /*
                         * If the memory allocation failed, the context alloc
                         * also would have failed, so we would never get here
                         */
                        ret = -EINVAL;
                        goto done;
                }
                if (flags & VM_WRITE) {
                        ret = -EPERM;
                        goto done;
                }
                memaddr = uctxt->rcvhdrqtailaddr_phys;
                memlen = PAGE_SIZE;
                flags &= ~VM_MAYWRITE;
                break;
        case SUBCTXT_UREGS:
                memaddr = (u64)uctxt->subctxt_uregbase;
                memlen = PAGE_SIZE;
                flags |= VM_IO | VM_DONTEXPAND;
                vmf = 1;
                break;
        case SUBCTXT_RCV_HDRQ:
                memaddr = (u64)uctxt->subctxt_rcvhdr_base;
                memlen = uctxt->rcvhdrq_size * uctxt->subctxt_cnt;
                flags |= VM_IO | VM_DONTEXPAND;
                vmf = 1;
                break;
        case SUBCTXT_EGRBUF:
                memaddr = (u64)uctxt->subctxt_rcvegrbuf;
                memlen = uctxt->egrbufs.size * uctxt->subctxt_cnt;
                flags |= VM_IO | VM_DONTEXPAND;
                flags &= ~VM_MAYWRITE;
                vmf = 1;
                break;
        case SDMA_COMP: {
                struct hfi1_user_sdma_comp_q *cq;

                if (!user_sdma_comp_fp(fp)) {
                        ret = -EFAULT;
                        goto done;
                }
                cq = user_sdma_comp_fp(fp);
                memaddr = (u64)cq->comps;
                memlen = ALIGN(sizeof(*cq->comps) * cq->nentries, PAGE_SIZE);
                flags |= VM_IO | VM_DONTEXPAND;
                vmf = 1;
                break;
        }
        default:
                ret = -EINVAL;
                break;
        }

        if ((vma->vm_end - vma->vm_start) != memlen) {
                hfi1_cdbg(PROC, "%u:%u Memory size mismatch %lu:%lu",
                          uctxt->ctxt, subctxt_fp(fp),
                          (vma->vm_end - vma->vm_start), memlen);
                ret = -EINVAL;
                goto done;
        }

        vma->vm_flags = flags;
        hfi1_cdbg(PROC,
                  "%s: %u:%u type:%u io/vf:%d/%d, addr:0x%llx, len:%lu(%lu), flags:0x%lx\n",
                  __func__, ctxt, subctxt, type, mapio, vmf, memaddr, memlen,
                  vma->vm_end - vma->vm_start, vma->vm_flags);
        pfn = (unsigned long)(memaddr >> PAGE_SHIFT);
        if (vmf) {
                vma->vm_pgoff = pfn;
                vma->vm_ops = &vm_ops;
                ret = 0;
        } else if (mapio) {
                ret = io_remap_pfn_range(vma, vma->vm_start, pfn, memlen,
                                         vma->vm_page_prot);
        } else {
                ret = remap_pfn_range(vma, vma->vm_start, pfn, memlen,
                                      vma->vm_page_prot);
        }
done:
        return ret;
}
/*
 * Local (non-chip) user memory is not mapped right away but as it is
 * accessed by the user-level code.
 */
static int vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct page *page;

        page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
        if (!page)
                return VM_FAULT_SIGBUS;

        get_page(page);
        vmf->page = page;

        return 0;
}
static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt)
{
        struct hfi1_ctxtdata *uctxt;
        unsigned pollflag;

        uctxt = ctxt_fp(fp);
        if (!uctxt)
                pollflag = POLLERR;
        else if (uctxt->poll_type == HFI1_POLL_TYPE_URGENT)
                pollflag = poll_urgent(fp, pt);
        else if (uctxt->poll_type == HFI1_POLL_TYPE_ANYRCV)
                pollflag = poll_next(fp, pt);
        else /* invalid */
                pollflag = POLLERR;

        return pollflag;
}
static int hfi1_file_close(struct inode *inode, struct file *fp)
{
        struct hfi1_filedata *fdata = fp->private_data;
        struct hfi1_ctxtdata *uctxt = fdata->uctxt;
        struct hfi1_devdata *dd;
        unsigned long flags, *ev;

        fp->private_data = NULL;

        if (!uctxt)
                goto done;

        hfi1_cdbg(PROC, "freeing ctxt %u:%u", uctxt->ctxt, fdata->subctxt);
        dd = uctxt->dd;
        mutex_lock(&hfi1_mutex);

        flush_wc();
        /* drain user sdma queue */
        if (fdata->pq)
                hfi1_user_sdma_free_queues(fdata);

        /*
         * Clear any left over, unhandled events so the next process that
         * gets this context doesn't get confused.
         */
        ev = dd->events + ((uctxt->ctxt - dd->first_user_ctxt) *
                           HFI1_MAX_SHARED_CTXTS) + fdata->subctxt;
        *ev = 0;

        if (--uctxt->cnt) {
                uctxt->active_slaves &= ~(1 << fdata->subctxt);
                uctxt->subpid[fdata->subctxt] = 0;
                mutex_unlock(&hfi1_mutex);
                goto done;
        }

        spin_lock_irqsave(&dd->uctxt_lock, flags);
        /*
         * Disable receive context and interrupt available, reset all
         * RcvCtxtCtrl bits to default values.
         */
        hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
                     HFI1_RCVCTRL_TIDFLOW_DIS |
                     HFI1_RCVCTRL_INTRAVAIL_DIS |
                     HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
                     HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
                     HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt->ctxt);
        /* Clear the context's J_KEY */
        hfi1_clear_ctxt_jkey(dd, uctxt->ctxt);
        /*
         * Reset context integrity checks to default.
         * (writes to CSRs probably belong in chip.c)
         */
        write_kctxt_csr(dd, uctxt->sc->hw_context, SEND_CTXT_CHECK_ENABLE,
                        hfi1_pkt_default_send_ctxt_mask(dd, uctxt->sc->type));
        sc_disable(uctxt->sc);
        uctxt->pid = 0;
        spin_unlock_irqrestore(&dd->uctxt_lock, flags);

        dd->rcd[uctxt->ctxt] = NULL;
        uctxt->rcvwait_to = 0;
        uctxt->piowait_to = 0;
        uctxt->rcvnowait = 0;
        uctxt->pionowait = 0;
        uctxt->event_flags = 0;

        hfi1_clear_tids(uctxt);
        hfi1_clear_ctxt_pkey(dd, uctxt->ctxt);

        if (uctxt->tid_pg_list)
                unlock_exp_tids(uctxt);

        hfi1_stats.sps_ctxts--;
        dd->freectxts++;
        mutex_unlock(&hfi1_mutex);
        hfi1_free_ctxtdata(dd, uctxt);
done:
        kfree(fdata);
        return 0;
}
/*
 * Convert kernel *virtual* addresses to physical addresses.
 * This is used for vmalloc'ed addresses.
 */
static u64 kvirt_to_phys(void *addr)
{
        struct page *page;
        u64 paddr = 0;

        page = vmalloc_to_page(addr);
        if (page)
                paddr = page_to_pfn(page) << PAGE_SHIFT;

        return paddr;
}
static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo)
{
        int i_minor, ret = 0;
        unsigned swmajor, swminor, alg = HFI1_ALG_ACROSS;

        swmajor = uinfo->userversion >> 16;
        if (swmajor != HFI1_USER_SWMAJOR) {
                ret = -ENODEV;
                goto done;
        }

        swminor = uinfo->userversion & 0xffff;

        if (uinfo->hfi1_alg < HFI1_ALG_COUNT)
                alg = uinfo->hfi1_alg;

        mutex_lock(&hfi1_mutex);
        /* First, let's check if we need to set up a shared context. */
        if (uinfo->subctxt_cnt)
                ret = find_shared_ctxt(fp, uinfo);

        /*
         * We execute the following block if we couldn't find a
         * shared context or if context sharing is not required.
         */
        if (!ret) {
                i_minor = iminor(file_inode(fp)) - HFI1_USER_MINOR_BASE;
                ret = get_user_context(fp, uinfo, i_minor - 1, alg);
        }
        mutex_unlock(&hfi1_mutex);
done:
        return ret;
}
static int get_user_context(struct file *fp, struct hfi1_user_info *uinfo,
                            int devno, unsigned alg)
{
        struct hfi1_devdata *dd = NULL;
        int ret = 0, devmax, npresent, nup, dev;

        devmax = hfi1_count_units(&npresent, &nup);
        if (!npresent) {
                ret = -ENXIO;
                goto done;
        }
        if (!nup) {
                ret = -ENETDOWN;
                goto done;
        }
        if (devno >= 0) {
                dd = hfi1_lookup(devno);
                if (!dd)
                        ret = -ENODEV;
                else if (!dd->freectxts)
                        ret = -EBUSY;
        } else {
                struct hfi1_devdata *pdd;

                if (alg == HFI1_ALG_ACROSS) {
                        unsigned free = 0U;

                        for (dev = 0; dev < devmax; dev++) {
                                pdd = hfi1_lookup(dev);
                                if (pdd && pdd->freectxts &&
                                    pdd->freectxts > free) {
                                        dd = pdd;
                                        free = pdd->freectxts;
                                }
                        }
                } else {
                        for (dev = 0; dev < devmax; dev++) {
                                pdd = hfi1_lookup(dev);
                                if (pdd && pdd->freectxts) {
                                        dd = pdd;
                                        break;
                                }
                        }
                }
                if (!dd)
                        ret = -EBUSY;
        }
done:
        return ret ? ret : allocate_ctxt(fp, dd, uinfo);
}
static int find_shared_ctxt(struct file *fp,
                            const struct hfi1_user_info *uinfo)
{
        int devmax, ndev, i;
        int ret = 0;

        devmax = hfi1_count_units(NULL, NULL);

        for (ndev = 0; ndev < devmax; ndev++) {
                struct hfi1_devdata *dd = hfi1_lookup(ndev);

                /* device portion of usable() */
                if (!(dd && (dd->flags & HFI1_PRESENT) && dd->kregbase))
                        continue;
                for (i = dd->first_user_ctxt; i < dd->num_rcv_contexts; i++) {
                        struct hfi1_ctxtdata *uctxt = dd->rcd[i];

                        /* Skip ctxts which are not yet open */
                        if (!uctxt || !uctxt->cnt)
                                continue;
                        /* Skip ctxt if it doesn't match the requested one */
                        if (memcmp(uctxt->uuid, uinfo->uuid,
                                   sizeof(uctxt->uuid)) ||
                            uctxt->subctxt_id != uinfo->subctxt_id ||
                            uctxt->subctxt_cnt != uinfo->subctxt_cnt)
                                continue;

                        /* Verify the sharing process matches the master */
                        if (uctxt->userversion != uinfo->userversion ||
                            uctxt->cnt >= uctxt->subctxt_cnt) {
                                ret = -EINVAL;
                                goto done;
                        }
                        ctxt_fp(fp) = uctxt;
                        subctxt_fp(fp) = uctxt->cnt++;
                        uctxt->subpid[subctxt_fp(fp)] = current->pid;
                        uctxt->active_slaves |= 1 << subctxt_fp(fp);
                        ret = 1;
                        goto done;
                }
        }

done:
        return ret;
}
static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd,
                         struct hfi1_user_info *uinfo)
{
        struct hfi1_ctxtdata *uctxt;
        unsigned ctxt;
        int ret;

        if (dd->flags & HFI1_FROZEN) {
                /*
                 * Pick an error that is unique from all other errors
                 * that are returned so the user process knows that
                 * it tried to allocate while the SPC was frozen. It
                 * should be able to retry with success in a short
                 * while.
                 */
                return -EIO;
        }

        for (ctxt = dd->first_user_ctxt; ctxt < dd->num_rcv_contexts; ctxt++)
                if (!dd->rcd[ctxt])
                        break;

        if (ctxt == dd->num_rcv_contexts)
                return -EBUSY;

        uctxt = hfi1_create_ctxtdata(dd->pport, ctxt);
        if (!uctxt) {
                dd_dev_err(dd,
                           "Unable to allocate ctxtdata memory, failing open\n");
                return -ENOMEM;
        }
        /*
         * Allocate and enable a PIO send context.
         */
        uctxt->sc = sc_alloc(dd, SC_USER, uctxt->rcvhdrqentsize,
                             uctxt->numa_id);
        if (!uctxt->sc)
                return -ENOMEM;

        dbg("allocated send context %u(%u)\n", uctxt->sc->sw_index,
            uctxt->sc->hw_context);
        ret = sc_enable(uctxt->sc);
        if (ret)
                return ret;
        /*
         * Setup shared context resources if the user-level has requested
         * shared contexts and this is the 'master' process.
         * This has to be done here so the rest of the sub-contexts find the
         * proper master.
         */
        if (uinfo->subctxt_cnt && !subctxt_fp(fp)) {
                ret = init_subctxts(uctxt, uinfo);
                /*
                 * On error, we don't need to disable and de-allocate the
                 * send context because it will be done during file close
                 */
                if (ret)
                        return ret;
        }
        uctxt->userversion = uinfo->userversion;
        uctxt->pid = current->pid;
        uctxt->flags = HFI1_CAP_UGET(MASK);
        init_waitqueue_head(&uctxt->wait);
        strlcpy(uctxt->comm, current->comm, sizeof(uctxt->comm));
        memcpy(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid));
        uctxt->jkey = generate_jkey(current_uid());
        INIT_LIST_HEAD(&uctxt->sdma_queues);
        spin_lock_init(&uctxt->sdma_qlock);
        hfi1_stats.sps_ctxts++;
        dd->freectxts--;
        ctxt_fp(fp) = uctxt;

        return 0;
}
static int init_subctxts(struct hfi1_ctxtdata *uctxt,
                         const struct hfi1_user_info *uinfo)
{
        int ret = 0;
        unsigned num_subctxts;

        num_subctxts = uinfo->subctxt_cnt;
        if (num_subctxts > HFI1_MAX_SHARED_CTXTS) {
                ret = -EINVAL;
                goto bail;
        }

        uctxt->subctxt_cnt = uinfo->subctxt_cnt;
        uctxt->subctxt_id = uinfo->subctxt_id;
        uctxt->active_slaves = 1;
        uctxt->redirect_seq_cnt = 1;
        set_bit(HFI1_CTXT_MASTER_UNINIT, &uctxt->event_flags);
bail:
        return ret;
}
static int setup_subctxt(struct hfi1_ctxtdata *uctxt)
{
        int ret = 0;
        unsigned num_subctxts = uctxt->subctxt_cnt;

        uctxt->subctxt_uregbase = vmalloc_user(PAGE_SIZE);
        if (!uctxt->subctxt_uregbase) {
                ret = -ENOMEM;
                goto bail;
        }
        /* We can take the size of the RcvHdr Queue from the master */
        uctxt->subctxt_rcvhdr_base = vmalloc_user(uctxt->rcvhdrq_size *
                                                  num_subctxts);
        if (!uctxt->subctxt_rcvhdr_base) {
                ret = -ENOMEM;
                goto bail_ureg;
        }

        uctxt->subctxt_rcvegrbuf = vmalloc_user(uctxt->egrbufs.size *
                                                num_subctxts);
        if (!uctxt->subctxt_rcvegrbuf) {
                ret = -ENOMEM;
                goto bail_rhdr;
        }
        goto bail;
bail_rhdr:
        vfree(uctxt->subctxt_rcvhdr_base);
bail_ureg:
        vfree(uctxt->subctxt_uregbase);
        uctxt->subctxt_uregbase = NULL;
bail:
        return ret;
}
static int user_init(struct file *fp)
{
        int ret;
        unsigned int rcvctrl_ops = 0;
        struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);

        /* make sure that the context has already been setup */
        if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags)) {
                ret = -EFAULT;
                goto done;
        }

        /*
         * Subctxts don't need to initialize anything since master
         * has done it.
         */
        if (subctxt_fp(fp)) {
                ret = wait_event_interruptible(uctxt->wait,
                        !test_bit(HFI1_CTXT_MASTER_UNINIT,
                        &uctxt->event_flags));
                goto done;
        }

        /* initialize poll variables... */
        uctxt->urgent = 0;
        uctxt->urgent_poll = 0;

        /*
         * Now enable the ctxt for receive.
         * For chips that are set to DMA the tail register to memory
         * when it changes (and when the update bit transitions from
         * 0 to 1), we turn it off and then back on.
         * This will (very briefly) affect any other open ctxts, but the
         * duration is very short, and therefore isn't an issue. We
         * explicitly set the in-memory tail copy to 0 beforehand, so we
         * don't have to wait to be sure the DMA update has happened
         * (chip resets head/tail to 0 on transition to enable).
         */
        if (uctxt->rcvhdrtail_kvaddr)
                clear_rcvhdrtail(uctxt);

        /* Setup J_KEY before enabling the context */
        hfi1_set_ctxt_jkey(uctxt->dd, uctxt->ctxt, uctxt->jkey);

        rcvctrl_ops = HFI1_RCVCTRL_CTXT_ENB;
        if (HFI1_CAP_KGET_MASK(uctxt->flags, HDRSUPP))
                rcvctrl_ops |= HFI1_RCVCTRL_TIDFLOW_ENB;
        /*
         * Ignore the bit in the flags for now until proper
         * support for multiple packet per rcv array entry is
         * added.
         */
        if (!HFI1_CAP_KGET_MASK(uctxt->flags, MULTI_PKT_EGR))
                rcvctrl_ops |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
        if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_EGR_FULL))
                rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
        if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
                rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
        if (HFI1_CAP_KGET_MASK(uctxt->flags, DMA_RTAIL))
                rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;
        hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt->ctxt);

        /* Notify any waiting slaves */
        if (uctxt->subctxt_cnt) {
                clear_bit(HFI1_CTXT_MASTER_UNINIT, &uctxt->event_flags);
                wake_up(&uctxt->wait);
        }
        ret = 0;

done:
        return ret;
}
static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len)
{
        struct hfi1_ctxt_info cinfo;
        struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
        struct hfi1_filedata *fd = fp->private_data;
        int ret = 0;

        ret = hfi1_get_base_kinfo(uctxt, &cinfo);
        if (ret < 0)
                goto done;
        cinfo.num_active = hfi1_count_active_units();
        cinfo.unit = uctxt->dd->unit;
        cinfo.ctxt = uctxt->ctxt;
        cinfo.subctxt = subctxt_fp(fp);
        cinfo.rcvtids = roundup(uctxt->egrbufs.alloced,
                                uctxt->dd->rcv_entries.group_size) +
                uctxt->expected_count;
        cinfo.credits = uctxt->sc->credits;
        cinfo.numa_node = uctxt->numa_id;
        cinfo.rec_cpu = fd->rec_cpu_num;
        cinfo.send_ctxt = uctxt->sc->hw_context;

        cinfo.egrtids = uctxt->egrbufs.alloced;
        cinfo.rcvhdrq_cnt = uctxt->rcvhdrq_cnt;
        cinfo.rcvhdrq_entsize = uctxt->rcvhdrqentsize << 2;
        cinfo.sdma_ring_size = user_sdma_comp_fp(fp)->nentries;
        cinfo.rcvegr_size = uctxt->egrbufs.rcvtid_size;

        trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, subctxt_fp(fp), cinfo);
        if (copy_to_user(ubase, &cinfo, sizeof(cinfo)))
                ret = -EFAULT;
done:
        return ret;
}
static int setup_ctxt(struct file *fp)
{
        struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
        struct hfi1_devdata *dd = uctxt->dd;
        int ret = 0;

        /*
         * Context should be set up only once (including allocation and
         * programming of eager buffers). This is done if context sharing
         * is not requested or by the master process.
         */
        if (!uctxt->subctxt_cnt || !subctxt_fp(fp)) {
                ret = hfi1_init_ctxt(uctxt->sc);
                if (ret)
                        goto done;

                /* Now allocate the RcvHdr queue and eager buffers. */
                ret = hfi1_create_rcvhdrq(dd, uctxt);
                if (ret)
                        goto done;
                ret = hfi1_setup_eagerbufs(uctxt);
                if (ret)
                        goto done;
                if (uctxt->subctxt_cnt && !subctxt_fp(fp)) {
                        ret = setup_subctxt(uctxt);
                        if (ret)
                                goto done;
                }
                /* Setup Expected Rcv memories */
                uctxt->tid_pg_list = vzalloc(uctxt->expected_count *
                                             sizeof(struct page **));
                if (!uctxt->tid_pg_list) {
                        ret = -ENOMEM;
                        goto done;
                }
                uctxt->physshadow = vzalloc(uctxt->expected_count *
                                            sizeof(*uctxt->physshadow));
                if (!uctxt->physshadow) {
                        ret = -ENOMEM;
                        goto done;
                }
                /* allocate expected TID map and initialize the cursor */
                atomic_set(&uctxt->tidcursor, 0);
                uctxt->numtidgroups = uctxt->expected_count /
                        dd->rcv_entries.group_size;
                uctxt->tidmapcnt = uctxt->numtidgroups / BITS_PER_LONG +
                        !!(uctxt->numtidgroups % BITS_PER_LONG);
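                /*
                 * Example with illustrative numbers: expected_count = 2048
                 * and group_size = 8 give numtidgroups = 256, so
                 * tidmapcnt = 256 / 64 + !!(256 % 64) = 4 64-bit words
                 * track every TID group.
                 */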
                uctxt->tidusemap = kzalloc_node(uctxt->tidmapcnt *
                                                sizeof(*uctxt->tidusemap),
                                                GFP_KERNEL, uctxt->numa_id);
                if (!uctxt->tidusemap) {
                        ret = -ENOMEM;
                        goto done;
                }
                /*
                 * In case that the number of groups is not a multiple of
                 * 64 (the number of groups in a tidusemap element), mark
                 * the extra ones as used. This will effectively make them
                 * permanently used and should never be assigned. Otherwise,
                 * the code which checks how many free groups we have will
                 * get completely confused about the state of the bits.
                 */
                if (uctxt->numtidgroups % BITS_PER_LONG)
                        uctxt->tidusemap[uctxt->tidmapcnt - 1] =
                                ~((1ULL << (uctxt->numtidgroups %
                                            BITS_PER_LONG)) - 1);
                trace_hfi1_exp_tid_map(uctxt->ctxt, subctxt_fp(fp), 0,
                                       uctxt->tidusemap, uctxt->tidmapcnt);
        }
        ret = hfi1_user_sdma_alloc_queues(uctxt, fp);
        if (ret)
                goto done;

        set_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags);
done:
        return ret;
}
static int get_base_info(struct file *fp, void __user *ubase, __u32 len)
{
        struct hfi1_base_info binfo;
        struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
        struct hfi1_devdata *dd = uctxt->dd;
        ssize_t sz;
        unsigned offset;
        int ret = 0;

        trace_hfi1_uctxtdata(uctxt->dd, uctxt);

        memset(&binfo, 0, sizeof(binfo));
        binfo.hw_version = dd->revision;
        binfo.sw_version = HFI1_KERN_SWVERSION;
        binfo.bthqp = kdeth_qp;
        binfo.jkey = uctxt->jkey;
        /*
         * If more than 64 contexts are enabled the allocated credit
         * return will span two or three contiguous pages. Since we only
         * map the page containing the context's credit return address,
         * we need to calculate the offset in the proper page.
         */
        offset = ((u64)uctxt->sc->hw_free -
                  (u64)dd->cr_base[uctxt->numa_id].va) % PAGE_SIZE;
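        /*
         * Illustrative example: if hw_free sits 0x1040 bytes past the
         * credit-return base va, offset = 0x1040 % PAGE_SIZE = 0x40 on 4K
         * pages. The PIO_CRED mmap path masks the same difference with
         * PAGE_MASK to pick the page; this offset locates the address
         * within it.
         */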
        binfo.sc_credits_addr = HFI1_MMAP_TOKEN(PIO_CRED, uctxt->ctxt,
                                                subctxt_fp(fp), offset);
        binfo.pio_bufbase = HFI1_MMAP_TOKEN(PIO_BUFS, uctxt->ctxt,
                                            subctxt_fp(fp),
                                            uctxt->sc->base_addr);
        binfo.pio_bufbase_sop = HFI1_MMAP_TOKEN(PIO_BUFS_SOP,
                                                uctxt->ctxt,
                                                subctxt_fp(fp),
                                                uctxt->sc->base_addr);
        binfo.rcvhdr_bufbase = HFI1_MMAP_TOKEN(RCV_HDRQ, uctxt->ctxt,
                                               subctxt_fp(fp),
                                               uctxt->rcvhdrq);
        binfo.rcvegr_bufbase = HFI1_MMAP_TOKEN(RCV_EGRBUF, uctxt->ctxt,
                                               subctxt_fp(fp),
                                               uctxt->egrbufs.rcvtids[0].phys);
        binfo.sdma_comp_bufbase = HFI1_MMAP_TOKEN(SDMA_COMP, uctxt->ctxt,
                                                  subctxt_fp(fp), 0);
        /*
         * user regs are at
         * (RXE_PER_CONTEXT_USER + (ctxt * RXE_PER_CONTEXT_SIZE))
         */
        binfo.user_regbase = HFI1_MMAP_TOKEN(UREGS, uctxt->ctxt,
                                             subctxt_fp(fp), 0);
        offset = ((((uctxt->ctxt - dd->first_user_ctxt) *
                    HFI1_MAX_SHARED_CTXTS) + subctxt_fp(fp)) *
                  sizeof(*dd->events)) & ~PAGE_MASK;
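        /*
         * Illustrative example: the third user context
         * (ctxt - first_user_ctxt == 2) with HFI1_MAX_SHARED_CTXTS == 8,
         * subctxt 1 and 8-byte event words yields
         * ((2 * 8 + 1) * 8) & ~PAGE_MASK = 136 bytes into the events page.
         */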
        binfo.events_bufbase = HFI1_MMAP_TOKEN(EVENTS, uctxt->ctxt,
                                               subctxt_fp(fp),
                                               offset);
        binfo.status_bufbase = HFI1_MMAP_TOKEN(STATUS, uctxt->ctxt,
                                               subctxt_fp(fp),
                                               dd->status);
        if (HFI1_CAP_IS_USET(DMA_RTAIL))
                binfo.rcvhdrtail_base = HFI1_MMAP_TOKEN(RTAIL, uctxt->ctxt,
                                                        subctxt_fp(fp), 0);
        if (uctxt->subctxt_cnt) {
                binfo.subctxt_uregbase = HFI1_MMAP_TOKEN(SUBCTXT_UREGS,
                                                         uctxt->ctxt,
                                                         subctxt_fp(fp), 0);
                binfo.subctxt_rcvhdrbuf = HFI1_MMAP_TOKEN(SUBCTXT_RCV_HDRQ,
                                                          uctxt->ctxt,
                                                          subctxt_fp(fp), 0);
                binfo.subctxt_rcvegrbuf = HFI1_MMAP_TOKEN(SUBCTXT_EGRBUF,
                                                          uctxt->ctxt,
                                                          subctxt_fp(fp), 0);
        }
        sz = (len < sizeof(binfo)) ? len : sizeof(binfo);
        if (copy_to_user(ubase, &binfo, sz))
                ret = -EFAULT;
        return ret;
}
static unsigned int poll_urgent(struct file *fp,
                                struct poll_table_struct *pt)
{
        struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
        struct hfi1_devdata *dd = uctxt->dd;
        unsigned pollflag;

        poll_wait(fp, &uctxt->wait, pt);

        spin_lock_irq(&dd->uctxt_lock);
        if (uctxt->urgent != uctxt->urgent_poll) {
                pollflag = POLLIN | POLLRDNORM;
                uctxt->urgent_poll = uctxt->urgent;
        } else {
                pollflag = 0;
                set_bit(HFI1_CTXT_WAITING_URG, &uctxt->event_flags);
        }
        spin_unlock_irq(&dd->uctxt_lock);

        return pollflag;
}
static unsigned int poll_next(struct file *fp,
                              struct poll_table_struct *pt)
{
        struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
        struct hfi1_devdata *dd = uctxt->dd;
        unsigned pollflag;

        poll_wait(fp, &uctxt->wait, pt);

        spin_lock_irq(&dd->uctxt_lock);
        if (hdrqempty(uctxt)) {
                set_bit(HFI1_CTXT_WAITING_RCV, &uctxt->event_flags);
                hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_ENB, uctxt->ctxt);
                pollflag = 0;
        } else
                pollflag = POLLIN | POLLRDNORM;
        spin_unlock_irq(&dd->uctxt_lock);

        return pollflag;
}
/*
 * Find all user contexts in use, and set the specified bit in their
 * event mask.
 * See also find_ctxt() for a similar use, that is specific to send buffers.
 */
int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit)
{
        struct hfi1_ctxtdata *uctxt;
        struct hfi1_devdata *dd = ppd->dd;
        unsigned ctxt;
        int ret = 0;
        unsigned long flags;

        if (!dd->events) {
                ret = -EINVAL;
                goto done;
        }

        spin_lock_irqsave(&dd->uctxt_lock, flags);
        for (ctxt = dd->first_user_ctxt; ctxt < dd->num_rcv_contexts;
             ctxt++) {
                uctxt = dd->rcd[ctxt];
                if (uctxt) {
                        unsigned long *evs = dd->events +
                                (uctxt->ctxt - dd->first_user_ctxt) *
                                HFI1_MAX_SHARED_CTXTS;
                        int i;
                        /*
                         * subctxt_cnt is 0 if not shared, so do base
                         * separately, first, then remaining subctxt, if any
                         */
                        set_bit(evtbit, evs);
                        for (i = 1; i < uctxt->subctxt_cnt; i++)
                                set_bit(evtbit, evs + i);
                }
        }
        spin_unlock_irqrestore(&dd->uctxt_lock, flags);
done:
        return ret;
}
/**
 * manage_rcvq - manage a context's receive queue
 * @uctxt: the context
 * @subctxt: the sub-context
 * @start_stop: action to carry out
 *
 * start_stop == 0 disables receive on the context, for use in queue
 * overflow conditions. start_stop == 1 re-enables, to be used to
 * re-init the software copy of the head register
 */
static int manage_rcvq(struct hfi1_ctxtdata *uctxt, unsigned subctxt,
                       int start_stop)
{
        struct hfi1_devdata *dd = uctxt->dd;
        unsigned int rcvctrl_op;

        if (subctxt)
                goto bail;
        /* atomically clear receive enable ctxt. */
        if (start_stop) {
                /*
                 * On enable, force in-memory copy of the tail register to
                 * 0, so that protocol code doesn't have to worry about
                 * whether or not the chip has yet updated the in-memory
                 * copy or not on return from the system call. The chip
                 * always resets its tail register back to 0 on a
                 * transition from disabled to enabled.
                 */
                if (uctxt->rcvhdrtail_kvaddr)
                        clear_rcvhdrtail(uctxt);
                rcvctrl_op = HFI1_RCVCTRL_CTXT_ENB;
        } else
                rcvctrl_op = HFI1_RCVCTRL_CTXT_DIS;
        hfi1_rcvctrl(dd, rcvctrl_op, uctxt->ctxt);
        /* always; new head should be equal to new tail; see above */
bail:
        return 0;
}
/*
 * clear the event notifier events for this context.
 * User process then performs actions appropriate to bit having been
 * set, if desired, and checks again in future.
 */
static int user_event_ack(struct hfi1_ctxtdata *uctxt, int subctxt,
                          unsigned long events)
{
        int i;
        struct hfi1_devdata *dd = uctxt->dd;
        unsigned long *evs;

        if (!dd->events)
                return 0;

        evs = dd->events + ((uctxt->ctxt - dd->first_user_ctxt) *
                            HFI1_MAX_SHARED_CTXTS) + subctxt;

        for (i = 0; i <= _HFI1_MAX_EVENT_BIT; i++) {
                if (!test_bit(i, &events))
                        continue;
                clear_bit(i, evs);
        }
        return 0;
}
#define num_user_pages(vaddr, len)                        \
        (1 + (((((unsigned long)(vaddr) +                 \
                 (unsigned long)(len) - 1) & PAGE_MASK) - \
               ((unsigned long)vaddr & PAGE_MASK)) >> PAGE_SHIFT))
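/*
 * Worked example (illustrative, 4K pages): vaddr = 0x1234 and len = 0x2000
 * end at byte 0x3233, so the macro computes
 * 1 + ((0x3000 - 0x1000) >> PAGE_SHIFT) = 3 pages (0x1000, 0x2000, 0x3000).
 */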
/**
 * tzcnt - count the number of trailing zeros in a 64bit value
 * @value: the value to be examined
 *
 * Returns the number of trailing least significant zeros in the
 * input value. If the value is zero, return the number of
 * bits of the value.
 */
static inline u8 tzcnt(u64 value)
{
        return value ? __builtin_ctzl(value) : sizeof(value) * 8;
}
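/*
 * Examples (illustrative): tzcnt(0x8) = 3, tzcnt(0x1) = 0 and tzcnt(0) = 64,
 * the full width of the input.
 */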
static inline unsigned num_free_groups(unsigned long map, u16 *start)
{
        unsigned free;
        u16 bitidx = *start;

        if (bitidx >= BITS_PER_LONG)
                return 0;
        /* "Turn off" any bits set before our bit index */
        map &= ~((1ULL << bitidx) - 1);
        free = tzcnt(map) - bitidx;
        while (!free && bitidx < BITS_PER_LONG) {
                /* Zero out the last set bit so we look at the rest */
                map &= ~(1ULL << bitidx);
                /*
                 * Account for the previously checked bits and advance
                 * the bit index. We don't have to check for bitidx
                 * getting bigger than BITS_PER_LONG here as it would
                 * mean extra instructions that we don't need. If it
                 * did happen, it would push free to a negative value
                 * which will break the loop.
                 */
                free = tzcnt(map) - ++bitidx;
        }
        *start = bitidx;
        return free;
}
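/*
 * Worked example (illustrative): map = 0x0f with *start = 0 steps past the
 * four used groups, leaves *start = 4, and returns 60: the number of free
 * (zero) bits from bit 4 through bit 63.
 */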
static int exp_tid_setup(struct file *fp, struct hfi1_tid_info *tinfo)
{
        int ret = 0;
        struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
        struct hfi1_devdata *dd = uctxt->dd;
        unsigned tid, mapped = 0, npages, ngroups, exp_groups,
                tidpairs = uctxt->expected_count / 2;
        struct page **pages;
        unsigned long vaddr, tidmap[uctxt->tidmapcnt];
        dma_addr_t *phys;
        u32 tidlist[tidpairs], pairidx = 0, tidcursor;
        u16 useidx, idx, bitidx, tidcnt = 0;

        vaddr = tinfo->vaddr;

        if (vaddr & ~PAGE_MASK) {
                ret = -EINVAL;
                goto bail;
        }

        npages = num_user_pages(vaddr, tinfo->length);
        if (!npages) {
                ret = -EINVAL;
                goto bail;
        }
        if (!access_ok(VERIFY_WRITE, (void __user *)vaddr,
                       npages * PAGE_SIZE)) {
                dd_dev_err(dd, "Fail vaddr %p, %u pages, !access_ok\n",
                           (void *)vaddr, npages);
                ret = -EFAULT;
                goto bail;
        }

        memset(tidmap, 0, sizeof(tidmap[0]) * uctxt->tidmapcnt);
        memset(tidlist, 0, sizeof(tidlist[0]) * tidpairs);

        exp_groups = uctxt->expected_count / dd->rcv_entries.group_size;
        /* which group set do we look at first? */
        tidcursor = atomic_read(&uctxt->tidcursor);
        useidx = (tidcursor >> 16) & 0xffff;
        bitidx = tidcursor & 0xffff;

        /*
         * Keep going until we've mapped all pages or we've exhausted all
         * RcvArray entries.
         * This iterates over the number of tidmaps + 1
         * (idx <= uctxt->tidmapcnt) so we check the bitmap which we
         * started from one more time for any free bits before the
         * starting point bit.
         */
        for (mapped = 0, idx = 0;
             mapped < npages && idx <= uctxt->tidmapcnt;) {
                u64 i, offset = 0;
                unsigned free, pinned, pmapped = 0, bits_used;
                u16 grp;

                /*
                 * "Reserve" the needed group bits under lock so other
                 * processes can't step in the middle of it. Once
                 * reserved, we don't need the lock anymore since we
                 * are guaranteed the groups.
                 */
                spin_lock(&uctxt->exp_lock);
                if (uctxt->tidusemap[useidx] == -1ULL ||
                    bitidx >= BITS_PER_LONG) {
                        /* no free groups in the set, use the next */
                        useidx = (useidx + 1) % uctxt->tidmapcnt;
                        idx++;
                        bitidx = 0;
                        spin_unlock(&uctxt->exp_lock);
                        continue;
                }
                ngroups = ((npages - mapped) / dd->rcv_entries.group_size) +
                        !!((npages - mapped) % dd->rcv_entries.group_size);

                /*
                 * If we've gotten here, the current set of groups does have
                 * one or more free groups.
                 */
                free = num_free_groups(uctxt->tidusemap[useidx], &bitidx);
                if (!free) {
                        /*
                         * Despite the check above, free could still come back
                         * as 0 because we don't check the entire bitmap but
                         * we start from bitidx.
                         */
                        spin_unlock(&uctxt->exp_lock);
                        continue;
                }
                bits_used = min(free, ngroups);
                tidmap[useidx] |= ((1ULL << bits_used) - 1) << bitidx;
                uctxt->tidusemap[useidx] |= tidmap[useidx];
                spin_unlock(&uctxt->exp_lock);

                /*
                 * At this point, we know where in the map we have free bits.
                 * properly offset into the various "shadow" arrays and compute
                 * the RcvArray entry index.
                 */
                offset = ((useidx * BITS_PER_LONG) + bitidx) *
                        dd->rcv_entries.group_size;
                pages = uctxt->tid_pg_list + offset;
                phys = uctxt->physshadow + offset;
                tid = uctxt->expected_base + offset;

                /* Calculate how many pages we can pin based on free bits */
                pinned = min((bits_used * dd->rcv_entries.group_size),
                             (npages - mapped));
                /*
                 * Now that we know how many free RcvArray entries we have,
                 * we can pin that many user pages.
                 */
                ret = hfi1_get_user_pages(vaddr + (mapped * PAGE_SIZE),
                                          pinned, pages);
                if (ret) {
                        /*
                         * We can't continue because the pages array won't be
                         * initialized. This should never happen,
                         * unless perhaps the user has mpin'ed the pages
                         * themselves.
                         */
                        dd_dev_info(dd,
                                    "Failed to lock addr %p, %u pages: errno %d\n",
                                    (void *) vaddr, pinned, -ret);
                        /*
                         * Let go of the bits that we reserved since we are not
                         * going to use them.
                         */
                        spin_lock(&uctxt->exp_lock);
                        uctxt->tidusemap[useidx] &=
                                ~(((1ULL << bits_used) - 1) << bitidx);
                        spin_unlock(&uctxt->exp_lock);
                        goto done;
                }
                /*
                 * How many groups do we need based on how many pages we have
                 * pinned?
                 */
                ngroups = (pinned / dd->rcv_entries.group_size) +
                        !!(pinned % dd->rcv_entries.group_size);
                /*
                 * Keep programming RcvArray entries for all the <ngroups> free
                 * groups.
                 */
                for (i = 0, grp = 0; grp < ngroups; i++, grp++) {
                        unsigned j;
                        u32 pair_size = 0, tidsize;
                        /*
                         * This inner loop will program an entire group or the
                         * array of pinned pages (whichever limit is hit
                         * first).
                         */
                        for (j = 0; j < dd->rcv_entries.group_size &&
                                     pmapped < pinned; j++, pmapped++, tid++) {
                                tidsize = PAGE_SIZE;
                                phys[pmapped] = hfi1_map_page(dd->pcidev,
                                                   pages[pmapped], 0,
                                                   tidsize, PCI_DMA_FROMDEVICE);
                                trace_hfi1_exp_rcv_set(uctxt->ctxt,
                                                       subctxt_fp(fp),
                                                       tid, vaddr,
                                                       phys[pmapped],
                                                       pages[pmapped]);
                                /*
                                 * Each RcvArray entry is programmed with one
                                 * page worth of memory. This will handle
                                 * the 8K MTU as well as anything smaller
                                 * due to the fact that both entries in the
                                 * RcvTidPair are programmed with a page.
                                 * PSM currently does not handle anything
                                 * bigger than 8K MTU, so should we even worry
                                 * about 10K MTUs?
                                 */
                                hfi1_put_tid(dd, tid, PT_EXPECTED,
                                             phys[pmapped],
                                             ilog2(tidsize >> PAGE_SHIFT) + 1);
                                pair_size += tidsize >> PAGE_SHIFT;
                                EXP_TID_RESET(tidlist[pairidx], LEN, pair_size);
                                if (!(tid % 2)) {
                                        tidlist[pairidx] |=
                                           EXP_TID_SET(IDX,
                                                (tid - uctxt->expected_base)
                                                       / 2);
                                        tidlist[pairidx] |=
                                                EXP_TID_SET(CTRL, 1);
                                        tidcnt++;
                                } else {
                                        tidlist[pairidx] |=
                                                EXP_TID_SET(CTRL, 2);
                                        pair_size = 0;
                                        pairidx++;
                                }
                        }
                        /*
                         * We've programmed the entire group (or as much of the
                         * group as we'll use). Now, it's time to push it out...
                         */
                        flush_wc();
                }
                mapped += pinned;
                atomic_set(&uctxt->tidcursor,
                           (((useidx & 0xffffff) << 16) |
                            ((bitidx + bits_used) & 0xffffff)));
        }
        trace_hfi1_exp_tid_map(uctxt->ctxt, subctxt_fp(fp), 0, uctxt->tidusemap,
                               uctxt->tidmapcnt);

done:
        /* If we've mapped anything, copy relevant info to user */
        if (mapped) {
                if (copy_to_user((void __user *)(unsigned long)tinfo->tidlist,
                                 tidlist, sizeof(tidlist[0]) * tidcnt)) {
                        ret = -EFAULT;
                        goto bail;
                }
                /* copy TID info to user */
                if (copy_to_user((void __user *)(unsigned long)tinfo->tidmap,
                                 tidmap, sizeof(tidmap[0]) * uctxt->tidmapcnt))
                        ret = -EFAULT;
        }
bail:
        /*
         * Calculate mapped length. New Exp TID protocol does not "unwind" and
         * report an error if it can't map the entire buffer. It just reports
         * the length that was mapped.
         */
        tinfo->length = mapped * PAGE_SIZE;
        tinfo->tidcnt = tidcnt;
        return ret;
}
static int exp_tid_free(struct file *fp, struct hfi1_tid_info *tinfo)
{
        struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
        struct hfi1_devdata *dd = uctxt->dd;
        unsigned long tidmap[uctxt->tidmapcnt];
        struct page **pages;
        dma_addr_t *phys;
        u16 idx, bitidx, tid;
        int ret = 0;

        if (copy_from_user(&tidmap, (void __user *)(unsigned long)
                           tinfo->tidmap,
                           sizeof(tidmap[0]) * uctxt->tidmapcnt)) {
                ret = -EFAULT;
                goto done;
        }
        for (idx = 0; idx < uctxt->tidmapcnt; idx++) {
                unsigned long map = tidmap[idx];

                if (!map)
                        continue;

                while ((bitidx = tzcnt(map)) < BITS_PER_LONG) {
                        int i, pcount = 0;
                        struct page *pshadow[dd->rcv_entries.group_size];
                        unsigned offset = ((idx * BITS_PER_LONG) + bitidx) *
                                dd->rcv_entries.group_size;

                        pages = uctxt->tid_pg_list + offset;
                        phys = uctxt->physshadow + offset;
                        tid = uctxt->expected_base + offset;
                        for (i = 0; i < dd->rcv_entries.group_size;
                             i++, tid++) {
                                if (pages[i]) {
                                        hfi1_put_tid(dd, tid, PT_INVALID,
                                                     0, 0);
                                        trace_hfi1_exp_rcv_free(uctxt->ctxt,
                                                                subctxt_fp(fp),
                                                                tid, phys[i],
                                                                pages[i]);
                                        pci_unmap_page(dd->pcidev, phys[i],
                                              PAGE_SIZE, PCI_DMA_FROMDEVICE);
                                        pshadow[pcount] = pages[i];
                                        pages[i] = NULL;
                                        pcount++;
                                        phys[i] = 0;
                                }
                        }
                        flush_wc();
                        hfi1_release_user_pages(pshadow, pcount);
                        clear_bit(bitidx, &uctxt->tidusemap[idx]);
                        map &= ~(1ULL << bitidx);
                }
        }
        trace_hfi1_exp_tid_map(uctxt->ctxt, subctxt_fp(fp), 1, uctxt->tidusemap,
                               uctxt->tidmapcnt);
done:
        return ret;
}
static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt)
{
        struct hfi1_devdata *dd = uctxt->dd;
        unsigned tid;

        dd_dev_info(dd, "ctxt %u unlocking any locked expTID pages\n",
                    uctxt->ctxt);
        for (tid = 0; tid < uctxt->expected_count; tid++) {
                struct page *p = uctxt->tid_pg_list[tid];
                dma_addr_t phys;

                if (!p)
                        continue;

                phys = uctxt->physshadow[tid];
                uctxt->physshadow[tid] = 0;
                uctxt->tid_pg_list[tid] = NULL;
                pci_unmap_page(dd->pcidev, phys, PAGE_SIZE, PCI_DMA_FROMDEVICE);
                hfi1_release_user_pages(&p, 1);
        }
}
static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned subctxt,
                         u16 pkey)
{
        int ret = -ENOENT, i, intable = 0;
        struct hfi1_pportdata *ppd = uctxt->ppd;
        struct hfi1_devdata *dd = uctxt->dd;

        if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY) {
                ret = -EINVAL;
                goto done;
        }

        for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++)
                if (pkey == ppd->pkeys[i]) {
                        intable = 1;
                        break;
                }

        if (intable)
                ret = hfi1_set_ctxt_pkey(dd, uctxt->ctxt, pkey);
done:
        return ret;
}
static int ui_open(struct inode *inode, struct file *filp)
{
        struct hfi1_devdata *dd;

        dd = container_of(inode->i_cdev, struct hfi1_devdata, ui_cdev);
        filp->private_data = dd; /* for other methods */
        return 0;
}
static int ui_release(struct inode *inode, struct file *filp)
{
        /* nothing to do */
        return 0;
}
static loff_t ui_lseek(struct file *filp, loff_t offset, int whence)
{
        struct hfi1_devdata *dd = filp->private_data;

        switch (whence) {
        case SEEK_SET:
                break;
        case SEEK_CUR:
                offset += filp->f_pos;
                break;
        case SEEK_END:
                offset = ((dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE) -
                        offset;
                break;
        default:
                return -EINVAL;
        }

        if (offset < 0)
                return -EINVAL;

        if (offset >= (dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE)
                return -EINVAL;

        filp->f_pos = offset;

        return filp->f_pos;
}
/* NOTE: assumes unsigned long is 8 bytes */
static ssize_t ui_read(struct file *filp, char __user *buf, size_t count,
                       loff_t *f_pos)
{
        struct hfi1_devdata *dd = filp->private_data;
        void __iomem *base = dd->kregbase;
        unsigned long total, csr_off,
                barlen = (dd->kregend - dd->kregbase);
        u64 data;

        /* only read 8 byte quantities */
        if ((count % 8) != 0)
                return -EINVAL;
        /* offset must be 8-byte aligned */
        if ((*f_pos % 8) != 0)
                return -EINVAL;
        /* destination buffer must be 8-byte aligned */
        if ((unsigned long)buf % 8 != 0)
                return -EINVAL;
        /* must be in range */
        if (*f_pos + count > (barlen + DC8051_DATA_MEM_SIZE))
                return -EINVAL;
        /* only set the base if we are not starting past the BAR */
        if (*f_pos < barlen)
                base += *f_pos;
        csr_off = *f_pos;
        for (total = 0; total < count; total += 8, csr_off += 8) {
                /* accessing LCB CSRs requires more checks */
                if (is_lcb_offset(csr_off)) {
                        if (read_lcb_csr(dd, csr_off, (u64 *)&data))
                                break; /* failed */
                }
                /*
                 * Cannot read ASIC GPIO/QSFP* clear and force CSRs without a
                 * false parity error. Avoid the whole issue by not reading
                 * them. These registers are defined as having a read value
                 * of 0.
                 */
                else if (csr_off == ASIC_GPIO_CLEAR
                                || csr_off == ASIC_GPIO_FORCE
                                || csr_off == ASIC_QSFP1_CLEAR
                                || csr_off == ASIC_QSFP1_FORCE
                                || csr_off == ASIC_QSFP2_CLEAR
                                || csr_off == ASIC_QSFP2_FORCE)
                        data = 0;
                else if (csr_off >= barlen) {
                        /*
                         * read_8051_data can read more than just 8 bytes at
                         * a time. However, folding this into the loop and
                         * handling the reads in 8 byte increments allows us
                         * to smoothly transition from chip memory to 8051
                         * memory.
                         */
                        if (read_8051_data(dd,
                                           (u32)(csr_off - barlen),
                                           sizeof(data), &data))
                                break; /* failed */
                } else
                        data = readq(base + total);
                if (put_user(data, (unsigned long __user *)(buf + total)))
                        break;
        }
        *f_pos += total;
        return total;
}
/* NOTE: assumes unsigned long is 8 bytes */
static ssize_t ui_write(struct file *filp, const char __user *buf,
                        size_t count, loff_t *f_pos)
{
        struct hfi1_devdata *dd = filp->private_data;
        void __iomem *base;
        unsigned long total, data, csr_off;
        int in_lcb;

        /* only write 8 byte quantities */
        if ((count % 8) != 0)
                return -EINVAL;
        /* offset must be 8-byte aligned */
        if ((*f_pos % 8) != 0)
                return -EINVAL;
        /* source buffer must be 8-byte aligned */
        if ((unsigned long)buf % 8 != 0)
                return -EINVAL;
        /* must be in range */
        if (*f_pos + count > dd->kregend - dd->kregbase)
                return -EINVAL;

        base = (void __iomem *)dd->kregbase + *f_pos;
        csr_off = *f_pos;
        in_lcb = 0;
        for (total = 0; total < count; total += 8, csr_off += 8) {
                if (get_user(data, (unsigned long __user *)(buf + total)))
                        break;
                /* accessing LCB CSRs requires a special procedure */
                if (is_lcb_offset(csr_off)) {
                        if (!in_lcb) {
                                int ret = acquire_lcb_access(dd, 1);

                                if (ret)
                                        break;
                                in_lcb = 1;
                        }
                } else {
                        if (in_lcb) {
                                release_lcb_access(dd, 1);
                                in_lcb = 0;
                        }
                }
                writeq(data, base + total);
        }
        if (in_lcb)
                release_lcb_access(dd, 1);
        *f_pos += total;
        return total;
}
static const struct file_operations ui_file_ops = {
        .owner = THIS_MODULE,
        .llseek = ui_lseek,
        .read = ui_read,
        .write = ui_write,
        .open = ui_open,
        .release = ui_release,
};
#define UI_OFFSET 192   /* device minor offset for UI devices */
static int create_ui = 1;

static struct cdev wildcard_cdev;
static struct device *wildcard_device;

static atomic_t user_count = ATOMIC_INIT(0);
static void user_remove(struct hfi1_devdata *dd)
{
        if (atomic_dec_return(&user_count) == 0)
                hfi1_cdev_cleanup(&wildcard_cdev, &wildcard_device);

        hfi1_cdev_cleanup(&dd->user_cdev, &dd->user_device);
        hfi1_cdev_cleanup(&dd->ui_cdev, &dd->ui_device);
}
static int user_add(struct hfi1_devdata *dd)
{
        char name[10];
        int ret;

        if (atomic_inc_return(&user_count) == 1) {
                ret = hfi1_cdev_init(0, class_name(), &hfi1_file_ops,
                                     &wildcard_cdev, &wildcard_device);
                if (ret)
                        goto done;
        }

        snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit);
        ret = hfi1_cdev_init(dd->unit + 1, name, &hfi1_file_ops,
                             &dd->user_cdev, &dd->user_device);
        if (ret)
                goto done;

        if (create_ui) {
                snprintf(name, sizeof(name),
                         "%s_ui%d", class_name(), dd->unit);
                ret = hfi1_cdev_init(dd->unit + UI_OFFSET, name, &ui_file_ops,
                                     &dd->ui_cdev, &dd->ui_device);
                if (ret)
                        goto done;
        }

        return 0;
done:
        user_remove(dd);
        return ret;
}
/*
 * Create per-unit files in /dev
 */
int hfi1_device_create(struct hfi1_devdata *dd)
{
        int r, ret;

        r = user_add(dd);
        ret = hfi1_diag_add(dd);
        if (r && !ret)
                ret = r;
        return ret;
}
/*
 * Remove per-unit files in /dev
 * void; core kernel returns no errors for this stuff
 */
void hfi1_device_remove(struct hfi1_devdata *dd)
{
        user_remove(dd);
        hfi1_diag_remove(dd);
}