/*
 * This file implements the DMA operations for NVLink devices. The NPU
 * devices all point to the same iommu table as the parent PCI device.
 *
 * Copyright Alistair Popple, IBM Corporation 2015.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/slab.h>
#include <linux/mmu_notifier.h>
#include <linux/mmu_context.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/memblock.h>
#include <linux/iommu.h>

#include <asm/powernv.h>
#include <asm/iommu.h>
#include <asm/pnv-pci.h>
#include <asm/msi_bitmap.h>
#define npu_to_phb(x) container_of(x, struct pnv_phb, npu)

/*
 * Other types of TCE cache invalidation are not functional in the
 * hardware.
 */
static struct pci_dev *get_pci_dev(struct device_node *dn)
{
	return PCI_DN(dn)->pcidev;
}
/* Given an NPU device get the associated PCI device. */
struct pci_dev *pnv_pci_get_gpu_dev(struct pci_dev *npdev)
{
	struct device_node *dn;
	struct pci_dev *gpdev;

	if (WARN_ON(!npdev->dev.of_node))
		return NULL;

	/* Get associated PCI device */
	dn = of_parse_phandle(npdev->dev.of_node, "ibm,gpu", 0);
	if (!dn)
		return NULL;

	gpdev = get_pci_dev(dn);
	of_node_put(dn);

	return gpdev;
}
EXPORT_SYMBOL(pnv_pci_get_gpu_dev);
/* Given the real PCI device get a linked NPU device. */
struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index)
{
	struct device_node *dn;
	struct pci_dev *npdev;

	/* Not all PCI devices have device-tree nodes */
	if (!gpdev->dev.of_node)
		return NULL;

	/* Get associated PCI device */
	dn = of_parse_phandle(gpdev->dev.of_node, "ibm,npu", index);
	if (!dn)
		return NULL;

	npdev = get_pci_dev(dn);
	of_node_put(dn);

	return npdev;
}
EXPORT_SYMBOL(pnv_pci_get_npu_dev);
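/*
 * Illustrative only (not part of the original file): a minimal sketch of how
 * the two lookup helpers above compose. It assumes "gpdev" is the PCIe GPU
 * device; the function itself is hypothetical and unused.
 */
static void __maybe_unused pnv_npu_example_dump_links(struct pci_dev *gpdev)
{
	struct pci_dev *npdev;
	int i;

	/* pnv_pci_get_npu_dev() returns NULL once the link index runs out */
	for (i = 0; (npdev = pnv_pci_get_npu_dev(gpdev, i)) != NULL; i++)
		dev_info(&gpdev->dev, "NVLink %d emulated by %s\n",
			 i, pci_name(npdev));
}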
#define NPU_DMA_OP_UNSUPPORTED()	\
	dev_err_once(dev, "%s operation unsupported for NVLink devices\n", \
		__func__)
static void *dma_npu_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	NPU_DMA_OP_UNSUPPORTED();
	return NULL;
}

static void dma_npu_free(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle, unsigned long attrs)
{
	NPU_DMA_OP_UNSUPPORTED();
}

static dma_addr_t dma_npu_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction direction, unsigned long attrs)
{
	NPU_DMA_OP_UNSUPPORTED();
	return 0;
}

static int dma_npu_map_sg(struct device *dev, struct scatterlist *sglist,
		int nelems, enum dma_data_direction direction,
		unsigned long attrs)
{
	NPU_DMA_OP_UNSUPPORTED();
	return 0;
}

static int dma_npu_dma_supported(struct device *dev, u64 mask)
{
	NPU_DMA_OP_UNSUPPORTED();
	return 0;
}

static u64 dma_npu_get_required_mask(struct device *dev)
{
	NPU_DMA_OP_UNSUPPORTED();
	return 0;
}
static const struct dma_map_ops dma_npu_ops = {
	.map_page = dma_npu_map_page,
	.map_sg = dma_npu_map_sg,
	.alloc = dma_npu_alloc,
	.free = dma_npu_free,
	.dma_supported = dma_npu_dma_supported,
	.get_required_mask = dma_npu_get_required_mask,
};
/*
 * Returns the PE associated with the PCI device of the given
 * NPU. Returns the linked PCI device if pci_dev != NULL.
 */
static struct pnv_ioda_pe *get_gpu_pci_dev_and_pe(struct pnv_ioda_pe *npe,
						  struct pci_dev **gpdev)
{
	struct pnv_phb *phb;
	struct pci_controller *hose;
	struct pci_dev *pdev;
	struct pnv_ioda_pe *pe;
	struct pci_dn *pdn;

	pdev = pnv_pci_get_gpu_dev(npe->pdev);

	pdn = pci_get_pdn(pdev);
	if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
		return NULL;

	hose = pci_bus_to_host(pdev->bus);
	phb = hose->private_data;
	pe = &phb->ioda.pe_array[pdn->pe_number];

	if (gpdev)
		*gpdev = pdev;

	return pe;
}
long pnv_npu_set_window(struct pnv_ioda_pe *npe, int num,
		struct iommu_table *tbl)
{
	struct pnv_phb *phb = npe->phb;
	int64_t rc;
	const unsigned long size = tbl->it_indirect_levels ?
		tbl->it_level_size : tbl->it_size;
	const __u64 start_addr = tbl->it_offset << tbl->it_page_shift;
	const __u64 win_size = tbl->it_size << tbl->it_page_shift;

	pe_info(npe, "Setting up window %llx..%llx pg=%lx\n",
			start_addr, start_addr + win_size - 1,
			IOMMU_PAGE_SIZE(tbl));

	rc = opal_pci_map_pe_dma_window(phb->opal_id,
			npe->pe_number,
			npe->pe_number,
			tbl->it_indirect_levels + 1,
			__pa(tbl->it_base),
			size << 3,
			IOMMU_PAGE_SIZE(tbl));
	if (rc) {
		pe_err(npe, "Failed to configure TCE table, err %lld\n", rc);
		return rc;
	}
	pnv_pci_ioda2_tce_invalidate_entire(phb, false);

	/* Add the table to the list so its TCE cache will get invalidated */
	pnv_pci_link_table_and_group(phb->hose->node, num,
			tbl, &npe->table_group);

	return 0;
}
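/*
 * Worked example (added for illustration, not from the original source): with
 * a TCE table using it_page_shift = 16 (64K pages), it_offset = 0 and
 * it_size = 0x4000 entries, the window programmed above covers
 *
 *	start_addr = 0x0    << 16 = 0x0
 *	win_size   = 0x4000 << 16 = 0x40000000 (1GB)
 *
 * i.e. device DMA addresses 0x0..0x3fffffff are translated through this
 * TCE table.
 */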
long pnv_npu_unset_window(struct pnv_ioda_pe *npe, int num)
{
	struct pnv_phb *phb = npe->phb;
	int64_t rc;

	pe_info(npe, "Removing DMA window\n");

	rc = opal_pci_map_pe_dma_window(phb->opal_id, npe->pe_number,
			npe->pe_number,
			0/* levels */, 0/* table address */,
			0/* table size */, 0/* page size */);
	if (rc) {
		pe_err(npe, "Unmapping failed, ret = %lld\n", rc);
		return rc;
	}
	pnv_pci_ioda2_tce_invalidate_entire(phb, false);

	pnv_pci_unlink_table_and_group(npe->table_group.tables[num],
			&npe->table_group);

	return 0;
}
/*
 * Enables 32-bit DMA on the NPU.
 */
static void pnv_npu_dma_set_32(struct pnv_ioda_pe *npe)
{
	struct pci_dev *gpdev;
	struct pnv_ioda_pe *gpe;
	int64_t rc;

	/*
	 * Find the associated PCI devices and get the dma window
	 * information from there.
	 */
	if (!npe->pdev || !(npe->flags & PNV_IODA_PE_DEV))
		return;

	gpe = get_gpu_pci_dev_and_pe(npe, &gpdev);
	if (!gpe)
		return;

	rc = pnv_npu_set_window(npe, 0, gpe->table_group.tables[0]);

	/*
	 * We don't initialise npu_pe->tce32_table as we always use
	 * dma_npu_ops which are nops.
	 */
	set_dma_ops(&npe->pdev->dev, &dma_npu_ops);
}
/*
 * Enables bypass mode on the NPU. The NPU only supports one
 * window per link, so bypass needs to be explicitly enabled or
 * disabled. Unlike on a PHB3, bypass and non-bypass modes can't be
 * active at the same time.
 */
static int pnv_npu_dma_set_bypass(struct pnv_ioda_pe *npe)
{
	struct pnv_phb *phb = npe->phb;
	int64_t rc = 0;
	phys_addr_t top = memblock_end_of_DRAM();

	if (phb->type != PNV_PHB_NPU || !npe->pdev)
		return -EINVAL;

	rc = pnv_npu_unset_window(npe, 0);
	if (rc != OPAL_SUCCESS)
		return rc;

	/* Enable the bypass window */

	top = roundup_pow_of_two(top);
	dev_info(&npe->pdev->dev, "Enabling bypass for PE %x\n",
			npe->pe_number);
	rc = opal_pci_map_pe_dma_window_real(phb->opal_id,
			npe->pe_number, npe->pe_number,
			0 /* bypass base */, top);

	if (rc == OPAL_SUCCESS)
		pnv_pci_ioda2_tce_invalidate_entire(phb, false);

	return rc;
}
void pnv_npu_try_dma_set_bypass(struct pci_dev *gpdev, bool bypass)
{
	int i;
	struct pnv_phb *phb;
	struct pci_dn *pdn;
	struct pnv_ioda_pe *npe;
	struct pci_dev *npdev;

	for (i = 0; ; ++i) {
		npdev = pnv_pci_get_npu_dev(gpdev, i);

		if (!npdev)
			break;

		pdn = pci_get_pdn(npdev);
		if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
			return;

		phb = pci_bus_to_host(npdev->bus)->private_data;

		/* We only do bypass if it's enabled on the linked device */
		npe = &phb->ioda.pe_array[pdn->pe_number];

		if (bypass) {
			dev_info(&npdev->dev,
					"Using 64-bit DMA iommu bypass\n");
			pnv_npu_dma_set_bypass(npe);
		} else {
			dev_info(&npdev->dev, "Using 32-bit DMA via iommu\n");
			pnv_npu_dma_set_32(npe);
		}
	}
}
/* Switch ownership from platform code to external user (e.g. VFIO) */
void pnv_npu_take_ownership(struct pnv_ioda_pe *npe)
{
	struct pnv_phb *phb = npe->phb;
	int64_t rc;

	/*
	 * Note: NPU has just a single TVE in the hardware which means that
	 * while used by the kernel, it can have either a 32-bit window or
	 * DMA bypass but never both. So we deconfigure the 32-bit window
	 * only if it was enabled at the moment of ownership change.
	 */
	if (npe->table_group.tables[0]) {
		pnv_npu_unset_window(npe, 0);
		return;
	}

	/* Disable bypass */
	rc = opal_pci_map_pe_dma_window_real(phb->opal_id,
			npe->pe_number, npe->pe_number,
			0 /* bypass base */, 0);
	if (rc) {
		pe_err(npe, "Failed to disable bypass, err %lld\n", rc);
		return;
	}
	pnv_pci_ioda2_tce_invalidate_entire(npe->phb, false);
}
struct pnv_ioda_pe *pnv_pci_npu_setup_iommu(struct pnv_ioda_pe *npe)
{
	struct pnv_phb *phb = npe->phb;
	struct pci_bus *pbus = phb->hose->bus;
	struct pci_dev *npdev, *gpdev = NULL, *gptmp;
	struct pnv_ioda_pe *gpe = get_gpu_pci_dev_and_pe(npe, &gpdev);

	if (!gpe)
		return NULL;

	list_for_each_entry(npdev, &pbus->devices, bus_list) {
		gptmp = pnv_pci_get_gpu_dev(npdev);

		if (gptmp != gpdev)
			continue;

		pe_info(gpe, "Attached NPU %s\n", dev_name(&npdev->dev));
		iommu_group_add_device(gpe->table_group.group, &npdev->dev);
	}

	return gpe;
}
/* Maximum number of nvlinks per npu */
#define NV_MAX_LINKS 6

/* Maximum index of npu2 hosts in the system. Always < NV_MAX_NPUS */
static int max_npu2_index;

struct npu_context {
	struct mm_struct *mm;
	struct pci_dev *npdev[NV_MAX_NPUS][NV_MAX_LINKS];
	struct mmu_notifier mn;
	struct kref kref;

	/* Callback to stop translation requests on a given GPU */
	struct npu_context *(*release_cb)(struct npu_context *, void *);

	/*
	 * Private pointer passed to the above callback for usage by
	 * the device driver.
	 */
	void *priv;
};
/*
 * Find a free MMIO ATSD register and mark it in use. Return -ENOSPC
 * if none are available.
 */
static int get_mmio_atsd_reg(struct npu *npu)
{
	int i;

	for (i = 0; i < npu->mmio_atsd_count; i++) {
		if (!test_and_set_bit(i, &npu->mmio_atsd_usage))
			return i;
	}

	return -ENOSPC;
}
static void put_mmio_atsd_reg(struct npu *npu, int reg)
{
	clear_bit(reg, &npu->mmio_atsd_usage);
}
/* MMIO ATSD register offsets */
#define XTS_ATSD_AVA 1
#define XTS_ATSD_STAT 2
static int mmio_launch_invalidate(struct npu *npu, unsigned long launch,
			unsigned long va)
{
	int mmio_atsd_reg;

	do {
		mmio_atsd_reg = get_mmio_atsd_reg(npu);
		cpu_relax();
	} while (mmio_atsd_reg < 0);

	__raw_writeq(cpu_to_be64(va),
		npu->mmio_atsd_regs[mmio_atsd_reg] + XTS_ATSD_AVA);
	eieio();
	__raw_writeq(cpu_to_be64(launch), npu->mmio_atsd_regs[mmio_atsd_reg]);

	return mmio_atsd_reg;
}
static int mmio_invalidate_pid(struct npu *npu, unsigned long pid)
{
	unsigned long launch;

	/* IS set to invalidate matching PID */
	launch = PPC_BIT(12);

	/* PRS set to process-scoped */
	launch |= PPC_BIT(13);

	/* AP */
	launch |= (u64) mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17);

	/* PID */
	launch |= pid << PPC_BITLSHIFT(38);

	/* Invalidating the entire process doesn't use a va */
	return mmio_launch_invalidate(npu, launch, 0);
}
static int mmio_invalidate_va(struct npu *npu, unsigned long va,
			unsigned long pid)
{
	unsigned long launch;

	/* IS set to invalidate target VA */
	launch = 0;

	/* PRS set to process scoped */
	launch |= PPC_BIT(13);

	/* AP */
	launch |= (u64) mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17);

	/* PID */
	launch |= pid << PPC_BITLSHIFT(38);

	return mmio_launch_invalidate(npu, launch, va);
}
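/*
 * Added summary (not from the original source) of how the launch word is
 * assembled above, using IBM bit numbering where bit 0 is the MSB of the
 * 64-bit doubleword:
 *
 *	PPC_BIT(12)               - IS:  PID-scoped (set) vs. VA-scoped (clear)
 *	PPC_BIT(13)               - PRS: process scoped
 *	ap  << PPC_BITLSHIFT(17)  - AP:  actual page size, field ends at bit 17
 *	pid << PPC_BITLSHIFT(38)  - PID: PID to match, field ends at bit 38
 *
 * PPC_BITLSHIFT(n) converts an IBM bit number into an ordinary left-shift
 * count, so each multi-bit field is positioned by its least significant
 * (highest numbered) bit.
 */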
#define mn_to_npu_context(x) container_of(x, struct npu_context, mn)

/*
 * Invalidate either a single address or an entire PID depending on
 * the value of va.
 */
static void mmio_invalidate(struct npu_context *npu_context, int va,
			unsigned long address)
{
	int i, j, reg;
	struct npu *npu;
	struct pnv_phb *nphb;
	struct pci_dev *npdev;
	struct {
		struct npu *npu;
		int reg;
	} mmio_atsd_reg[NV_MAX_NPUS];
	unsigned long pid = npu_context->mm->context.id;
	/*
	 * Loop over all the NPUs this process is active on and launch
	 * the ATSDs.
	 */
	for (i = 0; i <= max_npu2_index; i++) {
		mmio_atsd_reg[i].reg = -1;
		for (j = 0; j < NV_MAX_LINKS; j++) {
			npdev = npu_context->npdev[i][j];
			if (!npdev)
				continue;

			nphb = pci_bus_to_host(npdev->bus)->private_data;
			npu = &nphb->npu;
			mmio_atsd_reg[i].npu = npu;

			if (va)
				mmio_atsd_reg[i].reg =
					mmio_invalidate_va(npu, address, pid);
			else
				mmio_atsd_reg[i].reg =
					mmio_invalidate_pid(npu, pid);

			/*
			 * The NPU hardware forwards the shootdown to all GPUs
			 * so we only have to launch one shootdown per NPU.
			 */
			break;
		}
	}

	/*
	 * Unfortunately the nest mmu does not support flushing specific
	 * addresses so we have to flush the whole mm.
	 */
	flush_tlb_mm(npu_context->mm);
	/* Wait for all invalidations to complete */
	for (i = 0; i <= max_npu2_index; i++) {
		if (mmio_atsd_reg[i].reg < 0)
			continue;

		/* Wait for completion */
		npu = mmio_atsd_reg[i].npu;
		reg = mmio_atsd_reg[i].reg;
		while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
			cpu_relax();
		put_mmio_atsd_reg(npu, reg);
	}
}
static void pnv_npu2_mn_release(struct mmu_notifier *mn,
				struct mm_struct *mm)
{
	struct npu_context *npu_context = mn_to_npu_context(mn);

	/* Call into the device driver to stop requests to the NMMU */
	if (npu_context->release_cb)
		npu_context->release_cb(npu_context, npu_context->priv);

	/*
	 * There should be no more translation requests for this PID, but we
	 * need to ensure any entries for it are removed from the TLB.
	 */
	mmio_invalidate(npu_context, 0, 0);
}
static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long address,
				pte_t pte)
{
	struct npu_context *npu_context = mn_to_npu_context(mn);

	mmio_invalidate(npu_context, 1, address);
}
static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address)
{
	struct npu_context *npu_context = mn_to_npu_context(mn);

	mmio_invalidate(npu_context, 1, address);
}
static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	struct npu_context *npu_context = mn_to_npu_context(mn);
	unsigned long address;

	for (address = start; address <= end; address += PAGE_SIZE)
		mmio_invalidate(npu_context, 1, address);
}
static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
	.release = pnv_npu2_mn_release,
	.change_pte = pnv_npu2_mn_change_pte,
	.invalidate_page = pnv_npu2_mn_invalidate_page,
	.invalidate_range = pnv_npu2_mn_invalidate_range,
};
/*
 * Call into OPAL to setup the nmmu context for the current task in
 * the NPU. This must be called to setup the context tables before the
 * GPU issues ATRs. pdev should be a pointer to the PCIe GPU device.
 *
 * A release callback should be registered to allow a device driver to
 * be notified that it should not launch any new translation requests
 * as the final TLB invalidate is about to occur.
 *
 * Returns an error if no contexts are currently available, or an
 * npu_context which should be passed to pnv_npu2_handle_fault().
 *
 * mmap_sem must be held in write mode.
 */
struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
			unsigned long flags,
			struct npu_context *(*cb)(struct npu_context *, void *),
			void *priv)
{
	int rc;
	u32 nvlink_index;
	struct device_node *nvlink_dn;
	struct mm_struct *mm = current->mm;
	struct pnv_phb *nphb;
	struct npu *npu;
	struct npu_context *npu_context;

	/*
	 * At present we don't support GPUs connected to multiple NPUs and I'm
	 * not sure the hardware does either.
	 */
	struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);

	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return ERR_PTR(-ENODEV);

	if (!npdev)
		/* No nvlink associated with this GPU device */
		return ERR_PTR(-ENODEV);

	if (!mm || mm->context.id == 0) {
		/* kernel thread contexts are not supported */
		return ERR_PTR(-EINVAL);
	}

	nphb = pci_bus_to_host(npdev->bus)->private_data;
	npu = &nphb->npu;

	/*
	 * Setup the NPU context table for a particular GPU. These need to be
	 * per-GPU as we need the tables to filter ATSDs when there are no
	 * active contexts on a particular GPU.
	 */
	rc = opal_npu_init_context(nphb->opal_id, mm->context.id, flags,
				PCI_DEVID(gpdev->bus->number, gpdev->devfn));
	if (rc < 0)
		return ERR_PTR(-ENOSPC);

	/*
	 * We store the npu pci device so we can more easily get at the
	 * associated npus.
	 */
	npu_context = mm->context.npu_context;
	if (!npu_context) {
		npu_context = kzalloc(sizeof(struct npu_context), GFP_KERNEL);
		if (!npu_context)
			return ERR_PTR(-ENOMEM);

		mm->context.npu_context = npu_context;
		npu_context->mm = mm;
		npu_context->mn.ops = &nv_nmmu_notifier_ops;
		__mmu_notifier_register(&npu_context->mn, mm);
		kref_init(&npu_context->kref);
	} else {
		kref_get(&npu_context->kref);
	}

	npu_context->release_cb = cb;
	npu_context->priv = priv;
	nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
	if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
							&nvlink_index)))
		return ERR_PTR(-ENODEV);
	npu_context->npdev[npu->index][nvlink_index] = npdev;

	return npu_context;
}
EXPORT_SYMBOL(pnv_npu2_init_context);
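/*
 * Hedged usage sketch (added for illustration, not part of the original
 * file): how a GPU driver might establish an ATS context for the current
 * task. The callback, the zero flags value and the priv cookie are
 * hypothetical; mmap_sem is taken in write mode as required above.
 */
static struct npu_context *pnv_npu2_example_release(struct npu_context *ctx,
						    void *priv)
{
	/* A real driver would stop issuing new translation requests here */
	return ctx;
}

static int __maybe_unused pnv_npu2_example_attach(struct pci_dev *gpdev,
						  void *priv)
{
	struct npu_context *ctx;

	down_write(&current->mm->mmap_sem);
	ctx = pnv_npu2_init_context(gpdev, 0 /* flags: assumed */,
				pnv_npu2_example_release, priv);
	up_write(&current->mm->mmap_sem);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/* Balanced later by pnv_npu2_destroy_context(ctx, gpdev) */
	return 0;
}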
static void pnv_npu2_release_context(struct kref *kref)
{
	struct npu_context *npu_context =
		container_of(kref, struct npu_context, kref);

	npu_context->mm->context.npu_context = NULL;
	mmu_notifier_unregister(&npu_context->mn,
				npu_context->mm);

	kfree(npu_context);
}
void pnv_npu2_destroy_context(struct npu_context *npu_context,
			struct pci_dev *gpdev)
{
	struct pnv_phb *nphb;
	struct npu *npu;
	struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
	struct device_node *nvlink_dn;
	u32 nvlink_index;

	if (WARN_ON(!npdev))
		return;

	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return;

	nphb = pci_bus_to_host(npdev->bus)->private_data;
	npu = &nphb->npu;
	nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
	if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
							&nvlink_index)))
		return;
	npu_context->npdev[npu->index][nvlink_index] = NULL;
	opal_npu_destroy_context(nphb->opal_id, npu_context->mm->context.id,
			PCI_DEVID(gpdev->bus->number, gpdev->devfn));
	kref_put(&npu_context->kref, pnv_npu2_release_context);
}
EXPORT_SYMBOL(pnv_npu2_destroy_context);
/*
 * Assumes mmap_sem is held for the context's associated mm.
 */
int pnv_npu2_handle_fault(struct npu_context *context, uintptr_t *ea,
			unsigned long *flags, unsigned long *status, int count)
{
	u64 rc = 0, result = 0;
	int i, is_write;
	struct page *page[1];

	/* mmap_sem should be held so the struct mm must be present */
	struct mm_struct *mm = context->mm;

	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return -ENODEV;

	WARN_ON(!rwsem_is_locked(&mm->mmap_sem));

	for (i = 0; i < count; i++) {
		is_write = flags[i] & NPU2_WRITE;
		rc = get_user_pages_remote(NULL, mm, ea[i], 1,
					is_write ? FOLL_WRITE : 0,
					page, NULL, NULL);

		/*
		 * To support virtualised environments we will have to do an
		 * access to the page to ensure it gets faulted into the
		 * hypervisor. For the moment virtualisation is not supported
		 * in other areas so leave the access out.
		 */
		if (rc != 1) {
			status[i] = rc;
			result = -EFAULT;
			continue;
		}

		status[i] = 0;
		put_page(page[0]);
	}

	return result;
}
EXPORT_SYMBOL(pnv_npu2_handle_fault);
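/*
 * Hedged usage sketch (added for illustration, not part of the original
 * file): feeding a batch of faulting addresses reported by the device into
 * pnv_npu2_handle_fault(). The caller and array sizes are hypothetical; the
 * arrays follow the ea/flags/status convention of the function above, and
 * mmap_sem is taken here in read mode since the function only asserts that
 * it is locked.
 */
static int __maybe_unused pnv_npu2_example_service_faults(struct npu_context *ctx,
		struct mm_struct *mm, uintptr_t *ea, unsigned long *flags,
		unsigned long *status, int count)
{
	int ret;

	down_read(&mm->mmap_sem);
	ret = pnv_npu2_handle_fault(ctx, ea, flags, status, count);
	up_read(&mm->mmap_sem);

	return ret;
}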
int pnv_npu2_init(struct pnv_phb *phb)
{
	unsigned int i;
	u64 mmio_atsd;
	struct device_node *dn;
	struct pci_dev *gpdev;
	static int npu_index;
	int64_t rc;

	for_each_child_of_node(phb->hose->dn, dn) {
		gpdev = pnv_pci_get_gpu_dev(get_pci_dev(dn));
		if (gpdev) {
			rc = opal_npu_map_lpar(phb->opal_id,
				PCI_DEVID(gpdev->bus->number, gpdev->devfn),
				0, 0);
			if (rc)
				dev_err(&gpdev->dev,
					"Error %lld mapping device to LPAR\n",
					rc);
		}
	}

	for (i = 0; !of_property_read_u64_index(phb->hose->dn, "ibm,mmio-atsd",
							i, &mmio_atsd); i++)
		phb->npu.mmio_atsd_regs[i] = ioremap(mmio_atsd, 32);

	pr_info("NPU%lld: Found %d MMIO ATSD registers", phb->opal_id, i);
	phb->npu.mmio_atsd_count = i;
	phb->npu.mmio_atsd_usage = 0;

	if (WARN_ON(npu_index >= NV_MAX_NPUS))
		return -ENOSPC;
	max_npu2_index = npu_index;
	phb->npu.index = npu_index;