/*
 * Copyright (C) 2010-2012 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/mmu_notifier.h>
#include <linux/amd-iommu.h>
#include <linux/mm_types.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/iommu.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/gfp.h>

#include "amd_iommu_types.h"
#include "amd_iommu_proto.h"

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Joerg Roedel <joerg.roedel@amd.com>");

#define MAX_DEVICES		0x10000
#define PRI_QUEUE_SIZE		512

struct pri_queue {
	atomic_t inflight;
	bool finish;
	int status;
};

struct pasid_state {
	struct list_head list;			/* For global state-list */
	atomic_t count;				/* Reference count */
	unsigned mmu_notifier_count;		/* Counting nested mmu_notifier
						   calls */
	struct task_struct *task;		/* Task bound to this PASID */
	struct mm_struct *mm;			/* mm_struct for the faults */
	struct mmu_notifier mn;			/* mmu_notifier handle */
	struct pri_queue pri[PRI_QUEUE_SIZE];	/* PRI tag states */
	struct device_state *device_state;	/* Link to our device_state */
	int pasid;				/* PASID index */
	spinlock_t lock;			/* Protect pri_queues and
						   mmu_notifier_count */
	wait_queue_head_t wq;			/* To wait for count == 0 */
};

struct device_state {
	struct list_head list;
	u16 devid;
	atomic_t count;
	struct pci_dev *pdev;
	struct pasid_state **states;
	struct iommu_domain *domain;
	int pasid_levels;
	int max_pasids;
	amd_iommu_invalid_ppr_cb inv_ppr_cb;
	amd_iommu_invalidate_ctx inv_ctx_cb;
	spinlock_t lock;
	wait_queue_head_t wq;
};

struct fault {
	struct work_struct work;
	struct device_state *dev_state;
	struct pasid_state *state;
	u64 address;
	u16 pasid;
	u16 tag;
	u16 finish;
	u16 flags;
};

static LIST_HEAD(state_list);
static spinlock_t state_lock;

static struct workqueue_struct *iommu_wq;

/*
 * Empty page table - Used between
 * mmu_notifier_invalidate_range_start and
 * mmu_notifier_invalidate_range_end
 */
static u64 *empty_page_table;

static void free_pasid_states(struct device_state *dev_state);
static void unbind_pasid(struct device_state *dev_state, int pasid);

static u16 device_id(struct pci_dev *pdev)
{
	u16 devid;

	devid = pdev->bus->number;
	devid = (devid << 8) | pdev->devfn;

	return devid;
}

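/*
 * Illustrative example (not from the original source): for a device at
 * PCI address 01:02.3, bus->number is 0x01 and devfn packs device 2,
 * function 3 as 0x13, so device_id() returns (0x01 << 8) | 0x13 = 0x0113.
 */
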
static struct device_state *__get_device_state(u16 devid)
{
	struct device_state *dev_state;

	list_for_each_entry(dev_state, &state_list, list) {
		if (dev_state->devid == devid)
			return dev_state;
	}

	return NULL;
}

static struct device_state *get_device_state(u16 devid)
{
	struct device_state *dev_state;
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	dev_state = __get_device_state(devid);
	if (dev_state != NULL)
		atomic_inc(&dev_state->count);
	spin_unlock_irqrestore(&state_lock, flags);

	return dev_state;
}

static void free_device_state(struct device_state *dev_state)
{
	/*
	 * First detach device from domain - No more PRI requests will arrive
	 * from that device after it is unbound from the IOMMUv2 domain.
	 */
	iommu_detach_device(dev_state->domain, &dev_state->pdev->dev);

	/* Everything is down now, free the IOMMUv2 domain */
	iommu_domain_free(dev_state->domain);

	/* Finally get rid of the device-state */
	kfree(dev_state);
}

static void put_device_state(struct device_state *dev_state)
{
	if (atomic_dec_and_test(&dev_state->count))
		wake_up(&dev_state->wq);
}

static void put_device_state_wait(struct device_state *dev_state)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&dev_state->wq, &wait, TASK_UNINTERRUPTIBLE);
	if (!atomic_dec_and_test(&dev_state->count))
		schedule();
	finish_wait(&dev_state->wq, &wait);

	free_device_state(dev_state);
}

/* Must be called under dev_state->lock */
static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
						  int pasid, bool alloc)
{
	struct pasid_state **root, **ptr;
	int level, index;

	level = dev_state->pasid_levels;
	root  = dev_state->states;

	while (true) {

		index = (pasid >> (9 * level)) & 0x1ff;
		ptr   = &root[index];

		if (level == 0)
			break;

		if (*ptr == NULL) {
			if (!alloc)
				return NULL;

			*ptr = (void *)get_zeroed_page(GFP_ATOMIC);
			if (*ptr == NULL)
				return NULL;
		}

		root   = (struct pasid_state **)*ptr;
		level -= 1;
	}

	return ptr;
}

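/*
 * Illustrative walk (not from the original source): with pasid_levels == 1
 * and pasid == 0x2345, the first iteration picks root index
 * (0x2345 >> 9) & 0x1ff = 0x11 and descends into that second-level page;
 * the final iteration returns the slot at index 0x2345 & 0x1ff = 0x145.
 * Each table level holds 512 (2^9) pointers, one zeroed page per table.
 */
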
static int set_pasid_state(struct device_state *dev_state,
			   struct pasid_state *pasid_state,
			   int pasid)
{
	struct pasid_state **ptr;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, true);

	ret = -ENOMEM;
	if (ptr == NULL)
		goto out_unlock;

	ret = -ENOMEM;
	if (*ptr != NULL)
		goto out_unlock;

	*ptr = pasid_state;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);

	return ret;
}

static void clear_pasid_state(struct device_state *dev_state, int pasid)
{
	struct pasid_state **ptr;
	unsigned long flags;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, true);

	if (ptr == NULL)
		goto out_unlock;

	*ptr = NULL;

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);
}

static struct pasid_state *get_pasid_state(struct device_state *dev_state,
					   int pasid)
{
	struct pasid_state **ptr, *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, false);

	if (ptr == NULL)
		goto out_unlock;

	ret = *ptr;
	if (ret)
		atomic_inc(&ret->count);

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);

	return ret;
}

static void free_pasid_state(struct pasid_state *pasid_state)
{
	kfree(pasid_state);
}

static void put_pasid_state(struct pasid_state *pasid_state)
{
	if (atomic_dec_and_test(&pasid_state->count)) {
		put_device_state(pasid_state->device_state);
		wake_up(&pasid_state->wq);
	}
}

static void put_pasid_state_wait(struct pasid_state *pasid_state)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&pasid_state->wq, &wait, TASK_UNINTERRUPTIBLE);

	if (atomic_dec_and_test(&pasid_state->count))
		put_device_state(pasid_state->device_state);
	else
		schedule();

	finish_wait(&pasid_state->wq, &wait);
	mmput(pasid_state->mm);
	free_pasid_state(pasid_state);
}

static void __unbind_pasid(struct pasid_state *pasid_state)
{
	struct iommu_domain *domain;

	domain = pasid_state->device_state->domain;

	amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid);
	clear_pasid_state(pasid_state->device_state, pasid_state->pasid);

	/* Make sure no more pending faults are in the queue */
	flush_workqueue(iommu_wq);

	mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

	put_pasid_state(pasid_state); /* Reference taken in bind() function */
}

static void unbind_pasid(struct device_state *dev_state, int pasid)
{
	struct pasid_state *pasid_state;

	pasid_state = get_pasid_state(dev_state, pasid);
	if (pasid_state == NULL)
		return;

	__unbind_pasid(pasid_state);
	put_pasid_state_wait(pasid_state); /* Reference taken in this function */
}

static void free_pasid_states_level1(struct pasid_state **tbl)
{
	int i;

	for (i = 0; i < 512; ++i) {
		if (tbl[i] == NULL)
			continue;

		free_page((unsigned long)tbl[i]);
	}
}

static void free_pasid_states_level2(struct pasid_state **tbl)
{
	struct pasid_state **ptr;
	int i;

	for (i = 0; i < 512; ++i) {
		if (tbl[i] == NULL)
			continue;

		ptr = (struct pasid_state **)tbl[i];
		free_pasid_states_level1(ptr);
	}
}

static void free_pasid_states(struct device_state *dev_state)
{
	struct pasid_state *pasid_state;
	int i;

	for (i = 0; i < dev_state->max_pasids; ++i) {
		pasid_state = get_pasid_state(dev_state, i);
		if (pasid_state == NULL)
			continue;

		put_pasid_state(pasid_state);

		/*
		 * This will call the mn_release function and
		 * unbind the PASID
		 */
		mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
	}

	if (dev_state->pasid_levels == 2)
		free_pasid_states_level2(dev_state->states);
	else if (dev_state->pasid_levels == 1)
		free_pasid_states_level1(dev_state->states);
	else if (dev_state->pasid_levels != 0)
		BUG();

	free_page((unsigned long)dev_state->states);
}

static struct pasid_state *mn_to_state(struct mmu_notifier *mn)
{
	return container_of(mn, struct pasid_state, mn);
}

static void __mn_flush_page(struct mmu_notifier *mn,
			    unsigned long address)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;

	pasid_state = mn_to_state(mn);
	dev_state   = pasid_state->device_state;

	amd_iommu_flush_page(dev_state->domain, pasid_state->pasid, address);
}

static int mn_clear_flush_young(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long address)
{
	__mn_flush_page(mn, address);

	return 0;
}

static void mn_change_pte(struct mmu_notifier *mn,
			  struct mm_struct *mm,
			  unsigned long address,
			  pte_t pte)
{
	__mn_flush_page(mn, address);
}

static void mn_invalidate_page(struct mmu_notifier *mn,
			       struct mm_struct *mm,
			       unsigned long address)
{
	__mn_flush_page(mn, address);
}

static void mn_invalidate_range_start(struct mmu_notifier *mn,
				      struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	unsigned long flags;

	pasid_state = mn_to_state(mn);
	dev_state   = pasid_state->device_state;

	spin_lock_irqsave(&pasid_state->lock, flags);
	if (pasid_state->mmu_notifier_count == 0) {
		amd_iommu_domain_set_gcr3(dev_state->domain,
					  pasid_state->pasid,
					  __pa(empty_page_table));
	}
	pasid_state->mmu_notifier_count += 1;
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}

static void mn_invalidate_range_end(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    unsigned long start, unsigned long end)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	unsigned long flags;

	pasid_state = mn_to_state(mn);
	dev_state   = pasid_state->device_state;

	spin_lock_irqsave(&pasid_state->lock, flags);
	pasid_state->mmu_notifier_count -= 1;
	if (pasid_state->mmu_notifier_count == 0) {
		amd_iommu_domain_set_gcr3(dev_state->domain,
					  pasid_state->pasid,
					  __pa(pasid_state->mm->pgd));
	}
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}

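/*
 * Added note: while an invalidate_range_start/end pair is in flight, the
 * PASID's GCR3 points at empty_page_table, an all-zero page. Any device
 * access in that window misses and raises a (retriable) PPR fault instead
 * of using a translation that is about to become stale; range_end restores
 * the real mm page table once the nesting count drops back to zero.
 */
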
static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;

	might_sleep();

	pasid_state = mn_to_state(mn);
	dev_state   = pasid_state->device_state;

	if (pasid_state->device_state->inv_ctx_cb)
		dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);

	unbind_pasid(dev_state, pasid_state->pasid);
}

static struct mmu_notifier_ops iommu_mn = {
	.release		= mn_release,
	.clear_flush_young	= mn_clear_flush_young,
	.change_pte		= mn_change_pte,
	.invalidate_page	= mn_invalidate_page,
	.invalidate_range_start	= mn_invalidate_range_start,
	.invalidate_range_end	= mn_invalidate_range_end,
};

static void set_pri_tag_status(struct pasid_state *pasid_state,
			       u16 tag, int status)
{
	unsigned long flags;

	spin_lock_irqsave(&pasid_state->lock, flags);
	pasid_state->pri[tag].status = status;
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}

static void finish_pri_tag(struct device_state *dev_state,
			   struct pasid_state *pasid_state,
			   u16 tag)
{
	unsigned long flags;

	spin_lock_irqsave(&pasid_state->lock, flags);
	if (atomic_dec_and_test(&pasid_state->pri[tag].inflight) &&
	    pasid_state->pri[tag].finish) {
		amd_iommu_complete_ppr(dev_state->pdev, pasid_state->pasid,
				       pasid_state->pri[tag].status, tag);
		pasid_state->pri[tag].finish = false;
		pasid_state->pri[tag].status = PPR_SUCCESS;
	}
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}

static void do_fault(struct work_struct *work)
{
	struct fault *fault = container_of(work, struct fault, work);
	int npages, write;
	struct page *page;

	write = !!(fault->flags & PPR_FAULT_WRITE);

	down_read(&fault->state->mm->mmap_sem);
	npages = get_user_pages(fault->state->task, fault->state->mm,
				fault->address, 1, write, 0, &page, NULL);
	up_read(&fault->state->mm->mmap_sem);

	if (npages == 1) {
		put_page(page);
	} else if (fault->dev_state->inv_ppr_cb) {
		int status;

		status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
						      fault->pasid,
						      fault->address,
						      fault->flags);
		switch (status) {
		case AMD_IOMMU_INV_PRI_RSP_SUCCESS:
			set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);
			break;
		case AMD_IOMMU_INV_PRI_RSP_INVALID:
			set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
			break;
		case AMD_IOMMU_INV_PRI_RSP_FAIL:
			set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE);
			break;
		default:
			BUG();
		}
	} else {
		set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
	}

	finish_pri_tag(fault->dev_state, fault->state, fault->tag);

	put_pasid_state(fault->state);

	kfree(fault);
}

static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
{
	struct amd_iommu_fault *iommu_fault;
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	unsigned long flags;
	struct fault *fault;
	bool finish;
	u16 tag;
	int ret;

	iommu_fault = data;
	tag         = iommu_fault->tag & 0x1ff;
	finish      = (iommu_fault->tag >> 9) & 1;

	ret = NOTIFY_DONE;
	dev_state = get_device_state(iommu_fault->device_id);
	if (dev_state == NULL)
		goto out;

	pasid_state = get_pasid_state(dev_state, iommu_fault->pasid);
	if (pasid_state == NULL) {
		/* We know the device but not the PASID -> send INVALID */
		amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid,
				       PPR_INVALID, tag);
		goto out_drop_state;
	}

	spin_lock_irqsave(&pasid_state->lock, flags);
	atomic_inc(&pasid_state->pri[tag].inflight);
	if (finish)
		pasid_state->pri[tag].finish = true;
	spin_unlock_irqrestore(&pasid_state->lock, flags);

	fault = kzalloc(sizeof(*fault), GFP_ATOMIC);
	if (fault == NULL) {
		/* We are OOM - send success and let the device re-fault */
		finish_pri_tag(dev_state, pasid_state, tag);
		goto out_drop_state;
	}

	fault->dev_state = dev_state;
	fault->address   = iommu_fault->address;
	fault->state     = pasid_state;
	fault->pasid     = iommu_fault->pasid;
	fault->tag       = tag;
	fault->finish    = finish;
	fault->flags     = iommu_fault->flags;
	INIT_WORK(&fault->work, do_fault);

	queue_work(iommu_wq, &fault->work);

	ret = NOTIFY_OK;

out_drop_state:
	put_device_state(dev_state);

out:
	return ret;
}

static struct notifier_block ppr_nb = {
	.notifier_call = ppr_notifier,
};

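/*
 * Added note: in the data handed to the PPR notifier, the low nine bits of
 * iommu_fault->tag carry the PRI tag and bit 9 carries the "finish" flag.
 * For example, a raw tag value of 0x205 means PRI tag 0x5 with a completion
 * response required from the driver.
 */
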
int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
			 struct task_struct *task)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	u16 devid;
	int ret;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid     = device_id(pdev);
	dev_state = get_device_state(devid);

	if (dev_state == NULL)
		return -EINVAL;

	ret = -EINVAL;
	if (pasid < 0 || pasid >= dev_state->max_pasids)
		goto out;

	ret = -ENOMEM;
	pasid_state = kzalloc(sizeof(*pasid_state), GFP_KERNEL);
	if (pasid_state == NULL)
		goto out;

	atomic_set(&pasid_state->count, 1);
	init_waitqueue_head(&pasid_state->wq);
	spin_lock_init(&pasid_state->lock);

	pasid_state->task         = task;
	pasid_state->mm           = get_task_mm(task);
	pasid_state->device_state = dev_state;
	pasid_state->pasid        = pasid;
	pasid_state->mn.ops       = &iommu_mn;

	if (pasid_state->mm == NULL)
		goto out_free;

	mmu_notifier_register(&pasid_state->mn, pasid_state->mm);

	ret = set_pasid_state(dev_state, pasid_state, pasid);
	if (ret)
		goto out_unregister;

	ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid,
					__pa(pasid_state->mm->pgd));
	if (ret)
		goto out_clear_state;

	return 0;

out_clear_state:
	clear_pasid_state(dev_state, pasid);

out_unregister:
	mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

out_free:
	free_pasid_state(pasid_state);

out:
	put_device_state(dev_state);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_bind_pasid);

void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	u16 devid;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return;

	devid = device_id(pdev);
	dev_state = get_device_state(devid);
	if (dev_state == NULL)
		return;

	if (pasid < 0 || pasid >= dev_state->max_pasids)
		goto out;

	pasid_state = get_pasid_state(dev_state, pasid);
	if (pasid_state == NULL)
		goto out;

	/*
	 * Drop reference taken here. We are safe because we still hold
	 * the reference taken in the amd_iommu_bind_pasid function.
	 */
	put_pasid_state(pasid_state);

	/* This will call the mn_release function and unbind the PASID */
	mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

out:
	put_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_unbind_pasid);

int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
{
	struct device_state *dev_state;
	unsigned long flags;
	int ret, tmp;
	u16 devid;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	if (pasids <= 0 || pasids > (PASID_MASK + 1))
		return -EINVAL;

	devid = device_id(pdev);

	dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
	if (dev_state == NULL)
		return -ENOMEM;

	spin_lock_init(&dev_state->lock);
	init_waitqueue_head(&dev_state->wq);
	dev_state->pdev  = pdev;
	dev_state->devid = devid;

	tmp = pasids;
	for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
		dev_state->pasid_levels += 1;
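	/*
	 * Illustrative example (not from the original source): for
	 * pasids == 65536 the loop runs once (65535 has bits set above the
	 * low nine), giving pasid_levels == 1: a root page of 512 pointers
	 * to leaf pages of 512 pasid_state pointers each. Up to 512 PASIDs
	 * fit in a single page with pasid_levels == 0.
	 */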

	atomic_set(&dev_state->count, 1);
	dev_state->max_pasids = pasids;

	ret = -ENOMEM;
	dev_state->states = (void *)get_zeroed_page(GFP_KERNEL);
	if (dev_state->states == NULL)
		goto out_free_dev_state;

	dev_state->domain = iommu_domain_alloc(&pci_bus_type);
	if (dev_state->domain == NULL)
		goto out_free_states;

	amd_iommu_domain_direct_map(dev_state->domain);

	ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
	if (ret)
		goto out_free_domain;

	ret = iommu_attach_device(dev_state->domain, &pdev->dev);
	if (ret != 0)
		goto out_free_domain;

	spin_lock_irqsave(&state_lock, flags);

	if (__get_device_state(devid) != NULL) {
		spin_unlock_irqrestore(&state_lock, flags);
		ret = -EBUSY;
		goto out_free_domain;
	}

	list_add_tail(&dev_state->list, &state_list);

	spin_unlock_irqrestore(&state_lock, flags);

	return 0;

out_free_domain:
	iommu_domain_free(dev_state->domain);

out_free_states:
	free_page((unsigned long)dev_state->states);

out_free_dev_state:
	kfree(dev_state);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_init_device);

void amd_iommu_free_device(struct pci_dev *pdev)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;

	if (!amd_iommu_v2_supported())
		return;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	dev_state = __get_device_state(devid);
	if (dev_state == NULL) {
		spin_unlock_irqrestore(&state_lock, flags);
		return;
	}

	list_del(&dev_state->list);

	spin_unlock_irqrestore(&state_lock, flags);

	/* Get rid of any remaining pasid states */
	free_pasid_states(dev_state);

	put_device_state_wait(dev_state);
}
EXPORT_SYMBOL(amd_iommu_free_device);

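/*
 * Illustrative lifecycle sketch (not part of the original file): a device
 * driver using v2 demand paging would pair the exported calls roughly as
 * below. "my_pdev" and the PASID value are hypothetical.
 *
 *	int err = amd_iommu_init_device(my_pdev, 16);	  // up to 16 PASIDs
 *	if (err)
 *		return err;
 *
 *	err = amd_iommu_bind_pasid(my_pdev, 1, current);  // bind PASID 1
 *	if (err) {
 *		amd_iommu_free_device(my_pdev);
 *		return err;
 *	}
 *
 *	// ... device issues ATS/PRI traffic tagged with PASID 1 ...
 *
 *	amd_iommu_unbind_pasid(my_pdev, 1);
 *	amd_iommu_free_device(my_pdev);
 */
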
int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
				 amd_iommu_invalid_ppr_cb cb)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;
	int ret;

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	ret = -EINVAL;
	dev_state = __get_device_state(devid);
	if (dev_state == NULL)
		goto out_unlock;

	dev_state->inv_ppr_cb = cb;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&state_lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalid_ppr_cb);

int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
				    amd_iommu_invalidate_ctx cb)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;
	int ret;

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	ret = -EINVAL;
	dev_state = __get_device_state(devid);
	if (dev_state == NULL)
		goto out_unlock;

	dev_state->inv_ctx_cb = cb;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&state_lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb);

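/*
 * Illustrative callback sketch (not part of the original file): a driver
 * could register a handler that fails unresolvable faults outright.
 * "my_invalid_ppr" and "my_pdev" are hypothetical names.
 *
 *	static int my_invalid_ppr(struct pci_dev *pdev, int pasid,
 *				  unsigned long address, u16 flags)
 *	{
 *		dev_warn(&pdev->dev, "unhandled PPR: pasid %d addr %lx\n",
 *			 pasid, address);
 *		return AMD_IOMMU_INV_PRI_RSP_FAIL;
 *	}
 *
 *	amd_iommu_set_invalid_ppr_cb(my_pdev, my_invalid_ppr);
 */
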
static int __init amd_iommu_v2_init(void)
{
	int ret;

	pr_info("AMD IOMMUv2 driver by Joerg Roedel <joerg.roedel@amd.com>\n");

	if (!amd_iommu_v2_supported()) {
		pr_info("AMD IOMMUv2 functionality not available on this system\n");

		/*
		 * Load anyway to provide the symbols to other modules
		 * which may use AMD IOMMUv2 optionally.
		 */
		return 0;
	}

	spin_lock_init(&state_lock);

	ret = -ENOMEM;
	iommu_wq = create_workqueue("amd_iommu_v2");
	if (iommu_wq == NULL)
		goto out;

	ret = -ENOMEM;
	empty_page_table = (u64 *)get_zeroed_page(GFP_KERNEL);
	if (empty_page_table == NULL)
		goto out_destroy_wq;

	amd_iommu_register_ppr_notifier(&ppr_nb);

	return 0;

out_destroy_wq:
	destroy_workqueue(iommu_wq);

out:
	return ret;
}

static void __exit amd_iommu_v2_exit(void)
{
	struct device_state *dev_state;
	int i;

	if (!amd_iommu_v2_supported())
		return;

	amd_iommu_unregister_ppr_notifier(&ppr_nb);

	flush_workqueue(iommu_wq);

	/*
	 * The loop below might call flush_workqueue(), so call
	 * destroy_workqueue() after it
	 */
	for (i = 0; i < MAX_DEVICES; ++i) {
		dev_state = get_device_state(i);

		if (dev_state == NULL)
			continue;

		WARN_ON_ONCE(1);

		put_device_state(dev_state);
		amd_iommu_free_device(dev_state->pdev);
	}

	destroy_workqueue(iommu_wq);

	free_page((unsigned long)empty_page_table);
}

module_init(amd_iommu_v2_init);
module_exit(amd_iommu_v2_exit);