/*
 * Copyright (C) 2010-2012 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/mmu_notifier.h>
#include <linux/amd-iommu.h>
#include <linux/mm_types.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/iommu.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/gfp.h>

#include "amd_iommu_types.h"
#include "amd_iommu_proto.h"

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Joerg Roedel <joerg.roedel@amd.com>");

#define MAX_DEVICES		0x10000
#define PRI_QUEUE_SIZE		512
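/*
 * Note: PRI_QUEUE_SIZE matches the 9-bit PPR tag space - ppr_notifier()
 * below masks the incoming tag with 0x1ff, so at most 512 distinct tags
 * exist per PASID and the pri[] array can be indexed directly by tag.
 */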
struct pri_queue {
        atomic_t inflight;
        bool finish;
        int status;
};

struct pasid_state {
        struct list_head list;			/* For global state-list */
        atomic_t count;				/* Reference count */
        struct task_struct *task;		/* Task bound to this PASID */
        struct mm_struct *mm;			/* mm_struct for the faults */
        struct mmu_notifier mn;			/* mmu_notifier handle */
        struct pri_queue pri[PRI_QUEUE_SIZE];	/* PRI tag states */
        struct device_state *device_state;	/* Link to our device_state */
        int pasid;				/* PASID index */
        spinlock_t lock;			/* Protect pri_queues */
        wait_queue_head_t wq;			/* To wait for count == 0 */
};

struct device_state {
        atomic_t count;
        struct pci_dev *pdev;
        struct pasid_state **states;
        struct iommu_domain *domain;
        int pasid_levels;
        int max_pasids;
        amd_iommu_invalid_ppr_cb inv_ppr_cb;
        amd_iommu_invalidate_ctx inv_ctx_cb;
        spinlock_t lock;
        wait_queue_head_t wq;
};

struct fault {
        struct work_struct work;
        struct device_state *dev_state;
        struct pasid_state *state;
        u64 address;
        u16 tag;
        u16 finish;
        u16 flags;
};
static struct device_state **state_table;
static spinlock_t state_lock;

/* List and lock for all pasid_states */
static LIST_HEAD(pasid_state_list);
static DEFINE_SPINLOCK(ps_lock);

static struct workqueue_struct *iommu_wq;

/*
 * Empty page table - Used between
 * mmu_notifier_invalidate_range_start and
 * mmu_notifier_invalidate_range_end
 */
static u64 *empty_page_table;

static void free_pasid_states(struct device_state *dev_state);
static void unbind_pasid(struct device_state *dev_state, int pasid);
static int task_exit(struct notifier_block *nb, unsigned long e, void *data);
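/*
 * device_id() folds the PCI bus number and devfn into the 16-bit
 * requester ID, which is used directly as the index into state_table -
 * hence the table's MAX_DEVICES == 0x10000 entries.
 */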
static u16 device_id(struct pci_dev *pdev)
{
        u16 devid;

        devid = pdev->bus->number;
        devid = (devid << 8) | pdev->devfn;

        return devid;
}

static struct device_state *get_device_state(u16 devid)
{
        struct device_state *dev_state;
        unsigned long flags;

        spin_lock_irqsave(&state_lock, flags);
        dev_state = state_table[devid];
        if (dev_state != NULL)
                atomic_inc(&dev_state->count);
        spin_unlock_irqrestore(&state_lock, flags);

        return dev_state;
}

static void free_device_state(struct device_state *dev_state)
{
        /*
         * First detach device from domain - No more PRI requests will arrive
         * from that device after it is unbound from the IOMMUv2 domain.
         */
        iommu_detach_device(dev_state->domain, &dev_state->pdev->dev);

        /* Everything is down now, free the IOMMUv2 domain */
        iommu_domain_free(dev_state->domain);

        /* Finally get rid of the device-state */
        kfree(dev_state);
}

static void put_device_state(struct device_state *dev_state)
{
        if (atomic_dec_and_test(&dev_state->count))
                wake_up(&dev_state->wq);
}
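/*
 * Drop the final reference and wait until the count actually reaches
 * zero before freeing: the last concurrent put_device_state() wakes the
 * waitqueue, so no user of dev_state can still be running by the time
 * free_device_state() is called.
 */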
static void put_device_state_wait(struct device_state *dev_state)
{
        DEFINE_WAIT(wait);

        prepare_to_wait(&dev_state->wq, &wait, TASK_UNINTERRUPTIBLE);
        if (!atomic_dec_and_test(&dev_state->count))
                schedule();
        finish_wait(&dev_state->wq, &wait);

        free_device_state(dev_state);
}

static struct notifier_block profile_nb = {
        .notifier_call = task_exit,
};
static void link_pasid_state(struct pasid_state *pasid_state)
{
        spin_lock(&ps_lock);
        list_add_tail(&pasid_state->list, &pasid_state_list);
        spin_unlock(&ps_lock);
}

static void __unlink_pasid_state(struct pasid_state *pasid_state)
{
        list_del(&pasid_state->list);
}

static void unlink_pasid_state(struct pasid_state *pasid_state)
{
        spin_lock(&ps_lock);
        __unlink_pasid_state(pasid_state);
        spin_unlock(&ps_lock);
}
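/*
 * Per-device PASID states live in a radix-tree-like table: each level is
 * one zeroed page holding 512 pointers, so every level decodes nine bits
 * of the PASID, much like an x86 page table. For example, with
 * pasid_levels == 1 a PASID is resolved in two steps:
 *
 *	root[(pasid >> 9) & 0x1ff] -> page of pasid_state pointers
 *	page[pasid & 0x1ff]        -> struct pasid_state *
 */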
/* Must be called under dev_state->lock */
static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
                                                  int pasid, bool alloc)
{
        struct pasid_state **root, **ptr;
        int level, index;

        level = dev_state->pasid_levels;
        root  = dev_state->states;

        while (true) {

                index = (pasid >> (9 * level)) & 0x1ff;
                ptr   = &root[index];

                if (level == 0)
                        break;

                if (*ptr == NULL) {
                        if (!alloc)
                                return NULL;

                        *ptr = (void *)get_zeroed_page(GFP_ATOMIC);
                        if (*ptr == NULL)
                                return NULL;
                }

                root   = (struct pasid_state **)*ptr;
                level -= 1;
        }

        return ptr;
}
static int set_pasid_state(struct device_state *dev_state,
                           struct pasid_state *pasid_state,
                           int pasid)
{
        struct pasid_state **ptr;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&dev_state->lock, flags);
        ptr = __get_pasid_state_ptr(dev_state, pasid, true);

        ret = -ENOMEM;
        if (ptr == NULL)
                goto out_unlock;

        ret = -ENOMEM;
        if (*ptr != NULL)
                goto out_unlock;

        *ptr = pasid_state;

        ret = 0;

out_unlock:
        spin_unlock_irqrestore(&dev_state->lock, flags);

        return ret;
}

static void clear_pasid_state(struct device_state *dev_state, int pasid)
{
        struct pasid_state **ptr;
        unsigned long flags;

        spin_lock_irqsave(&dev_state->lock, flags);
        ptr = __get_pasid_state_ptr(dev_state, pasid, true);

        if (ptr == NULL)
                goto out_unlock;

        *ptr = NULL;

out_unlock:
        spin_unlock_irqrestore(&dev_state->lock, flags);
}

static struct pasid_state *get_pasid_state(struct device_state *dev_state,
                                           int pasid)
{
        struct pasid_state **ptr, *ret = NULL;
        unsigned long flags;

        spin_lock_irqsave(&dev_state->lock, flags);
        ptr = __get_pasid_state_ptr(dev_state, pasid, false);

        if (ptr == NULL)
                goto out_unlock;

        ret = *ptr;
        if (ret)
                atomic_inc(&ret->count);

out_unlock:
        spin_unlock_irqrestore(&dev_state->lock, flags);

        return ret;
}
static void free_pasid_state(struct pasid_state *pasid_state)
{
        kfree(pasid_state);
}

static void put_pasid_state(struct pasid_state *pasid_state)
{
        if (atomic_dec_and_test(&pasid_state->count)) {
                put_device_state(pasid_state->device_state);
                wake_up(&pasid_state->wq);
        }
}

static void put_pasid_state_wait(struct pasid_state *pasid_state)
{
        DEFINE_WAIT(wait);

        prepare_to_wait(&pasid_state->wq, &wait, TASK_UNINTERRUPTIBLE);

        if (atomic_dec_and_test(&pasid_state->count))
                put_device_state(pasid_state->device_state);
        else
                schedule();

        finish_wait(&pasid_state->wq, &wait);
        mmput(pasid_state->mm);
        free_pasid_state(pasid_state);
}
static void __unbind_pasid(struct pasid_state *pasid_state)
{
        struct iommu_domain *domain;

        domain = pasid_state->device_state->domain;

        amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid);
        clear_pasid_state(pasid_state->device_state, pasid_state->pasid);

        /* Make sure no more pending faults are in the queue */
        flush_workqueue(iommu_wq);

        mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

        put_pasid_state(pasid_state); /* Reference taken in bind() function */
}

static void unbind_pasid(struct device_state *dev_state, int pasid)
{
        struct pasid_state *pasid_state;

        pasid_state = get_pasid_state(dev_state, pasid);
        if (pasid_state == NULL)
                return;

        unlink_pasid_state(pasid_state);
        __unbind_pasid(pasid_state);
        put_pasid_state_wait(pasid_state); /* Reference taken in this function */
}
static void free_pasid_states_level1(struct pasid_state **tbl)
{
        int i;

        for (i = 0; i < 512; ++i) {
                if (tbl[i] == NULL)
                        continue;

                free_page((unsigned long)tbl[i]);
        }
}

static void free_pasid_states_level2(struct pasid_state **tbl)
{
        struct pasid_state **ptr;
        int i;

        for (i = 0; i < 512; ++i) {
                if (tbl[i] == NULL)
                        continue;

                ptr = (struct pasid_state **)tbl[i];
                free_pasid_states_level1(ptr);
        }
}

static void free_pasid_states(struct device_state *dev_state)
{
        struct pasid_state *pasid_state;
        int i;

        for (i = 0; i < dev_state->max_pasids; ++i) {
                pasid_state = get_pasid_state(dev_state, i);
                if (pasid_state == NULL)
                        continue;

                put_pasid_state(pasid_state);
                unbind_pasid(dev_state, i);
        }

        if (dev_state->pasid_levels == 2)
                free_pasid_states_level2(dev_state->states);
        else if (dev_state->pasid_levels == 1)
                free_pasid_states_level1(dev_state->states);
        else if (dev_state->pasid_levels != 0)
                BUG();

        free_page((unsigned long)dev_state->states);
}
static struct pasid_state *mn_to_state(struct mmu_notifier *mn)
{
        return container_of(mn, struct pasid_state, mn);
}

static void __mn_flush_page(struct mmu_notifier *mn,
                            unsigned long address)
{
        struct pasid_state *pasid_state;
        struct device_state *dev_state;

        pasid_state = mn_to_state(mn);
        dev_state   = pasid_state->device_state;

        amd_iommu_flush_page(dev_state->domain, pasid_state->pasid, address);
}

static int mn_clear_flush_young(struct mmu_notifier *mn,
                                struct mm_struct *mm,
                                unsigned long address)
{
        __mn_flush_page(mn, address);

        return 0;
}

static void mn_change_pte(struct mmu_notifier *mn,
                          struct mm_struct *mm,
                          unsigned long address,
                          pte_t pte)
{
        __mn_flush_page(mn, address);
}

static void mn_invalidate_page(struct mmu_notifier *mn,
                               struct mm_struct *mm,
                               unsigned long address)
{
        __mn_flush_page(mn, address);
}
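/*
 * While the CPU tears down a range of mappings, the device must not walk
 * half-updated page tables. invalidate_range_start therefore points the
 * PASID's GCR3 at the always-empty page table, turning every device
 * access into a PRI fault, and invalidate_range_end restores the real
 * pgd once the CPU side is consistent again.
 */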
static void mn_invalidate_range_start(struct mmu_notifier *mn,
                                      struct mm_struct *mm,
                                      unsigned long start, unsigned long end)
{
        struct pasid_state *pasid_state;
        struct device_state *dev_state;

        pasid_state = mn_to_state(mn);
        dev_state   = pasid_state->device_state;

        amd_iommu_domain_set_gcr3(dev_state->domain, pasid_state->pasid,
                                  __pa(empty_page_table));
}

static void mn_invalidate_range_end(struct mmu_notifier *mn,
                                    struct mm_struct *mm,
                                    unsigned long start, unsigned long end)
{
        struct pasid_state *pasid_state;
        struct device_state *dev_state;

        pasid_state = mn_to_state(mn);
        dev_state   = pasid_state->device_state;

        amd_iommu_domain_set_gcr3(dev_state->domain, pasid_state->pasid,
                                  __pa(pasid_state->mm->pgd));
}

static struct mmu_notifier_ops iommu_mn = {
        .clear_flush_young	= mn_clear_flush_young,
        .change_pte		= mn_change_pte,
        .invalidate_page	= mn_invalidate_page,
        .invalidate_range_start	= mn_invalidate_range_start,
        .invalidate_range_end	= mn_invalidate_range_end,
};
static void set_pri_tag_status(struct pasid_state *pasid_state,
                               u16 tag, int status)
{
        unsigned long flags;

        spin_lock_irqsave(&pasid_state->lock, flags);
        pasid_state->pri[tag].status = status;
        spin_unlock_irqrestore(&pasid_state->lock, flags);
}

static void finish_pri_tag(struct device_state *dev_state,
                           struct pasid_state *pasid_state,
                           u16 tag)
{
        unsigned long flags;

        spin_lock_irqsave(&pasid_state->lock, flags);
        if (atomic_dec_and_test(&pasid_state->pri[tag].inflight) &&
            pasid_state->pri[tag].finish) {
                amd_iommu_complete_ppr(dev_state->pdev, pasid_state->pasid,
                                       pasid_state->pri[tag].status, tag);
                pasid_state->pri[tag].finish = false;
                pasid_state->pri[tag].status = PPR_SUCCESS;
        }
        spin_unlock_irqrestore(&pasid_state->lock, flags);
}
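/*
 * Fault handling runs in workqueue context so that it may sleep:
 * do_fault() resolves the faulting address with get_user_pages(), which
 * populates the process page table and thereby makes the translation
 * available to the device when it retries the access.
 */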
static void do_fault(struct work_struct *work)
{
        struct fault *fault = container_of(work, struct fault, work);
        int npages, write;
        struct page *page;

        write = !!(fault->flags & PPR_FAULT_WRITE);

        down_read(&fault->state->mm->mmap_sem);
        npages = get_user_pages(fault->state->task, fault->state->mm,
                                fault->address, 1, write, 0, &page, NULL);
        up_read(&fault->state->mm->mmap_sem);

        if (npages == 1) {
                put_page(page);
        } else if (fault->dev_state->inv_ppr_cb) {
                int status;

                status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
                                                      fault->state->pasid,
                                                      fault->address,
                                                      fault->flags);
                switch (status) {
                case AMD_IOMMU_INV_PRI_RSP_SUCCESS:
                        set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);
                        break;
                case AMD_IOMMU_INV_PRI_RSP_INVALID:
                        set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
                        break;
                case AMD_IOMMU_INV_PRI_RSP_FAIL:
                        set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE);
                        break;
                default:
                        BUG();
                }
        } else {
                set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
        }

        finish_pri_tag(fault->dev_state, fault->state, fault->tag);

        put_pasid_state(fault->state);

        kfree(fault);
}
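/*
 * ppr_notifier() runs for every PPR log entry. The low nine bits of
 * iommu_fault->tag carry the tag the device expects in its response
 * (hence the 0x1ff mask), and bit 9 tells us whether a response is
 * wanted at all. The per-tag inflight counter in struct pri_queue lets
 * several faults share one tag; the response is sent by whichever fault
 * finishes last (see finish_pri_tag() above).
 */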
static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
{
        struct amd_iommu_fault *iommu_fault;
        struct pasid_state *pasid_state;
        struct device_state *dev_state;
        unsigned long flags;
        struct fault *fault;
        bool finish;
        u16 tag;
        int ret;

        iommu_fault = data;
        tag         = iommu_fault->tag & 0x1ff;
        finish      = (iommu_fault->tag >> 9) & 1;

        ret = NOTIFY_DONE;
        dev_state = get_device_state(iommu_fault->device_id);
        if (dev_state == NULL)
                goto out;

        pasid_state = get_pasid_state(dev_state, iommu_fault->pasid);
        if (pasid_state == NULL) {
                /* We know the device but not the PASID -> send INVALID */
                amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid,
                                       PPR_INVALID, tag);
                goto out_drop_state;
        }

        spin_lock_irqsave(&pasid_state->lock, flags);
        atomic_inc(&pasid_state->pri[tag].inflight);
        if (finish)
                pasid_state->pri[tag].finish = true;
        spin_unlock_irqrestore(&pasid_state->lock, flags);

        fault = kzalloc(sizeof(*fault), GFP_ATOMIC);
        if (fault == NULL) {
                /* We are OOM - send success and let the device re-fault */
                finish_pri_tag(dev_state, pasid_state, tag);
                goto out_drop_state;
        }

        fault->dev_state = dev_state;
        fault->address   = iommu_fault->address;
        fault->state     = pasid_state;
        fault->tag       = tag;
        fault->finish    = finish;
        fault->flags     = iommu_fault->flags;
        INIT_WORK(&fault->work, do_fault);

        queue_work(iommu_wq, &fault->work);

        ret = NOTIFY_OK;

out_drop_state:
        put_device_state(dev_state);

out:
        return ret;
}

static struct notifier_block ppr_nb = {
        .notifier_call = ppr_notifier,
};
static int task_exit(struct notifier_block *nb, unsigned long e, void *data)
{
        struct pasid_state *pasid_state;
        struct task_struct *task;

        task = data;

        /*
         * Using this notifier is a hack - but there is no other choice
         * at the moment. What I really want is a sleeping notifier that
         * is called when an MM goes down. But such a notifier doesn't
         * exist yet. The notifier needs to sleep because it has to make
         * sure that the device does not use the PASID and the address
         * space anymore before it is destroyed. This includes waiting
         * for pending PRI requests to pass the workqueue. The
         * MMU-Notifiers would be a good fit, but they use RCU and so
         * they are not allowed to sleep. Let's see how we can solve this
         * in a more intelligent way in the future.
         */
again:
        spin_lock(&ps_lock);
        list_for_each_entry(pasid_state, &pasid_state_list, list) {
                struct device_state *dev_state;
                int pasid;

                if (pasid_state->task != task)
                        continue;

                /* Drop Lock and unbind */
                spin_unlock(&ps_lock);

                dev_state = pasid_state->device_state;
                pasid     = pasid_state->pasid;

                if (pasid_state->device_state->inv_ctx_cb)
                        dev_state->inv_ctx_cb(dev_state->pdev, pasid);

                unbind_pasid(dev_state, pasid);

                /* Task may be in the list multiple times */
                goto again;
        }
        spin_unlock(&ps_lock);

        return NOTIFY_OK;
}
int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
                         struct task_struct *task)
{
        struct pasid_state *pasid_state;
        struct device_state *dev_state;
        u16 devid;
        int ret;

        might_sleep();

        if (!amd_iommu_v2_supported())
                return -ENODEV;

        devid     = device_id(pdev);
        dev_state = get_device_state(devid);

        if (dev_state == NULL)
                return -EINVAL;

        ret = -EINVAL;
        if (pasid < 0 || pasid >= dev_state->max_pasids)
                goto out;

        ret = -ENOMEM;
        pasid_state = kzalloc(sizeof(*pasid_state), GFP_KERNEL);
        if (pasid_state == NULL)
                goto out;

        atomic_set(&pasid_state->count, 1);
        init_waitqueue_head(&pasid_state->wq);
        spin_lock_init(&pasid_state->lock);

        pasid_state->task         = task;
        pasid_state->mm           = get_task_mm(task);
        pasid_state->device_state = dev_state;
        pasid_state->pasid        = pasid;
        pasid_state->mn.ops       = &iommu_mn;

        if (pasid_state->mm == NULL)
                goto out_free;

        mmu_notifier_register(&pasid_state->mn, pasid_state->mm);

        ret = set_pasid_state(dev_state, pasid_state, pasid);
        if (ret)
                goto out_unregister;

        ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid,
                                        __pa(pasid_state->mm->pgd));
        if (ret)
                goto out_clear_state;

        link_pasid_state(pasid_state);

        return 0;

out_clear_state:
        clear_pasid_state(dev_state, pasid);

out_unregister:
        mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

out_free:
        free_pasid_state(pasid_state);

out:
        put_device_state(dev_state);

        return ret;
}
EXPORT_SYMBOL(amd_iommu_bind_pasid);

void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
{
        struct device_state *dev_state;
        u16 devid;

        might_sleep();

        if (!amd_iommu_v2_supported())
                return;

        devid     = device_id(pdev);
        dev_state = get_device_state(devid);
        if (dev_state == NULL)
                return;

        if (pasid < 0 || pasid >= dev_state->max_pasids)
                goto out;

        unbind_pasid(dev_state, pasid);

out:
        put_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_unbind_pasid);
int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
{
        struct device_state *dev_state;
        unsigned long flags;
        int ret, tmp;
        u16 devid;

        might_sleep();

        if (!amd_iommu_v2_supported())
                return -ENODEV;

        if (pasids <= 0 || pasids > (PASID_MASK + 1))
                return -EINVAL;

        devid = device_id(pdev);

        dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
        if (dev_state == NULL)
                return -ENOMEM;

        spin_lock_init(&dev_state->lock);
        init_waitqueue_head(&dev_state->wq);
        dev_state->pdev = pdev;
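        /*
         * Each level of the PASID state table resolves nine bits, so
         * count how many extra levels are needed until the remaining
         * index fits into one 512-entry page: pasids <= 512 needs no
         * extra level, pasids <= 512 * 512 needs one, and so on.
         */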
        tmp = pasids;
        for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
                dev_state->pasid_levels += 1;

        atomic_set(&dev_state->count, 1);
        dev_state->max_pasids = pasids;

        ret = -ENOMEM;
        dev_state->states = (void *)get_zeroed_page(GFP_KERNEL);
        if (dev_state->states == NULL)
                goto out_free_dev_state;

        dev_state->domain = iommu_domain_alloc(&pci_bus_type);
        if (dev_state->domain == NULL)
                goto out_free_states;

        amd_iommu_domain_direct_map(dev_state->domain);

        ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
        if (ret)
                goto out_free_domain;

        ret = iommu_attach_device(dev_state->domain, &pdev->dev);
        if (ret != 0)
                goto out_free_domain;

        spin_lock_irqsave(&state_lock, flags);

        if (state_table[devid] != NULL) {
                spin_unlock_irqrestore(&state_lock, flags);
                ret = -EBUSY;
                goto out_free_domain;
        }

        state_table[devid] = dev_state;

        spin_unlock_irqrestore(&state_lock, flags);

        return 0;

out_free_domain:
        iommu_domain_free(dev_state->domain);

out_free_states:
        free_page((unsigned long)dev_state->states);

out_free_dev_state:
        kfree(dev_state);

        return ret;
}
EXPORT_SYMBOL(amd_iommu_init_device);
void amd_iommu_free_device(struct pci_dev *pdev)
{
        struct device_state *dev_state;
        unsigned long flags;
        u16 devid;

        if (!amd_iommu_v2_supported())
                return;

        devid = device_id(pdev);

        spin_lock_irqsave(&state_lock, flags);

        dev_state = state_table[devid];
        if (dev_state == NULL) {
                spin_unlock_irqrestore(&state_lock, flags);
                return;
        }

        state_table[devid] = NULL;

        spin_unlock_irqrestore(&state_lock, flags);

        /* Get rid of any remaining pasid states */
        free_pasid_states(dev_state);

        put_device_state_wait(dev_state);
}
EXPORT_SYMBOL(amd_iommu_free_device);
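/*
 * Usage sketch (hypothetical, not part of this driver): a driver for an
 * IOMMUv2-capable device would tie the exports above together roughly
 * like this; the function names, the PASID count of 16 and PASID 1 are
 * made up for illustration.
 *
 *	int my_driver_setup_sva(struct pci_dev *pdev)
 *	{
 *		int ret;
 *
 *		ret = amd_iommu_init_device(pdev, 16);
 *		if (ret)
 *			return ret;
 *
 *		ret = amd_iommu_bind_pasid(pdev, 1, current);
 *		if (ret)
 *			amd_iommu_free_device(pdev);
 *
 *		return ret;
 *	}
 *
 *	void my_driver_teardown_sva(struct pci_dev *pdev)
 *	{
 *		amd_iommu_unbind_pasid(pdev, 1);
 *		amd_iommu_free_device(pdev);
 *	}
 */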
int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
                                 amd_iommu_invalid_ppr_cb cb)
{
        struct device_state *dev_state;
        unsigned long flags;
        u16 devid;
        int ret;

        if (!amd_iommu_v2_supported())
                return -ENODEV;

        devid = device_id(pdev);

        spin_lock_irqsave(&state_lock, flags);

        ret = -EINVAL;
        dev_state = state_table[devid];
        if (dev_state == NULL)
                goto out_unlock;

        dev_state->inv_ppr_cb = cb;

        ret = 0;

out_unlock:
        spin_unlock_irqrestore(&state_lock, flags);

        return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalid_ppr_cb);
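/*
 * Sketch of a (hypothetical) invalid-PPR callback a driver could
 * register with the function above: do_fault() calls it when
 * get_user_pages() cannot resolve the address, and its return value
 * selects the PPR response sent back to the device. The parameter list
 * mirrors the call site in do_fault(); check amd_iommu_invalid_ppr_cb
 * in <linux/amd-iommu.h> for the authoritative signature.
 *
 *	static int my_driver_invalid_ppr(struct pci_dev *pdev, int pasid,
 *					 unsigned long address, u16 flags)
 *	{
 *		dev_warn(&pdev->dev, "unresolvable fault at 0x%lx\n",
 *			 address);
 *		return AMD_IOMMU_INV_PRI_RSP_INVALID;
 *	}
 */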
int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
                                    amd_iommu_invalidate_ctx cb)
{
        struct device_state *dev_state;
        unsigned long flags;
        u16 devid;
        int ret;

        if (!amd_iommu_v2_supported())
                return -ENODEV;

        devid = device_id(pdev);

        spin_lock_irqsave(&state_lock, flags);

        ret = -EINVAL;
        dev_state = state_table[devid];
        if (dev_state == NULL)
                goto out_unlock;

        dev_state->inv_ctx_cb = cb;

        ret = 0;

out_unlock:
        spin_unlock_irqrestore(&state_lock, flags);

        return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb);
static int __init amd_iommu_v2_init(void)
{
        size_t state_table_size;
        int ret;

        pr_info("AMD IOMMUv2 driver by Joerg Roedel <joerg.roedel@amd.com>\n");

        if (!amd_iommu_v2_supported()) {
                pr_info("AMD IOMMUv2 functionality not available on this system\n");
                /*
                 * Load anyway to provide the symbols to other modules
                 * which may use AMD IOMMUv2 optionally.
                 */
                return 0;
        }

        spin_lock_init(&state_lock);

        ret = -ENOMEM;
        state_table_size = MAX_DEVICES * sizeof(struct device_state *);
        state_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                               get_order(state_table_size));
        if (state_table == NULL)
                return ret;

        iommu_wq = create_workqueue("amd_iommu_v2");
        if (iommu_wq == NULL)
                goto out_free;

        empty_page_table = (u64 *)get_zeroed_page(GFP_KERNEL);
        if (empty_page_table == NULL)
                goto out_destroy_wq;

        amd_iommu_register_ppr_notifier(&ppr_nb);
        profile_event_register(PROFILE_TASK_EXIT, &profile_nb);

        return 0;

out_destroy_wq:
        destroy_workqueue(iommu_wq);

out_free:
        free_pages((unsigned long)state_table, get_order(state_table_size));

        return ret;
}
static void __exit amd_iommu_v2_exit(void)
{
        struct device_state *dev_state;
        size_t state_table_size;
        int i;

        if (!amd_iommu_v2_supported())
                return;

        profile_event_unregister(PROFILE_TASK_EXIT, &profile_nb);
        amd_iommu_unregister_ppr_notifier(&ppr_nb);

        flush_workqueue(iommu_wq);

        /*
         * The loop below might call flush_workqueue(), so call
         * destroy_workqueue() after it
         */
        for (i = 0; i < MAX_DEVICES; ++i) {
                dev_state = get_device_state(i);

                if (dev_state == NULL)
                        continue;

                WARN_ON_ONCE(1);

                put_device_state(dev_state);
                amd_iommu_free_device(dev_state->pdev);
        }

        destroy_workqueue(iommu_wq);

        state_table_size = MAX_DEVICES * sizeof(struct device_state *);
        free_pages((unsigned long)state_table, get_order(state_table_size));

        free_page((unsigned long)empty_page_table);
}

module_init(amd_iommu_v2_init);
module_exit(amd_iommu_v2_exit);