/*
 * Copyright (C) 2010-2012 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/mmu_notifier.h>
#include <linux/amd-iommu.h>
#include <linux/mm_types.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/iommu.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/gfp.h>

#include "amd_iommu_types.h"
#include "amd_iommu_proto.h"

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Joerg Roedel <jroedel@suse.de>");

#define MAX_DEVICES		0x10000
#define PRI_QUEUE_SIZE		512
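
/*
 * PRI tags are nine bits wide (see ppr_notifier() below, which masks the
 * tag with 0x1ff), so 512 queue slots are enough to track the state of
 * every possible in-flight PRI tag for a PASID.
 */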

struct pri_queue {
	atomic_t inflight;
	bool finish;
	int status;
};

struct pasid_state {
	struct list_head list;			/* For global state-list */
	atomic_t count;				/* Reference count */
	unsigned mmu_notifier_count;		/* Counting nested mmu_notifier
						   calls */
	struct mm_struct *mm;			/* mm_struct for the faults */
	struct mmu_notifier mn;			/* mmu_notifier handle */
	struct pri_queue pri[PRI_QUEUE_SIZE];	/* PRI tag states */
	struct device_state *device_state;	/* Link to our device_state */
	int pasid;				/* PASID index */
	bool invalid;				/* Used during setup and
						   teardown of the pasid */
	spinlock_t lock;			/* Protect pri_queues and
						   mmu_notifier_count */
	wait_queue_head_t wq;			/* To wait for count == 0 */
};

struct device_state {
	struct list_head list;
	u16 devid;
	atomic_t count;
	struct pci_dev *pdev;
	struct pasid_state **states;
	struct iommu_domain *domain;
	int pasid_levels;
	int max_pasids;
	amd_iommu_invalid_ppr_cb inv_ppr_cb;
	amd_iommu_invalidate_ctx inv_ctx_cb;
	spinlock_t lock;
	wait_queue_head_t wq;
};

struct fault {
	struct work_struct work;
	struct device_state *dev_state;
	struct pasid_state *state;
	struct mm_struct *mm;
	u64 address;
	u16 devid;
	u16 pasid;
	u16 tag;
	u16 finish;
	u16 flags;
};

static LIST_HEAD(state_list);
static spinlock_t state_lock;

static struct workqueue_struct *iommu_wq;

static void free_pasid_states(struct device_state *dev_state);

static u16 device_id(struct pci_dev *pdev)
{
	u16 devid;

	devid = pdev->bus->number;
	devid = (devid << 8) | pdev->devfn;

	return devid;
}
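
/*
 * Example (illustrative values): for a device at PCI address 3f:01.2,
 * bus->number is 0x3f and devfn is PCI_DEVFN(1, 2) == 0x0a, so device_id()
 * returns (0x3f << 8) | 0x0a == 0x3f0a - the same 16-bit requester ID the
 * IOMMU reports in its fault events.
 */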

static struct device_state *__get_device_state(u16 devid)
{
	struct device_state *dev_state;

	list_for_each_entry(dev_state, &state_list, list) {
		if (dev_state->devid == devid)
			return dev_state;
	}

	return NULL;
}

static struct device_state *get_device_state(u16 devid)
{
	struct device_state *dev_state;
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	dev_state = __get_device_state(devid);
	if (dev_state != NULL)
		atomic_inc(&dev_state->count);
	spin_unlock_irqrestore(&state_lock, flags);

	return dev_state;
}

static void free_device_state(struct device_state *dev_state)
{
	struct iommu_group *group;

	/*
	 * First detach device from domain - No more PRI requests will arrive
	 * from that device after it is unbound from the IOMMUv2 domain.
	 */
	group = iommu_group_get(&dev_state->pdev->dev);
	if (WARN_ON(!group))
		return;

	iommu_detach_group(dev_state->domain, group);

	iommu_group_put(group);

	/* Everything is down now, free the IOMMUv2 domain */
	iommu_domain_free(dev_state->domain);

	/* Finally get rid of the device-state */
	kfree(dev_state);
}

static void put_device_state(struct device_state *dev_state)
{
	if (atomic_dec_and_test(&dev_state->count))
		wake_up(&dev_state->wq);
}

/* Must be called under dev_state->lock */
static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
						  int pasid, bool alloc)
{
	struct pasid_state **root, **ptr;
	int level, index;

	level = dev_state->pasid_levels;
	root  = dev_state->states;

	while (true) {
		index = (pasid >> (9 * level)) & 0x1ff;
		ptr   = &root[index];

		if (level == 0)
			break;

		if (*ptr == NULL) {
			if (!alloc)
				return NULL;

			*ptr = (void *)get_zeroed_page(GFP_ATOMIC);
			if (*ptr == NULL)
				return NULL;
		}

		root   = (struct pasid_state **)*ptr;
		level -= 1;
	}

	return ptr;
}
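
/*
 * Example walk (illustrative values): with pasid_levels == 1 and
 * pasid == 0x2345, the first iteration uses index (0x2345 >> 9) & 0x1ff
 * == 0x11 into the root page, then descends and uses index
 * 0x2345 & 0x1ff == 0x145 into the leaf page that holds the actual
 * struct pasid_state pointer.
 */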

static int set_pasid_state(struct device_state *dev_state,
			   struct pasid_state *pasid_state,
			   int pasid)
{
	struct pasid_state **ptr;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, true);

	ret = -ENOMEM;
	if (ptr == NULL)
		goto out_unlock;

	ret = -EBUSY;
	if (*ptr != NULL)
		goto out_unlock;

	*ptr = pasid_state;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);

	return ret;
}

static void clear_pasid_state(struct device_state *dev_state, int pasid)
{
	struct pasid_state **ptr;
	unsigned long flags;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, true);

	if (ptr == NULL)
		goto out_unlock;

	*ptr = NULL;

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);
}

static struct pasid_state *get_pasid_state(struct device_state *dev_state,
					   int pasid)
{
	struct pasid_state **ptr, *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, false);

	if (ptr == NULL)
		goto out_unlock;

	ret = *ptr;
	if (ret)
		atomic_inc(&ret->count);

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);

	return ret;
}

static void free_pasid_state(struct pasid_state *pasid_state)
{
	kfree(pasid_state);
}

static void put_pasid_state(struct pasid_state *pasid_state)
{
	if (atomic_dec_and_test(&pasid_state->count))
		wake_up(&pasid_state->wq);
}

static void put_pasid_state_wait(struct pasid_state *pasid_state)
{
	atomic_dec(&pasid_state->count);
	wait_event(pasid_state->wq, !atomic_read(&pasid_state->count));
	free_pasid_state(pasid_state);
}

static void unbind_pasid(struct pasid_state *pasid_state)
{
	struct iommu_domain *domain;

	domain = pasid_state->device_state->domain;

	/*
	 * Mark pasid_state as invalid; no more faults will be added to the
	 * work queue after this is visible everywhere.
	 */
	pasid_state->invalid = true;

	/* Make sure this is visible */
	smp_wmb();

	/* After this the device/pasid can't access the mm anymore */
	amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid);

	/* Make sure no more pending faults are in the queue */
	flush_workqueue(iommu_wq);
}

static void free_pasid_states_level1(struct pasid_state **tbl)
{
	int i;

	for (i = 0; i < 512; ++i) {
		if (tbl[i] == NULL)
			continue;

		free_page((unsigned long)tbl[i]);
	}
}

static void free_pasid_states_level2(struct pasid_state **tbl)
{
	struct pasid_state **ptr;
	int i;

	for (i = 0; i < 512; ++i) {
		if (tbl[i] == NULL)
			continue;

		ptr = (struct pasid_state **)tbl[i];
		free_pasid_states_level1(ptr);
	}
}

static void free_pasid_states(struct device_state *dev_state)
{
	struct pasid_state *pasid_state;
	int i;

	for (i = 0; i < dev_state->max_pasids; ++i) {
		pasid_state = get_pasid_state(dev_state, i);
		if (pasid_state == NULL)
			continue;

		put_pasid_state(pasid_state);

		/*
		 * This will call the mn_release function and
		 * unbind the PASID
		 */
		mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

		put_pasid_state_wait(pasid_state); /* Reference taken in
						      amd_iommu_bind_pasid */

		/* Drop reference taken in amd_iommu_bind_pasid */
		put_device_state(dev_state);
	}

	if (dev_state->pasid_levels == 2)
		free_pasid_states_level2(dev_state->states);
	else if (dev_state->pasid_levels == 1)
		free_pasid_states_level1(dev_state->states);
	else
		BUG_ON(dev_state->pasid_levels != 0);

	free_page((unsigned long)dev_state->states);
}

static struct pasid_state *mn_to_state(struct mmu_notifier *mn)
{
	return container_of(mn, struct pasid_state, mn);
}

static void __mn_flush_page(struct mmu_notifier *mn,
			    unsigned long address)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;

	pasid_state = mn_to_state(mn);
	dev_state   = pasid_state->device_state;

	amd_iommu_flush_page(dev_state->domain, pasid_state->pasid, address);
}

static int mn_clear_flush_young(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long start,
				unsigned long end)
{
	for (; start < end; start += PAGE_SIZE)
		__mn_flush_page(mn, start);

	return 0;
}

static void mn_invalidate_page(struct mmu_notifier *mn,
			       struct mm_struct *mm,
			       unsigned long address)
{
	__mn_flush_page(mn, address);
}

static void mn_invalidate_range(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;

	pasid_state = mn_to_state(mn);
	dev_state   = pasid_state->device_state;

	if ((start ^ (end - 1)) < PAGE_SIZE)
		amd_iommu_flush_page(dev_state->domain, pasid_state->pasid,
				     start);
	else
		amd_iommu_flush_tlb(dev_state->domain, pasid_state->pasid);
}
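
/*
 * The (start ^ (end - 1)) < PAGE_SIZE test above checks whether start and
 * end - 1 agree in all bits above the page offset, i.e. whether the range
 * lies within a single page. With 4 KiB pages (PAGE_SIZE == 0x1000), for
 * example, start == 0x1000 and end == 0x2000 give 0x1000 ^ 0x1fff == 0xfff,
 * so a single page is flushed, while end == 0x3000 gives
 * 0x1000 ^ 0x2fff == 0x3fff and forces a full TLB flush for the PASID.
 */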

static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	bool run_inv_ctx_cb;

	might_sleep();

	pasid_state    = mn_to_state(mn);
	dev_state      = pasid_state->device_state;
	run_inv_ctx_cb = !pasid_state->invalid;

	if (run_inv_ctx_cb && dev_state->inv_ctx_cb)
		dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);

	unbind_pasid(pasid_state);
}

static const struct mmu_notifier_ops iommu_mn = {
	.release		= mn_release,
	.clear_flush_young	= mn_clear_flush_young,
	.invalidate_page	= mn_invalidate_page,
	.invalidate_range	= mn_invalidate_range,
};

static void set_pri_tag_status(struct pasid_state *pasid_state,
			       u16 tag, int status)
{
	unsigned long flags;

	spin_lock_irqsave(&pasid_state->lock, flags);
	pasid_state->pri[tag].status = status;
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}

static void finish_pri_tag(struct device_state *dev_state,
			   struct pasid_state *pasid_state,
			   u16 tag)
{
	unsigned long flags;

	spin_lock_irqsave(&pasid_state->lock, flags);
	if (atomic_dec_and_test(&pasid_state->pri[tag].inflight) &&
	    pasid_state->pri[tag].finish) {
		amd_iommu_complete_ppr(dev_state->pdev, pasid_state->pasid,
				       pasid_state->pri[tag].status, tag);
		pasid_state->pri[tag].finish = false;
		pasid_state->pri[tag].status = PPR_SUCCESS;
	}
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}
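
/*
 * Several page requests may share one PRI tag; only the request carrying
 * the "finish" flag requires a PPR completion message. The inflight counter
 * above ensures the completion for a tag is sent exactly once, by whichever
 * of ppr_notifier() and do_fault() drops the counter to zero last.
 */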

static void handle_fault_error(struct fault *fault)
{
	int status;

	if (!fault->dev_state->inv_ppr_cb) {
		set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
		return;
	}

	status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
					      fault->pasid,
					      fault->address,
					      fault->flags);
	switch (status) {
	case AMD_IOMMU_INV_PRI_RSP_SUCCESS:
		set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);
		break;
	case AMD_IOMMU_INV_PRI_RSP_INVALID:
		set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
		break;
	case AMD_IOMMU_INV_PRI_RSP_FAIL:
		set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE);
		break;
	default:
		BUG();
	}
}

static bool access_error(struct vm_area_struct *vma, struct fault *fault)
{
	unsigned long requested = 0;

	if (fault->flags & PPR_FAULT_EXEC)
		requested |= VM_EXEC;

	if (fault->flags & PPR_FAULT_READ)
		requested |= VM_READ;

	if (fault->flags & PPR_FAULT_WRITE)
		requested |= VM_WRITE;

	return (requested & ~vma->vm_flags) != 0;
}
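
/*
 * Example: a write fault (PPR_FAULT_WRITE) against a read-only mapping
 * gives requested == VM_WRITE while vma->vm_flags contains only VM_READ,
 * so requested & ~vma->vm_flags == VM_WRITE != 0 and the access is
 * rejected.
 */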

static void do_fault(struct work_struct *work)
{
	struct fault *fault = container_of(work, struct fault, work);
	struct vm_area_struct *vma;
	int ret = VM_FAULT_ERROR;
	unsigned int flags = 0;
	struct mm_struct *mm;
	u64 address;

	mm = fault->state->mm;
	address = fault->address;

	if (fault->flags & PPR_FAULT_USER)
		flags |= FAULT_FLAG_USER;
	if (fault->flags & PPR_FAULT_WRITE)
		flags |= FAULT_FLAG_WRITE;
	flags |= FAULT_FLAG_REMOTE;

	down_read(&mm->mmap_sem);
	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start)
		/* failed to get a vma in the right range */
		goto out;

	/* Check if we have the right permissions on the vma */
	if (access_error(vma, fault))
		goto out;

	ret = handle_mm_fault(mm, vma, address, flags);
out:
	up_read(&mm->mmap_sem);

	if (ret & VM_FAULT_ERROR)
		/* failed to service fault */
		handle_fault_error(fault);

	finish_pri_tag(fault->dev_state, fault->state, fault->tag);

	put_pasid_state(fault->state);

	kfree(fault);
}

static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
{
	struct amd_iommu_fault *iommu_fault;
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	unsigned long flags;
	struct fault *fault;
	bool finish;
	u16 tag;
	int ret;

	iommu_fault = data;
	tag         = iommu_fault->tag & 0x1ff;
	finish      = (iommu_fault->tag >> 9) & 1;
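	/*
	 * The hardware encodes the 9-bit PRI tag in bits 0-8 of the fault's
	 * tag field and the "finish" (last request in the group) flag in
	 * bit 9. A tag field of 0x305, for instance, decodes to tag 0x105
	 * with finish == 1.
	 */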

	ret = NOTIFY_DONE;
	dev_state = get_device_state(iommu_fault->device_id);
	if (dev_state == NULL)
		goto out;

	pasid_state = get_pasid_state(dev_state, iommu_fault->pasid);
	if (pasid_state == NULL || pasid_state->invalid) {
		/* We know the device but not the PASID -> send INVALID */
		amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid,
				       PPR_INVALID, tag);
		goto out_drop_state;
	}

	spin_lock_irqsave(&pasid_state->lock, flags);
	atomic_inc(&pasid_state->pri[tag].inflight);
	if (finish)
		pasid_state->pri[tag].finish = true;
	spin_unlock_irqrestore(&pasid_state->lock, flags);

	fault = kzalloc(sizeof(*fault), GFP_ATOMIC);
	if (fault == NULL) {
		/* We are OOM - send success and let the device re-fault */
		finish_pri_tag(dev_state, pasid_state, tag);
		goto out_drop_state;
	}

	fault->dev_state = dev_state;
	fault->address   = iommu_fault->address;
	fault->state     = pasid_state;
	fault->tag       = tag;
	fault->finish    = finish;
	fault->pasid     = iommu_fault->pasid;
	fault->flags     = iommu_fault->flags;
	INIT_WORK(&fault->work, do_fault);

	queue_work(iommu_wq, &fault->work);

	ret = NOTIFY_OK;

out_drop_state:
	if (ret != NOTIFY_OK && pasid_state)
		put_pasid_state(pasid_state);

	put_device_state(dev_state);

out:
	return ret;
}

static struct notifier_block ppr_nb = {
	.notifier_call = ppr_notifier,
};

int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
			 struct task_struct *task)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	struct mm_struct *mm;
	u16 devid;
	int ret;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid     = device_id(pdev);
	dev_state = get_device_state(devid);

	if (dev_state == NULL)
		return -EINVAL;

	ret = -EINVAL;
	if (pasid < 0 || pasid >= dev_state->max_pasids)
		goto out;

	ret = -ENOMEM;
	pasid_state = kzalloc(sizeof(*pasid_state), GFP_KERNEL);
	if (pasid_state == NULL)
		goto out;

	atomic_set(&pasid_state->count, 1);
	init_waitqueue_head(&pasid_state->wq);
	spin_lock_init(&pasid_state->lock);

	mm                        = get_task_mm(task);
	pasid_state->mm           = mm;
	pasid_state->device_state = dev_state;
	pasid_state->pasid        = pasid;
	pasid_state->invalid      = true; /* Marked valid only once setup of
					     the pasid is complete */
	pasid_state->mn.ops       = &iommu_mn;

	if (pasid_state->mm == NULL)
		goto out_free;

	mmu_notifier_register(&pasid_state->mn, mm);

	ret = set_pasid_state(dev_state, pasid_state, pasid);
	if (ret)
		goto out_unregister;

	ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid,
					__pa(pasid_state->mm->pgd));
	if (ret)
		goto out_clear_state;

	/* Now we are ready to handle faults */
	pasid_state->invalid = false;

	/*
	 * Drop the reference to the mm_struct here. We rely on the
	 * mmu_notifier release call-back to inform us when the mm
	 * is going away.
	 */
	mmput(mm);

	return 0;

out_clear_state:
	clear_pasid_state(dev_state, pasid);

out_unregister:
	mmu_notifier_unregister(&pasid_state->mn, mm);
	mmput(mm);

out_free:
	free_pasid_state(pasid_state);

out:
	put_device_state(dev_state);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_bind_pasid);
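
/*
 * Note on the bind above: amd_iommu_domain_set_gcr3() installs the physical
 * address of the process page-table root (__pa(mm->pgd)) into the device's
 * GCR3 table, so DMA issued with this PASID is translated through the same
 * page tables the CPU uses for the task.
 */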

void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	u16 devid;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return;

	devid = device_id(pdev);
	dev_state = get_device_state(devid);
	if (dev_state == NULL)
		return;

	if (pasid < 0 || pasid >= dev_state->max_pasids)
		goto out;

	pasid_state = get_pasid_state(dev_state, pasid);
	if (pasid_state == NULL)
		goto out;
	/*
	 * Drop reference taken here. We are safe because we still hold
	 * the reference taken in the amd_iommu_bind_pasid function.
	 */
	put_pasid_state(pasid_state);

	/* Clear the pasid state so that the pasid can be re-used */
	clear_pasid_state(dev_state, pasid_state->pasid);

	/*
	 * Call mmu_notifier_unregister to drop our reference
	 * to pasid_state->mm
	 */
	mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

	put_pasid_state_wait(pasid_state); /* Reference taken in
					      amd_iommu_bind_pasid */
out:
	/* Drop reference taken in this function */
	put_device_state(dev_state);

	/* Drop reference taken in amd_iommu_bind_pasid */
	put_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_unbind_pasid);

int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
{
	struct device_state *dev_state;
	struct iommu_group *group;
	unsigned long flags;
	int ret, tmp;
	u16 devid;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	if (pasids <= 0 || pasids > (PASID_MASK + 1))
		return -EINVAL;

	devid = device_id(pdev);

	dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
	if (dev_state == NULL)
		return -ENOMEM;

	spin_lock_init(&dev_state->lock);
	init_waitqueue_head(&dev_state->wq);
	dev_state->pdev  = pdev;
	dev_state->devid = devid;

	tmp = pasids;
	for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
		dev_state->pasid_levels += 1;
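
	/*
	 * Example: pasids == 65536 leaves the loop after one iteration
	 * (65535 has bits set above the low nine), so pasid_levels == 1:
	 * a root page of pointers to leaf pages of pasid_state pointers.
	 * pasids <= 512 fit a single leaf page and keep pasid_levels == 0.
	 */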

	atomic_set(&dev_state->count, 1);
	dev_state->max_pasids = pasids;

	ret = -ENOMEM;
	dev_state->states = (void *)get_zeroed_page(GFP_KERNEL);
	if (dev_state->states == NULL)
		goto out_free_dev_state;

	dev_state->domain = iommu_domain_alloc(&pci_bus_type);
	if (dev_state->domain == NULL)
		goto out_free_states;

	amd_iommu_domain_direct_map(dev_state->domain);

	ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
	if (ret)
		goto out_free_domain;

	group = iommu_group_get(&pdev->dev);
	if (!group) {
		ret = -EINVAL;
		goto out_free_domain;
	}

	ret = iommu_attach_group(dev_state->domain, group);
	if (ret != 0)
		goto out_drop_group;

	iommu_group_put(group);

	spin_lock_irqsave(&state_lock, flags);

	ret = -EBUSY;
	if (__get_device_state(devid) != NULL) {
		spin_unlock_irqrestore(&state_lock, flags);
		goto out_free_domain;
	}

	list_add_tail(&dev_state->list, &state_list);

	spin_unlock_irqrestore(&state_lock, flags);

	return 0;

out_drop_group:
	iommu_group_put(group);

out_free_domain:
	iommu_domain_free(dev_state->domain);

out_free_states:
	free_page((unsigned long)dev_state->states);

out_free_dev_state:
	kfree(dev_state);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_init_device);

void amd_iommu_free_device(struct pci_dev *pdev)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;

	if (!amd_iommu_v2_supported())
		return;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	dev_state = __get_device_state(devid);
	if (dev_state == NULL) {
		spin_unlock_irqrestore(&state_lock, flags);
		return;
	}

	list_del(&dev_state->list);

	spin_unlock_irqrestore(&state_lock, flags);

	/* Get rid of any remaining pasid states */
	free_pasid_states(dev_state);

	put_device_state(dev_state);
	/*
	 * Wait until the last reference is dropped before freeing
	 * the device state.
	 */
	wait_event(dev_state->wq, !atomic_read(&dev_state->count));
	free_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_free_device);
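
/*
 * Typical calling sequence for a consumer of this API (illustrative sketch;
 * the pdev and pasid values are hypothetical, the amd_iommu_* calls are the
 * functions exported by this file):
 *
 *	ret = amd_iommu_init_device(pdev, 16);	// allow up to 16 PASIDs
 *	if (ret)
 *		return ret;
 *	ret = amd_iommu_bind_pasid(pdev, pasid, current);
 *	...					// device faults pages in via PRI
 *	amd_iommu_unbind_pasid(pdev, pasid);
 *	amd_iommu_free_device(pdev);
 */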

int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
				 amd_iommu_invalid_ppr_cb cb)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;
	int ret;

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	ret = -EINVAL;
	dev_state = __get_device_state(devid);
	if (dev_state == NULL)
		goto out_unlock;

	dev_state->inv_ppr_cb = cb;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&state_lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalid_ppr_cb);

int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
				    amd_iommu_invalidate_ctx cb)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;
	int ret;

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	ret = -EINVAL;
	dev_state = __get_device_state(devid);
	if (dev_state == NULL)
		goto out_unlock;

	dev_state->inv_ctx_cb = cb;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&state_lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb);

static int __init amd_iommu_v2_init(void)
{
	int ret;

	pr_info("AMD IOMMUv2 driver by Joerg Roedel <jroedel@suse.de>\n");

	if (!amd_iommu_v2_supported()) {
		pr_info("AMD IOMMUv2 functionality not available on this system\n");

		/*
		 * Load anyway to provide the symbols to other modules
		 * which may use AMD IOMMUv2 optionally.
		 */
		return 0;
	}

	spin_lock_init(&state_lock);

	ret = -ENOMEM;
	iommu_wq = create_workqueue("amd_iommu_v2");
	if (iommu_wq == NULL)
		goto out;

	amd_iommu_register_ppr_notifier(&ppr_nb);

	return 0;

out:
	return ret;
}

static void __exit amd_iommu_v2_exit(void)
{
	struct device_state *dev_state;
	int i;

	if (!amd_iommu_v2_supported())
		return;

	amd_iommu_unregister_ppr_notifier(&ppr_nb);

	flush_workqueue(iommu_wq);

	/*
	 * The loop below might call flush_workqueue(), so call
	 * destroy_workqueue() after it
	 */
	for (i = 0; i < MAX_DEVICES; ++i) {
		dev_state = get_device_state(i);

		if (dev_state == NULL)
			continue;

		WARN_ON_ONCE(1);

		put_device_state(dev_state);
		amd_iommu_free_device(dev_state->pdev);
	}

	destroy_workqueue(iommu_wq);
}

module_init(amd_iommu_v2_init);
module_exit(amd_iommu_v2_exit);