drivers/iommu/dmar.c
1 /*
2  * Copyright (c) 2006, Intel Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15  * Place - Suite 330, Boston, MA 02111-1307 USA.
16  *
17  * Copyright (C) 2006-2008 Intel Corporation
18  * Author: Ashok Raj <ashok.raj@intel.com>
19  * Author: Shaohua Li <shaohua.li@intel.com>
20  * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
21  *
22  * This file implements early detection/parsing of Remapping Devices
23  * reported to the OS by the BIOS via DMA remapping reporting (DMAR) ACPI
24  * tables.
25  *
26  * These routines are used by both DMA-remapping and Interrupt-remapping
27  */
28
29 #define pr_fmt(fmt)     "DMAR: " fmt
30
31 #include <linux/pci.h>
32 #include <linux/dmar.h>
33 #include <linux/iova.h>
34 #include <linux/intel-iommu.h>
35 #include <linux/timer.h>
36 #include <linux/irq.h>
37 #include <linux/interrupt.h>
38 #include <linux/tboot.h>
39 #include <linux/dmi.h>
40 #include <linux/slab.h>
41 #include <linux/iommu.h>
42 #include <asm/irq_remapping.h>
43 #include <asm/iommu_table.h>
44
45 #include "irq_remapping.h"
46
47 typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
48 struct dmar_res_callback {
49         dmar_res_handler_t      cb[ACPI_DMAR_TYPE_RESERVED];
50         void                    *arg[ACPI_DMAR_TYPE_RESERVED];
51         bool                    ignore_unhandled;
52         bool                    print_entry;
53 };
54
55 /*
56  * Assumptions:
57  * 1) The hotplug framework guarantees that a DMAR unit will be hot-added
58  *    before the IO devices managed by that unit.
59  * 2) The hotplug framework guarantees that a DMAR unit will be hot-removed
60  *    after the IO devices managed by that unit.
61  * 3) Hotplug events are rare.
62  *
63  * Locking rules for DMA and interrupt remapping related global data structures:
64  * 1) Use dmar_global_lock in process context
65  * 2) Use RCU in interrupt context
66  */
67 DECLARE_RWSEM(dmar_global_lock);
68 LIST_HEAD(dmar_drhd_units);
69
70 struct acpi_table_header * __initdata dmar_tbl;
71 static acpi_size dmar_tbl_size;
72 static int dmar_dev_scope_status = 1;
73 static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];
74
75 static int alloc_iommu(struct dmar_drhd_unit *drhd);
76 static void free_iommu(struct intel_iommu *iommu);
77
78 static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
79 {
80         /*
81          * Add the INCLUDE_ALL unit at the tail, so a scan of the list will
82          * find it at the very end.
83          */
84         if (drhd->include_all)
85                 list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
86         else
87                 list_add_rcu(&drhd->list, &dmar_drhd_units);
88 }
89
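/*
 * Count the device scope entries between @start and @end that this code
 * handles (namespace, endpoint and bridge entries) and allocate a matching
 * array of struct dmar_dev_scope. Returns NULL if there is nothing to track.
 */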
90 void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
91 {
92         struct acpi_dmar_device_scope *scope;
93
94         *cnt = 0;
95         while (start < end) {
96                 scope = start;
97                 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_NAMESPACE ||
98                     scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
99                     scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
100                         (*cnt)++;
101                 else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
102                         scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
103                         pr_warn("Unsupported device scope\n");
104                 }
105                 start += scope->length;
106         }
107         if (*cnt == 0)
108                 return NULL;
109
110         return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
111 }
112
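/*
 * Drop the reference held on every active device in the scope array and
 * free the array itself; *devices is reset to NULL and *cnt to 0.
 */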
113 void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
114 {
115         int i;
116         struct device *tmp_dev;
117
118         if (*devices && *cnt) {
119                 for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
120                         put_device(tmp_dev);
121                 kfree(*devices);
122         }
123
124         *devices = NULL;
125         *cnt = 0;
126 }
127
128 /* Optimize out kzalloc()/kfree() for normal cases */
129 static char dmar_pci_notify_info_buf[64];
130
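/*
 * Build a notification record for @dev. For BUS_NOTIFY_ADD_DEVICE the
 * bus/device/function path up to the root bus is recorded in path[] so it
 * can be matched against ACPI device scope entries later. Small records
 * reuse the static buffer above instead of going through kzalloc().
 */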
131 static struct dmar_pci_notify_info *
132 dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
133 {
134         int level = 0;
135         size_t size;
136         struct pci_dev *tmp;
137         struct dmar_pci_notify_info *info;
138
139         BUG_ON(dev->is_virtfn);
140
141         /* Only generate path[] for device addition event */
142         if (event == BUS_NOTIFY_ADD_DEVICE)
143                 for (tmp = dev; tmp; tmp = tmp->bus->self)
144                         level++;
145
146         size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path);
147         if (size <= sizeof(dmar_pci_notify_info_buf)) {
148                 info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
149         } else {
150                 info = kzalloc(size, GFP_KERNEL);
151                 if (!info) {
152                         pr_warn("Out of memory when allocating notify_info "
153                                 "for %s.\n", pci_name(dev));
154                         if (dmar_dev_scope_status == 0)
155                                 dmar_dev_scope_status = -ENOMEM;
156                         return NULL;
157                 }
158         }
159
160         info->event = event;
161         info->dev = dev;
162         info->seg = pci_domain_nr(dev->bus);
163         info->level = level;
164         if (event == BUS_NOTIFY_ADD_DEVICE) {
165                 for (tmp = dev; tmp; tmp = tmp->bus->self) {
166                         level--;
167                         info->path[level].bus = tmp->bus->number;
168                         info->path[level].device = PCI_SLOT(tmp->devfn);
169                         info->path[level].function = PCI_FUNC(tmp->devfn);
170                         if (pci_is_root_bus(tmp->bus))
171                                 info->bus = tmp->bus->number;
172                 }
173         }
174
175         return info;
176 }
177
178 static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
179 {
180         if ((void *)info != dmar_pci_notify_info_buf)
181                 kfree(info);
182 }
183
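/*
 * Check whether the PCI path recorded in @info matches the ACPI device
 * scope path starting at @bus. As a fallback, a single-entry scope path is
 * compared directly against the device's own bus/slot/function to work
 * around broken RMRR entries in some BIOSes.
 */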
184 static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
185                                 struct acpi_dmar_pci_path *path, int count)
186 {
187         int i;
188
189         if (info->bus != bus)
190                 goto fallback;
191         if (info->level != count)
192                 goto fallback;
193
194         for (i = 0; i < count; i++) {
195                 if (path[i].device != info->path[i].device ||
196                     path[i].function != info->path[i].function)
197                         goto fallback;
198         }
199
200         return true;
201
202 fallback:
203
204         if (count != 1)
205                 return false;
206
207         i = info->level - 1;
208         if (bus              == info->path[i].bus &&
209             path[0].device   == info->path[i].device &&
210             path[0].function == info->path[i].function) {
211                 pr_info(FW_BUG "RMRR entry for device %02x:%02x.%x is broken - applying workaround\n",
212                         bus, path[0].device, path[0].function);
213                 return true;
214         }
215
216         return false;
217 }
218
219 /* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
220 int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
221                           void *start, void *end, u16 segment,
222                           struct dmar_dev_scope *devices,
223                           int devices_cnt)
224 {
225         int i, level;
226         struct device *tmp, *dev = &info->dev->dev;
227         struct acpi_dmar_device_scope *scope;
228         struct acpi_dmar_pci_path *path;
229
230         if (segment != info->seg)
231                 return 0;
232
233         for (; start < end; start += scope->length) {
234                 scope = start;
235                 if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
236                     scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
237                         continue;
238
239                 path = (struct acpi_dmar_pci_path *)(scope + 1);
240                 level = (scope->length - sizeof(*scope)) / sizeof(*path);
241                 if (!dmar_match_pci_path(info, scope->bus, path, level))
242                         continue;
243
244                 if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT) ^
245                     (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL)) {
246                         pr_warn("Device scope type does not match for %s\n",
247                                 pci_name(info->dev));
248                         return -EINVAL;
249                 }
250
251                 for_each_dev_scope(devices, devices_cnt, i, tmp)
252                         if (tmp == NULL) {
253                                 devices[i].bus = info->dev->bus->number;
254                                 devices[i].devfn = info->dev->devfn;
255                                 rcu_assign_pointer(devices[i].dev,
256                                                    get_device(dev));
257                                 return 1;
258                         }
259                 BUG_ON(i >= devices_cnt);
260         }
261
262         return 0;
263 }
264
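/*
 * Remove @info->dev from the device scope array: clear the RCU-protected
 * pointer, wait for readers to finish and drop the device reference.
 * Returns 1 if the device was found, 0 otherwise.
 */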
265 int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
266                           struct dmar_dev_scope *devices, int count)
267 {
268         int index;
269         struct device *tmp;
270
271         if (info->seg != segment)
272                 return 0;
273
274         for_each_active_dev_scope(devices, count, index, tmp)
275                 if (tmp == &info->dev->dev) {
276                         RCU_INIT_POINTER(devices[index].dev, NULL);
277                         synchronize_rcu();
278                         put_device(tmp);
279                         return 1;
280                 }
281
282         return 0;
283 }
284
285 static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
286 {
287         int ret = 0;
288         struct dmar_drhd_unit *dmaru;
289         struct acpi_dmar_hardware_unit *drhd;
290
291         for_each_drhd_unit(dmaru) {
292                 if (dmaru->include_all)
293                         continue;
294
295                 drhd = container_of(dmaru->hdr,
296                                     struct acpi_dmar_hardware_unit, header);
297                 ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
298                                 ((void *)drhd) + drhd->header.length,
299                                 dmaru->segment,
300                                 dmaru->devices, dmaru->devices_cnt);
301                 if (ret != 0)
302                         break;
303         }
304         if (ret >= 0)
305                 ret = dmar_iommu_notify_scope_dev(info);
306         if (ret < 0 && dmar_dev_scope_status == 0)
307                 dmar_dev_scope_status = ret;
308
309         return ret;
310 }
311
312 static void  dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
313 {
314         struct dmar_drhd_unit *dmaru;
315
316         for_each_drhd_unit(dmaru)
317                 if (dmar_remove_dev_scope(info, dmaru->segment,
318                         dmaru->devices, dmaru->devices_cnt))
319                         break;
320         dmar_iommu_notify_scope_dev(info);
321 }
322
323 static int dmar_pci_bus_notifier(struct notifier_block *nb,
324                                  unsigned long action, void *data)
325 {
326         struct pci_dev *pdev = to_pci_dev(data);
327         struct dmar_pci_notify_info *info;
328
329         /* Only care about add/remove events for physical functions */
330         if (pdev->is_virtfn)
331                 return NOTIFY_DONE;
332         if (action != BUS_NOTIFY_ADD_DEVICE && action != BUS_NOTIFY_DEL_DEVICE)
333                 return NOTIFY_DONE;
334
335         info = dmar_alloc_pci_notify_info(pdev, action);
336         if (!info)
337                 return NOTIFY_DONE;
338
339         down_write(&dmar_global_lock);
340         if (action == BUS_NOTIFY_ADD_DEVICE)
341                 dmar_pci_bus_add_dev(info);
342         else if (action == BUS_NOTIFY_DEL_DEVICE)
343                 dmar_pci_bus_del_dev(info);
344         up_write(&dmar_global_lock);
345
346         dmar_free_pci_notify_info(info);
347
348         return NOTIFY_OK;
349 }
350
351 static struct notifier_block dmar_pci_bus_nb = {
352         .notifier_call = dmar_pci_bus_notifier,
353         .priority = INT_MIN,
354 };
355
356 static struct dmar_drhd_unit *
357 dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
358 {
359         struct dmar_drhd_unit *dmaru;
360
361         list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
362                 if (dmaru->segment == drhd->segment &&
363                     dmaru->reg_base_addr == drhd->address)
364                         return dmaru;
365
366         return NULL;
367 }
368
369 /**
370  * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
371  * structure which uniquely represent one DMA remapping hardware unit
372  * present in the platform
373  */
374 static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
375 {
376         struct acpi_dmar_hardware_unit *drhd;
377         struct dmar_drhd_unit *dmaru;
378         int ret = 0;
379
380         drhd = (struct acpi_dmar_hardware_unit *)header;
381         dmaru = dmar_find_dmaru(drhd);
382         if (dmaru)
383                 goto out;
384
385         dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);
386         if (!dmaru)
387                 return -ENOMEM;
388
389         /*
390          * If header is allocated from slab by ACPI _DSM method, we need to
391          * copy the content because the memory buffer will be freed on return.
392          */
393         dmaru->hdr = (void *)(dmaru + 1);
394         memcpy(dmaru->hdr, header, header->length);
395         dmaru->reg_base_addr = drhd->address;
396         dmaru->segment = drhd->segment;
397         dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
398         dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
399                                               ((void *)drhd) + drhd->header.length,
400                                               &dmaru->devices_cnt);
401         if (dmaru->devices_cnt && dmaru->devices == NULL) {
402                 kfree(dmaru);
403                 return -ENOMEM;
404         }
405
406         ret = alloc_iommu(dmaru);
407         if (ret) {
408                 dmar_free_dev_scope(&dmaru->devices,
409                                     &dmaru->devices_cnt);
410                 kfree(dmaru);
411                 return ret;
412         }
413         dmar_register_drhd_unit(dmaru);
414
415 out:
416         if (arg)
417                 (*(int *)arg)++;
418
419         return 0;
420 }
421
422 static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
423 {
424         if (dmaru->devices && dmaru->devices_cnt)
425                 dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
426         if (dmaru->iommu)
427                 free_iommu(dmaru->iommu);
428         kfree(dmaru);
429 }
430
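/*
 * Parse one ACPI Name-space Device Declaration (ANDD) entry. Only the NUL
 * termination of the device name is validated here; the name is resolved
 * to an ACPI device later in dmar_acpi_dev_scope_init().
 */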
431 static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
432                                       void *arg)
433 {
434         struct acpi_dmar_andd *andd = (void *)header;
435
436         /* Check for NUL termination within the designated length */
437         if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
438                 WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
439                            "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
440                            "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
441                            dmi_get_system_info(DMI_BIOS_VENDOR),
442                            dmi_get_system_info(DMI_BIOS_VERSION),
443                            dmi_get_system_info(DMI_PRODUCT_VERSION));
444                 return -EINVAL;
445         }
446         pr_info("ANDD device: %x name: %s\n", andd->device_number,
447                 andd->device_name);
448
449         return 0;
450 }
451
452 #ifdef CONFIG_ACPI_NUMA
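/*
 * Bind the IOMMU of the DRHD whose register base matches this RHSA entry
 * to the NUMA node derived from the RHSA proximity domain.
 */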
453 static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
454 {
455         struct acpi_dmar_rhsa *rhsa;
456         struct dmar_drhd_unit *drhd;
457
458         rhsa = (struct acpi_dmar_rhsa *)header;
459         for_each_drhd_unit(drhd) {
460                 if (drhd->reg_base_addr == rhsa->base_address) {
461                         int node = acpi_map_pxm_to_node(rhsa->proximity_domain);
462
463                         if (!node_online(node))
464                                 node = -1;
465                         drhd->iommu->node = node;
466                         return 0;
467                 }
468         }
469         WARN_TAINT(
470                 1, TAINT_FIRMWARE_WORKAROUND,
471                 "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
472                 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
473                 drhd->reg_base_addr,
474                 dmi_get_system_info(DMI_BIOS_VENDOR),
475                 dmi_get_system_info(DMI_BIOS_VERSION),
476                 dmi_get_system_info(DMI_PRODUCT_VERSION));
477
478         return 0;
479 }
480 #else
481 #define dmar_parse_one_rhsa             dmar_res_noop
482 #endif
483
484 static void __init
485 dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
486 {
487         struct acpi_dmar_hardware_unit *drhd;
488         struct acpi_dmar_reserved_memory *rmrr;
489         struct acpi_dmar_atsr *atsr;
490         struct acpi_dmar_rhsa *rhsa;
491
492         switch (header->type) {
493         case ACPI_DMAR_TYPE_HARDWARE_UNIT:
494                 drhd = container_of(header, struct acpi_dmar_hardware_unit,
495                                     header);
496                 pr_info("DRHD base: %#016Lx flags: %#x\n",
497                         (unsigned long long)drhd->address, drhd->flags);
498                 break;
499         case ACPI_DMAR_TYPE_RESERVED_MEMORY:
500                 rmrr = container_of(header, struct acpi_dmar_reserved_memory,
501                                     header);
502                 pr_info("RMRR base: %#016Lx end: %#016Lx\n",
503                         (unsigned long long)rmrr->base_address,
504                         (unsigned long long)rmrr->end_address);
505                 break;
506         case ACPI_DMAR_TYPE_ROOT_ATS:
507                 atsr = container_of(header, struct acpi_dmar_atsr, header);
508                 pr_info("ATSR flags: %#x\n", atsr->flags);
509                 break;
510         case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
511                 rhsa = container_of(header, struct acpi_dmar_rhsa, header);
512                 pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
513                        (unsigned long long)rhsa->base_address,
514                        rhsa->proximity_domain);
515                 break;
516         case ACPI_DMAR_TYPE_NAMESPACE:
517                 /* We don't print this here because we need to sanity-check
518                    it first. So print it in dmar_parse_one_andd() instead. */
519                 break;
520         }
521 }
522
523 /**
524  * dmar_table_detect - checks to see if the platform supports DMAR devices
525  */
526 static int __init dmar_table_detect(void)
527 {
528         acpi_status status = AE_OK;
529
530         /* if we can find the DMAR table, then there are DMAR devices */
531         status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
532                                 (struct acpi_table_header **)&dmar_tbl,
533                                 &dmar_tbl_size);
534
535         if (ACPI_SUCCESS(status) && !dmar_tbl) {
536                 pr_warn("Unable to map DMAR\n");
537                 status = AE_NOT_FOUND;
538         }
539
540         return (ACPI_SUCCESS(status) ? 1 : 0);
541 }
542
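/*
 * Walk the remapping structures in [start, start + len) and dispatch each
 * entry to the matching callback in @cb. Zero-length and truncated records
 * terminate the walk to guard against malformed ACPI tables.
 */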
543 static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
544                                        size_t len, struct dmar_res_callback *cb)
545 {
546         int ret = 0;
547         struct acpi_dmar_header *iter, *next;
548         struct acpi_dmar_header *end = ((void *)start) + len;
549
550         for (iter = start; iter < end && ret == 0; iter = next) {
551                 next = (void *)iter + iter->length;
552                 if (iter->length == 0) {
553                         /* Avoid looping forever on bad ACPI tables */
554                         pr_debug(FW_BUG "Invalid 0-length structure\n");
555                         break;
556                 } else if (next > end) {
557                         /* Avoid walking past the table end */
558                         pr_warn(FW_BUG "Record passes table end\n");
559                         ret = -EINVAL;
560                         break;
561                 }
562
563                 if (cb->print_entry)
564                         dmar_table_print_dmar_entry(iter);
565
566                 if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
567                         /* continue for forward compatibility */
568                         pr_debug("Unknown DMAR structure type %d\n",
569                                  iter->type);
570                 } else if (cb->cb[iter->type]) {
571                         ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
572                 } else if (!cb->ignore_unhandled) {
573                         pr_warn("No handler for DMAR structure type %d\n",
574                                 iter->type);
575                         ret = -EINVAL;
576                 }
577         }
578
579         return ret;
580 }
581
582 static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
583                                        struct dmar_res_callback *cb)
584 {
585         return dmar_walk_remapping_entries((void *)(dmar + 1),
586                         dmar->header.length - sizeof(*dmar), cb);
587 }
588
589 /**
590  * parse_dmar_table - parses the DMA reporting table
591  */
592 static int __init
593 parse_dmar_table(void)
594 {
595         struct acpi_table_dmar *dmar;
596         int ret = 0;
597         int drhd_count = 0;
598         struct dmar_res_callback cb = {
599                 .print_entry = true,
600                 .ignore_unhandled = true,
601                 .arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
602                 .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
603                 .cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
604                 .cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
605                 .cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
606                 .cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
607         };
608
609         /*
610          * Do it again; the earlier dmar_tbl mapping could have been done
611          * with a fixed map.
612          */
613         dmar_table_detect();
614
615         /*
616          * ACPI tables may not be DMA protected by tboot, so use the DMAR copy
617          * that SINIT saved in SinitMleData in the TXT heap (which is DMA protected)
618          */
619         dmar_tbl = tboot_get_dmar_table(dmar_tbl);
620
621         dmar = (struct acpi_table_dmar *)dmar_tbl;
622         if (!dmar)
623                 return -ENODEV;
624
625         if (dmar->width < PAGE_SHIFT - 1) {
626                 pr_warn("Invalid DMAR haw\n");
627                 return -EINVAL;
628         }
629
630         pr_info("Host address width %d\n", dmar->width + 1);
631         ret = dmar_walk_dmar_table(dmar, &cb);
632         if (ret == 0 && drhd_count == 0)
633                 pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
634
635         return ret;
636 }
637
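/*
 * Return 1 if @dev or any of its parent bridges appears in the given
 * device scope array, 0 otherwise.
 */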
638 static int dmar_pci_device_match(struct dmar_dev_scope devices[],
639                                  int cnt, struct pci_dev *dev)
640 {
641         int index;
642         struct device *tmp;
643
644         while (dev) {
645                 for_each_active_dev_scope(devices, cnt, index, tmp)
646                         if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
647                                 return 1;
648
649                 /* Check our parent */
650                 dev = dev->bus->self;
651         }
652
653         return 0;
654 }
655
656 struct dmar_drhd_unit *
657 dmar_find_matched_drhd_unit(struct pci_dev *dev)
658 {
659         struct dmar_drhd_unit *dmaru;
660         struct acpi_dmar_hardware_unit *drhd;
661
662         dev = pci_physfn(dev);
663
664         rcu_read_lock();
665         for_each_drhd_unit(dmaru) {
666                 drhd = container_of(dmaru->hdr,
667                                     struct acpi_dmar_hardware_unit,
668                                     header);
669
670                 if (dmaru->include_all &&
671                     drhd->segment == pci_domain_nr(dev->bus))
672                         goto out;
673
674                 if (dmar_pci_device_match(dmaru->devices,
675                                           dmaru->devices_cnt, dev))
676                         goto out;
677         }
678         dmaru = NULL;
679 out:
680         rcu_read_unlock();
681
682         return dmaru;
683 }
684
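/*
 * Bind the ACPI device behind an ANDD entry (identified by its enumeration
 * ID) to the matching namespace device scope entry of a DRHD unit, using
 * the bus/devfn given by the scope's PCI path.
 */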
685 static void __init dmar_acpi_insert_dev_scope(u8 device_number,
686                                               struct acpi_device *adev)
687 {
688         struct dmar_drhd_unit *dmaru;
689         struct acpi_dmar_hardware_unit *drhd;
690         struct acpi_dmar_device_scope *scope;
691         struct device *tmp;
692         int i;
693         struct acpi_dmar_pci_path *path;
694
695         for_each_drhd_unit(dmaru) {
696                 drhd = container_of(dmaru->hdr,
697                                     struct acpi_dmar_hardware_unit,
698                                     header);
699
700                 for (scope = (void *)(drhd + 1);
701                      (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
702                      scope = ((void *)scope) + scope->length) {
703                         if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE)
704                                 continue;
705                         if (scope->enumeration_id != device_number)
706                                 continue;
707
708                         path = (void *)(scope + 1);
709                         pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
710                                 dev_name(&adev->dev), dmaru->reg_base_addr,
711                                 scope->bus, path->device, path->function);
712                         for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
713                                 if (tmp == NULL) {
714                                         dmaru->devices[i].bus = scope->bus;
715                                         dmaru->devices[i].devfn = PCI_DEVFN(path->device,
716                                                                             path->function);
717                                         rcu_assign_pointer(dmaru->devices[i].dev,
718                                                            get_device(&adev->dev));
719                                         return;
720                                 }
721                         BUG_ON(i >= dmaru->devices_cnt);
722                 }
723         }
724         pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
725                 device_number, dev_name(&adev->dev));
726 }
727
728 static int __init dmar_acpi_dev_scope_init(void)
729 {
730         struct acpi_dmar_andd *andd;
731
732         if (dmar_tbl == NULL)
733                 return -ENODEV;
734
735         for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
736              ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
737              andd = ((void *)andd) + andd->header.length) {
738                 if (andd->header.type == ACPI_DMAR_TYPE_NAMESPACE) {
739                         acpi_handle h;
740                         struct acpi_device *adev;
741
742                         if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
743                                                           andd->device_name,
744                                                           &h))) {
745                                 pr_err("Failed to find handle for ACPI object %s\n",
746                                        andd->device_name);
747                                 continue;
748                         }
749                         if (acpi_bus_get_device(h, &adev)) {
750                                 pr_err("Failed to get device for ACPI object %s\n",
751                                        andd->device_name);
752                                 continue;
753                         }
754                         dmar_acpi_insert_dev_scope(andd->device_number, adev);
755                 }
756         }
757         return 0;
758 }
759
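/*
 * Populate the device scope arrays of all DRHD units: first from ANDD
 * entries, then by walking every PCI physical function, and finally
 * register the bus notifier that keeps the scopes current across hotplug.
 */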
760 int __init dmar_dev_scope_init(void)
761 {
762         struct pci_dev *dev = NULL;
763         struct dmar_pci_notify_info *info;
764
765         if (dmar_dev_scope_status != 1)
766                 return dmar_dev_scope_status;
767
768         if (list_empty(&dmar_drhd_units)) {
769                 dmar_dev_scope_status = -ENODEV;
770         } else {
771                 dmar_dev_scope_status = 0;
772
773                 dmar_acpi_dev_scope_init();
774
775                 for_each_pci_dev(dev) {
776                         if (dev->is_virtfn)
777                                 continue;
778
779                         info = dmar_alloc_pci_notify_info(dev,
780                                         BUS_NOTIFY_ADD_DEVICE);
781                         if (!info) {
782                                 return dmar_dev_scope_status;
783                         } else {
784                                 dmar_pci_bus_add_dev(info);
785                                 dmar_free_pci_notify_info(info);
786                         }
787                 }
788
789                 bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
790         }
791
792         return dmar_dev_scope_status;
793 }
794
795
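/*
 * Parse the DMAR table exactly once; later calls return the cached result.
 * Returns 0 on success, or a negative errno if the table is missing or
 * malformed.
 */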
796 int __init dmar_table_init(void)
797 {
798         static int dmar_table_initialized;
799         int ret;
800
801         if (dmar_table_initialized == 0) {
802                 ret = parse_dmar_table();
803                 if (ret < 0) {
804                         if (ret != -ENODEV)
805                                 pr_info("Parse DMAR table failure.\n");
806                 } else  if (list_empty(&dmar_drhd_units)) {
807                         pr_info("No DMAR devices found\n");
808                         ret = -ENODEV;
809                 }
810
811                 if (ret < 0)
812                         dmar_table_initialized = ret;
813                 else
814                         dmar_table_initialized = 1;
815         }
816
817         return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
818 }
819
820 static void warn_invalid_dmar(u64 addr, const char *message)
821 {
822         WARN_TAINT_ONCE(
823                 1, TAINT_FIRMWARE_WORKAROUND,
824                 "Your BIOS is broken; DMAR reported at address %llx%s!\n"
825                 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
826                 addr, message,
827                 dmi_get_system_info(DMI_BIOS_VENDOR),
828                 dmi_get_system_info(DMI_BIOS_VERSION),
829                 dmi_get_system_info(DMI_PRODUCT_VERSION));
830 }
831
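/*
 * Early sanity check of one DRHD entry: map its register page and make sure
 * the capability registers do not read back as all ones, which would
 * indicate a bogus register base address reported by the BIOS.
 */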
832 static int __ref
833 dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
834 {
835         struct acpi_dmar_hardware_unit *drhd;
836         void __iomem *addr;
837         u64 cap, ecap;
838
839         drhd = (void *)entry;
840         if (!drhd->address) {
841                 warn_invalid_dmar(0, "");
842                 return -EINVAL;
843         }
844
845         if (arg)
846                 addr = ioremap(drhd->address, VTD_PAGE_SIZE);
847         else
848                 addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
849         if (!addr) {
850                 pr_warn("Can't validate DRHD address: %llx\n", drhd->address);
851                 return -EINVAL;
852         }
853
854         cap = dmar_readq(addr + DMAR_CAP_REG);
855         ecap = dmar_readq(addr + DMAR_ECAP_REG);
856
857         if (arg)
858                 iounmap(addr);
859         else
860                 early_iounmap(addr, VTD_PAGE_SIZE);
861
862         if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
863                 warn_invalid_dmar(drhd->address, " returns all ones");
864                 return -EINVAL;
865         }
866
867         return 0;
868 }
869
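/*
 * Early detection entry point: look for the ACPI DMAR table, validate the
 * DRHD register ranges and, if everything looks sane, mark the IOMMU as
 * detected and (on x86) hook intel_iommu_init() into the boot-time IOMMU
 * initialization.
 */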
870 int __init detect_intel_iommu(void)
871 {
872         int ret;
873         struct dmar_res_callback validate_drhd_cb = {
874                 .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
875                 .ignore_unhandled = true,
876         };
877
878         down_write(&dmar_global_lock);
879         ret = dmar_table_detect();
880         if (ret)
881                 ret = !dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
882                                             &validate_drhd_cb);
883         if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
884                 iommu_detected = 1;
885                 /* Make sure ACS will be enabled */
886                 pci_request_acs();
887         }
888
889 #ifdef CONFIG_X86
890         if (ret)
891                 x86_init.iommu.iommu_init = intel_iommu_init;
892 #endif
893
894         early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
895         dmar_tbl = NULL;
896         up_write(&dmar_global_lock);
897
898         return ret ? 1 : -ENODEV;
899 }
900
901
902 static void unmap_iommu(struct intel_iommu *iommu)
903 {
904         iounmap(iommu->reg);
905         release_mem_region(iommu->reg_phys, iommu->reg_size);
906 }
907
908 /**
909  * map_iommu: map the iommu's registers
910  * @iommu: the iommu to map
911  * @phys_addr: the physical address of the base register
912  *
913  * Memory map the iommu's registers.  Start with a single page, and
914  * possibly expand if that turns out to be insufficient.
915  */
916 static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
917 {
918         int map_size, err = 0;
919
920         iommu->reg_phys = phys_addr;
921         iommu->reg_size = VTD_PAGE_SIZE;
922
923         if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
924                 pr_err("Can't reserve memory\n");
925                 err = -EBUSY;
926                 goto out;
927         }
928
929         iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
930         if (!iommu->reg) {
931                 pr_err("Can't map the region\n");
932                 err = -ENOMEM;
933                 goto release;
934         }
935
936         iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
937         iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
938
939         if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
940                 err = -EINVAL;
941                 warn_invalid_dmar(phys_addr, " returns all ones");
942                 goto unmap;
943         }
944
945         /* the registers might be more than one page */
946         map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
947                          cap_max_fault_reg_offset(iommu->cap));
948         map_size = VTD_PAGE_ALIGN(map_size);
949         if (map_size > iommu->reg_size) {
950                 iounmap(iommu->reg);
951                 release_mem_region(iommu->reg_phys, iommu->reg_size);
952                 iommu->reg_size = map_size;
953                 if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
954                                         iommu->name)) {
955                         pr_err("Can't reserve memory\n");
956                         err = -EBUSY;
957                         goto out;
958                 }
959                 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
960                 if (!iommu->reg) {
961                         pr_err("Can't map the region\n");
962                         err = -ENOMEM;
963                         goto release;
964                 }
965         }
966         err = 0;
967         goto out;
968
969 unmap:
970         iounmap(iommu->reg);
971 release:
972         release_mem_region(iommu->reg_phys, iommu->reg_size);
973 out:
974         return err;
975 }
976
977 static int dmar_alloc_seq_id(struct intel_iommu *iommu)
978 {
979         iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
980                                             DMAR_UNITS_SUPPORTED);
981         if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
982                 iommu->seq_id = -1;
983         } else {
984                 set_bit(iommu->seq_id, dmar_seq_ids);
985                 sprintf(iommu->name, "dmar%d", iommu->seq_id);
986         }
987
988         return iommu->seq_id;
989 }
990
991 static void dmar_free_seq_id(struct intel_iommu *iommu)
992 {
993         if (iommu->seq_id >= 0) {
994                 clear_bit(iommu->seq_id, dmar_seq_ids);
995                 iommu->seq_id = -1;
996         }
997 }
998
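/*
 * Allocate and minimally initialize the intel_iommu structure for a DRHD
 * unit: assign a sequence ID, map the register set, read the capability
 * registers and reflect the hardware's current enable bits in gcmd.
 */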
999 static int alloc_iommu(struct dmar_drhd_unit *drhd)
1000 {
1001         struct intel_iommu *iommu;
1002         u32 ver, sts;
1003         int agaw = 0;
1004         int msagaw = 0;
1005         int err;
1006
1007         if (!drhd->reg_base_addr) {
1008                 warn_invalid_dmar(0, "");
1009                 return -EINVAL;
1010         }
1011
1012         iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
1013         if (!iommu)
1014                 return -ENOMEM;
1015
1016         if (dmar_alloc_seq_id(iommu) < 0) {
1017                 pr_err("Failed to allocate seq_id\n");
1018                 err = -ENOSPC;
1019                 goto error;
1020         }
1021
1022         err = map_iommu(iommu, drhd->reg_base_addr);
1023         if (err) {
1024                 pr_err("Failed to map %s\n", iommu->name);
1025                 goto error_free_seq_id;
1026         }
1027
1028         err = -EINVAL;
1029         agaw = iommu_calculate_agaw(iommu);
1030         if (agaw < 0) {
1031                 pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
1032                         iommu->seq_id);
1033                 goto err_unmap;
1034         }
1035         msagaw = iommu_calculate_max_sagaw(iommu);
1036         if (msagaw < 0) {
1037                 pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
1038                         iommu->seq_id);
1039                 goto err_unmap;
1040         }
1041         iommu->agaw = agaw;
1042         iommu->msagaw = msagaw;
1043         iommu->segment = drhd->segment;
1044
1045         iommu->node = -1;
1046
1047         ver = readl(iommu->reg + DMAR_VER_REG);
1048         pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
1049                 iommu->name,
1050                 (unsigned long long)drhd->reg_base_addr,
1051                 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
1052                 (unsigned long long)iommu->cap,
1053                 (unsigned long long)iommu->ecap);
1054
1055         /* Reflect status in gcmd */
1056         sts = readl(iommu->reg + DMAR_GSTS_REG);
1057         if (sts & DMA_GSTS_IRES)
1058                 iommu->gcmd |= DMA_GCMD_IRE;
1059         if (sts & DMA_GSTS_TES)
1060                 iommu->gcmd |= DMA_GCMD_TE;
1061         if (sts & DMA_GSTS_QIES)
1062                 iommu->gcmd |= DMA_GCMD_QIE;
1063
1064         raw_spin_lock_init(&iommu->register_lock);
1065
1066         if (intel_iommu_enabled) {
1067                 iommu->iommu_dev = iommu_device_create(NULL, iommu,
1068                                                        intel_iommu_groups,
1069                                                        "%s", iommu->name);
1070
1071                 if (IS_ERR(iommu->iommu_dev)) {
1072                         err = PTR_ERR(iommu->iommu_dev);
1073                         goto err_unmap;
1074                 }
1075         }
1076
1077         drhd->iommu = iommu;
1078
1079         return 0;
1080
1081 err_unmap:
1082         unmap_iommu(iommu);
1083 error_free_seq_id:
1084         dmar_free_seq_id(iommu);
1085 error:
1086         kfree(iommu);
1087         return err;
1088 }
1089
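/*
 * Tear down everything alloc_iommu() and the interrupt/QI setup paths
 * created for this IOMMU: interrupts, the invalidation queue, the register
 * mapping and the sequence ID.
 */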
1090 static void free_iommu(struct intel_iommu *iommu)
1091 {
1092         iommu_device_destroy(iommu->iommu_dev);
1093
1094         if (iommu->irq) {
1095                 if (iommu->pr_irq) {
1096                         free_irq(iommu->pr_irq, iommu);
1097                         dmar_free_hwirq(iommu->pr_irq);
1098                         iommu->pr_irq = 0;
1099                 }
1100                 free_irq(iommu->irq, iommu);
1101                 dmar_free_hwirq(iommu->irq);
1102                 iommu->irq = 0;
1103         }
1104
1105         if (iommu->qi) {
1106                 free_page((unsigned long)iommu->qi->desc);
1107                 kfree(iommu->qi->desc_status);
1108                 kfree(iommu->qi);
1109         }
1110
1111         if (iommu->reg)
1112                 unmap_iommu(iommu);
1113
1114         dmar_free_seq_id(iommu);
1115         kfree(iommu);
1116 }
1117
1118 /*
1119  * Reclaim all the submitted descriptors which have completed their work.
1120  */
1121 static inline void reclaim_free_desc(struct q_inval *qi)
1122 {
1123         while (qi->desc_status[qi->free_tail] == QI_DONE ||
1124                qi->desc_status[qi->free_tail] == QI_ABORT) {
1125                 qi->desc_status[qi->free_tail] = QI_FREE;
1126                 qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
1127                 qi->free_cnt++;
1128         }
1129 }
1130
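/*
 * Check the fault status register for queued invalidation errors affecting
 * the descriptor at @index. An IQE hitting this descriptor is handled by
 * overwriting it with the wait descriptor and returning -EINVAL; an ITE
 * aborts the pending wait descriptors so the caller can resubmit (-EAGAIN).
 */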
1131 static int qi_check_fault(struct intel_iommu *iommu, int index)
1132 {
1133         u32 fault;
1134         int head, tail;
1135         struct q_inval *qi = iommu->qi;
1136         int wait_index = (index + 1) % QI_LENGTH;
1137
1138         if (qi->desc_status[wait_index] == QI_ABORT)
1139                 return -EAGAIN;
1140
1141         fault = readl(iommu->reg + DMAR_FSTS_REG);
1142
1143         /*
1144          * If IQE happens, the head points to the descriptor associated
1145          * with the error. No new descriptors are fetched until the IQE
1146          * is cleared.
1147          */
1148         if (fault & DMA_FSTS_IQE) {
1149                 head = readl(iommu->reg + DMAR_IQH_REG);
1150                 if ((head >> DMAR_IQ_SHIFT) == index) {
1151                         pr_err("VT-d detected invalid descriptor: "
1152                                 "low=%llx, high=%llx\n",
1153                                 (unsigned long long)qi->desc[index].low,
1154                                 (unsigned long long)qi->desc[index].high);
1155                         memcpy(&qi->desc[index], &qi->desc[wait_index],
1156                                         sizeof(struct qi_desc));
1157                         __iommu_flush_cache(iommu, &qi->desc[index],
1158                                         sizeof(struct qi_desc));
1159                         writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
1160                         return -EINVAL;
1161                 }
1162         }
1163
1164         /*
1165          * If ITE happens, all pending wait_desc commands are aborted.
1166          * No new descriptors are fetched until the ITE is cleared.
1167          */
1168         if (fault & DMA_FSTS_ITE) {
1169                 head = readl(iommu->reg + DMAR_IQH_REG);
1170                 head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
1171                 head |= 1;
1172                 tail = readl(iommu->reg + DMAR_IQT_REG);
1173                 tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
1174
1175                 writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
1176
1177                 do {
1178                         if (qi->desc_status[head] == QI_IN_USE)
1179                                 qi->desc_status[head] = QI_ABORT;
1180                         head = (head - 2 + QI_LENGTH) % QI_LENGTH;
1181                 } while (head != tail);
1182
1183                 if (qi->desc_status[wait_index] == QI_ABORT)
1184                         return -EAGAIN;
1185         }
1186
1187         if (fault & DMA_FSTS_ICE)
1188                 writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
1189
1190         return 0;
1191 }
1192
1193 /*
1194  * Submit the queued invalidation descriptor to the remapping
1195  * hardware unit and wait for its completion.
1196  */
1197 int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
1198 {
1199         int rc;
1200         struct q_inval *qi = iommu->qi;
1201         struct qi_desc *hw, wait_desc;
1202         int wait_index, index;
1203         unsigned long flags;
1204
1205         if (!qi)
1206                 return 0;
1207
1208         hw = qi->desc;
1209
1210 restart:
1211         rc = 0;
1212
1213         raw_spin_lock_irqsave(&qi->q_lock, flags);
1214         while (qi->free_cnt < 3) {
1215                 raw_spin_unlock_irqrestore(&qi->q_lock, flags);
1216                 cpu_relax();
1217                 raw_spin_lock_irqsave(&qi->q_lock, flags);
1218         }
1219
1220         index = qi->free_head;
1221         wait_index = (index + 1) % QI_LENGTH;
1222
1223         qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;
1224
1225         hw[index] = *desc;
1226
1227         wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
1228                         QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
1229         wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
1230
1231         hw[wait_index] = wait_desc;
1232
1233         __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
1234         __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));
1235
1236         qi->free_head = (qi->free_head + 2) % QI_LENGTH;
1237         qi->free_cnt -= 2;
1238
1239         /*
1240          * Update the HW tail register to indicate the presence of
1241          * new descriptors.
1242          */
1243         writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);
1244
1245         while (qi->desc_status[wait_index] != QI_DONE) {
1246                 /*
1247                  * We leave the interrupts disabled, to prevent the interrupt
1248                  * context from queueing another cmd while a cmd is already submitted
1249                  * and waiting for completion on this cpu. This is to avoid
1250                  * a deadlock where the interrupt context can wait indefinitely
1251                  * for free slots in the queue.
1252                  */
1253                 rc = qi_check_fault(iommu, index);
1254                 if (rc)
1255                         break;
1256
1257                 raw_spin_unlock(&qi->q_lock);
1258                 cpu_relax();
1259                 raw_spin_lock(&qi->q_lock);
1260         }
1261
1262         qi->desc_status[index] = QI_DONE;
1263
1264         reclaim_free_desc(qi);
1265         raw_spin_unlock_irqrestore(&qi->q_lock, flags);
1266
1267         if (rc == -EAGAIN)
1268                 goto restart;
1269
1270         return rc;
1271 }
1272
1273 /*
1274  * Flush the global interrupt entry cache.
1275  */
1276 void qi_global_iec(struct intel_iommu *iommu)
1277 {
1278         struct qi_desc desc;
1279
1280         desc.low = QI_IEC_TYPE;
1281         desc.high = 0;
1282
1283         /* should never fail */
1284         qi_submit_sync(&desc, iommu);
1285 }
1286
1287 void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
1288                       u64 type)
1289 {
1290         struct qi_desc desc;
1291
1292         desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
1293                         | QI_CC_GRAN(type) | QI_CC_TYPE;
1294         desc.high = 0;
1295
1296         qi_submit_sync(&desc, iommu);
1297 }
1298
1299 void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
1300                     unsigned int size_order, u64 type)
1301 {
1302         u8 dw = 0, dr = 0;
1303
1304         struct qi_desc desc;
1305         int ih = 0;
1306
1307         if (cap_write_drain(iommu->cap))
1308                 dw = 1;
1309
1310         if (cap_read_drain(iommu->cap))
1311                 dr = 1;
1312
1313         desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
1314                 | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
1315         desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
1316                 | QI_IOTLB_AM(size_order);
1317
1318         qi_submit_sync(&desc, iommu);
1319 }
1320
1321 void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
1322                         u64 addr, unsigned mask)
1323 {
1324         struct qi_desc desc;
1325
1326         if (mask) {
1327                 BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
1328                 addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
1329                 desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
1330         } else
1331                 desc.high = QI_DEV_IOTLB_ADDR(addr);
1332
1333         if (qdep >= QI_DEV_IOTLB_MAX_INVS)
1334                 qdep = 0;
1335
1336         desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
1337                    QI_DIOTLB_TYPE;
1338
1339         qi_submit_sync(&desc, iommu);
1340 }
1341
1342 /*
1343  * Disable Queued Invalidation interface.
1344  */
1345 void dmar_disable_qi(struct intel_iommu *iommu)
1346 {
1347         unsigned long flags;
1348         u32 sts;
1349         cycles_t start_time = get_cycles();
1350
1351         if (!ecap_qis(iommu->ecap))
1352                 return;
1353
1354         raw_spin_lock_irqsave(&iommu->register_lock, flags);
1355
1356         sts =  dmar_readq(iommu->reg + DMAR_GSTS_REG);
1357         if (!(sts & DMA_GSTS_QIES))
1358                 goto end;
1359
1360         /*
1361          * Give the HW a chance to complete the pending invalidation requests.
1362          */
1363         while ((readl(iommu->reg + DMAR_IQT_REG) !=
1364                 readl(iommu->reg + DMAR_IQH_REG)) &&
1365                 (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
1366                 cpu_relax();
1367
1368         iommu->gcmd &= ~DMA_GCMD_QIE;
1369         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1370
1371         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
1372                       !(sts & DMA_GSTS_QIES), sts);
1373 end:
1374         raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1375 }
1376
1377 /*
1378  * Enable queued invalidation.
1379  */
1380 static void __dmar_enable_qi(struct intel_iommu *iommu)
1381 {
1382         u32 sts;
1383         unsigned long flags;
1384         struct q_inval *qi = iommu->qi;
1385
1386         qi->free_head = qi->free_tail = 0;
1387         qi->free_cnt = QI_LENGTH;
1388
1389         raw_spin_lock_irqsave(&iommu->register_lock, flags);
1390
1391         /* write zero to the tail reg */
1392         writel(0, iommu->reg + DMAR_IQT_REG);
1393
1394         dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
1395
1396         iommu->gcmd |= DMA_GCMD_QIE;
1397         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1398
1399         /* Make sure the hardware completes it */
1400         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
1401
1402         raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1403 }
1404
1405 /*
1406  * Enable the Queued Invalidation interface. This is required to support
1407  * interrupt remapping. It is also used by DMA remapping, where it replaces
1408  * register-based IOTLB invalidation.
1409  */
1410 int dmar_enable_qi(struct intel_iommu *iommu)
1411 {
1412         struct q_inval *qi;
1413         struct page *desc_page;
1414
1415         if (!ecap_qis(iommu->ecap))
1416                 return -ENOENT;
1417
1418         /*
1419  * Queued invalidation is already set up and enabled.
1420          */
1421         if (iommu->qi)
1422                 return 0;
1423
1424         iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
1425         if (!iommu->qi)
1426                 return -ENOMEM;
1427
1428         qi = iommu->qi;
1429
1430
1431         desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
1432         if (!desc_page) {
1433                 kfree(qi);
1434                 iommu->qi = NULL;
1435                 return -ENOMEM;
1436         }
1437
1438         qi->desc = page_address(desc_page);
1439
1440         qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
1441         if (!qi->desc_status) {
1442                 free_page((unsigned long) qi->desc);
1443                 kfree(qi);
1444                 iommu->qi = NULL;
1445                 return -ENOMEM;
1446         }
1447
1448         raw_spin_lock_init(&qi->q_lock);
1449
1450         __dmar_enable_qi(iommu);
1451
1452         return 0;
1453 }
1454
1455 /* iommu interrupt handling. Most of it is MSI-like. */
1456
1457 enum faulttype {
1458         DMA_REMAP,
1459         INTR_REMAP,
1460         UNKNOWN,
1461 };
1462
1463 static const char *dma_remap_fault_reasons[] =
1464 {
1465         "Software",
1466         "Present bit in root entry is clear",
1467         "Present bit in context entry is clear",
1468         "Invalid context entry",
1469         "Access beyond MGAW",
1470         "PTE Write access is not set",
1471         "PTE Read access is not set",
1472         "Next page table ptr is invalid",
1473         "Root table address invalid",
1474         "Context table ptr is invalid",
1475         "non-zero reserved fields in RTP",
1476         "non-zero reserved fields in CTP",
1477         "non-zero reserved fields in PTE",
1478         "PCE for translation request specifies blocking",
1479 };
1480
1481 static const char *irq_remap_fault_reasons[] =
1482 {
1483         "Detected reserved fields in the decoded interrupt-remapped request",
1484         "Interrupt index exceeded the interrupt-remapping table size",
1485         "Present field in the IRTE entry is clear",
1486         "Error accessing interrupt-remapping table pointed by IRTA_REG",
1487         "Detected reserved fields in the IRTE entry",
1488         "Blocked a compatibility format interrupt request",
1489         "Blocked an interrupt request due to source-id verification failure",
1490 };
1491
1492 static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
1493 {
1494         if (fault_reason >= 0x20 && (fault_reason - 0x20 <
1495                                         ARRAY_SIZE(irq_remap_fault_reasons))) {
1496                 *fault_type = INTR_REMAP;
1497                 return irq_remap_fault_reasons[fault_reason - 0x20];
1498         } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
1499                 *fault_type = DMA_REMAP;
1500                 return dma_remap_fault_reasons[fault_reason];
1501         } else {
1502                 *fault_type = UNKNOWN;
1503                 return "Unknown";
1504         }
1505 }
1506
1507
1508 static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq)
1509 {
1510         if (iommu->irq == irq)
1511                 return DMAR_FECTL_REG;
1512         else if (iommu->pr_irq == irq)
1513                 return DMAR_PECTL_REG;
1514         else
1515                 BUG();
1516 }
1517
1518 void dmar_msi_unmask(struct irq_data *data)
1519 {
1520         struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1521         int reg = dmar_msi_reg(iommu, data->irq);
1522         unsigned long flag;
1523
1524         /* unmask it */
1525         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1526         writel(0, iommu->reg + reg);
1527                 /* Read back a register to flush the posted write */
1528         readl(iommu->reg + reg);
1529         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1530 }
1531
1532 void dmar_msi_mask(struct irq_data *data)
1533 {
1534         struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1535         int reg = dmar_msi_reg(iommu, data->irq);
1536         unsigned long flag;
1537
1538         /* mask it */
1539         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1540         writel(DMA_FECTL_IM, iommu->reg + reg);
1541                 /* Read back a register to flush the posted write */
1542         readl(iommu->reg + reg);
1543         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1544 }
1545
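/*
 * Program the MSI message for @irq into the control block selected by
 * dmar_msi_reg(): data at offset +4, address low at +8, address high
 * at +12.  dmar_msi_read() below does the reverse.
 */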
1546 void dmar_msi_write(int irq, struct msi_msg *msg)
1547 {
1548         struct intel_iommu *iommu = irq_get_handler_data(irq);
1549         int reg = dmar_msi_reg(iommu, irq);
1550         unsigned long flag;
1551
1552         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1553         writel(msg->data, iommu->reg + reg + 4);
1554         writel(msg->address_lo, iommu->reg + reg + 8);
1555         writel(msg->address_hi, iommu->reg + reg + 12);
1556         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1557 }
1558
1559 void dmar_msi_read(int irq, struct msi_msg *msg)
1560 {
1561         struct intel_iommu *iommu = irq_get_handler_data(irq);
1562         int reg = dmar_msi_reg(iommu, irq);
1563         unsigned long flag;
1564
1565         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1566         msg->data = readl(iommu->reg + reg + 4);
1567         msg->address_lo = readl(iommu->reg + reg + 8);
1568         msg->address_hi = readl(iommu->reg + reg + 12);
1569         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1570 }
1571
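/*
 * Report a single fault: look up the reason string and log either an
 * interrupt-remapping or a DMA-remapping fault message.
 */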
1572 static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
1573                 u8 fault_reason, u16 source_id, unsigned long long addr)
1574 {
1575         const char *reason;
1576         int fault_type;
1577
1578         reason = dmar_get_fault_reason(fault_reason, &fault_type);
1579
1580         if (fault_type == INTR_REMAP)
1581                 pr_err("INTR-REMAP: Request device [%02x:%02x.%d] "
1582                        "fault index %llx\n"
1583                         "INTR-REMAP:[fault reason %02d] %s\n",
1584                         (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1585                         PCI_FUNC(source_id & 0xFF), addr >> 48,
1586                         fault_reason, reason);
1587         else
1588                 pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
1589                        "fault addr %llx\n"
1590                        "DMAR:[fault reason %02d] %s\n",
1591                        (type ? "DMA Read" : "DMA Write"),
1592                        (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1593                        PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
1594         return 0;
1595 }
1596
1597 #define PRIMARY_FAULT_REG_LEN (16)
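/*
 * Fault interrupt handler: walk the ring of primary fault recording
 * registers, log and clear each pending record, then clear the
 * overflow/pending bits in the fault status register.
 */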
1598 irqreturn_t dmar_fault(int irq, void *dev_id)
1599 {
1600         struct intel_iommu *iommu = dev_id;
1601         int reg, fault_index;
1602         u32 fault_status;
1603         unsigned long flag;
1604
1605         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1606         fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1607         if (fault_status)
1608                 pr_err("DRHD: handling fault status reg %x\n", fault_status);
1609
1610         /* TBD: ignore advanced fault log currently */
1611         if (!(fault_status & DMA_FSTS_PPF))
1612                 goto unlock_exit;
1613
1614         fault_index = dma_fsts_fault_record_index(fault_status);
1615         reg = cap_fault_reg_offset(iommu->cap);
1616         while (1) {
1617                 u8 fault_reason;
1618                 u16 source_id;
1619                 u64 guest_addr;
1620                 int type;
1621                 u32 data;
1622
1623                 /* highest 32 bits */
1624                 data = readl(iommu->reg + reg +
1625                                 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1626                 if (!(data & DMA_FRCD_F))
1627                         break;
1628
1629                 fault_reason = dma_frcd_fault_reason(data);
1630                 type = dma_frcd_type(data);
1631
1632                 data = readl(iommu->reg + reg +
1633                                 fault_index * PRIMARY_FAULT_REG_LEN + 8);
1634                 source_id = dma_frcd_source_id(data);
1635
1636                 guest_addr = dmar_readq(iommu->reg + reg +
1637                                 fault_index * PRIMARY_FAULT_REG_LEN);
1638                 guest_addr = dma_frcd_page_addr(guest_addr);
1639                 /* clear the fault */
1640                 writel(DMA_FRCD_F, iommu->reg + reg +
1641                         fault_index * PRIMARY_FAULT_REG_LEN + 12);
1642
1643                 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1644
1645                 dmar_fault_do_one(iommu, type, fault_reason,
1646                                 source_id, guest_addr);
1647
1648                 fault_index++;
1649                 if (fault_index >= cap_num_fault_regs(iommu->cap))
1650                         fault_index = 0;
1651                 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1652         }
1653
1654         writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);
1655
1656 unlock_exit:
1657         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1658         return IRQ_HANDLED;
1659 }
1660
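/*
 * Allocate a fault interrupt for @iommu (unless one is already set up)
 * and wire it up to dmar_fault().
 */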
1661 int dmar_set_interrupt(struct intel_iommu *iommu)
1662 {
1663         int irq, ret;
1664
1665         /*
1666          * Check if the fault interrupt is already initialized.
1667          */
1668         if (iommu->irq)
1669                 return 0;
1670
1671         irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu);
1672         if (irq > 0) {
1673                 iommu->irq = irq;
1674         } else {
1675                 pr_err("No free IRQ vectors\n");
1676                 return -EINVAL;
1677         }
1678
1679         ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
1680         if (ret)
1681                 pr_err("Can't request irq\n");
1682         return ret;
1683 }
1684
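/*
 * Set up fault reporting for every DRHD unit: register the fault
 * interrupt and clear any faults left over from before the handler
 * was installed.
 */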
1685 int __init enable_drhd_fault_handling(void)
1686 {
1687         struct dmar_drhd_unit *drhd;
1688         struct intel_iommu *iommu;
1689
1690         /*
1691          * Enable fault control interrupt.
1692          */
1693         for_each_iommu(iommu, drhd) {
1694                 u32 fault_status;
1695                 int ret = dmar_set_interrupt(iommu);
1696
1697                 if (ret) {
1698                         pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
1699                                (unsigned long long)drhd->reg_base_addr, ret);
1700                         return -1;
1701                 }
1702
1703                 /*
1704                  * Clear any previous faults.
1705                  */
1706                 dmar_fault(iommu->irq, iommu);
1707                 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1708                 writel(fault_status, iommu->reg + DMAR_FSTS_REG);
1709         }
1710
1711         return 0;
1712 }
1713
1714 /*
1715  * Re-enable Queued Invalidation interface.
1716  */
1717 int dmar_reenable_qi(struct intel_iommu *iommu)
1718 {
1719         if (!ecap_qis(iommu->ecap))
1720                 return -ENOENT;
1721
1722         if (!iommu->qi)
1723                 return -ENOENT;
1724
1725         /*
1726          * First disable queued invalidation.
1727          */
1728         dmar_disable_qi(iommu);
1729         /*
1730          * Then enable queued invalidation again. Since there are no pending
1731          * invalidation requests now, it's safe to re-enable queued
1732          * invalidation.
1733          */
1734         __dmar_enable_qi(iommu);
1735
1736         return 0;
1737 }
1738
1739 /*
1740  * Check interrupt remapping support in DMAR table description.
1741  */
1742 int __init dmar_ir_support(void)
1743 {
1744         struct acpi_table_dmar *dmar;
1745         dmar = (struct acpi_table_dmar *)dmar_tbl;
1746         if (!dmar)
1747                 return 0;
1748         return dmar->flags & 0x1;
1749 }
1750
1751 /* Check whether DMAR units are in use */
1752 static inline bool dmar_in_use(void)
1753 {
1754         return irq_remapping_enabled || intel_iommu_enabled;
1755 }
1756
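/*
 * Late initcall: if neither interrupt remapping nor the Intel IOMMU
 * ended up in use, unregister the PCI bus notifier where necessary and
 * free the DRHD structures parsed at boot.
 */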
1757 static int __init dmar_free_unused_resources(void)
1758 {
1759         struct dmar_drhd_unit *dmaru, *dmaru_n;
1760
1761         if (dmar_in_use())
1762                 return 0;
1763
1764         if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
1765                 bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);
1766
1767         down_write(&dmar_global_lock);
1768         list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
1769                 list_del(&dmaru->list);
1770                 dmar_free_drhd(dmaru);
1771         }
1772         up_write(&dmar_global_lock);
1773
1774         return 0;
1775 }
1776
1777 late_initcall(dmar_free_unused_resources);
1778 IOMMU_INIT_POST(detect_intel_iommu);
1779
1780 /*
1781  * DMAR Hotplug Support
1782  * For more details, please refer to Intel(R) Virtualization Technology
1783  * for Directed-IO Architecture Specification, Rev 2.2, Section 8.8
1784  * "Remapping Hardware Unit Hot Plug".
1785  */
1786 static u8 dmar_hp_uuid[] = {
1787         /* 0000 */    0xA6, 0xA3, 0xC1, 0xD8, 0x9B, 0xBE, 0x9B, 0x4C,
1788         /* 0008 */    0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF
1789 };
1790
1791 /*
1792  * Currently there's only one revision and the BIOS will not check the revision ID,
1793  * so use 0 for safety.
1794  */
1795 #define DMAR_DSM_REV_ID                 0
1796 #define DMAR_DSM_FUNC_DRHD              1
1797 #define DMAR_DSM_FUNC_ATSR              2
1798 #define DMAR_DSM_FUNC_RHSA              3
1799
1800 static inline bool dmar_detect_dsm(acpi_handle handle, int func)
1801 {
1802         return acpi_check_dsm(handle, dmar_hp_uuid, DMAR_DSM_REV_ID, 1 << func);
1803 }
1804
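/*
 * Evaluate the DMAR _DSM function @func on @handle and pass every
 * remapping structure of the matching type in the returned buffer to
 * @handler.  Returns 0 when the _DSM function is not implemented.
 */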
1805 static int dmar_walk_dsm_resource(acpi_handle handle, int func,
1806                                   dmar_res_handler_t handler, void *arg)
1807 {
1808         int ret = -ENODEV;
1809         union acpi_object *obj;
1810         struct acpi_dmar_header *start;
1811         struct dmar_res_callback callback;
1812         static int res_type[] = {
1813                 [DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
1814                 [DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
1815                 [DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
1816         };
1817
1818         if (!dmar_detect_dsm(handle, func))
1819                 return 0;
1820
1821         obj = acpi_evaluate_dsm_typed(handle, dmar_hp_uuid, DMAR_DSM_REV_ID,
1822                                       func, NULL, ACPI_TYPE_BUFFER);
1823         if (!obj)
1824                 return -ENODEV;
1825
1826         memset(&callback, 0, sizeof(callback));
1827         callback.cb[res_type[func]] = handler;
1828         callback.arg[res_type[func]] = arg;
1829         start = (struct acpi_dmar_header *)obj->buffer.pointer;
1830         ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback);
1831
1832         ACPI_FREE(obj);
1833
1834         return ret;
1835 }
1836
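/*
 * Bring a hot-added DRHD unit online: interrupt remapping first, then
 * DMA remapping.
 */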
1837 static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg)
1838 {
1839         int ret;
1840         struct dmar_drhd_unit *dmaru;
1841
1842         dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
1843         if (!dmaru)
1844                 return -ENODEV;
1845
1846         ret = dmar_ir_hotplug(dmaru, true);
1847         if (ret == 0)
1848                 ret = dmar_iommu_hotplug(dmaru, true);
1849
1850         return ret;
1851 }
1852
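/*
 * Take a DRHD unit offline for hot removal.  Refuse with -EBUSY while
 * any device in the unit's scope is still present.
 */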
1853 static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
1854 {
1855         int i, ret;
1856         struct device *dev;
1857         struct dmar_drhd_unit *dmaru;
1858
1859         dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
1860         if (!dmaru)
1861                 return 0;
1862
1863         /*
1864          * All PCI devices managed by this unit should have been destroyed.
1865          */
1866         if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt)
1867                 for_each_active_dev_scope(dmaru->devices,
1868                                           dmaru->devices_cnt, i, dev)
1869                         return -EBUSY;
1870
1871         ret = dmar_ir_hotplug(dmaru, false);
1872         if (ret == 0)
1873                 ret = dmar_iommu_hotplug(dmaru, false);
1874
1875         return ret;
1876 }
1877
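/* Unlink the DRHD unit from the global list and free its resources. */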
1878 static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg)
1879 {
1880         struct dmar_drhd_unit *dmaru;
1881
1882         dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
1883         if (dmaru) {
1884                 list_del_rcu(&dmaru->list);
1885                 synchronize_rcu();
1886                 dmar_free_drhd(dmaru);
1887         }
1888
1889         return 0;
1890 }
1891
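/*
 * Hot-add sequence: validate and parse the DRHD structures reported by
 * _DSM, parse the RHSA and ATSR structures, then bring each new unit
 * online.  On failure, unwind whatever has already been set up.
 */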
1892 static int dmar_hotplug_insert(acpi_handle handle)
1893 {
1894         int ret;
1895         int drhd_count = 0;
1896
1897         ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1898                                      &dmar_validate_one_drhd, (void *)1);
1899         if (ret)
1900                 goto out;
1901
1902         ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1903                                      &dmar_parse_one_drhd, (void *)&drhd_count);
1904         if (ret == 0 && drhd_count == 0) {
1905                 pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n");
1906                 goto out;
1907         } else if (ret) {
1908                 goto release_drhd;
1909         }
1910
1911         ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA,
1912                                      &dmar_parse_one_rhsa, NULL);
1913         if (ret)
1914                 goto release_drhd;
1915
1916         ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
1917                                      &dmar_parse_one_atsr, NULL);
1918         if (ret)
1919                 goto release_atsr;
1920
1921         ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1922                                      &dmar_hp_add_drhd, NULL);
1923         if (!ret)
1924                 return 0;
1925
1926         dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1927                                &dmar_hp_remove_drhd, NULL);
1928 release_atsr:
1929         dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
1930                                &dmar_release_one_atsr, NULL);
1931 release_drhd:
1932         dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1933                                &dmar_hp_release_drhd, NULL);
1934 out:
1935         return ret;
1936 }
1937
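/*
 * Hot-remove sequence: check the ATSR structures first, then take the
 * affected DRHD units offline.  On success, release the ATSR and DRHD
 * resources; on failure, bring the units back online.
 */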
1938 static int dmar_hotplug_remove(acpi_handle handle)
1939 {
1940         int ret;
1941
1942         ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
1943                                      &dmar_check_one_atsr, NULL);
1944         if (ret)
1945                 return ret;
1946
1947         ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1948                                      &dmar_hp_remove_drhd, NULL);
1949         if (ret == 0) {
1950                 WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
1951                                                &dmar_release_one_atsr, NULL));
1952                 WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1953                                                &dmar_hp_release_drhd, NULL));
1954         } else {
1955                 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1956                                        &dmar_hp_add_drhd, NULL);
1957         }
1958
1959         return ret;
1960 }
1961
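/*
 * acpi_walk_namespace() callback: record the first handle that
 * implements the DRHD _DSM function and stop the walk.
 */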
1962 static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl,
1963                                        void *context, void **retval)
1964 {
1965         acpi_handle *phdl = retval;
1966
1967         if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
1968                 *phdl = handle;
1969                 return AE_CTRL_TERMINATE;
1970         }
1971
1972         return AE_OK;
1973 }
1974
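/*
 * Common entry point for DMAR hot add/remove: find an ACPI handle that
 * implements the DMAR _DSM (the node itself or one of its descendants)
 * and perform the insert or remove under dmar_global_lock.
 */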
1975 static int dmar_device_hotplug(acpi_handle handle, bool insert)
1976 {
1977         int ret;
1978         acpi_handle tmp = NULL;
1979         acpi_status status;
1980
1981         if (!dmar_in_use())
1982                 return 0;
1983
1984         if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
1985                 tmp = handle;
1986         } else {
1987                 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
1988                                              ACPI_UINT32_MAX,
1989                                              dmar_get_dsm_handle,
1990                                              NULL, NULL, &tmp);
1991                 if (ACPI_FAILURE(status)) {
1992                         pr_warn("Failed to locate _DSM method.\n");
1993                         return -ENXIO;
1994                 }
1995         }
1996         if (tmp == NULL)
1997                 return 0;
1998
1999         down_write(&dmar_global_lock);
2000         if (insert)
2001                 ret = dmar_hotplug_insert(tmp);
2002         else
2003                 ret = dmar_hotplug_remove(tmp);
2004         up_write(&dmar_global_lock);
2005
2006         return ret;
2007 }
2008
2009 int dmar_device_add(acpi_handle handle)
2010 {
2011         return dmar_device_hotplug(handle, true);
2012 }
2013
2014 int dmar_device_remove(acpi_handle handle)
2015 {
2016         return dmar_device_hotplug(handle, false);
2017 }