/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS through the BIOS via the DMA remapping reporting
 * (DMAR) ACPI tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>

#define PREFIX "DMAR: "

/*
 * No locks are needed as the DMA remapping hardware unit list is
 * constructed at boot time and hotplug of these units is not
 * supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

static struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
        /*
         * Add INCLUDE_ALL entries at the tail, so that a scan of the
         * list will find them at the very end.
         */
        if (drhd->include_all)
                list_add_tail(&drhd->list, &dmar_drhd_units);
        else
                list_add(&drhd->list, &dmar_drhd_units);
}

static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
                                           struct pci_dev **dev, u16 segment)
{
        struct pci_bus *bus;
        struct pci_dev *pdev = NULL;
        struct acpi_dmar_pci_path *path;
        int count;

        bus = pci_find_bus(segment, scope->bus);
        path = (struct acpi_dmar_pci_path *)(scope + 1);
        count = (scope->length - sizeof(struct acpi_dmar_device_scope))
                / sizeof(struct acpi_dmar_pci_path);

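        /*
         * Walk the {device, function} path entries from the scope's start
         * bus down through each bridge's secondary bus until the target
         * device is reached.
         */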
        while (count) {
                if (pdev)
                        pci_dev_put(pdev);
                /*
                 * Some BIOSes list nonexistent devices in the DMAR
                 * table; just ignore them.
                 */
                if (!bus) {
                        printk(KERN_WARNING
                        PREFIX "Device scope bus [%d] not found\n",
                        scope->bus);
                        break;
                }
                pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
                if (!pdev) {
                        printk(KERN_WARNING PREFIX
                        "Device scope device [%04x:%02x:%02x.%02x] not found\n",
                                segment, bus->number, path->dev, path->fn);
                        break;
                }
                path++;
                count--;
                bus = pdev->subordinate;
        }
        if (!pdev) {
                printk(KERN_WARNING PREFIX
                "Device scope device [%04x:%02x:%02x.%02x] not found\n",
                segment, scope->bus, path->dev, path->fn);
                *dev = NULL;
                return 0;
        }
        if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
                        pdev->subordinate) || (scope->entry_type ==
                        ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
                printk(KERN_WARNING PREFIX
                        "Device scope type does not match for %s\n",
                        pci_name(pdev));
                pci_dev_put(pdev);
                return -EINVAL;
        }
        *dev = pdev;
        return 0;
}

static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
                                       struct pci_dev ***devices, u16 segment)
{
        struct acpi_dmar_device_scope *scope;
        void *tmp = start;
        int index;
        int ret;

        *cnt = 0;
        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
                    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
                        (*cnt)++;
                else
                        printk(KERN_WARNING PREFIX
                                "Unsupported device scope\n");
                start += scope->length;
        }
        if (*cnt == 0)
                return 0;

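        /*
         * The first pass above only counted the endpoint and bridge
         * entries; allocate the device array now and fill it in a
         * second pass over the same range.
         */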
        *devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
        if (!*devices)
                return -ENOMEM;

        start = tmp;
        index = 0;
        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
                    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
                        ret = dmar_parse_one_dev_scope(scope,
                                &(*devices)[index], segment);
                        if (ret) {
                                kfree(*devices);
                                return ret;
                        }
                        index++;
                }
                start += scope->length;
        }

        return 0;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure, which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct dmar_drhd_unit *dmaru;
        int ret = 0;

        drhd = (struct acpi_dmar_hardware_unit *)header;
        dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
        if (!dmaru)
                return -ENOMEM;

        dmaru->hdr = header;
        dmaru->reg_base_addr = drhd->address;
        dmaru->segment = drhd->segment;
        dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

        ret = alloc_iommu(dmaru);
        if (ret) {
                kfree(dmaru);
                return ret;
        }
        dmar_register_drhd_unit(dmaru);
        return 0;
}

static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
        struct acpi_dmar_hardware_unit *drhd;
        int ret = 0;

        drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;

        if (dmaru->include_all)
                return 0;

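        /*
         * The device scope entries start immediately after the DRHD
         * header and run to the end of the structure.
         */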
        ret = dmar_parse_dev_scope((void *)(drhd + 1),
                                ((void *)drhd) + drhd->header.length,
                                &dmaru->devices_cnt, &dmaru->devices,
                                drhd->segment);
        if (ret) {
                list_del(&dmaru->list);
                kfree(dmaru);
        }
        return ret;
}

#ifdef CONFIG_DMAR
LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
        list_add(&rmrr->list, &dmar_rmrr_units);
}

static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
        struct acpi_dmar_reserved_memory *rmrr;
        struct dmar_rmrr_unit *rmrru;

        rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
        if (!rmrru)
                return -ENOMEM;

        rmrru->hdr = header;
        rmrr = (struct acpi_dmar_reserved_memory *)header;
        rmrru->base_address = rmrr->base_address;
        rmrru->end_address = rmrr->end_address;

        dmar_register_rmrr_unit(rmrru);
        return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
        struct acpi_dmar_reserved_memory *rmrr;
        int ret;

        rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
        ret = dmar_parse_dev_scope((void *)(rmrr + 1),
                ((void *)rmrr) + rmrr->header.length,
                &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);

        if (ret || (rmrru->devices_cnt == 0)) {
                list_del(&rmrru->list);
                kfree(rmrru);
        }
        return ret;
}

static LIST_HEAD(dmar_atsr_units);

static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
        struct acpi_dmar_atsr *atsr;
        struct dmar_atsr_unit *atsru;

        atsr = container_of(hdr, struct acpi_dmar_atsr, header);
        atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
        if (!atsru)
                return -ENOMEM;

        atsru->hdr = hdr;
        atsru->include_all = atsr->flags & 0x1;

        list_add(&atsru->list, &dmar_atsr_units);

        return 0;
}

static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
        int rc;
        struct acpi_dmar_atsr *atsr;

        if (atsru->include_all)
                return 0;

        atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
        rc = dmar_parse_dev_scope((void *)(atsr + 1),
                                (void *)atsr + atsr->header.length,
                                &atsru->devices_cnt, &atsru->devices,
                                atsr->segment);
        if (rc || !atsru->devices_cnt) {
                list_del(&atsru->list);
                kfree(atsru);
        }

        return rc;
}

int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
        int i;
        struct pci_bus *bus;
        struct acpi_dmar_atsr *atsr;
        struct dmar_atsr_unit *atsru;

        list_for_each_entry(atsru, &dmar_atsr_units, list) {
                atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
                if (atsr->segment == pci_domain_nr(dev->bus))
                        goto found;
        }

        return 0;

found:
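        /*
         * Walk up the bus hierarchy to the PCIe root port; the ATSR
         * reports ATS capability per root port, so the device is covered
         * if its root port appears in the unit's device list.
         */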
        for (bus = dev->bus; bus; bus = bus->parent) {
                struct pci_dev *bridge = bus->self;

                if (!bridge || !bridge->is_pcie ||
                    bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
                        return 0;

                if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
                        for (i = 0; i < atsru->devices_cnt; i++)
                                if (atsru->devices[i] == bridge)
                                        return 1;
                        break;
                }
        }

        if (atsru->include_all)
                return 1;

        return 0;
}
#endif

#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
        struct acpi_dmar_rhsa *rhsa;
        struct dmar_drhd_unit *drhd;

        rhsa = (struct acpi_dmar_rhsa *)header;
        for_each_drhd_unit(drhd) {
                if (drhd->reg_base_addr == rhsa->base_address) {
                        int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

                        if (!node_online(node))
                                node = -1;
                        drhd->iommu->node = node;
                        return 0;
                }
        }
        WARN(1, "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
             "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
             rhsa->base_address,
             dmi_get_system_info(DMI_BIOS_VENDOR),
             dmi_get_system_info(DMI_BIOS_VERSION),
             dmi_get_system_info(DMI_PRODUCT_VERSION));

        return 0;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_reserved_memory *rmrr;
        struct acpi_dmar_atsr *atsr;
        struct acpi_dmar_rhsa *rhsa;

        switch (header->type) {
        case ACPI_DMAR_TYPE_HARDWARE_UNIT:
                drhd = container_of(header, struct acpi_dmar_hardware_unit,
                                    header);
                printk(KERN_INFO PREFIX
                        "DRHD base: %#016Lx flags: %#x\n",
                        (unsigned long long)drhd->address, drhd->flags);
                break;
        case ACPI_DMAR_TYPE_RESERVED_MEMORY:
                rmrr = container_of(header, struct acpi_dmar_reserved_memory,
                                    header);
                printk(KERN_INFO PREFIX
                        "RMRR base: %#016Lx end: %#016Lx\n",
                        (unsigned long long)rmrr->base_address,
                        (unsigned long long)rmrr->end_address);
                break;
        case ACPI_DMAR_TYPE_ATSR:
                atsr = container_of(header, struct acpi_dmar_atsr, header);
                printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
                break;
        case ACPI_DMAR_HARDWARE_AFFINITY:
                rhsa = container_of(header, struct acpi_dmar_rhsa, header);
                printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
                       (unsigned long long)rhsa->base_address,
                       rhsa->proximity_domain);
                break;
        }
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
        acpi_status status = AE_OK;

        /* If we can find the DMAR table, then there are DMAR devices. */
        status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
                                (struct acpi_table_header **)&dmar_tbl,
                                &dmar_tbl_size);

        if (ACPI_SUCCESS(status) && !dmar_tbl) {
                printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
                status = AE_NOT_FOUND;
        }

        return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
        struct acpi_table_dmar *dmar;
        struct acpi_dmar_header *entry_header;
        int ret = 0;

        /*
         * Do it again here: the earlier dmar_tbl mapping may have been
         * made through the fixed map.
         */
        dmar_table_detect();

        /*
         * ACPI tables may not be DMA protected by tboot, so use the copy
         * of the DMAR table that SINIT saved in SinitMleData in the TXT
         * heap (which is DMA protected).
         */
        dmar_tbl = tboot_get_dmar_table(dmar_tbl);

        dmar = (struct acpi_table_dmar *)dmar_tbl;
        if (!dmar)
                return -ENODEV;

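        /*
         * The DMAR width field holds the host address width minus one,
         * so a value below PAGE_SHIFT - 1 cannot even cover one page.
         */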
        if (dmar->width < PAGE_SHIFT - 1) {
                printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
                return -EINVAL;
        }

        printk(KERN_INFO PREFIX "Host address width %d\n",
                dmar->width + 1);

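        /*
         * The remapping structures (DRHD, RMRR, ATSR, RHSA) follow the
         * fixed DMAR header; each one carries its own type and length.
         */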
        entry_header = (struct acpi_dmar_header *)(dmar + 1);
        while (((unsigned long)entry_header) <
                        (((unsigned long)dmar) + dmar_tbl->length)) {
                /* Avoid looping forever on bad ACPI tables */
                if (entry_header->length == 0) {
                        printk(KERN_WARNING PREFIX
                                "Invalid 0-length structure\n");
                        ret = -EINVAL;
                        break;
                }

                dmar_table_print_dmar_entry(entry_header);

                switch (entry_header->type) {
                case ACPI_DMAR_TYPE_HARDWARE_UNIT:
                        ret = dmar_parse_one_drhd(entry_header);
                        break;
                case ACPI_DMAR_TYPE_RESERVED_MEMORY:
#ifdef CONFIG_DMAR
                        ret = dmar_parse_one_rmrr(entry_header);
#endif
                        break;
                case ACPI_DMAR_TYPE_ATSR:
#ifdef CONFIG_DMAR
                        ret = dmar_parse_one_atsr(entry_header);
#endif
                        break;
                case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
                        ret = dmar_parse_one_rhsa(entry_header);
#endif
                        break;
                default:
                        printk(KERN_WARNING PREFIX
                                "Unknown DMAR structure type %d\n",
                                entry_header->type);
                        ret = 0; /* for forward compatibility */
                        break;
                }
                if (ret)
                        break;

                entry_header = ((void *)entry_header + entry_header->length);
        }
        return ret;
}

int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
                          struct pci_dev *dev)
{
        int index;

        while (dev) {
                for (index = 0; index < cnt; index++)
                        if (dev == devices[index])
                                return 1;

                /* Check our parent */
                dev = dev->bus->self;
        }

        return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
        struct dmar_drhd_unit *dmaru = NULL;
        struct acpi_dmar_hardware_unit *drhd;

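        /*
         * INCLUDE_ALL units were registered at the tail of the list, so
         * units with an explicit device scope are always checked first.
         */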
        list_for_each_entry(dmaru, &dmar_drhd_units, list) {
                drhd = container_of(dmaru->hdr,
                                    struct acpi_dmar_hardware_unit,
                                    header);

                if (dmaru->include_all &&
                    drhd->segment == pci_domain_nr(dev->bus))
                        return dmaru;

                if (dmar_pci_device_match(dmaru->devices,
                                          dmaru->devices_cnt, dev))
                        return dmaru;
        }

        return NULL;
}

int __init dmar_dev_scope_init(void)
{
        struct dmar_drhd_unit *drhd, *drhd_n;
        int ret = -ENODEV;

        list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
                ret = dmar_parse_dev(drhd);
                if (ret)
                        return ret;
        }

#ifdef CONFIG_DMAR
        {
                struct dmar_rmrr_unit *rmrr, *rmrr_n;
                struct dmar_atsr_unit *atsr, *atsr_n;

                list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
                        ret = rmrr_parse_dev(rmrr);
                        if (ret)
                                return ret;
                }

                list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
                        ret = atsr_parse_dev(atsr);
                        if (ret)
                                return ret;
                }
        }
#endif

        return ret;
}

int __init dmar_table_init(void)
{
        static int dmar_table_initialized;
        int ret;

        if (dmar_table_initialized)
                return 0;

        dmar_table_initialized = 1;

        ret = parse_dmar_table();
        if (ret) {
                if (ret != -ENODEV)
                        printk(KERN_INFO PREFIX "Failed to parse DMAR table\n");
                return ret;
        }

        if (list_empty(&dmar_drhd_units)) {
                printk(KERN_INFO PREFIX "No DMAR devices found\n");
                return -ENODEV;
        }

#ifdef CONFIG_DMAR
        if (list_empty(&dmar_rmrr_units))
                printk(KERN_INFO PREFIX "No RMRR found\n");

        if (list_empty(&dmar_atsr_units))
                printk(KERN_INFO PREFIX "No ATSR found\n");
#endif

        return 0;
}

int __init check_zero_address(void)
{
        struct acpi_table_dmar *dmar;
        struct acpi_dmar_header *entry_header;
        struct acpi_dmar_hardware_unit *drhd;

        dmar = (struct acpi_table_dmar *)dmar_tbl;
        entry_header = (struct acpi_dmar_header *)(dmar + 1);

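        /*
         * Scan the DRHD entries only: a register base address of zero
         * means the BIOS reported a bogus unit, so DMA remapping gets
         * disabled rather than trusting the table.
         */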
        while (((unsigned long)entry_header) <
                        (((unsigned long)dmar) + dmar_tbl->length)) {
                /* Avoid looping forever on bad ACPI tables */
                if (entry_header->length == 0) {
                        printk(KERN_WARNING PREFIX
                                "Invalid 0-length structure\n");
                        return 0;
                }

                if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
                        drhd = (void *)entry_header;
                        if (!drhd->address) {
                                /* Promote an attitude of violence to a BIOS engineer today */
                                WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
                                     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                                     dmi_get_system_info(DMI_BIOS_VENDOR),
                                     dmi_get_system_info(DMI_BIOS_VERSION),
                                     dmi_get_system_info(DMI_PRODUCT_VERSION));
#ifdef CONFIG_DMAR
                                dmar_disabled = 1;
#endif
                                return 0;
                        }
                        break;
                }

                entry_header = ((void *)entry_header + entry_header->length);
        }
        return 1;
}

void __init detect_intel_iommu(void)
{
        int ret;

        ret = dmar_table_detect();
        if (ret)
                ret = check_zero_address();
        {
#ifdef CONFIG_INTR_REMAP
                struct acpi_table_dmar *dmar;
                /*
                 * For now we will disable DMA-remapping when interrupt
                 * remapping is enabled. Once support for queued
                 * invalidation of the IOTLB is added, we will not need
                 * this any more.
                 */
                dmar = (struct acpi_table_dmar *) dmar_tbl;
                if (ret && cpu_has_x2apic && dmar->flags & 0x1)
                        printk(KERN_INFO
                               "Queued invalidation will be enabled to support "
                               "x2apic and Intr-remapping.\n");
#endif
#ifdef CONFIG_DMAR
                if (ret && !no_iommu && !iommu_detected && !dmar_disabled)
                        iommu_detected = 1;
#endif
#ifdef CONFIG_X86
                if (ret)
                        x86_init.iommu.iommu_init = intel_iommu_init;
#endif
        }
        early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
        dmar_tbl = NULL;
}

int alloc_iommu(struct dmar_drhd_unit *drhd)
{
        struct intel_iommu *iommu;
        int map_size;
        u32 ver;
        static int iommu_allocated = 0;
        int agaw = 0;
        int msagaw = 0;

        iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
        if (!iommu)
                return -ENOMEM;

        iommu->seq_id = iommu_allocated++;
        sprintf(iommu->name, "dmar%d", iommu->seq_id);

        iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
        if (!iommu->reg) {
                printk(KERN_ERR "IOMMU: can't map the region\n");
                goto error;
        }
        iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
        iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

        if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
                /* Promote an attitude of violence to a BIOS engineer today */
                WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
                     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                     drhd->reg_base_addr,
                     dmi_get_system_info(DMI_BIOS_VENDOR),
                     dmi_get_system_info(DMI_BIOS_VERSION),
                     dmi_get_system_info(DMI_PRODUCT_VERSION));
                goto err_unmap;
        }

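        /*
         * agaw is the adjusted guest address width the IOMMU will use for
         * page-table walks; msagaw is the maximum supported adjusted
         * guest address width reported by the hardware.
         */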
#ifdef CONFIG_DMAR
        agaw = iommu_calculate_agaw(iommu);
        if (agaw < 0) {
                printk(KERN_ERR
                       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
                       iommu->seq_id);
                goto err_unmap;
        }
        msagaw = iommu_calculate_max_sagaw(iommu);
        if (msagaw < 0) {
                printk(KERN_ERR
                        "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
                        iommu->seq_id);
                goto err_unmap;
        }
#endif
        iommu->agaw = agaw;
        iommu->msagaw = msagaw;

        iommu->node = -1;

        /* the registers might be more than one page */
        map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
                cap_max_fault_reg_offset(iommu->cap));
        map_size = VTD_PAGE_ALIGN(map_size);
        if (map_size > VTD_PAGE_SIZE) {
                iounmap(iommu->reg);
                iommu->reg = ioremap(drhd->reg_base_addr, map_size);
                if (!iommu->reg) {
                        printk(KERN_ERR "IOMMU: can't map the region\n");
                        goto error;
                }
        }

        ver = readl(iommu->reg + DMAR_VER_REG);
        pr_info("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
                (unsigned long long)drhd->reg_base_addr,
                DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
                (unsigned long long)iommu->cap,
                (unsigned long long)iommu->ecap);

        spin_lock_init(&iommu->register_lock);

        drhd->iommu = iommu;
        return 0;

 err_unmap:
        iounmap(iommu->reg);
 error:
        kfree(iommu);
        return -1;
}

void free_iommu(struct intel_iommu *iommu)
{
        if (!iommu)
                return;

#ifdef CONFIG_DMAR
        free_dmar_iommu(iommu);
#endif

        if (iommu->reg)
                iounmap(iommu->reg);
        kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
        while (qi->desc_status[qi->free_tail] == QI_DONE ||
               qi->desc_status[qi->free_tail] == QI_ABORT) {
                qi->desc_status[qi->free_tail] = QI_FREE;
                qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
                qi->free_cnt++;
        }
}

static int qi_check_fault(struct intel_iommu *iommu, int index)
{
        u32 fault;
        int head, tail;
        struct q_inval *qi = iommu->qi;
        int wait_index = (index + 1) % QI_LENGTH;

        if (qi->desc_status[wait_index] == QI_ABORT)
                return -EAGAIN;

        fault = readl(iommu->reg + DMAR_FSTS_REG);

        /*
         * If IQE happens, the head points to the descriptor associated
         * with the error. No new descriptors are fetched until the IQE
         * is cleared.
         */
        if (fault & DMA_FSTS_IQE) {
                head = readl(iommu->reg + DMAR_IQH_REG);
                if ((head >> DMAR_IQ_SHIFT) == index) {
                        printk(KERN_ERR "VT-d detected invalid descriptor: "
                                "low=%llx, high=%llx\n",
                                (unsigned long long)qi->desc[index].low,
                                (unsigned long long)qi->desc[index].high);
                        memcpy(&qi->desc[index], &qi->desc[wait_index],
                                        sizeof(struct qi_desc));
                        __iommu_flush_cache(iommu, &qi->desc[index],
                                        sizeof(struct qi_desc));
                        writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
                        return -EINVAL;
                }
        }

        /*
         * If ITE happens, all pending wait_desc commands are aborted.
         * No new descriptors are fetched until the ITE is cleared.
         */
        if (fault & DMA_FSTS_ITE) {
                head = readl(iommu->reg + DMAR_IQH_REG);
                head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
                head |= 1;
                tail = readl(iommu->reg + DMAR_IQT_REG);
                tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

                writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

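                /*
                 * Descriptors are queued in pairs (command + wait), with
                 * the wait descriptors in the odd slots; head was rounded
                 * up to the wait slot above, so step back two at a time
                 * and mark every pending wait descriptor as aborted.
                 */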
                do {
                        if (qi->desc_status[head] == QI_IN_USE)
                                qi->desc_status[head] = QI_ABORT;
                        head = (head - 2 + QI_LENGTH) % QI_LENGTH;
                } while (head != tail);

                if (qi->desc_status[wait_index] == QI_ABORT)
                        return -EAGAIN;
        }

        if (fault & DMA_FSTS_ICE)
                writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

        return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
        int rc;
        struct q_inval *qi = iommu->qi;
        struct qi_desc *hw, wait_desc;
        int wait_index, index;
        unsigned long flags;

        if (!qi)
                return 0;

        hw = qi->desc;

restart:
        rc = 0;

        spin_lock_irqsave(&qi->q_lock, flags);
        while (qi->free_cnt < 3) {
                spin_unlock_irqrestore(&qi->q_lock, flags);
                cpu_relax();
                spin_lock_irqsave(&qi->q_lock, flags);
        }

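        /*
         * Each submission consumes two consecutive slots: one for the
         * caller's descriptor and one for a wait descriptor, which tells
         * the hardware to write QI_DONE into desc_status on completion.
         */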
        index = qi->free_head;
        wait_index = (index + 1) % QI_LENGTH;

        qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

        hw[index] = *desc;

        wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
                        QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
        wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

        hw[wait_index] = wait_desc;

        __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
        __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

        qi->free_head = (qi->free_head + 2) % QI_LENGTH;
        qi->free_cnt -= 2;

        /*
         * Update the HW tail register to indicate the presence of
         * new descriptors.
         */
        writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

        while (qi->desc_status[wait_index] != QI_DONE) {
                /*
                 * We will leave the interrupts disabled, to prevent
                 * interrupt context from queueing another cmd while a cmd
                 * is already submitted and waiting for completion on this
                 * cpu. This is to avoid a deadlock where the interrupt
                 * context could wait indefinitely for free slots in the
                 * queue.
                 */
                rc = qi_check_fault(iommu, index);
                if (rc)
                        break;

                spin_unlock(&qi->q_lock);
                cpu_relax();
                spin_lock(&qi->q_lock);
        }

        qi->desc_status[index] = QI_DONE;

        reclaim_free_desc(qi);
        spin_unlock_irqrestore(&qi->q_lock, flags);

        if (rc == -EAGAIN)
                goto restart;

        return rc;
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
        struct qi_desc desc;

        desc.low = QI_IEC_TYPE;
        desc.high = 0;

        /* should never fail */
        qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
                      u64 type)
{
        struct qi_desc desc;

        desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
                        | QI_CC_GRAN(type) | QI_CC_TYPE;
        desc.high = 0;

        qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
                    unsigned int size_order, u64 type)
{
        u8 dw = 0, dr = 0;

        struct qi_desc desc;
        int ih = 0;

        if (cap_write_drain(iommu->cap))
                dw = 1;

        if (cap_read_drain(iommu->cap))
                dr = 1;

        desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
                | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
        desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
                | QI_IOTLB_AM(size_order);

        qi_submit_sync(&desc, iommu);
}

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
                        u64 addr, unsigned mask)
{
        struct qi_desc desc;

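        /*
         * For a multi-page invalidation (mask > 0), the low-order address
         * bits below bit (VTD_PAGE_SHIFT + mask - 1) are set to ones to
         * encode the size of the span, per the VT-d device-IOTLB
         * invalidate descriptor format.
         */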
        if (mask) {
                BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
                addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
                desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
        } else
                desc.high = QI_DEV_IOTLB_ADDR(addr);

        if (qdep >= QI_DEV_IOTLB_MAX_INVS)
                qdep = 0;

        desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
                   QI_DIOTLB_TYPE;

        qi_submit_sync(&desc, iommu);
}

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
        unsigned long flags;
        u32 sts;
        cycles_t start_time = get_cycles();

        if (!ecap_qis(iommu->ecap))
                return;

        spin_lock_irqsave(&iommu->register_lock, flags);

        sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
        if (!(sts & DMA_GSTS_QIES))
                goto end;

        /*
         * Give a chance to HW to complete the pending invalidation requests.
         */
        while ((readl(iommu->reg + DMAR_IQT_REG) !=
                readl(iommu->reg + DMAR_IQH_REG)) &&
                (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
                cpu_relax();

        iommu->gcmd &= ~DMA_GCMD_QIE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
                      !(sts & DMA_GSTS_QIES), sts);
end:
        spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
        u32 sts;
        unsigned long flags;
        struct q_inval *qi = iommu->qi;

        qi->free_head = qi->free_tail = 0;
        qi->free_cnt = QI_LENGTH;

        spin_lock_irqsave(&iommu->register_lock, flags);

        /* write zero to the tail reg */
        writel(0, iommu->reg + DMAR_IQT_REG);

        dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

        iommu->gcmd |= DMA_GCMD_QIE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware complete it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

        spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
        struct q_inval *qi;
        struct page *desc_page;

        if (!ecap_qis(iommu->ecap))
                return -ENOENT;

        /*
         * queued invalidation is already setup and enabled.
         */
        if (iommu->qi)
                return 0;

        iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
        if (!iommu->qi)
                return -ENOMEM;

        qi = iommu->qi;

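        /*
         * The invalidation queue itself is a single zeroed page, allocated
         * on the IOMMU's NUMA node; __dmar_enable_qi() later hands its
         * physical address to the hardware via DMAR_IQA_REG.
         */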
        desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
        if (!desc_page) {
                kfree(qi);
                iommu->qi = NULL;
                return -ENOMEM;
        }

        qi->desc = page_address(desc_page);

        qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
        if (!qi->desc_status) {
                free_page((unsigned long) qi->desc);
                kfree(qi);
                iommu->qi = NULL;
                return -ENOMEM;
        }

        qi->free_head = qi->free_tail = 0;
        qi->free_cnt = QI_LENGTH;

        spin_lock_init(&qi->q_lock);

        __dmar_enable_qi(iommu);

        return 0;
}

/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
        DMA_REMAP,
        INTR_REMAP,
        UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
        "Software",
        "Present bit in root entry is clear",
        "Present bit in context entry is clear",
        "Invalid context entry",
        "Access beyond MGAW",
        "PTE Write access is not set",
        "PTE Read access is not set",
        "Next page table ptr is invalid",
        "Root table address invalid",
        "Context table ptr is invalid",
        "non-zero reserved fields in RTP",
        "non-zero reserved fields in CTP",
        "non-zero reserved fields in PTE",
};

static const char *intr_remap_fault_reasons[] =
{
        "Detected reserved fields in the decoded interrupt-remapped request",
        "Interrupt index exceeded the interrupt-remapping table size",
        "Present field in the IRTE entry is clear",
        "Error accessing interrupt-remapping table pointed by IRTA_REG",
        "Detected reserved fields in the IRTE entry",
        "Blocked a compatibility format interrupt request",
        "Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX    (ARRAY_SIZE(fault_reason_strings) - 1)

const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
        if (fault_reason >= 0x20 && (fault_reason - 0x20 <
                                     ARRAY_SIZE(intr_remap_fault_reasons))) {
                *fault_type = INTR_REMAP;
                return intr_remap_fault_reasons[fault_reason - 0x20];
        } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
                *fault_type = DMA_REMAP;
                return dma_remap_fault_reasons[fault_reason];
        } else {
                *fault_type = UNKNOWN;
                return "Unknown";
        }
}

void dmar_msi_unmask(unsigned int irq)
{
        struct intel_iommu *iommu = get_irq_data(irq);
        unsigned long flag;

        /* unmask it */
        spin_lock_irqsave(&iommu->register_lock, flag);
        writel(0, iommu->reg + DMAR_FECTL_REG);
        /* Read a reg to force flush the posted write */
        readl(iommu->reg + DMAR_FECTL_REG);
        spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(unsigned int irq)
{
        unsigned long flag;
        struct intel_iommu *iommu = get_irq_data(irq);

        /* mask it */
        spin_lock_irqsave(&iommu->register_lock, flag);
        writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
        /* Read a reg to force flush the posted write */
        readl(iommu->reg + DMAR_FECTL_REG);
        spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
        struct intel_iommu *iommu = get_irq_data(irq);
        unsigned long flag;

        spin_lock_irqsave(&iommu->register_lock, flag);
        writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
        writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
        writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
        spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
        struct intel_iommu *iommu = get_irq_data(irq);
        unsigned long flag;

        spin_lock_irqsave(&iommu->register_lock, flag);
        msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
        msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
        msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
        spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
                u8 fault_reason, u16 source_id, unsigned long long addr)
{
        const char *reason;
        int fault_type;

        reason = dmar_get_fault_reason(fault_reason, &fault_type);

        if (fault_type == INTR_REMAP)
                printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
                       "fault index %llx\n"
                        "INTR-REMAP:[fault reason %02d] %s\n",
                        (source_id >> 8), PCI_SLOT(source_id & 0xFF),
                        PCI_FUNC(source_id & 0xFF), addr >> 48,
                        fault_reason, reason);
        else
                printk(KERN_ERR
                       "DMAR:[%s] Request device [%02x:%02x.%d] "
                       "fault addr %llx\n"
                       "DMAR:[fault reason %02d] %s\n",
                       (type ? "DMA Read" : "DMA Write"),
                       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
                       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
        return 0;
}

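/* Each primary fault recording register is 128 bits (16 bytes) wide. */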
#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
        struct intel_iommu *iommu = dev_id;
        int reg, fault_index;
        u32 fault_status;
        unsigned long flag;

        spin_lock_irqsave(&iommu->register_lock, flag);
        fault_status = readl(iommu->reg + DMAR_FSTS_REG);
        if (fault_status)
                printk(KERN_ERR "DRHD: handling fault status reg %x\n",
                       fault_status);

        /* TBD: ignore advanced fault log currently */
        if (!(fault_status & DMA_FSTS_PPF))
                goto clear_rest;

        fault_index = dma_fsts_fault_record_index(fault_status);
        reg = cap_fault_reg_offset(iommu->cap);
        while (1) {
                u8 fault_reason;
                u16 source_id;
                u64 guest_addr;
                int type;
                u32 data;

                /* highest 32 bits */
                data = readl(iommu->reg + reg +
                                fault_index * PRIMARY_FAULT_REG_LEN + 12);
                if (!(data & DMA_FRCD_F))
                        break;

                fault_reason = dma_frcd_fault_reason(data);
                type = dma_frcd_type(data);

                data = readl(iommu->reg + reg +
                                fault_index * PRIMARY_FAULT_REG_LEN + 8);
                source_id = dma_frcd_source_id(data);

                guest_addr = dmar_readq(iommu->reg + reg +
                                fault_index * PRIMARY_FAULT_REG_LEN);
                guest_addr = dma_frcd_page_addr(guest_addr);
                /* clear the fault */
                writel(DMA_FRCD_F, iommu->reg + reg +
                        fault_index * PRIMARY_FAULT_REG_LEN + 12);

                spin_unlock_irqrestore(&iommu->register_lock, flag);

                dmar_fault_do_one(iommu, type, fault_reason,
                                source_id, guest_addr);

                fault_index++;
                if (fault_index >= cap_num_fault_regs(iommu->cap))
                        fault_index = 0;
                spin_lock_irqsave(&iommu->register_lock, flag);
        }
clear_rest:
        /* clear all the other faults */
        fault_status = readl(iommu->reg + DMAR_FSTS_REG);
        writel(fault_status, iommu->reg + DMAR_FSTS_REG);

        spin_unlock_irqrestore(&iommu->register_lock, flag);
        return IRQ_HANDLED;
}

int dmar_set_interrupt(struct intel_iommu *iommu)
{
        int irq, ret;

        /*
         * Check if the fault interrupt is already initialized.
         */
        if (iommu->irq)
                return 0;

        irq = create_irq();
        if (!irq) {
                printk(KERN_ERR "IOMMU: no free vectors\n");
                return -EINVAL;
        }

        set_irq_data(irq, iommu);
        iommu->irq = irq;

        ret = arch_setup_dmar_msi(irq);
        if (ret) {
                set_irq_data(irq, NULL);
                iommu->irq = 0;
                destroy_irq(irq);
                return ret;
        }

        ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
        if (ret)
                printk(KERN_ERR "IOMMU: can't request irq\n");
        return ret;
}

int __init enable_drhd_fault_handling(void)
{
        struct dmar_drhd_unit *drhd;

        /*
         * Enable fault control interrupt.
         */
        for_each_drhd_unit(drhd) {
                int ret;
                struct intel_iommu *iommu = drhd->iommu;
                ret = dmar_set_interrupt(iommu);

                if (ret) {
                        printk(KERN_ERR "DRHD %Lx: failed to enable fault "
                               "interrupt, ret %d\n",
                               (unsigned long long)drhd->reg_base_addr, ret);
                        return -1;
                }
        }

        return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
        if (!ecap_qis(iommu->ecap))
                return -ENOENT;

        if (!iommu->qi)
                return -ENOENT;

        /*
         * First disable queued invalidation.
         */
        dmar_disable_qi(iommu);
        /*
         * Then enable queued invalidation again. Since there are no
         * pending invalidation requests now, it's safe to re-enable
         * queued invalidation.
         */
        __dmar_enable_qi(iommu);

        return 0;
}

/*
 * Check interrupt remapping support in DMAR table description.
 */
int dmar_ir_support(void)
{
        struct acpi_table_dmar *dmar;
        dmar = (struct acpi_table_dmar *)dmar_tbl;
        if (!dmar)
                return 0;
        return dmar->flags & 0x1;
}