drivers/iommu/dmar.c

/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"

/*
 * Assumptions:
 * 1) The hotplug framework guarantees that a DMAR unit will be hot-added
 *    before the IO devices managed by that unit.
 * 2) The hotplug framework guarantees that a DMAR unit will be hot-removed
 *    after the IO devices managed by that unit.
 * 3) Hotplug events are rare.
 *
 * Locking rules for DMA and interrupt remapping related global data structures:
 * 1) Use dmar_global_lock in process context
 * 2) Use RCU in interrupt context
 */
DECLARE_RWSEM(dmar_global_lock);
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;
static int dmar_dev_scope_status = 1;

static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
        /*
         * Add INCLUDE_ALL at the tail, so that a scan of the list will
         * find it at the very end.
         */
        if (drhd->include_all)
                list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
        else
                list_add_rcu(&drhd->list, &dmar_drhd_units);
}

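/*
 * Walk the variable-length device scope entries that follow a DMAR
 * structure, count the entry types a device can later be attached to,
 * and allocate a matching dmar_dev_scope array (or return NULL if the
 * count is zero).
 */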
void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
{
        struct acpi_dmar_device_scope *scope;

        *cnt = 0;
        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ACPI ||
                    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
                    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
                        (*cnt)++;
                else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
                        scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
                        pr_warn("Unsupported device scope\n");
                }
                start += scope->length;
        }
        if (*cnt == 0)
                return NULL;

        return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
}

void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
{
        int i;
        struct device *tmp_dev;

        if (*devices && *cnt) {
                for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
                        put_device(tmp_dev);
                kfree(*devices);
        }

        *devices = NULL;
        *cnt = 0;
}

/* Optimize out kzalloc()/kfree() for normal cases */
static char dmar_pci_notify_info_buf[64];

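/*
 * Build a dmar_pci_notify_info for a PCI hotplug event. For device
 * addition the PCI path depth is counted first so path[] can be sized;
 * the small static buffer above covers the common case, with a
 * kzalloc() fallback for deep hierarchies. path[] is then filled
 * back-to-front while walking tmp->bus->self upward, so the result is
 * ordered root-to-leaf.
 */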
static struct dmar_pci_notify_info *
dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
{
        int level = 0;
        size_t size;
        struct pci_dev *tmp;
        struct dmar_pci_notify_info *info;

        BUG_ON(dev->is_virtfn);

        /* Only generate path[] for device addition event */
        if (event == BUS_NOTIFY_ADD_DEVICE)
                for (tmp = dev; tmp; tmp = tmp->bus->self)
                        level++;

        size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path);
        if (size <= sizeof(dmar_pci_notify_info_buf)) {
                info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
        } else {
                info = kzalloc(size, GFP_KERNEL);
                if (!info) {
                        pr_warn("Out of memory when allocating notify_info for %s.\n",
                                pci_name(dev));
                        if (dmar_dev_scope_status == 0)
                                dmar_dev_scope_status = -ENOMEM;
                        return NULL;
                }
        }

        info->event = event;
        info->dev = dev;
        info->seg = pci_domain_nr(dev->bus);
        info->level = level;
        if (event == BUS_NOTIFY_ADD_DEVICE) {
                for (tmp = dev; tmp; tmp = tmp->bus->self) {
                        level--;
                        info->path[level].device = PCI_SLOT(tmp->devfn);
                        info->path[level].function = PCI_FUNC(tmp->devfn);
                        if (pci_is_root_bus(tmp->bus))
                                info->bus = tmp->bus->number;
                }
        }

        return info;
}

static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
{
        if ((void *)info != dmar_pci_notify_info_buf)
                kfree(info);
}

static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
                                struct acpi_dmar_pci_path *path, int count)
{
        int i;

        if (info->bus != bus)
                return false;
        if (info->level != count)
                return false;

        for (i = 0; i < count; i++) {
                if (path[i].device != info->path[i].device ||
                    path[i].function != info->path[i].function)
                        return false;
        }

        return true;
}

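/*
 * Try to attach the PCI device described by @info to an empty slot in
 * a DRHD's device scope array, after checking that the ACPI scope
 * entry's segment, bus/devfn path and entry type all match.
 */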
/* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
                          void *start, void *end, u16 segment,
                          struct dmar_dev_scope *devices,
                          int devices_cnt)
{
        int i, level;
        struct device *tmp, *dev = &info->dev->dev;
        struct acpi_dmar_device_scope *scope;
        struct acpi_dmar_pci_path *path;

        if (segment != info->seg)
                return 0;

        for (; start < end; start += scope->length) {
                scope = start;
                if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
                    scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
                        continue;

                path = (struct acpi_dmar_pci_path *)(scope + 1);
                level = (scope->length - sizeof(*scope)) / sizeof(*path);
                if (!dmar_match_pci_path(info, scope->bus, path, level))
                        continue;

                if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT) ^
                    (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL)) {
                        pr_warn("Device scope type does not match for %s\n",
                                pci_name(info->dev));
                        return -EINVAL;
                }

                for_each_dev_scope(devices, devices_cnt, i, tmp)
                        if (tmp == NULL) {
                                devices[i].bus = info->dev->bus->number;
                                devices[i].devfn = info->dev->devfn;
                                rcu_assign_pointer(devices[i].dev,
                                                   get_device(dev));
                                return 1;
                        }
                BUG_ON(i >= devices_cnt);
        }

        return 0;
}

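/*
 * Undo dmar_insert_dev_scope() for a removed device: clear its slot in
 * the scope array under RCU and drop the device reference once all
 * readers are guaranteed to have moved on.
 */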
int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
                          struct dmar_dev_scope *devices, int count)
{
        int index;
        struct device *tmp;

        if (info->seg != segment)
                return 0;

        for_each_active_dev_scope(devices, count, index, tmp)
                if (tmp == &info->dev->dev) {
                        rcu_assign_pointer(devices[index].dev, NULL);
                        synchronize_rcu();
                        put_device(tmp);
                        return 1;
                }

        return 0;
}

static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
{
        int ret = 0;
        struct dmar_drhd_unit *dmaru;
        struct acpi_dmar_hardware_unit *drhd;

        for_each_drhd_unit(dmaru) {
                if (dmaru->include_all)
                        continue;

                drhd = container_of(dmaru->hdr,
                                    struct acpi_dmar_hardware_unit, header);
                ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
                                ((void *)drhd) + drhd->header.length,
                                dmaru->segment,
                                dmaru->devices, dmaru->devices_cnt);
                if (ret != 0)
                        break;
        }
        if (ret >= 0)
                ret = dmar_iommu_notify_scope_dev(info);
        if (ret < 0 && dmar_dev_scope_status == 0)
                dmar_dev_scope_status = ret;

        return ret;
}

static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
{
        struct dmar_drhd_unit *dmaru;

        for_each_drhd_unit(dmaru)
                if (dmar_remove_dev_scope(info, dmaru->segment,
                        dmaru->devices, dmaru->devices_cnt))
                        break;
        dmar_iommu_notify_scope_dev(info);
}

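/*
 * PCI bus notifier: keeps the DRHD device scope arrays in sync with
 * PCI hotplug. Registered at the lowest priority (INT_MIN) so it runs
 * after all other bus notifiers.
 */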
static int dmar_pci_bus_notifier(struct notifier_block *nb,
                                 unsigned long action, void *data)
{
        struct pci_dev *pdev = to_pci_dev(data);
        struct dmar_pci_notify_info *info;

        /* Only care about add/remove events for physical functions */
        if (pdev->is_virtfn)
                return NOTIFY_DONE;
        if (action != BUS_NOTIFY_ADD_DEVICE && action != BUS_NOTIFY_DEL_DEVICE)
                return NOTIFY_DONE;

        info = dmar_alloc_pci_notify_info(pdev, action);
        if (!info)
                return NOTIFY_DONE;

        down_write(&dmar_global_lock);
        if (action == BUS_NOTIFY_ADD_DEVICE)
                dmar_pci_bus_add_dev(info);
        else if (action == BUS_NOTIFY_DEL_DEVICE)
                dmar_pci_bus_del_dev(info);
        up_write(&dmar_global_lock);

        dmar_free_pci_notify_info(info);

        return NOTIFY_OK;
}

static struct notifier_block dmar_pci_bus_nb = {
        .notifier_call = dmar_pci_bus_notifier,
        .priority = INT_MIN,
};

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct dmar_drhd_unit *dmaru;
        int ret = 0;

        drhd = (struct acpi_dmar_hardware_unit *)header;
        dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
        if (!dmaru)
                return -ENOMEM;

        dmaru->hdr = header;
        dmaru->reg_base_addr = drhd->address;
        dmaru->segment = drhd->segment;
        dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
        dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
                                              ((void *)drhd) + drhd->header.length,
                                              &dmaru->devices_cnt);
        if (dmaru->devices_cnt && dmaru->devices == NULL) {
                kfree(dmaru);
                return -ENOMEM;
        }

        ret = alloc_iommu(dmaru);
        if (ret) {
                dmar_free_dev_scope(&dmaru->devices,
                                    &dmaru->devices_cnt);
                kfree(dmaru);
                return ret;
        }
        dmar_register_drhd_unit(dmaru);
        return 0;
}

static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
{
        if (dmaru->devices && dmaru->devices_cnt)
                dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
        if (dmaru->iommu)
                free_iommu(dmaru->iommu);
        kfree(dmaru);
}

static int __init dmar_parse_one_andd(struct acpi_dmar_header *header)
{
        struct acpi_dmar_andd *andd = (void *)header;

        /* Check for NUL termination within the designated length */
        if (strnlen(andd->object_name, header->length - 8) == header->length - 8) {
                WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
                           "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
                           "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                           dmi_get_system_info(DMI_BIOS_VENDOR),
                           dmi_get_system_info(DMI_BIOS_VERSION),
                           dmi_get_system_info(DMI_PRODUCT_VERSION));
                return -EINVAL;
        }
        pr_info("ANDD device: %x name: %s\n", andd->device_number,
                andd->object_name);

        return 0;
}

#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
        struct acpi_dmar_rhsa *rhsa;
        struct dmar_drhd_unit *drhd;

        rhsa = (struct acpi_dmar_rhsa *)header;
        for_each_drhd_unit(drhd) {
                if (drhd->reg_base_addr == rhsa->base_address) {
                        int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

                        if (!node_online(node))
                                node = -1;
                        drhd->iommu->node = node;
                        return 0;
                }
        }
        WARN_TAINT(
                1, TAINT_FIRMWARE_WORKAROUND,
                "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
                "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                rhsa->base_address,
                dmi_get_system_info(DMI_BIOS_VENDOR),
                dmi_get_system_info(DMI_BIOS_VERSION),
                dmi_get_system_info(DMI_PRODUCT_VERSION));

        return 0;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_reserved_memory *rmrr;
        struct acpi_dmar_atsr *atsr;
        struct acpi_dmar_rhsa *rhsa;

        switch (header->type) {
        case ACPI_DMAR_TYPE_HARDWARE_UNIT:
                drhd = container_of(header, struct acpi_dmar_hardware_unit,
                                    header);
                pr_info("DRHD base: %#016Lx flags: %#x\n",
                        (unsigned long long)drhd->address, drhd->flags);
                break;
        case ACPI_DMAR_TYPE_RESERVED_MEMORY:
                rmrr = container_of(header, struct acpi_dmar_reserved_memory,
                                    header);
                pr_info("RMRR base: %#016Lx end: %#016Lx\n",
                        (unsigned long long)rmrr->base_address,
                        (unsigned long long)rmrr->end_address);
                break;
        case ACPI_DMAR_TYPE_ATSR:
                atsr = container_of(header, struct acpi_dmar_atsr, header);
                pr_info("ATSR flags: %#x\n", atsr->flags);
                break;
        case ACPI_DMAR_HARDWARE_AFFINITY:
                rhsa = container_of(header, struct acpi_dmar_rhsa, header);
                pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
                       (unsigned long long)rhsa->base_address,
                       rhsa->proximity_domain);
                break;
        case ACPI_DMAR_TYPE_ANDD:
                /*
                 * We don't print this here because we need to sanity-check
                 * it first. So print it in dmar_parse_one_andd() instead.
                 */
                break;
        }
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
        acpi_status status = AE_OK;

        /* If we can find the DMAR table, then there are DMAR devices */
        status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
                                (struct acpi_table_header **)&dmar_tbl,
                                &dmar_tbl_size);

        if (ACPI_SUCCESS(status) && !dmar_tbl) {
                pr_warn("Unable to map DMAR\n");
                status = AE_NOT_FOUND;
        }

        return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
        struct acpi_table_dmar *dmar;
        struct acpi_dmar_header *entry_header;
        int ret = 0;
        int drhd_count = 0;

        /*
         * Do it again; the earlier dmar_tbl mapping could have used the
         * fixed map.
         */
        dmar_table_detect();

        /*
         * ACPI tables may not be DMA protected by tboot, so use the DMAR
         * copy saved by SINIT in SinitMleData in the TXT heap (which is
         * DMA protected).
         */
        dmar_tbl = tboot_get_dmar_table(dmar_tbl);

        dmar = (struct acpi_table_dmar *)dmar_tbl;
        if (!dmar)
                return -ENODEV;

        if (dmar->width < PAGE_SHIFT - 1) {
                pr_warn("Invalid DMAR haw\n");
                return -EINVAL;
        }

        pr_info("Host address width %d\n", dmar->width + 1);

        entry_header = (struct acpi_dmar_header *)(dmar + 1);
        while (((unsigned long)entry_header) <
                        (((unsigned long)dmar) + dmar_tbl->length)) {
                /* Avoid looping forever on bad ACPI tables */
                if (entry_header->length == 0) {
                        pr_warn("Invalid 0-length structure\n");
                        ret = -EINVAL;
                        break;
                }

                dmar_table_print_dmar_entry(entry_header);

                switch (entry_header->type) {
                case ACPI_DMAR_TYPE_HARDWARE_UNIT:
                        drhd_count++;
                        ret = dmar_parse_one_drhd(entry_header);
                        break;
                case ACPI_DMAR_TYPE_RESERVED_MEMORY:
                        ret = dmar_parse_one_rmrr(entry_header);
                        break;
                case ACPI_DMAR_TYPE_ATSR:
                        ret = dmar_parse_one_atsr(entry_header);
                        break;
                case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
                        ret = dmar_parse_one_rhsa(entry_header);
#endif
                        break;
                case ACPI_DMAR_TYPE_ANDD:
                        ret = dmar_parse_one_andd(entry_header);
                        break;
                default:
                        pr_warn("Unknown DMAR structure type %d\n",
                                entry_header->type);
                        ret = 0; /* for forward compatibility */
                        break;
                }
                if (ret)
                        break;

                entry_header = ((void *)entry_header + entry_header->length);
        }
        if (drhd_count == 0)
                pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
        return ret;
}

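/*
 * Check whether @dev, or any PCI bridge above it, appears in a DRHD's
 * device scope array.
 */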
static int dmar_pci_device_match(struct dmar_dev_scope devices[],
                                 int cnt, struct pci_dev *dev)
{
        int index;
        struct device *tmp;

        while (dev) {
                for_each_active_dev_scope(devices, cnt, index, tmp)
                        if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
                                return 1;

                /* Check our parent */
                dev = dev->bus->self;
        }

        return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
        struct dmar_drhd_unit *dmaru;
        struct acpi_dmar_hardware_unit *drhd;

        dev = pci_physfn(dev);

        rcu_read_lock();
        for_each_drhd_unit(dmaru) {
                drhd = container_of(dmaru->hdr,
                                    struct acpi_dmar_hardware_unit,
                                    header);

                if (dmaru->include_all &&
                    drhd->segment == pci_domain_nr(dev->bus))
                        goto out;

                if (dmar_pci_device_match(dmaru->devices,
                                          dmaru->devices_cnt, dev))
                        goto out;
        }
        dmaru = NULL;
out:
        rcu_read_unlock();

        return dmaru;
}

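/*
 * Bind an ACPI namespace device reported via ANDD to the first free
 * slot in the scope array of the DRHD whose ACPI scope entry carries
 * the matching enumeration ID.
 */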
static void __init dmar_acpi_insert_dev_scope(u8 device_number,
                                              struct acpi_device *adev)
{
        struct dmar_drhd_unit *dmaru;
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_device_scope *scope;
        struct device *tmp;
        int i;
        struct acpi_dmar_pci_path *path;

        for_each_drhd_unit(dmaru) {
                drhd = container_of(dmaru->hdr,
                                    struct acpi_dmar_hardware_unit,
                                    header);

                for (scope = (void *)(drhd + 1);
                     (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
                     scope = ((void *)scope) + scope->length) {
                        if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ACPI)
                                continue;
                        if (scope->enumeration_id != device_number)
                                continue;

                        path = (void *)(scope + 1);
                        pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
                                dev_name(&adev->dev), dmaru->reg_base_addr,
                                scope->bus, path->device, path->function);
                        for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
                                if (tmp == NULL) {
                                        dmaru->devices[i].bus = scope->bus;
                                        dmaru->devices[i].devfn = PCI_DEVFN(path->device,
                                                                            path->function);
                                        rcu_assign_pointer(dmaru->devices[i].dev,
                                                           get_device(&adev->dev));
                                        return;
                                }
                        BUG_ON(i >= dmaru->devices_cnt);
                }
        }
        pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
                device_number, dev_name(&adev->dev));
}

static int __init dmar_acpi_dev_scope_init(void)
{
        struct acpi_dmar_andd *andd;

        if (dmar_tbl == NULL)
                return -ENODEV;

        for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
             ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
             andd = ((void *)andd) + andd->header.length) {
                if (andd->header.type == ACPI_DMAR_TYPE_ANDD) {
                        acpi_handle h;
                        struct acpi_device *adev;

                        if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
                                                          andd->object_name,
                                                          &h))) {
                                pr_err("Failed to find handle for ACPI object %s\n",
                                       andd->object_name);
                                continue;
                        }
                        acpi_bus_get_device(h, &adev);
                        if (!adev) {
                                pr_err("Failed to get device for ACPI object %s\n",
                                       andd->object_name);
                                continue;
                        }
                        dmar_acpi_insert_dev_scope(andd->device_number, adev);
                }
        }
        return 0;
}

int __init dmar_dev_scope_init(void)
{
        struct pci_dev *dev = NULL;
        struct dmar_pci_notify_info *info;

        if (dmar_dev_scope_status != 1)
                return dmar_dev_scope_status;

        if (list_empty(&dmar_drhd_units)) {
                dmar_dev_scope_status = -ENODEV;
        } else {
                dmar_dev_scope_status = 0;

                dmar_acpi_dev_scope_init();

                for_each_pci_dev(dev) {
                        if (dev->is_virtfn)
                                continue;

                        info = dmar_alloc_pci_notify_info(dev,
                                        BUS_NOTIFY_ADD_DEVICE);
                        if (!info)
                                return dmar_dev_scope_status;

                        dmar_pci_bus_add_dev(info);
                        dmar_free_pci_notify_info(info);
                }

                bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
        }

        return dmar_dev_scope_status;
}

int __init dmar_table_init(void)
{
        static int dmar_table_initialized;
        int ret;

        if (dmar_table_initialized == 0) {
                ret = parse_dmar_table();
                if (ret < 0) {
                        if (ret != -ENODEV)
                                pr_info("Failed to parse DMAR table\n");
                } else if (list_empty(&dmar_drhd_units)) {
                        pr_info("No DMAR devices found\n");
                        ret = -ENODEV;
                }

                if (ret < 0)
                        dmar_table_initialized = ret;
                else
                        dmar_table_initialized = 1;
        }

        return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
}

static void warn_invalid_dmar(u64 addr, const char *message)
{
        WARN_TAINT_ONCE(
                1, TAINT_FIRMWARE_WORKAROUND,
                "Your BIOS is broken; DMAR reported at address %llx%s!\n"
                "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                addr, message,
                dmi_get_system_info(DMI_BIOS_VENDOR),
                dmi_get_system_info(DMI_BIOS_VERSION),
                dmi_get_system_info(DMI_PRODUCT_VERSION));
}

static int __init check_zero_address(void)
{
        struct acpi_table_dmar *dmar;
        struct acpi_dmar_header *entry_header;
        struct acpi_dmar_hardware_unit *drhd;

        dmar = (struct acpi_table_dmar *)dmar_tbl;
        entry_header = (struct acpi_dmar_header *)(dmar + 1);

        while (((unsigned long)entry_header) <
                        (((unsigned long)dmar) + dmar_tbl->length)) {
                /* Avoid looping forever on bad ACPI tables */
                if (entry_header->length == 0) {
                        pr_warn("Invalid 0-length structure\n");
                        return 0;
                }

                if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
                        void __iomem *addr;
                        u64 cap, ecap;

                        drhd = (void *)entry_header;
                        if (!drhd->address) {
                                warn_invalid_dmar(0, "");
                                goto failed;
                        }

                        addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
                        if (!addr) {
                                pr_warn("IOMMU: can't validate: %llx\n",
                                        drhd->address);
                                goto failed;
                        }
                        cap = dmar_readq(addr + DMAR_CAP_REG);
                        ecap = dmar_readq(addr + DMAR_ECAP_REG);
                        early_iounmap(addr, VTD_PAGE_SIZE);
                        if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
                                warn_invalid_dmar(drhd->address,
                                                  " returns all ones");
                                goto failed;
                        }
                }

                entry_header = ((void *)entry_header + entry_header->length);
        }
        return 1;

failed:
        return 0;
}

int __init detect_intel_iommu(void)
{
        int ret;

        down_write(&dmar_global_lock);
        ret = dmar_table_detect();
        if (ret)
                ret = check_zero_address();

        if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
                iommu_detected = 1;
                /* Make sure ACS will be enabled */
                pci_request_acs();
        }

#ifdef CONFIG_X86
        if (ret)
                x86_init.iommu.iommu_init = intel_iommu_init;
#endif

        early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
        dmar_tbl = NULL;
        up_write(&dmar_global_lock);

        return ret ? 1 : -ENODEV;
}

static void unmap_iommu(struct intel_iommu *iommu)
{
        iounmap(iommu->reg);
        release_mem_region(iommu->reg_phys, iommu->reg_size);
}

/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers.  Start w/ a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
        int map_size, err = 0;

        iommu->reg_phys = phys_addr;
        iommu->reg_size = VTD_PAGE_SIZE;

        if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
                pr_err("IOMMU: can't reserve memory\n");
                err = -EBUSY;
                goto out;
        }

        iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
        if (!iommu->reg) {
                pr_err("IOMMU: can't map the region\n");
                err = -ENOMEM;
                goto release;
        }

        iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
        iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

        if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
                err = -EINVAL;
                warn_invalid_dmar(phys_addr, " returns all ones");
                goto unmap;
        }

        /* the registers might be more than one page */
        map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
                         cap_max_fault_reg_offset(iommu->cap));
        map_size = VTD_PAGE_ALIGN(map_size);
        if (map_size > iommu->reg_size) {
                iounmap(iommu->reg);
                release_mem_region(iommu->reg_phys, iommu->reg_size);
                iommu->reg_size = map_size;
                if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
                                        iommu->name)) {
                        pr_err("IOMMU: can't reserve memory\n");
                        err = -EBUSY;
                        goto out;
                }
                iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
                if (!iommu->reg) {
                        pr_err("IOMMU: can't map the region\n");
                        err = -ENOMEM;
                        goto release;
                }
        }
        err = 0;
        goto out;

unmap:
        iounmap(iommu->reg);
release:
        release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
        return err;
}

static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
        struct intel_iommu *iommu;
        u32 ver, sts;
        static int iommu_allocated = 0;
        int agaw = 0;
        int msagaw = 0;
        int err;

        if (!drhd->reg_base_addr) {
                warn_invalid_dmar(0, "");
                return -EINVAL;
        }

        iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
        if (!iommu)
                return -ENOMEM;

        iommu->seq_id = iommu_allocated++;
        sprintf(iommu->name, "dmar%d", iommu->seq_id);

        err = map_iommu(iommu, drhd->reg_base_addr);
        if (err) {
                pr_err("IOMMU: failed to map %s\n", iommu->name);
                goto error;
        }

        err = -EINVAL;
        agaw = iommu_calculate_agaw(iommu);
        if (agaw < 0) {
                pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
                        iommu->seq_id);
                goto err_unmap;
        }
        msagaw = iommu_calculate_max_sagaw(iommu);
        if (msagaw < 0) {
                pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
                        iommu->seq_id);
                goto err_unmap;
        }
        iommu->agaw = agaw;
        iommu->msagaw = msagaw;
        iommu->segment = drhd->segment;

        iommu->node = -1;

        ver = readl(iommu->reg + DMAR_VER_REG);
        pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
                iommu->seq_id,
                (unsigned long long)drhd->reg_base_addr,
                DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
                (unsigned long long)iommu->cap,
                (unsigned long long)iommu->ecap);

        /* Reflect status in gcmd */
        sts = readl(iommu->reg + DMAR_GSTS_REG);
        if (sts & DMA_GSTS_IRES)
                iommu->gcmd |= DMA_GCMD_IRE;
        if (sts & DMA_GSTS_TES)
                iommu->gcmd |= DMA_GCMD_TE;
        if (sts & DMA_GSTS_QIES)
                iommu->gcmd |= DMA_GCMD_QIE;

        raw_spin_lock_init(&iommu->register_lock);

        drhd->iommu = iommu;
        return 0;

 err_unmap:
        unmap_iommu(iommu);
 error:
        kfree(iommu);
        return err;
}

static void free_iommu(struct intel_iommu *iommu)
{
        if (iommu->irq) {
                free_irq(iommu->irq, iommu);
                irq_set_handler_data(iommu->irq, NULL);
                dmar_free_hwirq(iommu->irq);
        }

        if (iommu->qi) {
                free_page((unsigned long)iommu->qi->desc);
                kfree(iommu->qi->desc_status);
                kfree(iommu->qi);
        }

        if (iommu->reg)
                unmap_iommu(iommu);

        kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
        while (qi->desc_status[qi->free_tail] == QI_DONE ||
               qi->desc_status[qi->free_tail] == QI_ABORT) {
                qi->desc_status[qi->free_tail] = QI_FREE;
                qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
                qi->free_cnt++;
        }
}

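/*
 * Inspect the fault status register for invalidation queue errors.
 * On an Invalidation Queue Error (IQE) the offending descriptor is
 * overwritten with the wait descriptor so the queue can make progress;
 * on an Invalidation Time-out Error (ITE) all pending wait descriptors
 * are marked QI_ABORT so their submitters retry.
 */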
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
        u32 fault;
        int head, tail;
        struct q_inval *qi = iommu->qi;
        int wait_index = (index + 1) % QI_LENGTH;

        if (qi->desc_status[wait_index] == QI_ABORT)
                return -EAGAIN;

        fault = readl(iommu->reg + DMAR_FSTS_REG);

        /*
         * If IQE happens, the head points to the descriptor associated
         * with the error. No new descriptors are fetched until the IQE
         * is cleared.
         */
        if (fault & DMA_FSTS_IQE) {
                head = readl(iommu->reg + DMAR_IQH_REG);
                if ((head >> DMAR_IQ_SHIFT) == index) {
                        pr_err("VT-d detected invalid descriptor: low=%llx, high=%llx\n",
                                (unsigned long long)qi->desc[index].low,
                                (unsigned long long)qi->desc[index].high);
                        memcpy(&qi->desc[index], &qi->desc[wait_index],
                                        sizeof(struct qi_desc));
                        __iommu_flush_cache(iommu, &qi->desc[index],
                                        sizeof(struct qi_desc));
                        writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
                        return -EINVAL;
                }
        }

        /*
         * If ITE happens, all pending wait_desc commands are aborted.
         * No new descriptors are fetched until the ITE is cleared.
         */
        if (fault & DMA_FSTS_ITE) {
                head = readl(iommu->reg + DMAR_IQH_REG);
                head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
                head |= 1;
                tail = readl(iommu->reg + DMAR_IQT_REG);
                tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

                writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

                do {
                        if (qi->desc_status[head] == QI_IN_USE)
                                qi->desc_status[head] = QI_ABORT;
                        head = (head - 2 + QI_LENGTH) % QI_LENGTH;
                } while (head != tail);

                if (qi->desc_status[wait_index] == QI_ABORT)
                        return -EAGAIN;
        }

        if (fault & DMA_FSTS_ICE)
                writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

        return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
        int rc;
        struct q_inval *qi = iommu->qi;
        struct qi_desc *hw, wait_desc;
        int wait_index, index;
        unsigned long flags;

        if (!qi)
                return 0;

        hw = qi->desc;

restart:
        rc = 0;

        raw_spin_lock_irqsave(&qi->q_lock, flags);
        while (qi->free_cnt < 3) {
                raw_spin_unlock_irqrestore(&qi->q_lock, flags);
                cpu_relax();
                raw_spin_lock_irqsave(&qi->q_lock, flags);
        }

        index = qi->free_head;
        wait_index = (index + 1) % QI_LENGTH;

        qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

        hw[index] = *desc;

        wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
                        QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
        wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

        hw[wait_index] = wait_desc;

        __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
        __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

        qi->free_head = (qi->free_head + 2) % QI_LENGTH;
        qi->free_cnt -= 2;

        /*
         * Update the HW tail register indicating the presence of
         * new descriptors.
         */
        writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

        while (qi->desc_status[wait_index] != QI_DONE) {
                /*
                 * We will leave the interrupts disabled, to prevent interrupt
                 * context from queueing another cmd while a cmd is already
                 * submitted and waiting for completion on this cpu. This is
                 * to avoid a deadlock where the interrupt context could wait
                 * indefinitely for free slots in the queue.
                 */
                rc = qi_check_fault(iommu, index);
                if (rc)
                        break;

                raw_spin_unlock(&qi->q_lock);
                cpu_relax();
                raw_spin_lock(&qi->q_lock);
        }

        qi->desc_status[index] = QI_DONE;

        reclaim_free_desc(qi);
        raw_spin_unlock_irqrestore(&qi->q_lock, flags);

        if (rc == -EAGAIN)
                goto restart;

        return rc;
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
        struct qi_desc desc;

        desc.low = QI_IEC_TYPE;
        desc.high = 0;

        /* should never fail */
        qi_submit_sync(&desc, iommu);
}

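/*
 * Queue a context-cache invalidation. @did/@sid/@fm select the domain,
 * source-id and function mask; @type picks the invalidation granularity.
 */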
void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
                      u64 type)
{
        struct qi_desc desc;

        desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
                        | QI_CC_GRAN(type) | QI_CC_TYPE;
        desc.high = 0;

        qi_submit_sync(&desc, iommu);
}

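/*
 * Queue an IOTLB invalidation for domain @did covering 2^size_order
 * pages at @addr, requesting read/write draining when the hardware
 * capability registers advertise support for it.
 */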
void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
                    unsigned int size_order, u64 type)
{
        u8 dw = 0, dr = 0;
        struct qi_desc desc;
        int ih = 0;

        if (cap_write_drain(iommu->cap))
                dw = 1;

        if (cap_read_drain(iommu->cap))
                dr = 1;

        desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
                | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
        desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
                | QI_IOTLB_AM(size_order);

        qi_submit_sync(&desc, iommu);
}

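/*
 * Queue a device-IOTLB (ATS) invalidation for source-id @sid with
 * invalidation queue depth @qdep. A non-zero @mask requests a
 * size-aligned range; the low address bits are filled with ones to
 * encode the range size in the descriptor.
 */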
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
                        u64 addr, unsigned mask)
{
        struct qi_desc desc;

        if (mask) {
                BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
                addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
                desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
        } else
                desc.high = QI_DEV_IOTLB_ADDR(addr);

        if (qdep >= QI_DEV_IOTLB_MAX_INVS)
                qdep = 0;

        desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
                   QI_DIOTLB_TYPE;

        qi_submit_sync(&desc, iommu);
}

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
        unsigned long flags;
        u32 sts;
        cycles_t start_time = get_cycles();

        if (!ecap_qis(iommu->ecap))
                return;

        raw_spin_lock_irqsave(&iommu->register_lock, flags);

        sts = readl(iommu->reg + DMAR_GSTS_REG);
        if (!(sts & DMA_GSTS_QIES))
                goto end;

        /*
         * Give a chance to HW to complete the pending invalidation requests.
         */
        while ((readl(iommu->reg + DMAR_IQT_REG) !=
                readl(iommu->reg + DMAR_IQH_REG)) &&
                (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
                cpu_relax();

        iommu->gcmd &= ~DMA_GCMD_QIE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
                      !(sts & DMA_GSTS_QIES), sts);
end:
        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
        u32 sts;
        unsigned long flags;
        struct q_inval *qi = iommu->qi;

        qi->free_head = qi->free_tail = 0;
        qi->free_cnt = QI_LENGTH;

        raw_spin_lock_irqsave(&iommu->register_lock, flags);

        /* write zero to the tail reg */
        writel(0, iommu->reg + DMAR_IQT_REG);

        dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

        iommu->gcmd |= DMA_GCMD_QIE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
        struct q_inval *qi;
        struct page *desc_page;

        if (!ecap_qis(iommu->ecap))
                return -ENOENT;

        /*
         * queued invalidation is already setup and enabled.
         */
        if (iommu->qi)
                return 0;

        iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
        if (!iommu->qi)
                return -ENOMEM;

        qi = iommu->qi;

        desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
        if (!desc_page) {
                kfree(qi);
                iommu->qi = NULL;
                return -ENOMEM;
        }

        qi->desc = page_address(desc_page);

        qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
        if (!qi->desc_status) {
                free_page((unsigned long) qi->desc);
                kfree(qi);
                iommu->qi = NULL;
                return -ENOMEM;
        }

        qi->free_head = qi->free_tail = 0;
        qi->free_cnt = QI_LENGTH;

        raw_spin_lock_init(&qi->q_lock);

        __dmar_enable_qi(iommu);

        return 0;
}

/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
        DMA_REMAP,
        INTR_REMAP,
        UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
        "Software",
        "Present bit in root entry is clear",
        "Present bit in context entry is clear",
        "Invalid context entry",
        "Access beyond MGAW",
        "PTE Write access is not set",
        "PTE Read access is not set",
        "Next page table ptr is invalid",
        "Root table address invalid",
        "Context table ptr is invalid",
        "non-zero reserved fields in RTP",
        "non-zero reserved fields in CTP",
        "non-zero reserved fields in PTE",
        "PCE for translation request specifies blocking",
};

static const char *irq_remap_fault_reasons[] =
{
        "Detected reserved fields in the decoded interrupt-remapped request",
        "Interrupt index exceeded the interrupt-remapping table size",
        "Present field in the IRTE entry is clear",
        "Error accessing interrupt-remapping table pointed by IRTA_REG",
        "Detected reserved fields in the IRTE entry",
        "Blocked a compatibility format interrupt request",
        "Blocked an interrupt request due to source-id verification failure",
};

static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
        if (fault_reason >= 0x20 && (fault_reason - 0x20 <
                                        ARRAY_SIZE(irq_remap_fault_reasons))) {
                *fault_type = INTR_REMAP;
                return irq_remap_fault_reasons[fault_reason - 0x20];
        } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
                *fault_type = DMA_REMAP;
                return dma_remap_fault_reasons[fault_reason];
        } else {
                *fault_type = UNKNOWN;
                return "Unknown";
        }
}

void dmar_msi_unmask(struct irq_data *data)
{
        struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
        unsigned long flag;

        /* unmask it */
        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        writel(0, iommu->reg + DMAR_FECTL_REG);
        /* Read a reg to force flush the post write */
        readl(iommu->reg + DMAR_FECTL_REG);
        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(struct irq_data *data)
{
        unsigned long flag;
        struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);

        /* mask it */
        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
        /* Read a reg to force flush the post write */
        readl(iommu->reg + DMAR_FECTL_REG);
        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
        struct intel_iommu *iommu = irq_get_handler_data(irq);
        unsigned long flag;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
        writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
        writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
        struct intel_iommu *iommu = irq_get_handler_data(irq);
        unsigned long flag;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
        msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
        msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

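/*
 * Decode a single primary fault record and log it, distinguishing
 * interrupt-remapping faults (where the address field carries the
 * fault index) from DMA-remapping faults.
 */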
static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
                u8 fault_reason, u16 source_id, unsigned long long addr)
{
        const char *reason;
        int fault_type;

        reason = dmar_get_fault_reason(fault_reason, &fault_type);

        if (fault_type == INTR_REMAP)
                pr_err("INTR-REMAP: Request device [%02x:%02x.%d] "
                       "fault index %llx\n"
                        "INTR-REMAP:[fault reason %02d] %s\n",
                        (source_id >> 8), PCI_SLOT(source_id & 0xFF),
                        PCI_FUNC(source_id & 0xFF), addr >> 48,
                        fault_reason, reason);
        else
                pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
                       "fault addr %llx\n"
                       "DMAR:[fault reason %02d] %s\n",
                       (type ? "DMA Read" : "DMA Write"),
                       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
                       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
        return 0;
}

#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
        struct intel_iommu *iommu = dev_id;
        int reg, fault_index;
        u32 fault_status;
        unsigned long flag;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        fault_status = readl(iommu->reg + DMAR_FSTS_REG);
        if (fault_status)
                pr_err("DRHD: handling fault status reg %x\n", fault_status);

        /* TBD: ignore advanced fault log currently */
        if (!(fault_status & DMA_FSTS_PPF))
                goto unlock_exit;

        fault_index = dma_fsts_fault_record_index(fault_status);
        reg = cap_fault_reg_offset(iommu->cap);
        while (1) {
                u8 fault_reason;
                u16 source_id;
                u64 guest_addr;
                int type;
                u32 data;

                /* highest 32 bits */
                data = readl(iommu->reg + reg +
                                fault_index * PRIMARY_FAULT_REG_LEN + 12);
                if (!(data & DMA_FRCD_F))
                        break;

                fault_reason = dma_frcd_fault_reason(data);
                type = dma_frcd_type(data);

                data = readl(iommu->reg + reg +
                                fault_index * PRIMARY_FAULT_REG_LEN + 8);
                source_id = dma_frcd_source_id(data);

                guest_addr = dmar_readq(iommu->reg + reg +
                                fault_index * PRIMARY_FAULT_REG_LEN);
                guest_addr = dma_frcd_page_addr(guest_addr);
                /* clear the fault */
                writel(DMA_FRCD_F, iommu->reg + reg +
                        fault_index * PRIMARY_FAULT_REG_LEN + 12);

                raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

                dmar_fault_do_one(iommu, type, fault_reason,
                                source_id, guest_addr);

                fault_index++;
                if (fault_index >= cap_num_fault_regs(iommu->cap))
                        fault_index = 0;
                raw_spin_lock_irqsave(&iommu->register_lock, flag);
        }

        writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);

unlock_exit:
        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
        return IRQ_HANDLED;
}

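/*
 * Allocate and wire up the fault-reporting MSI for an IOMMU: reserve a
 * hardware IRQ, program the MSI registers via arch_setup_dmar_msi(),
 * then install dmar_fault() as the handler.
 */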
int dmar_set_interrupt(struct intel_iommu *iommu)
{
        int irq, ret;

        /*
         * Check if the fault interrupt is already initialized.
         */
        if (iommu->irq)
                return 0;

        irq = dmar_alloc_hwirq();
        if (irq <= 0) {
                pr_err("IOMMU: no free vectors\n");
                return -EINVAL;
        }

        irq_set_handler_data(irq, iommu);
        iommu->irq = irq;

        ret = arch_setup_dmar_msi(irq);
        if (ret) {
                irq_set_handler_data(irq, NULL);
                iommu->irq = 0;
                dmar_free_hwirq(irq);
                return ret;
        }

        ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
        if (ret)
                pr_err("IOMMU: can't request irq\n");
        return ret;
}

int __init enable_drhd_fault_handling(void)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;

        /*
         * Enable fault control interrupt.
         */
        for_each_iommu(iommu, drhd) {
                u32 fault_status;
                int ret = dmar_set_interrupt(iommu);

                if (ret) {
                        pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
                               (unsigned long long)drhd->reg_base_addr, ret);
                        return -1;
                }

                /*
                 * Clear any previous faults.
                 */
                dmar_fault(iommu->irq, iommu);
                fault_status = readl(iommu->reg + DMAR_FSTS_REG);
                writel(fault_status, iommu->reg + DMAR_FSTS_REG);
        }

        return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
        if (!ecap_qis(iommu->ecap))
                return -ENOENT;

        if (!iommu->qi)
                return -ENOENT;

        /*
         * First disable queued invalidation.
         */
        dmar_disable_qi(iommu);
        /*
         * Then enable queued invalidation again. Since there are no
         * pending invalidation requests now, it's safe to re-enable
         * queued invalidation.
         */
        __dmar_enable_qi(iommu);

        return 0;
}

/*
 * Check interrupt remapping support in the DMAR table description.
 */
int __init dmar_ir_support(void)
{
        struct acpi_table_dmar *dmar;

        dmar = (struct acpi_table_dmar *)dmar_tbl;
        if (!dmar)
                return 0;
        return dmar->flags & 0x1;
}

static int __init dmar_free_unused_resources(void)
{
        struct dmar_drhd_unit *dmaru, *dmaru_n;

        /* DMAR units are in use */
        if (irq_remapping_enabled || intel_iommu_enabled)
                return 0;

        if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
                bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);

        down_write(&dmar_global_lock);
        list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
                list_del(&dmaru->list);
                dmar_free_drhd(dmaru);
        }
        up_write(&dmar_global_lock);

        return 0;
}

late_initcall(dmar_free_unused_resources);
IOMMU_INIT_POST(detect_intel_iommu);