/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/gfp.h>
#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
#include <linux/pci.h>

#include <asm/cacheflush.h>

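/*
 * Non-zero when SWIOTLB bouncing may actually be needed, i.e. when it was
 * forced on the command line or when some memory lies beyond the DMA
 * addressing limit (see arm64_dma_init() below).
 */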
static int swiotlb __ro_after_init;

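/*
 * Pick the page protection for a DMA buffer mapping: non-coherent devices
 * (and any DMA_ATTR_WRITE_COMBINE request) get a write-combine, i.e.
 * Normal non-cacheable, mapping; coherent devices keep the cacheable
 * protection they were given.
 */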
static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
                                 bool coherent)
{
        if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE))
                return pgprot_writecombine(prot);
        return prot;
}

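/*
 * Small pool of pre-allocated, already non-cacheable memory used to satisfy
 * allocations from atomic context, where a new non-cacheable remapping
 * cannot be created.  The default size is 256 KiB and can be overridden
 * with the "coherent_pool=" kernel parameter.
 */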
static struct gen_pool *atomic_pool;

#define DEFAULT_DMA_COHERENT_POOL_SIZE  SZ_256K
static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
        atomic_pool_size = memparse(p, &p);
        return 0;
}
early_param("coherent_pool", early_coherent_pool);

static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
{
        unsigned long val;
        void *ptr = NULL;

        if (!atomic_pool) {
                WARN(1, "coherent pool not initialised!\n");
                return NULL;
        }

        val = gen_pool_alloc(atomic_pool, size);
        if (val) {
                phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

                *ret_page = phys_to_page(phys);
                ptr = (void *)val;
                memset(ptr, 0, size);
        }

        return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
        return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
        if (!__in_atomic_pool(start, size))
                return 0;

        gen_pool_free(atomic_pool, (unsigned long)start, size);

        return 1;
}

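/*
 * Allocate/free a physically contiguous buffer suitable for coherent DMA:
 * from CMA when a contiguous area is available and the caller may block,
 * otherwise via swiotlb_alloc_coherent().  GFP_DMA is added for devices
 * whose coherent mask does not cover all of memory (when ZONE_DMA exists).
 */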
static void *__dma_alloc_coherent(struct device *dev, size_t size,
                                  dma_addr_t *dma_handle, gfp_t flags,
                                  unsigned long attrs)
{
        if (dev == NULL) {
                WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
                return NULL;
        }

        if (IS_ENABLED(CONFIG_ZONE_DMA) &&
            dev->coherent_dma_mask <= DMA_BIT_MASK(32))
                flags |= GFP_DMA;
        if (dev_get_cma_area(dev) && gfpflags_allow_blocking(flags)) {
                struct page *page;
                void *addr;

                page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
                                                 get_order(size), flags);
                if (!page)
                        return NULL;

                *dma_handle = phys_to_dma(dev, page_to_phys(page));
                addr = page_address(page);
                memset(addr, 0, size);
                return addr;
        } else {
                return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
        }
}

static void __dma_free_coherent(struct device *dev, size_t size,
                                void *vaddr, dma_addr_t dma_handle,
                                unsigned long attrs)
{
        bool freed;
        phys_addr_t paddr = dma_to_phys(dev, dma_handle);

        if (dev == NULL) {
                WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
                return;
        }

        freed = dma_release_from_contiguous(dev,
                                        phys_to_page(paddr),
                                        size >> PAGE_SHIFT);
        if (!freed)
                swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}

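/*
 * .alloc/.free hooks of the swiotlb DMA ops.  Coherent devices simply use
 * the linear-map address of the buffer.  Non-coherent devices either take
 * an allocation from the atomic pool (when blocking is not allowed) or get
 * the buffer remapped with a second, non-cacheable kernel mapping.
 */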
static void *__dma_alloc(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t flags,
                         unsigned long attrs)
{
        struct page *page;
        void *ptr, *coherent_ptr;
        bool coherent = is_device_dma_coherent(dev);
        pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);

        size = PAGE_ALIGN(size);

        if (!coherent && !gfpflags_allow_blocking(flags)) {
                struct page *page = NULL;
                void *addr = __alloc_from_pool(size, &page, flags);

                if (addr)
                        *dma_handle = phys_to_dma(dev, page_to_phys(page));

                return addr;
        }

        ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
        if (!ptr)
                goto no_mem;

        /* no need for non-cacheable mapping if coherent */
        if (coherent)
                return ptr;

        /* remove any dirty cache lines on the kernel alias */
        __dma_flush_area(ptr, size);

        /* create a coherent mapping */
        page = virt_to_page(ptr);
        coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
                                                   prot, NULL);
        if (!coherent_ptr)
                goto no_map;

        return coherent_ptr;

no_map:
        __dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
no_mem:
        *dma_handle = DMA_ERROR_CODE;
        return NULL;
}

static void __dma_free(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle,
                       unsigned long attrs)
{
        void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));

        size = PAGE_ALIGN(size);

        if (!is_device_dma_coherent(dev)) {
                if (__free_from_pool(vaddr, size))
                        return;
                vunmap(vaddr);
        }
        __dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
}

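/*
 * Streaming DMA operations: swiotlb takes care of any bouncing, after which
 * cache maintenance is applied for non-coherent devices unless the caller
 * asked to skip it with DMA_ATTR_SKIP_CPU_SYNC.
 */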
static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
                                     unsigned long offset, size_t size,
                                     enum dma_data_direction dir,
                                     unsigned long attrs)
{
        dma_addr_t dev_addr;

        dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
        if (!is_device_dma_coherent(dev) &&
            (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);

        return dev_addr;
}

static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
                                 size_t size, enum dma_data_direction dir,
                                 unsigned long attrs)
{
        if (!is_device_dma_coherent(dev) &&
            (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
        swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}

static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
                                  int nelems, enum dma_data_direction dir,
                                  unsigned long attrs)
{
        struct scatterlist *sg;
        int i, ret;

        ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
        if (!is_device_dma_coherent(dev) &&
            (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                for_each_sg(sgl, sg, ret, i)
                        __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                       sg->length, dir);

        return ret;
}

static void __swiotlb_unmap_sg_attrs(struct device *dev,
                                     struct scatterlist *sgl, int nelems,
                                     enum dma_data_direction dir,
                                     unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        if (!is_device_dma_coherent(dev) &&
            (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                for_each_sg(sgl, sg, nelems, i)
                        __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                         sg->length, dir);
        swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}

static void __swiotlb_sync_single_for_cpu(struct device *dev,
                                          dma_addr_t dev_addr, size_t size,
                                          enum dma_data_direction dir)
{
        if (!is_device_dma_coherent(dev))
                __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
        swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}

static void __swiotlb_sync_single_for_device(struct device *dev,
                                             dma_addr_t dev_addr, size_t size,
                                             enum dma_data_direction dir)
{
        swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
        if (!is_device_dma_coherent(dev))
                __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}

static void __swiotlb_sync_sg_for_cpu(struct device *dev,
                                      struct scatterlist *sgl, int nelems,
                                      enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (!is_device_dma_coherent(dev))
                for_each_sg(sgl, sg, nelems, i)
                        __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                         sg->length, dir);
        swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}

static void __swiotlb_sync_sg_for_device(struct device *dev,
                                         struct scatterlist *sgl, int nelems,
                                         enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
        if (!is_device_dma_coherent(dev))
                for_each_sg(sgl, sg, nelems, i)
                        __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                       sg->length, dir);
}

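/*
 * Map a physically contiguous DMA buffer into a userspace VMA, checking
 * that the requested offset and VMA length fit within the buffer.
 */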
static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
                              unsigned long pfn, size_t size)
{
        int ret = -ENXIO;
        unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
                                        PAGE_SHIFT;
        unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long off = vma->vm_pgoff;

        if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
                ret = remap_pfn_range(vma, vma->vm_start,
                                      pfn + off,
                                      vma->vm_end - vma->vm_start,
                                      vma->vm_page_prot);
        }

        return ret;
}

static int __swiotlb_mmap(struct device *dev,
                          struct vm_area_struct *vma,
                          void *cpu_addr, dma_addr_t dma_addr, size_t size,
                          unsigned long attrs)
{
        int ret;
        unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;

        vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
                                             is_device_dma_coherent(dev));

        if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;

        return __swiotlb_mmap_pfn(vma, pfn, size);
}

static int __swiotlb_get_sgtable_page(struct sg_table *sgt,
                                      struct page *page, size_t size)
{
        int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);

        if (!ret)
                sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);

        return ret;
}

static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
                                 void *cpu_addr, dma_addr_t handle, size_t size,
                                 unsigned long attrs)
{
        struct page *page = phys_to_page(dma_to_phys(dev, handle));

        return __swiotlb_get_sgtable_page(sgt, page, size);
}

static int __swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
        if (swiotlb)
                return swiotlb_dma_supported(hwdev, mask);
        return 1;
}

static int __swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t addr)
{
        if (swiotlb)
                return swiotlb_dma_mapping_error(hwdev, addr);
        return 0;
}

static const struct dma_map_ops swiotlb_dma_ops = {
        .alloc = __dma_alloc,
        .free = __dma_free,
        .mmap = __swiotlb_mmap,
        .get_sgtable = __swiotlb_get_sgtable,
        .map_page = __swiotlb_map_page,
        .unmap_page = __swiotlb_unmap_page,
        .map_sg = __swiotlb_map_sg_attrs,
        .unmap_sg = __swiotlb_unmap_sg_attrs,
        .sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
        .sync_single_for_device = __swiotlb_sync_single_for_device,
        .sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
        .sync_sg_for_device = __swiotlb_sync_sg_for_device,
        .dma_supported = __swiotlb_dma_supported,
        .mapping_error = __swiotlb_dma_mapping_error,
};

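/*
 * Set up the atomic pool at boot: the backing pages come from CMA when a
 * contiguous area exists, otherwise from ZONE_DMA, and are remapped with
 * non-cacheable attributes before being handed to the gen_pool allocator.
 */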
static int __init atomic_pool_init(void)
{
        pgprot_t prot = __pgprot(PROT_NORMAL_NC);
        unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
        struct page *page;
        void *addr;
        unsigned int pool_size_order = get_order(atomic_pool_size);

        if (dev_get_cma_area(NULL))
                page = dma_alloc_from_contiguous(NULL, nr_pages,
                                                 pool_size_order, GFP_KERNEL);
        else
                page = alloc_pages(GFP_DMA, pool_size_order);

        if (page) {
                int ret;
                void *page_addr = page_address(page);

                memset(page_addr, 0, atomic_pool_size);
                __dma_flush_area(page_addr, atomic_pool_size);

                atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
                if (!atomic_pool)
                        goto free_page;

                addr = dma_common_contiguous_remap(page, atomic_pool_size,
                                        VM_USERMAP, prot, atomic_pool_init);

                if (!addr)
                        goto destroy_genpool;

                ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
                                        page_to_phys(page),
                                        atomic_pool_size, -1);
                if (ret)
                        goto remove_mapping;

                gen_pool_set_algo(atomic_pool,
                                  gen_pool_first_fit_order_align,
                                  (void *)PAGE_SHIFT);

                pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
                        atomic_pool_size / 1024);
                return 0;
        }
        goto out;

remove_mapping:
        dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
destroy_genpool:
        gen_pool_destroy(atomic_pool);
        atomic_pool = NULL;
free_page:
        if (!dma_release_from_contiguous(NULL, page, nr_pages))
                __free_pages(page, pool_size_order);
out:
        pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
                atomic_pool_size / 1024);
        return -ENOMEM;
}

/********************************************
 * The following APIs are for dummy DMA ops *
 ********************************************/
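/*
 * These ops fail every request.  They are the fallback for masters whose
 * DMA was never configured through arch_setup_dma_ops() (for instance
 * because firmware reports that the device cannot do DMA), so that stray
 * mapping attempts fail cleanly rather than silently corrupting memory.
 */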

static void *__dummy_alloc(struct device *dev, size_t size,
                           dma_addr_t *dma_handle, gfp_t flags,
                           unsigned long attrs)
{
        return NULL;
}

static void __dummy_free(struct device *dev, size_t size,
                         void *vaddr, dma_addr_t dma_handle,
                         unsigned long attrs)
{
}

static int __dummy_mmap(struct device *dev,
                        struct vm_area_struct *vma,
                        void *cpu_addr, dma_addr_t dma_addr, size_t size,
                        unsigned long attrs)
{
        return -ENXIO;
}

static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction dir,
                                   unsigned long attrs)
{
        return DMA_ERROR_CODE;
}

static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
                               size_t size, enum dma_data_direction dir,
                               unsigned long attrs)
{
}

static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
                          int nelems, enum dma_data_direction dir,
                          unsigned long attrs)
{
        return 0;
}

static void __dummy_unmap_sg(struct device *dev,
                             struct scatterlist *sgl, int nelems,
                             enum dma_data_direction dir,
                             unsigned long attrs)
{
}

static void __dummy_sync_single(struct device *dev,
                                dma_addr_t dev_addr, size_t size,
                                enum dma_data_direction dir)
{
}

static void __dummy_sync_sg(struct device *dev,
                            struct scatterlist *sgl, int nelems,
                            enum dma_data_direction dir)
{
}

static int __dummy_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
        return 1;
}

static int __dummy_dma_supported(struct device *hwdev, u64 mask)
{
        return 0;
}

const struct dma_map_ops dummy_dma_ops = {
        .alloc                  = __dummy_alloc,
        .free                   = __dummy_free,
        .mmap                   = __dummy_mmap,
        .map_page               = __dummy_map_page,
        .unmap_page             = __dummy_unmap_page,
        .map_sg                 = __dummy_map_sg,
        .unmap_sg               = __dummy_unmap_sg,
        .sync_single_for_cpu    = __dummy_sync_single,
        .sync_single_for_device = __dummy_sync_single,
        .sync_sg_for_cpu        = __dummy_sync_sg,
        .sync_sg_for_device     = __dummy_sync_sg,
        .mapping_error          = __dummy_mapping_error,
        .dma_supported          = __dummy_dma_supported,
};
EXPORT_SYMBOL(dummy_dma_ops);

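/*
 * Decide at boot whether SWIOTLB bouncing may be needed (either forced on
 * the command line or because RAM extends beyond what 32-bit-only devices
 * can address) and carve out the atomic pool.
 */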
static int __init arm64_dma_init(void)
{
        if (swiotlb_force == SWIOTLB_FORCE ||
            max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
                swiotlb = 1;

        return atomic_pool_init();
}
arch_initcall(arm64_dma_init);

#define PREALLOC_DMA_DEBUG_ENTRIES      4096

static int __init dma_debug_do_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
        return 0;
}
fs_initcall(dma_debug_do_init);

#ifdef CONFIG_IOMMU_DMA
#include <linux/dma-iommu.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>

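/*
 * IOMMU-backed DMA ops: devices that sit behind an IOMMU and end up in a
 * default DMA domain have their device addresses allocated and mapped
 * through the generic dma-iommu layer instead of going through swiotlb.
 */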
/* Thankfully, all cache ops are by VA so we can ignore phys here */
static void flush_page(struct device *dev, const void *virt, phys_addr_t phys)
{
        __dma_flush_area(virt, PAGE_SIZE);
}

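/*
 * Three allocation strategies, mirrored by __iommu_free_attrs() below:
 * a physically contiguous buffer from the atomic pool or alloc_pages()
 * when blocking is not allowed, a CMA buffer for DMA_ATTR_FORCE_CONTIGUOUS,
 * and otherwise an array of pages from iommu_dma_alloc() remapped into a
 * contiguous kernel VA range.
 */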
static void *__iommu_alloc_attrs(struct device *dev, size_t size,
                                 dma_addr_t *handle, gfp_t gfp,
                                 unsigned long attrs)
{
        bool coherent = is_device_dma_coherent(dev);
        int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
        size_t iosize = size;
        void *addr;

        if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
                return NULL;

        size = PAGE_ALIGN(size);

        /*
         * Some drivers rely on this, and we probably don't want the
         * possibility of stale kernel data being read by devices anyway.
         */
        gfp |= __GFP_ZERO;

        if (!gfpflags_allow_blocking(gfp)) {
                struct page *page;
                /*
                 * In atomic context we can't remap anything, so we'll only
                 * get the virtually contiguous buffer we need by way of a
                 * physically contiguous allocation.
                 */
                if (coherent) {
                        page = alloc_pages(gfp, get_order(size));
                        addr = page ? page_address(page) : NULL;
                } else {
                        addr = __alloc_from_pool(size, &page, gfp);
                }
                if (!addr)
                        return NULL;

                *handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
                if (iommu_dma_mapping_error(dev, *handle)) {
                        if (coherent)
                                __free_pages(page, get_order(size));
                        else
                                __free_from_pool(addr, size);
                        addr = NULL;
                }
        } else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
                pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
                struct page *page;

                page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
                                                 get_order(size), gfp);
                if (!page)
                        return NULL;

                *handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
                if (iommu_dma_mapping_error(dev, *handle)) {
                        dma_release_from_contiguous(dev, page,
                                                    size >> PAGE_SHIFT);
                        return NULL;
                }
                if (!coherent)
                        __dma_flush_area(page_to_virt(page), iosize);

                addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
                                                   prot,
                                                   __builtin_return_address(0));
                if (!addr) {
                        iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
                        dma_release_from_contiguous(dev, page,
                                                    size >> PAGE_SHIFT);
                }
        } else {
                pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
                struct page **pages;

                pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
                                        handle, flush_page);
                if (!pages)
                        return NULL;

                addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
                                              __builtin_return_address(0));
                if (!addr)
                        iommu_dma_free(dev, pages, iosize, handle);
        }
        return addr;
}

static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
                               dma_addr_t handle, unsigned long attrs)
{
        size_t iosize = size;

        size = PAGE_ALIGN(size);
        /*
         * @cpu_addr will be one of 4 things depending on how it was allocated:
         * - A remapped array of pages for contiguous allocations.
         * - A remapped array of pages from iommu_dma_alloc(), for all
         *   non-atomic allocations.
         * - A non-cacheable alias from the atomic pool, for atomic
         *   allocations by non-coherent devices.
         * - A normal lowmem address, for atomic allocations by
         *   coherent devices.
         * Hence how dodgy the below logic looks...
         */
        if (__in_atomic_pool(cpu_addr, size)) {
                iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
                __free_from_pool(cpu_addr, size);
        } else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
                struct page *page = vmalloc_to_page(cpu_addr);

                iommu_dma_unmap_page(dev, handle, iosize, 0, attrs);
                dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
                dma_common_free_remap(cpu_addr, size, VM_USERMAP);
        } else if (is_vmalloc_addr(cpu_addr)) {
                struct vm_struct *area = find_vm_area(cpu_addr);

                if (WARN_ON(!area || !area->pages))
                        return;
                iommu_dma_free(dev, area->pages, iosize, &handle);
                dma_common_free_remap(cpu_addr, size, VM_USERMAP);
        } else {
                iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
                __free_pages(virt_to_page(cpu_addr), get_order(size));
        }
}

static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
                              void *cpu_addr, dma_addr_t dma_addr, size_t size,
                              unsigned long attrs)
{
        struct vm_struct *area;
        int ret;

        vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
                                             is_device_dma_coherent(dev));

        if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;

        if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
                /*
                 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
                 * hence in the vmalloc space.
                 */
                unsigned long pfn = vmalloc_to_pfn(cpu_addr);
                return __swiotlb_mmap_pfn(vma, pfn, size);
        }

        area = find_vm_area(cpu_addr);
        if (WARN_ON(!area || !area->pages))
                return -ENXIO;

        return iommu_dma_mmap(area->pages, size, vma);
}

static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
                               void *cpu_addr, dma_addr_t dma_addr,
                               size_t size, unsigned long attrs)
{
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct vm_struct *area = find_vm_area(cpu_addr);

        if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
                /*
                 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
                 * hence in the vmalloc space.
                 */
                struct page *page = vmalloc_to_page(cpu_addr);
                return __swiotlb_get_sgtable_page(sgt, page, size);
        }

        if (WARN_ON(!area || !area->pages))
                return -ENXIO;

        return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
                                         GFP_KERNEL);
}

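/*
 * Cache maintenance for non-coherent devices behind an IOMMU: the device
 * address is translated back to a physical address with
 * iommu_iova_to_phys() before the CPU caches are cleaned or invalidated.
 */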
static void __iommu_sync_single_for_cpu(struct device *dev,
                                        dma_addr_t dev_addr, size_t size,
                                        enum dma_data_direction dir)
{
        phys_addr_t phys;

        if (is_device_dma_coherent(dev))
                return;

        phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
        __dma_unmap_area(phys_to_virt(phys), size, dir);
}

static void __iommu_sync_single_for_device(struct device *dev,
                                           dma_addr_t dev_addr, size_t size,
                                           enum dma_data_direction dir)
{
        phys_addr_t phys;

        if (is_device_dma_coherent(dev))
                return;

        phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
        __dma_map_area(phys_to_virt(phys), size, dir);
}

static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction dir,
                                   unsigned long attrs)
{
        bool coherent = is_device_dma_coherent(dev);
        int prot = dma_info_to_prot(dir, coherent, attrs);
        dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);

        if (!iommu_dma_mapping_error(dev, dev_addr) &&
            (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __iommu_sync_single_for_device(dev, dev_addr, size, dir);

        return dev_addr;
}

static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr,
                               size_t size, enum dma_data_direction dir,
                               unsigned long attrs)
{
        if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __iommu_sync_single_for_cpu(dev, dev_addr, size, dir);

        iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs);
}

static void __iommu_sync_sg_for_cpu(struct device *dev,
                                    struct scatterlist *sgl, int nelems,
                                    enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (is_device_dma_coherent(dev))
                return;

        for_each_sg(sgl, sg, nelems, i)
                __dma_unmap_area(sg_virt(sg), sg->length, dir);
}

static void __iommu_sync_sg_for_device(struct device *dev,
                                       struct scatterlist *sgl, int nelems,
                                       enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (is_device_dma_coherent(dev))
                return;

        for_each_sg(sgl, sg, nelems, i)
                __dma_map_area(sg_virt(sg), sg->length, dir);
}

static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
                                int nelems, enum dma_data_direction dir,
                                unsigned long attrs)
{
        bool coherent = is_device_dma_coherent(dev);

        if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __iommu_sync_sg_for_device(dev, sgl, nelems, dir);

        return iommu_dma_map_sg(dev, sgl, nelems,
                                dma_info_to_prot(dir, coherent, attrs));
}

static void __iommu_unmap_sg_attrs(struct device *dev,
                                   struct scatterlist *sgl, int nelems,
                                   enum dma_data_direction dir,
                                   unsigned long attrs)
{
        if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __iommu_sync_sg_for_cpu(dev, sgl, nelems, dir);

        iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
}

static const struct dma_map_ops iommu_dma_ops = {
        .alloc = __iommu_alloc_attrs,
        .free = __iommu_free_attrs,
        .mmap = __iommu_mmap_attrs,
        .get_sgtable = __iommu_get_sgtable,
        .map_page = __iommu_map_page,
        .unmap_page = __iommu_unmap_page,
        .map_sg = __iommu_map_sg_attrs,
        .unmap_sg = __iommu_unmap_sg_attrs,
        .sync_single_for_cpu = __iommu_sync_single_for_cpu,
        .sync_single_for_device = __iommu_sync_single_for_device,
        .sync_sg_for_cpu = __iommu_sync_sg_for_cpu,
        .sync_sg_for_device = __iommu_sync_sg_for_device,
        .map_resource = iommu_dma_map_resource,
        .unmap_resource = iommu_dma_unmap_resource,
        .mapping_error = iommu_dma_mapping_error,
};

static int __init __iommu_dma_init(void)
{
        return iommu_dma_init();
}
arch_initcall(__iommu_dma_init);

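/*
 * Install iommu_dma_ops on a device if, and only if, the IOMMU core placed
 * it in a default DMA domain; otherwise the device keeps the platform
 * (swiotlb) DMA ops set up in arch_setup_dma_ops().
 */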
static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                                  const struct iommu_ops *ops)
{
        struct iommu_domain *domain;

        if (!ops)
                return;

        /*
         * The IOMMU core code allocates the default DMA domain, which the
         * underlying IOMMU driver needs to support via the dma-iommu layer.
         */
        domain = iommu_get_domain_for_dev(dev);

        if (!domain)
                goto out_err;

        if (domain->type == IOMMU_DOMAIN_DMA) {
                if (iommu_dma_init_domain(domain, dma_base, size, dev))
                        goto out_err;

                dev->dma_ops = &iommu_dma_ops;
        }

        return;

out_err:
        pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
                dev_name(dev));
}

void arch_teardown_dma_ops(struct device *dev)
{
        dev->dma_ops = NULL;
}

#else

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                                  const struct iommu_ops *iommu)
{ }

#endif  /* CONFIG_IOMMU_DMA */

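/*
 * Final hook that installs a device's DMA ops once its DMA configuration
 * is known: swiotlb ops by default, possibly replaced by the IOMMU ops
 * above and, in a Xen initial domain, wrapped by the Xen DMA ops.
 */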
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                        const struct iommu_ops *iommu, bool coherent)
{
        if (!dev->dma_ops)
                dev->dma_ops = &swiotlb_dma_ops;

        dev->archdata.dma_coherent = coherent;
        __iommu_setup_dma_ops(dev, dma_base, size, iommu);

#ifdef CONFIG_XEN
        if (xen_initial_domain()) {
                dev->archdata.dev_dma_ops = dev->dma_ops;
                dev->dma_ops = xen_dma_ops;
        }
#endif
}