/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <asm/cacheflush.h>

struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

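/*
 * Select the page protection for a DMA mapping: non-coherent devices and
 * DMA_ATTR_WRITE_COMBINE mappings get write-combine (Normal Non-cacheable)
 * attributes, while fully coherent devices keep the caller's protection
 * unchanged.
 */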
static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
                                 bool coherent)
{
        if (!coherent || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
                return pgprot_writecombine(prot);
        return prot;
}

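/*
 * Allocate a buffer suitable for coherent DMA. The memory comes from CMA
 * when CONFIG_DMA_CMA is enabled, otherwise from the swiotlb pool, and is
 * returned via its cacheable linear-map address.
 */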
static void *__dma_alloc_coherent(struct device *dev, size_t size,
                                  dma_addr_t *dma_handle, gfp_t flags,
                                  struct dma_attrs *attrs)
{
        if (dev == NULL) {
                WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
                return NULL;
        }

        if (IS_ENABLED(CONFIG_ZONE_DMA) &&
            dev->coherent_dma_mask <= DMA_BIT_MASK(32))
                flags |= GFP_DMA;
        if (IS_ENABLED(CONFIG_DMA_CMA)) {
                struct page *page;

                size = PAGE_ALIGN(size);
                page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
                                                        get_order(size));
                if (!page)
                        return NULL;

                *dma_handle = phys_to_dma(dev, page_to_phys(page));
                return page_address(page);
        } else {
                return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
        }
}

static void __dma_free_coherent(struct device *dev, size_t size,
                                void *vaddr, dma_addr_t dma_handle,
                                struct dma_attrs *attrs)
{
        if (dev == NULL) {
                WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
                return;
        }

        if (IS_ENABLED(CONFIG_DMA_CMA)) {
                phys_addr_t paddr = dma_to_phys(dev, dma_handle);

                dma_release_from_contiguous(dev,
                                        phys_to_page(paddr),
                                        size >> PAGE_SHIFT);
        } else {
                swiotlb_free_coherent(dev, size, vaddr, dma_handle);
        }
}

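/*
 * For non-coherent devices the buffer needs an uncached CPU alias. The
 * sequence is: allocate pages as in the coherent case, flush any dirty
 * lines from the (cacheable) kernel linear alias, then vmap() the same
 * pages with write-combine attributes and hand that alias back to the
 * caller.
 */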
static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
                                     dma_addr_t *dma_handle, gfp_t flags,
                                     struct dma_attrs *attrs)
{
        struct page *page, **map;
        void *ptr, *coherent_ptr;
        int order, i;

        size = PAGE_ALIGN(size);
        order = get_order(size);

        ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
        if (!ptr)
                goto no_mem;
        map = kmalloc(sizeof(struct page *) << order, flags & ~GFP_DMA);
        if (!map)
                goto no_map;

        /* remove any dirty cache lines on the kernel alias */
        __dma_flush_range(ptr, ptr + size);

        /* create a coherent mapping */
        page = virt_to_page(ptr);
        for (i = 0; i < (size >> PAGE_SHIFT); i++)
                map[i] = page + i;
        coherent_ptr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
                            __get_dma_pgprot(attrs, pgprot_default, false));
        kfree(map);
        if (!coherent_ptr)
                goto no_map;

        return coherent_ptr;

no_map:
        __dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
no_mem:
        *dma_handle = ~0;
        return NULL;
}

static void __dma_free_noncoherent(struct device *dev, size_t size,
                                   void *vaddr, dma_addr_t dma_handle,
                                   struct dma_attrs *attrs)
{
        void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));

        vunmap(vaddr);
        __dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
}

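/*
 * Streaming DMA wrappers for non-coherent devices: each swiotlb call is
 * paired with explicit cache maintenance on the kernel alias of the
 * buffer. __dma_map_area() performs the clean/invalidate needed before
 * the device accesses memory, __dma_unmap_area() the invalidation needed
 * before the CPU reads it back (both depend on the transfer direction).
 */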
static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
                                     unsigned long offset, size_t size,
                                     enum dma_data_direction dir,
                                     struct dma_attrs *attrs)
{
        dma_addr_t dev_addr;

        dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
        __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);

        return dev_addr;
}

static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
                                 size_t size, enum dma_data_direction dir,
                                 struct dma_attrs *attrs)
{
        __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
        swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}

static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
                                  int nelems, enum dma_data_direction dir,
                                  struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i, ret;

        ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
        for_each_sg(sgl, sg, ret, i)
                __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                               sg->length, dir);

        return ret;
}

static void __swiotlb_unmap_sg_attrs(struct device *dev,
                                     struct scatterlist *sgl, int nelems,
                                     enum dma_data_direction dir,
                                     struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nelems, i)
                __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                 sg->length, dir);
        swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}

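/*
 * The sync hooks perform the same maintenance as map/unmap so that a
 * mapped buffer can change hands between CPU and device repeatedly:
 * broadly, the for_cpu variants invalidate stale lines before the CPU
 * reads, while the for_device variants clean CPU writes out to memory
 * before the device reads (the exact operation depends on the
 * transfer direction).
 */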
static void __swiotlb_sync_single_for_cpu(struct device *dev,
                                          dma_addr_t dev_addr, size_t size,
                                          enum dma_data_direction dir)
{
        __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
        swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}

static void __swiotlb_sync_single_for_device(struct device *dev,
                                             dma_addr_t dev_addr, size_t size,
                                             enum dma_data_direction dir)
{
        swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
        __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}

static void __swiotlb_sync_sg_for_cpu(struct device *dev,
                                      struct scatterlist *sgl, int nelems,
                                      enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nelems, i)
                __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                 sg->length, dir);
        swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}

static void __swiotlb_sync_sg_for_device(struct device *dev,
                                         struct scatterlist *sgl, int nelems,
                                         enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
        for_each_sg(sgl, sg, nelems, i)
                __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                               sg->length, dir);
}

/* vma->vm_page_prot must be set appropriately before calling this function */
static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
                             void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
        int ret = -ENXIO;
        unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
                                        PAGE_SHIFT;
        unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
        unsigned long off = vma->vm_pgoff;

        if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;

        if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
                ret = remap_pfn_range(vma, vma->vm_start,
                                      pfn + off,
                                      vma->vm_end - vma->vm_start,
                                      vma->vm_page_prot);
        }

        return ret;
}

static int __swiotlb_mmap_noncoherent(struct device *dev,
                struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                struct dma_attrs *attrs)
{
        vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot, false);
        return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

static int __swiotlb_mmap_coherent(struct device *dev,
                struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                struct dma_attrs *attrs)
{
        /* Just use whatever page_prot attributes were specified */
        return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

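/*
 * Two dma_map_ops tables are exported: the non-coherent one routes every
 * operation through the cache-maintaining wrappers above, while the
 * coherent one can use the generic swiotlb entry points directly.
 */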
struct dma_map_ops noncoherent_swiotlb_dma_ops = {
        .alloc = __dma_alloc_noncoherent,
        .free = __dma_free_noncoherent,
        .mmap = __swiotlb_mmap_noncoherent,
        .map_page = __swiotlb_map_page,
        .unmap_page = __swiotlb_unmap_page,
        .map_sg = __swiotlb_map_sg_attrs,
        .unmap_sg = __swiotlb_unmap_sg_attrs,
        .sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
        .sync_single_for_device = __swiotlb_sync_single_for_device,
        .sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
        .sync_sg_for_device = __swiotlb_sync_sg_for_device,
        .dma_supported = swiotlb_dma_supported,
        .mapping_error = swiotlb_dma_mapping_error,
};
EXPORT_SYMBOL(noncoherent_swiotlb_dma_ops);

struct dma_map_ops coherent_swiotlb_dma_ops = {
        .alloc = __dma_alloc_coherent,
        .free = __dma_free_coherent,
        .mmap = __swiotlb_mmap_coherent,
        .map_page = swiotlb_map_page,
        .unmap_page = swiotlb_unmap_page,
        .map_sg = swiotlb_map_sg_attrs,
        .unmap_sg = swiotlb_unmap_sg_attrs,
        .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
        .sync_single_for_device = swiotlb_sync_single_for_device,
        .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
        .sync_sg_for_device = swiotlb_sync_sg_for_device,
        .dma_supported = swiotlb_dma_supported,
        .mapping_error = swiotlb_dma_mapping_error,
};
EXPORT_SYMBOL(coherent_swiotlb_dma_ops);

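/*
 * For reference, drivers never call these ops directly; they go through
 * the generic DMA API, which dispatches to whichever table dma_ops (or
 * the device's own archdata) points at. A minimal, illustrative sketch,
 * with 'dev' standing in for the driver's struct device:
 *
 *	dma_addr_t dma;
 *	void *buf = dma_alloc_coherent(dev, SZ_4K, &dma, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	... program 'dma' into the device and run the transfer ...
 *	dma_free_coherent(dev, SZ_4K, buf, dma);
 *
 * With the non-coherent table installed, that dma_alloc_coherent() call
 * lands in __dma_alloc_noncoherent() above and 'buf' is the uncached
 * vmap() alias rather than the linear-map address.
 */
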
extern int swiotlb_late_init_with_default_size(size_t default_size);

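/*
 * Default to the coherent ops and size the swiotlb bounce buffer at boot:
 * 64MB, capped at the largest physically contiguous block the page
 * allocator can provide (MAX_ORDER_NR_PAGES).
 */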
static int __init swiotlb_late_init(void)
{
        size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);

        dma_ops = &coherent_swiotlb_dma_ops;

        return swiotlb_late_init_with_default_size(swiotlb_size);
}
subsys_initcall(swiotlb_late_init);

#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
        return 0;
}
fs_initcall(dma_debug_do_init);