arch/powerpc/mm/mmu_context_iommu.c
/*
 *  IOMMU helpers in MMU context.
 *
 *  Copyright (C) 2015 IBM Corp. <aik@ozlabs.ru>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <asm/mmu_context.h>

static DEFINE_MUTEX(mem_list_mutex);

struct mm_iommu_table_group_mem_t {
	struct list_head next;
	struct rcu_head rcu;
	unsigned long used;
	atomic64_t mapped;
	u64 ua;			/* userspace address */
	u64 entries;		/* number of entries in hpas[] */
	u64 *hpas;		/* vmalloc'ed */
};

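/*
 * Account pinned pages against the mm's RLIMIT_MEMLOCK.
 * When incrementing, the request is rejected with -ENOMEM if the new
 * total would exceed the limit and the caller lacks CAP_IPC_LOCK;
 * when decrementing, locked_vm is clamped so it never underflows.
 */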
static long mm_iommu_adjust_locked_vm(struct mm_struct *mm,
		unsigned long npages, bool incr)
{
	long ret = 0, locked, lock_limit;

	if (!npages)
		return 0;

	down_write(&mm->mmap_sem);

	if (incr) {
		locked = mm->locked_vm + npages;
		lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			ret = -ENOMEM;
		else
			mm->locked_vm += npages;
	} else {
		if (WARN_ON_ONCE(npages > mm->locked_vm))
			npages = mm->locked_vm;
		mm->locked_vm -= npages;
	}

	pr_debug("[%d] RLIMIT_MEMLOCK HASH64 %c%ld %ld/%ld\n",
			current->pid,
			incr ? '+' : '-',
			npages << PAGE_SHIFT,
			mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK));
	up_write(&mm->mmap_sem);

	return ret;
}

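/*
 * Report whether the current process has any memory preregistered for
 * IOMMU use, i.e. whether its per-mm list of
 * mm_iommu_table_group_mem_t descriptors is non-empty.
 */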
bool mm_iommu_preregistered(void)
{
	if (!current || !current->mm)
		return false;

	return !list_empty(&current->mm->context.iommu_group_mem_list);
}
EXPORT_SYMBOL_GPL(mm_iommu_preregistered);

/*
 * Taken from alloc_migrate_target with changes to remove CMA allocations
 */
struct page *new_iommu_non_cma_page(struct page *page, unsigned long private,
					int **resultp)
{
	gfp_t gfp_mask = GFP_USER;
	struct page *new_page;

	if (PageHuge(page) || PageTransHuge(page) || PageCompound(page))
		return NULL;

	if (PageHighMem(page))
		gfp_mask |= __GFP_HIGHMEM;

	/*
	 * We don't want the allocation to force an OOM if possible
	 */
	new_page = alloc_page(gfp_mask | __GFP_NORETRY | __GFP_NOWARN);
	return new_page;
}

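/*
 * Try to migrate a single page out of the CMA area so that long-term
 * pinning does not fragment CMA; huge and compound pages are not
 * handled and return -EBUSY. Once the page has been isolated this
 * returns 0 even if migrate_pages() fails, and the caller re-pins
 * whatever page backs the userspace address afterwards.
 */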
static int mm_iommu_move_page_from_cma(struct page *page)
{
	int ret = 0;
	LIST_HEAD(cma_migrate_pages);

	/* Ignore huge pages for now */
	if (PageHuge(page) || PageTransHuge(page) || PageCompound(page))
		return -EBUSY;

	lru_add_drain();
	ret = isolate_lru_page(page);
	if (ret)
		return ret;

	list_add(&page->lru, &cma_migrate_pages);
	put_page(page); /* Drop the gup reference */

	ret = migrate_pages(&cma_migrate_pages, new_iommu_non_cma_page,
				NULL, 0, MIGRATE_SYNC, MR_CMA);
	if (ret) {
		if (!list_empty(&cma_migrate_pages))
			putback_movable_pages(&cma_migrate_pages);
	}

	return 0;
}

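/*
 * Preregister @entries pages of userspace memory starting at @ua:
 * account them against RLIMIT_MEMLOCK, pin them with
 * get_user_pages_fast(), move them out of CMA where possible and
 * record their host physical addresses in a new descriptor added to
 * the per-mm list. If the exact region is already registered, its use
 * count is bumped instead; overlapping registrations are rejected
 * with -EINVAL.
 */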
long mm_iommu_get(unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem)
{
	struct mm_iommu_table_group_mem_t *mem;
	long i, j, ret = 0, locked_entries = 0;
	struct page *page = NULL;

	if (!current || !current->mm)
		return -ESRCH; /* process exited */

	mutex_lock(&mem_list_mutex);

	list_for_each_entry_rcu(mem, &current->mm->context.iommu_group_mem_list,
			next) {
		if ((mem->ua == ua) && (mem->entries == entries)) {
			++mem->used;
			*pmem = mem;
			goto unlock_exit;
		}

		/* Overlap? */
		if ((mem->ua < (ua + (entries << PAGE_SHIFT))) &&
				(ua < (mem->ua +
				       (mem->entries << PAGE_SHIFT)))) {
			ret = -EINVAL;
			goto unlock_exit;
		}
	}

	ret = mm_iommu_adjust_locked_vm(current->mm, entries, true);
	if (ret)
		goto unlock_exit;

	locked_entries = entries;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem) {
		ret = -ENOMEM;
		goto unlock_exit;
	}

	mem->hpas = vzalloc(entries * sizeof(mem->hpas[0]));
	if (!mem->hpas) {
		kfree(mem);
		ret = -ENOMEM;
		goto unlock_exit;
	}

	for (i = 0; i < entries; ++i) {
		if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
					1/* pages */, 1/* iswrite */, &page)) {
			ret = -EFAULT;
			for (j = 0; j < i; ++j)
				put_page(pfn_to_page(mem->hpas[j] >>
						PAGE_SHIFT));
			vfree(mem->hpas);
			kfree(mem);
			goto unlock_exit;
		}
		/*
		 * If we get a page from the CMA zone, since we are going to
		 * be pinning these entries, we might as well move them out
		 * of the CMA zone if possible. NOTE: faulting in + migration
		 * can be expensive. Batching can be considered later
		 */
		if (get_pageblock_migratetype(page) == MIGRATE_CMA) {
			if (mm_iommu_move_page_from_cma(page))
				goto populate;
			if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
						1/* pages */, 1/* iswrite */,
						&page)) {
				ret = -EFAULT;
				for (j = 0; j < i; ++j)
					put_page(pfn_to_page(mem->hpas[j] >>
								PAGE_SHIFT));
				vfree(mem->hpas);
				kfree(mem);
				goto unlock_exit;
			}
		}
populate:
		mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
	}

	atomic64_set(&mem->mapped, 1);
	mem->used = 1;
	mem->ua = ua;
	mem->entries = entries;
	*pmem = mem;

	list_add_rcu(&mem->next, &current->mm->context.iommu_group_mem_list);

unlock_exit:
	if (locked_entries && ret)
		mm_iommu_adjust_locked_vm(current->mm, locked_entries, false);

	mutex_unlock(&mem_list_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_get);

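/*
 * Drop the page references taken by mm_iommu_get() and clear the
 * stored host physical addresses.
 */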
static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem)
{
	long i;
	struct page *page = NULL;

	for (i = 0; i < mem->entries; ++i) {
		if (!mem->hpas[i])
			continue;

		page = pfn_to_page(mem->hpas[i] >> PAGE_SHIFT);
		if (!page)
			continue;

		put_page(page);
		mem->hpas[i] = 0;
	}
}

static void mm_iommu_do_free(struct mm_iommu_table_group_mem_t *mem)
{
	mm_iommu_unpin(mem);
	vfree(mem->hpas);
	kfree(mem);
}

static void mm_iommu_free(struct rcu_head *head)
{
	struct mm_iommu_table_group_mem_t *mem = container_of(head,
			struct mm_iommu_table_group_mem_t, rcu);

	mm_iommu_do_free(mem);
}

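/*
 * Unlink the descriptor from the per-mm list, return the locked_vm
 * accounting and free everything after an RCU grace period so that
 * lockless walkers of the list stay safe.
 */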
static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
{
	list_del_rcu(&mem->next);
	mm_iommu_adjust_locked_vm(current->mm, mem->entries, false);
	call_rcu(&mem->rcu, mm_iommu_free);
}

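/*
 * Drop one reference to a preregistered region. The region is only
 * torn down when the last user is gone and no mappings remain; if
 * mappings are still active the reference is restored and -EBUSY is
 * returned.
 */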
long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem)
{
	long ret = 0;

	if (!current || !current->mm)
		return -ESRCH; /* process exited */

	mutex_lock(&mem_list_mutex);

	if (mem->used == 0) {
		ret = -ENOENT;
		goto unlock_exit;
	}

	--mem->used;
	/* There are still users, exit */
	if (mem->used)
		goto unlock_exit;

	/* Are there still mappings? */
	if (atomic_cmpxchg(&mem->mapped, 1, 0) != 1) {
		++mem->used;
		ret = -EBUSY;
		goto unlock_exit;
	}

	/* @mapped became 0 so now mappings are disabled, release the region */
	mm_iommu_release(mem);

unlock_exit:
	mutex_unlock(&mem_list_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_put);

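/*
 * Find the preregistered region that fully contains the range
 * [ua, ua + size). Returns NULL if no such region exists.
 */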
struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua,
		unsigned long size)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	list_for_each_entry_rcu(mem,
			&current->mm->context.iommu_group_mem_list,
			next) {
		if ((mem->ua <= ua) &&
				(ua + size <= mem->ua +
				 (mem->entries << PAGE_SHIFT))) {
			ret = mem;
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_lookup);

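/*
 * Find a preregistered region by its exact starting address and size
 * in pages, as opposed to mm_iommu_lookup() which matches any
 * containing region.
 */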
struct mm_iommu_table_group_mem_t *mm_iommu_find(unsigned long ua,
		unsigned long entries)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	list_for_each_entry_rcu(mem,
			&current->mm->context.iommu_group_mem_list,
			next) {
		if ((mem->ua == ua) && (mem->entries == entries)) {
			ret = mem;
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_find);

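/*
 * Translate a userspace address within a preregistered region into
 * the host physical address recorded at pin time, preserving the
 * offset within the page.
 */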
long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned long *hpa)
{
	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
	u64 *va = &mem->hpas[entry];

	if (entry >= mem->entries)
		return -EFAULT;

	*hpa = *va | (ua & ~PAGE_MASK);

	return 0;
}
EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);

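/*
 * Take a mapping reference so the region cannot be released while it
 * is mapped into an IOMMU table. Fails with -ENXIO once the final
 * mm_iommu_put() has dropped @mapped to zero.
 */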
long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem)
{
	if (atomic64_inc_not_zero(&mem->mapped))
		return 0;

	/* Last mm_iommu_put() has been called, no more mappings allowed */
	return -ENXIO;
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_inc);

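/*
 * Drop a mapping reference taken by mm_iommu_mapped_inc(). The count
 * never goes below 1 here; only mm_iommu_put() may take it to zero.
 */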
void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem)
{
	atomic64_add_unless(&mem->mapped, -1, 1);
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_dec);

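/*
 * Per-mm setup and teardown: mm_iommu_init() initialises the list of
 * preregistered regions, mm_iommu_cleanup() unpins and frees whatever
 * is still registered when the context is torn down.
 */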
void mm_iommu_init(struct mm_struct *mm)
{
	INIT_LIST_HEAD_RCU(&mm->context.iommu_group_mem_list);
}

void mm_iommu_cleanup(struct mm_struct *mm)
{
	struct mm_iommu_table_group_mem_t *mem, *tmp;

	list_for_each_entry_safe(mem, tmp, &mm->context.iommu_group_mem_list,
			next) {
		list_del_rcu(&mem->next);
		mm_iommu_do_free(mem);
	}
}