/*
 * Support for Medfield PNW Camera Imaging ISP subsystem.
 *
 * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
 *
 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
/*
 * ISP MMU management wrap code
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/mm.h>		/* for GFP_ATOMIC */
#include <linux/slab.h>		/* for kmalloc */
#include <linux/list.h>

#include <asm/cacheflush.h>

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/sizes.h>

#include "atomisp_internal.h"
#include "mmu/isp_mmu.h"
/*
 * 64-bit x86 processor physical address layout:
 *	0		- 0x7fffffff		DDR RAM	(2GB)
 *	0x80000000	- 0xffffffff		MMIO	(2GB)
 *	0x100000000	- 0x3fffffffffff	DDR RAM	(64TB)
 *
 * So if the system has more than 2GB DDR memory, the lower 2GB occupies the
 * physical address range 0 - 0x7fffffff and the rest starts from 0x100000000.
 * We have to make sure memory is allocated from the lower 2GB for devices
 * that are only 32-bit capable (e.g. the ISP MMU).
 *
 * For any confusion, contact bin.gao@intel.com.
 */
#define NR_PAGES_2GB	(SZ_2G / PAGE_SIZE)
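
/*
 * Worked example (assumes PAGE_SIZE == 4096): NR_PAGES_2GB =
 * 0x80000000 / 0x1000 = 524288 pages.  alloc_page_table() below compares
 * totalram_pages against this constant: given the layout above, any RAM
 * beyond the first 2GB lives above the 4GB boundary, so on such systems
 * page tables must be allocated with GFP_DMA32 to stay reachable by the
 * 32-bit ISP MMU.
 */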
static void free_mmu_map(struct isp_mmu *mmu, unsigned int start_isp_virt,
			 unsigned int end_isp_virt);
static unsigned int atomisp_get_pte(phys_addr_t pt, unsigned int idx)
{
	unsigned int *pt_virt = phys_to_virt(pt);

	return *(pt_virt + idx);
}
static void atomisp_set_pte(phys_addr_t pt,
			    unsigned int idx, unsigned int pte)
{
	unsigned int *pt_virt = phys_to_virt(pt);

	*(pt_virt + idx) = pte;
}
static void *isp_pt_phys_to_virt(phys_addr_t phys)
{
	return phys_to_virt(phys);
}
static phys_addr_t isp_pte_to_pgaddr(struct isp_mmu *mmu,
				     unsigned int pte)
{
	return mmu->driver->pte_to_phys(mmu, pte);
}

static unsigned int isp_pgaddr_to_pte_valid(struct isp_mmu *mmu,
					    phys_addr_t phys)
{
	unsigned int pte = mmu->driver->phys_to_pte(mmu, phys);

	return (unsigned int)(pte | ISP_PTE_VALID_MASK(mmu));
}
/*
 * Allocate an uncacheable page table.
 * Return its physical address.
 */
static phys_addr_t alloc_page_table(struct isp_mmu *mmu)
{
	int i;
	phys_addr_t page;
	void *virt;

	/* a page table lock may be needed here */

	/*
	 * The slab allocator (kmem_cache and the kmalloc family) doesn't
	 * handle the GFP_DMA32 flag, so we have to use the buddy allocator.
	 */
	if (totalram_pages > (unsigned long)NR_PAGES_2GB)
		virt = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32);
	else
		virt = kmem_cache_zalloc(mmu->tbl_cache, GFP_KERNEL);
	if (!virt)
		return (phys_addr_t)NULL_PAGE;

	/* the ISP hardware requires an uncacheable page table */
#ifdef CONFIG_X86
	set_memory_uc((unsigned long)virt, 1);
#endif

	page = virt_to_phys(virt);

	for (i = 0; i < 1024; i++)
		atomisp_set_pte(page, i, mmu->driver->null_pte);

	return page;
}
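
/*
 * Sizing note (assuming 32-bit PTEs): 1024 entries * 4 bytes per PTE =
 * 4096 bytes, so each L1/L2 page table fills exactly one 4 KiB page.
 * That is why a single page per table is allocated above and freed below;
 * ideally the loop bound would be spelled ISP_L1PT_PTES/ISP_L2PT_PTES
 * rather than the magic number 1024.
 */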
static void free_page_table(struct isp_mmu *mmu, phys_addr_t page)
{
	void *virt;

	page &= ISP_PAGE_MASK;
	/* set the page back to write-back before freeing it */
	virt = phys_to_virt(page);
#ifdef CONFIG_X86
	set_memory_wb((unsigned long)virt, 1);
#endif
	/*
	 * Free through the same allocator that alloc_page_table() used;
	 * pages from the buddy allocator must not go back to the slab cache.
	 */
	if (totalram_pages > (unsigned long)NR_PAGES_2GB)
		free_page((unsigned long)virt);
	else
		kmem_cache_free(mmu->tbl_cache, virt);
}
static void mmu_remap_error(struct isp_mmu *mmu,
			    phys_addr_t l1_pt, unsigned int l1_idx,
			    phys_addr_t l2_pt, unsigned int l2_idx,
			    unsigned int isp_virt, phys_addr_t old_phys,
			    phys_addr_t new_phys)
{
	dev_err(atomisp_dev, "address remap:\n\n"
		"\tL1 PT: virt = %p, phys = 0x%llx, idx = %d\n"
		"\tL2 PT: virt = %p, phys = 0x%llx, idx = %d\n"
		"\told: isp_virt = 0x%x, phys = 0x%llx\n"
		"\tnew: isp_virt = 0x%x, phys = 0x%llx\n",
		isp_pt_phys_to_virt(l1_pt), (u64)l1_pt, l1_idx,
		isp_pt_phys_to_virt(l2_pt), (u64)l2_pt, l2_idx,
		isp_virt, (u64)old_phys,
		isp_virt, (u64)new_phys);
}
static void mmu_unmap_l2_pte_error(struct isp_mmu *mmu,
				   phys_addr_t l1_pt, unsigned int l1_idx,
				   phys_addr_t l2_pt, unsigned int l2_idx,
				   unsigned int isp_virt, unsigned int pte)
{
	dev_err(atomisp_dev, "unmap invalid L2 pte:\n\n"
		"\tL1 PT: virt = %p, phys = 0x%llx, idx = %d\n"
		"\tL2 PT: virt = %p, phys = 0x%llx, idx = %d\n"
		"\tisp_virt = 0x%x, pte(page phys) = 0x%x\n",
		isp_pt_phys_to_virt(l1_pt), (u64)l1_pt, l1_idx,
		isp_pt_phys_to_virt(l2_pt), (u64)l2_pt, l2_idx,
		isp_virt, pte);
}
static void mmu_unmap_l1_pte_error(struct isp_mmu *mmu,
				   phys_addr_t l1_pt, unsigned int l1_idx,
				   unsigned int isp_virt, unsigned int pte)
{
	dev_err(atomisp_dev, "unmap invalid L1 pte (L2 PT):\n\n"
		"\tL1 PT: virt = %p, phys = 0x%llx, idx = %d\n"
		"\tisp_virt = 0x%x, l1_pte(L2 PT) = 0x%x\n",
		isp_pt_phys_to_virt(l1_pt), (u64)l1_pt, l1_idx,
		isp_virt, pte);
}
static void mmu_unmap_l1_pt_error(struct isp_mmu *mmu, unsigned int pte)
{
	dev_err(atomisp_dev, "unmap invalid L1PT:\n\n"
		"L1PT = 0x%x\n", pte);
}
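
/*
 * The table walkers below split a 32-bit ISP virtual address using the
 * macros from mmu/isp_mmu.h; assuming the usual 10/10/12 two-level layout
 * with 4 KiB pages:
 *
 *	 31          22 21          12 11           0
 *	+--------------+--------------+--------------+
 *	|   L1 index   |   L2 index   | page offset  |
 *	+--------------+--------------+--------------+
 *	 ISP_PTR_TO_L1_IDX()  ISP_PTR_TO_L2_IDX()
 */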
/*
 * Update the L2 page table according to the ISP virtual address and the
 * physical address of the page.
 */
static int mmu_l2_map(struct isp_mmu *mmu, phys_addr_t l1_pt,
		      unsigned int l1_idx, phys_addr_t l2_pt,
		      unsigned int start, unsigned int end, phys_addr_t phys)
{
	unsigned int ptr;
	unsigned int idx;
	unsigned int pte;

	l2_pt &= ISP_PAGE_MASK;

	start = start & ISP_PAGE_MASK;
	end = ISP_PAGE_ALIGN(end);
	phys &= ISP_PAGE_MASK;

	ptr = start;
	do {
		idx = ISP_PTR_TO_L2_IDX(ptr);

		pte = atomisp_get_pte(l2_pt, idx);

		if (ISP_PTE_VALID(mmu, pte)) {
			mmu_remap_error(mmu, l1_pt, l1_idx,
					l2_pt, idx, ptr, pte, phys);

			/* free all mapped pages */
			free_mmu_map(mmu, start, ptr);

			return -EINVAL;
		}

		pte = isp_pgaddr_to_pte_valid(mmu, phys);

		atomisp_set_pte(l2_pt, idx, pte);
		mmu->l2_pgt_refcount[l1_idx]++;
		ptr += (1U << ISP_L2PT_OFFSET);
		phys += (1U << ISP_L2PT_OFFSET);
	} while (ptr < end && idx < ISP_L2PT_PTES - 1);

	return 0;
}
/*
 * Update the L1 page table according to the ISP virtual address and the
 * physical address of the page.
 */
static int mmu_l1_map(struct isp_mmu *mmu, phys_addr_t l1_pt,
		      unsigned int start, unsigned int end,
		      phys_addr_t phys)
{
	phys_addr_t l2_pt;
	unsigned int ptr, l1_aligned;
	unsigned int idx;
	unsigned int l2_pte;
	int ret;

	l1_pt &= ISP_PAGE_MASK;

	start = start & ISP_PAGE_MASK;
	end = ISP_PAGE_ALIGN(end);
	phys &= ISP_PAGE_MASK;

	ptr = start;
	do {
		idx = ISP_PTR_TO_L1_IDX(ptr);

		l2_pte = atomisp_get_pte(l1_pt, idx);

		if (!ISP_PTE_VALID(mmu, l2_pte)) {
			l2_pt = alloc_page_table(mmu);
			if (l2_pt == NULL_PAGE) {
				dev_err(atomisp_dev,
					"alloc page table fail.\n");

				/* free all mapped pages */
				free_mmu_map(mmu, start, ptr);

				return -ENOMEM;
			}

			l2_pte = isp_pgaddr_to_pte_valid(mmu, l2_pt);

			atomisp_set_pte(l1_pt, idx, l2_pte);
			mmu->l2_pgt_refcount[idx] = 0;
		}

		l2_pt = isp_pte_to_pgaddr(mmu, l2_pte);

		l1_aligned = (ptr & ISP_PAGE_MASK) + (1U << ISP_L1PT_OFFSET);

		if (l1_aligned < end) {
			/* map up to the next L1-entry boundary only */
			ret = mmu_l2_map(mmu, l1_pt, idx,
					 l2_pt, ptr, l1_aligned, phys);
			phys += (l1_aligned - ptr);
			ptr = l1_aligned;
		} else {
			ret = mmu_l2_map(mmu, l1_pt, idx,
					 l2_pt, ptr, end, phys);
			phys += (end - ptr);
			ptr = end;
		}

		if (ret) {
			dev_err(atomisp_dev, "setup mapping in L2PT fail.\n");

			/* free all mapped pages */
			free_mmu_map(mmu, start, ptr);

			return -EINVAL;
		}
	} while (ptr < end && idx < ISP_L1PT_PTES);

	return 0;
}
/*
 * Update the page table according to the ISP virtual address and the
 * physical address of the page.
 */
static int mmu_map(struct isp_mmu *mmu, unsigned int isp_virt,
		   phys_addr_t phys, unsigned int pgnr)
{
	unsigned int start, end;
	phys_addr_t l1_pt;
	int ret;

	mutex_lock(&mmu->pt_mutex);
	if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {
		/* allocate one new page for the L1 page table */
		l1_pt = alloc_page_table(mmu);
		if (l1_pt == NULL_PAGE) {
			dev_err(atomisp_dev, "alloc page table fail.\n");
			mutex_unlock(&mmu->pt_mutex);
			return -ENOMEM;
		}

		/* set the L1 page table physical address in the MMU */
		ret = mmu->driver->set_pd_base(mmu, l1_pt);
		if (ret) {
			dev_err(atomisp_dev,
				"set page directory base address fail.\n");
			mutex_unlock(&mmu->pt_mutex);
			return ret;
		}
		mmu->base_address = l1_pt;
		mmu->l1_pte = isp_pgaddr_to_pte_valid(mmu, l1_pt);
		memset(mmu->l2_pgt_refcount, 0, sizeof(int) * ISP_L1PT_PTES);
	}

	l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte);

	start = isp_virt & ISP_PAGE_MASK;
	end = start + (pgnr << ISP_PAGE_OFFSET);
	phys &= ISP_PAGE_MASK;

	ret = mmu_l1_map(mmu, l1_pt, start, end, phys);
	if (ret)
		dev_err(atomisp_dev, "setup mapping in L1PT fail.\n");

	mutex_unlock(&mmu->pt_mutex);
	return ret;
}
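
/*
 * Worked example (assuming ISP_PAGE_OFFSET == 12, i.e. 4 KiB pages):
 * mmu_map(mmu, 0x12345678, phys, 3) rounds the virtual address down to
 * start = 0x12345000 and maps up to end = 0x12348000, i.e. three pages.
 */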
/*
 * Free the L2 page table entries covering the given ISP virtual address
 * range.
 */
static void mmu_l2_unmap(struct isp_mmu *mmu, phys_addr_t l1_pt,
			 unsigned int l1_idx, phys_addr_t l2_pt,
			 unsigned int start, unsigned int end)
{
	unsigned int ptr;
	unsigned int idx;
	unsigned int pte;

	l2_pt &= ISP_PAGE_MASK;

	start = start & ISP_PAGE_MASK;
	end = ISP_PAGE_ALIGN(end);

	ptr = start;
	do {
		idx = ISP_PTR_TO_L2_IDX(ptr);

		pte = atomisp_get_pte(l2_pt, idx);

		if (!ISP_PTE_VALID(mmu, pte))
			mmu_unmap_l2_pte_error(mmu, l1_pt, l1_idx,
					       l2_pt, idx, ptr, pte);

		atomisp_set_pte(l2_pt, idx, mmu->driver->null_pte);
		mmu->l2_pgt_refcount[l1_idx]--;
		ptr += (1U << ISP_L2PT_OFFSET);
	} while (ptr < end && idx < ISP_L2PT_PTES - 1);

	/* free the L2 page table once its last mapping is gone */
	if (mmu->l2_pgt_refcount[l1_idx] == 0) {
		free_page_table(mmu, l2_pt);
		atomisp_set_pte(l1_pt, l1_idx, mmu->driver->null_pte);
	}
}
/*
 * Walk the L1 page table and free the L2 page tables covering the given
 * ISP virtual address range.
 */
static void mmu_l1_unmap(struct isp_mmu *mmu, phys_addr_t l1_pt,
			 unsigned int start, unsigned int end)
{
	phys_addr_t l2_pt;
	unsigned int ptr, l1_aligned;
	unsigned int idx;
	unsigned int l2_pte;

	l1_pt &= ISP_PAGE_MASK;

	start = start & ISP_PAGE_MASK;
	end = ISP_PAGE_ALIGN(end);

	ptr = start;
	do {
		idx = ISP_PTR_TO_L1_IDX(ptr);

		l2_pte = atomisp_get_pte(l1_pt, idx);

		if (!ISP_PTE_VALID(mmu, l2_pte)) {
			mmu_unmap_l1_pte_error(mmu, l1_pt, idx, ptr, l2_pte);
			/* skip to the next L1 entry so the loop terminates */
			ptr = (ptr & ISP_PAGE_MASK) + (1U << ISP_L1PT_OFFSET);
			continue;
		}

		l2_pt = isp_pte_to_pgaddr(mmu, l2_pte);

		l1_aligned = (ptr & ISP_PAGE_MASK) + (1U << ISP_L1PT_OFFSET);

		if (l1_aligned < end) {
			mmu_l2_unmap(mmu, l1_pt, idx, l2_pt, ptr, l1_aligned);
			ptr = l1_aligned;
		} else {
			mmu_l2_unmap(mmu, l1_pt, idx, l2_pt, ptr, end);
			ptr = end;
		}
		/*
		 * the same L2 page table may be reused by the next mapping,
		 * so we don't need to invalidate and free it here.
		 */
		/* atomisp_set_pte(l1_pt, idx, NULL_PTE); */
	} while (ptr < end && idx < ISP_L1PT_PTES);
}
/*
 * Free the page table entries for the given ISP virtual address range.
 */
static void mmu_unmap(struct isp_mmu *mmu, unsigned int isp_virt,
		      unsigned int pgnr)
{
	unsigned int start, end;
	phys_addr_t l1_pt;

	mutex_lock(&mmu->pt_mutex);
	if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {
		mmu_unmap_l1_pt_error(mmu, mmu->l1_pte);
		mutex_unlock(&mmu->pt_mutex);
		return;
	}

	l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte);

	start = isp_virt & ISP_PAGE_MASK;
	end = start + (pgnr << ISP_PAGE_OFFSET);

	mmu_l1_unmap(mmu, l1_pt, start, end);
	mutex_unlock(&mmu->pt_mutex);
}
/*
 * Free page tables between the given ISP start and end virtual addresses.
 */
static void free_mmu_map(struct isp_mmu *mmu, unsigned int start_isp_virt,
			 unsigned int end_isp_virt)
{
	unsigned int pgnr;
	unsigned int start, end;

	start = start_isp_virt & ISP_PAGE_MASK;
	end = end_isp_virt & ISP_PAGE_MASK;
	pgnr = (end - start) >> ISP_PAGE_OFFSET;
	mmu_unmap(mmu, start, pgnr);
}
int isp_mmu_map(struct isp_mmu *mmu, unsigned int isp_virt,
		phys_addr_t phys, unsigned int pgnr)
{
	return mmu_map(mmu, isp_virt, phys, pgnr);
}
void isp_mmu_unmap(struct isp_mmu *mmu, unsigned int isp_virt,
		   unsigned int pgnr)
{
	mmu_unmap(mmu, isp_virt, pgnr);
}
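
/*
 * Typical usage sketch (illustrative only; assumes the TLB flush helpers
 * declared in mmu/isp_mmu.h and a page-aligned phys):
 *
 *	if (!isp_mmu_map(mmu, isp_virt, phys, pgnr))
 *		isp_mmu_flush_tlb_range(mmu, isp_virt,
 *					pgnr << ISP_PAGE_OFFSET);
 *	...
 *	isp_mmu_unmap(mmu, isp_virt, pgnr);
 */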
static void isp_mmu_flush_tlb_range_default(struct isp_mmu *mmu,
					    unsigned int start,
					    unsigned int size)
{
	isp_mmu_flush_tlb(mmu);
}
/* MMU init for internal structure */
int isp_mmu_init(struct isp_mmu *mmu, struct isp_mmu_client *driver)
{
	if (!mmu)		/* error */
		return -EINVAL;
	if (!driver)		/* error */
		return -EINVAL;

	if (!driver->name)
		dev_warn(atomisp_dev, "NULL name for MMU driver...\n");

	mmu->driver = driver;

	if (!driver->set_pd_base || !driver->tlb_flush_all) {
		dev_err(atomisp_dev,
			"set_pd_base or tlb_flush_all operation not provided.\n");
		return -EINVAL;
	}

	if (!driver->tlb_flush_range)
		driver->tlb_flush_range = isp_mmu_flush_tlb_range_default;

	if (!driver->pte_valid_mask) {
		dev_err(atomisp_dev, "PTE_MASK is missing from mmu driver\n");
		return -EINVAL;
	}

	mmu->l1_pte = driver->null_pte;

	mutex_init(&mmu->pt_mutex);

	mmu->tbl_cache = kmem_cache_create("iopte_cache", ISP_PAGE_SIZE,
					   ISP_PAGE_SIZE, SLAB_HWCACHE_ALIGN,
					   NULL);
	if (!mmu->tbl_cache)
		return -ENOMEM;

	return 0;
}
/* Free the L1 and L2 page tables */
void isp_mmu_exit(struct isp_mmu *mmu)
{
	unsigned int idx;
	unsigned int pte;
	phys_addr_t l1_pt, l2_pt;

	if (!mmu)
		return;

	if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {
		dev_warn(atomisp_dev, "invalid L1PT: pte = 0x%x\n",
			 (unsigned int)mmu->l1_pte);
		return;
	}

	l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte);

	for (idx = 0; idx < ISP_L1PT_PTES; idx++) {
		pte = atomisp_get_pte(l1_pt, idx);

		if (ISP_PTE_VALID(mmu, pte)) {
			l2_pt = isp_pte_to_pgaddr(mmu, pte);
			free_page_table(mmu, l2_pt);
		}
	}

	free_page_table(mmu, l1_pt);

	kmem_cache_destroy(mmu->tbl_cache);
}
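
/*
 * Lifecycle sketch (hypothetical client; real implementations supply the
 * isp_mmu_client ops, e.g. the SH MMU driver elsewhere in this subsystem):
 *
 *	struct isp_mmu mmu;
 *
 *	if (!isp_mmu_init(&mmu, &my_mmu_client)) {
 *		isp_mmu_map(&mmu, isp_virt, phys, pgnr);
 *		...
 *		isp_mmu_unmap(&mmu, isp_virt, pgnr);
 *		isp_mmu_exit(&mmu);
 *	}
 */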