/*
 * Support for Medfield PNW Camera Imaging ISP subsystem.
 *
 * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
 *
 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
/*
 * ISP MMU management wrap code
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/mm.h>		/* for GFP_ATOMIC */
#include <linux/slab.h>		/* for kmalloc */
#include <linux/list.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/sizes.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include "atomisp_internal.h"
#include "mmu/isp_mmu.h"

/*
 * 64-bit x86 processor physical address layout:
 * 0		- 0x7fffffff		DDR RAM	(2GB)
 * 0x80000000	- 0xffffffff		MMIO	(2GB)
 * 0x100000000	- 0x3fffffffffff	DDR RAM	(64TB)
 * So if the system has more than 2GB DDR memory, the lower 2GB occupies the
 * physical address range 0 - 0x7fffffff and the rest starts from 0x100000000.
 * We have to make sure memory is allocated from the lower 2GB for devices
 * that are only 32-bit capable (e.g. the ISP MMU).
 *
 * For any confusion, contact bin.gao@intel.com.
 */
#define NR_PAGES_2GB	(SZ_2G / PAGE_SIZE)
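
/*
 * With the usual 4 KiB PAGE_SIZE this works out to
 * 0x80000000 / 0x1000 = 524288 pages; alloc_page_table() compares
 * totalram_pages against this value to decide whether the GFP_DMA32
 * buddy-allocator path is needed at all.
 */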

static void free_mmu_map(struct isp_mmu *mmu, unsigned int start_isp_virt,
			 unsigned int end_isp_virt);
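
/*
 * Page table accessors: each ISP page table is a single page holding
 * 1024 32-bit entries, and the tables always sit in kernel lowmem, so an
 * entry can be read and written through the linear map via phys_to_virt().
 */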
static unsigned int atomisp_get_pte(phys_addr_t pt, unsigned int idx)
{
	unsigned int *pt_virt = phys_to_virt(pt);

	return *(pt_virt + idx);
}

static void atomisp_set_pte(phys_addr_t pt,
			    unsigned int idx, unsigned int pte)
{
	unsigned int *pt_virt = phys_to_virt(pt);

	*(pt_virt + idx) = pte;
}

static void *isp_pt_phys_to_virt(phys_addr_t phys)
{
	return phys_to_virt(phys);
}

static phys_addr_t isp_pte_to_pgaddr(struct isp_mmu *mmu,
				     unsigned int pte)
{
	return mmu->driver->pte_to_phys(mmu, pte);
}

static unsigned int isp_pgaddr_to_pte_valid(struct isp_mmu *mmu,
					    phys_addr_t phys)
{
	unsigned int pte = mmu->driver->phys_to_pte(mmu, phys);

	return (unsigned int)(pte | ISP_PTE_VALID_MASK(mmu));
}

/* allocate an uncacheable page table; return its physical address */
static phys_addr_t alloc_page_table(struct isp_mmu *mmu)
{
	int i;
	phys_addr_t page;
	void *virt;

	/* page table lock may be needed here */
	/*
	 * The slab allocator (kmem_cache and the kmalloc family) doesn't
	 * handle the GFP_DMA32 flag, so we have to use the buddy allocator.
	 */
	if (totalram_pages > (unsigned long)NR_PAGES_2GB)
		virt = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32);
	else
		virt = kmem_cache_zalloc(mmu->tbl_cache, GFP_KERNEL);
	if (!virt)
		return (phys_addr_t)NULL_PAGE;

	/* we need an uncacheable page table */
#ifdef CONFIG_X86
	set_memory_uc((unsigned long)virt, 1);
#endif
	page = virt_to_phys(virt);

	for (i = 0; i < 1024; i++)
		atomisp_set_pte(page, i, mmu->driver->null_pte);

	return page;
}
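
/*
 * Note: alloc_page_table() may hand out a page from the buddy allocator
 * (the GFP_DMA32 path) while free_page_table() always returns pages to
 * mmu->tbl_cache; the allocation and free paths are not symmetric.
 */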

static void free_page_table(struct isp_mmu *mmu, phys_addr_t page)
{
	void *virt;

	page &= ISP_PAGE_MASK;
	/* reset the page to write-back before freeing it */
	virt = phys_to_virt(page);
#ifdef CONFIG_X86
	set_memory_wb((unsigned long)virt, 1);
#endif
	kmem_cache_free(mmu->tbl_cache, virt);
}

static void mmu_remap_error(struct isp_mmu *mmu,
			    phys_addr_t l1_pt, unsigned int l1_idx,
			    phys_addr_t l2_pt, unsigned int l2_idx,
			    unsigned int isp_virt, phys_addr_t old_phys,
			    phys_addr_t new_phys)
{
	dev_err(atomisp_dev, "address remap:\n\n"
		"\tL1 PT: virt = %p, phys = 0x%llx, idx = %d\n"
		"\tL2 PT: virt = %p, phys = 0x%llx, idx = %d\n"
		"\told: isp_virt = 0x%x, phys = 0x%llx\n"
		"\tnew: isp_virt = 0x%x, phys = 0x%llx\n",
		isp_pt_phys_to_virt(l1_pt),
		(u64)l1_pt, l1_idx,
		isp_pt_phys_to_virt(l2_pt),
		(u64)l2_pt, l2_idx, isp_virt,
		(u64)old_phys, isp_virt,
		(u64)new_phys);
}

static void mmu_unmap_l2_pte_error(struct isp_mmu *mmu,
				   phys_addr_t l1_pt, unsigned int l1_idx,
				   phys_addr_t l2_pt, unsigned int l2_idx,
				   unsigned int isp_virt, unsigned int pte)
{
	dev_err(atomisp_dev, "unmap invalid L2 pte:\n\n"
		"\tL1 PT: virt = %p, phys = 0x%llx, idx = %d\n"
		"\tL2 PT: virt = %p, phys = 0x%llx, idx = %d\n"
		"\tisp_virt = 0x%x, pte(page phys) = 0x%x\n",
		isp_pt_phys_to_virt(l1_pt),
		(u64)l1_pt, l1_idx,
		isp_pt_phys_to_virt(l2_pt),
		(u64)l2_pt, l2_idx, isp_virt,
		pte);
}

static void mmu_unmap_l1_pte_error(struct isp_mmu *mmu,
				   phys_addr_t l1_pt, unsigned int l1_idx,
				   unsigned int isp_virt, unsigned int pte)
{
	dev_err(atomisp_dev, "unmap invalid L1 pte (L2 PT):\n\n"
		"\tL1 PT: virt = %p, phys = 0x%llx, idx = %d\n"
		"\tisp_virt = 0x%x, l1_pte(L2 PT) = 0x%x\n",
		isp_pt_phys_to_virt(l1_pt),
		(u64)l1_pt, l1_idx, (unsigned int)isp_virt,
		pte);
}

static void mmu_unmap_l1_pt_error(struct isp_mmu *mmu, unsigned int pte)
{
	dev_err(atomisp_dev, "unmap invalid L1PT:\n\n"
		"L1PT = 0x%x\n", (unsigned int)pte);
}

/*
 * Update L2 page table according to isp virtual address and page physical
 * address
 */
static int mmu_l2_map(struct isp_mmu *mmu, phys_addr_t l1_pt,
		      unsigned int l1_idx, phys_addr_t l2_pt,
		      unsigned int start, unsigned int end, phys_addr_t phys)
{
	unsigned int ptr;
	unsigned int idx;
	unsigned int pte;

	l2_pt &= ISP_PAGE_MASK;
	start = start & ISP_PAGE_MASK;
	end = ISP_PAGE_ALIGN(end);
	phys &= ISP_PAGE_MASK;

	ptr = start;
	do {
		idx = ISP_PTR_TO_L2_IDX(ptr);
		pte = atomisp_get_pte(l2_pt, idx);
		if (ISP_PTE_VALID(mmu, pte)) {
			mmu_remap_error(mmu, l1_pt, l1_idx,
					l2_pt, idx, ptr, pte, phys);
			/* free all mapped pages */
			free_mmu_map(mmu, start, ptr);
			return -EINVAL;
		}

		pte = isp_pgaddr_to_pte_valid(mmu, phys);
		atomisp_set_pte(l2_pt, idx, pte);
		mmu->l2_pgt_refcount[l1_idx]++;
		ptr += (1U << ISP_L2PT_OFFSET);
		phys += (1U << ISP_L2PT_OFFSET);
	} while (ptr < end && idx < ISP_L2PT_PTES - 1);

	return 0;
}

/*
 * Update L1 page table according to isp virtual address and page physical
 * address
 */
static int mmu_l1_map(struct isp_mmu *mmu, phys_addr_t l1_pt,
		      unsigned int start, unsigned int end,
		      phys_addr_t phys)
{
	phys_addr_t l2_pt;
	unsigned int ptr, l1_aligned;
	unsigned int idx;
	unsigned int l2_pte;
	int ret;

	l1_pt &= ISP_PAGE_MASK;
	start = start & ISP_PAGE_MASK;
	end = ISP_PAGE_ALIGN(end);
	phys &= ISP_PAGE_MASK;

	ptr = start;
	do {
		idx = ISP_PTR_TO_L1_IDX(ptr);
		l2_pte = atomisp_get_pte(l1_pt, idx);
		if (!ISP_PTE_VALID(mmu, l2_pte)) {
			l2_pt = alloc_page_table(mmu);
			if (l2_pt == NULL_PAGE) {
				dev_err(atomisp_dev,
					"alloc page table failed.\n");
				/* free all mapped pages */
				free_mmu_map(mmu, start, ptr);
				return -ENOMEM;
			}

			l2_pte = isp_pgaddr_to_pte_valid(mmu, l2_pt);
			atomisp_set_pte(l1_pt, idx, l2_pte);
			mmu->l2_pgt_refcount[idx] = 0;
		}

		l2_pt = isp_pte_to_pgaddr(mmu, l2_pte);
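
		/*
		 * Map at most up to the next L1 boundary in one go, so each
		 * mmu_l2_map() call stays within a single L2 page table;
		 * ptr and phys advance together by the size of the chunk.
		 */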
		l1_aligned = (ptr & ISP_PAGE_MASK) + (1U << ISP_L1PT_OFFSET);
		if (l1_aligned < end) {
			ret = mmu_l2_map(mmu, l1_pt, idx,
					 l2_pt, ptr, l1_aligned, phys);
			phys += (l1_aligned - ptr);
			ptr = l1_aligned;
		} else {
			ret = mmu_l2_map(mmu, l1_pt, idx,
					 l2_pt, ptr, end, phys);
			phys += (end - ptr);
			ptr = end;
		}
		if (ret) {
			dev_err(atomisp_dev, "setup mapping in L2PT failed.\n");
			/* free all mapped pages */
			free_mmu_map(mmu, start, ptr);
			return -EINVAL;
		}
	} while (ptr < end && idx < ISP_L1PT_PTES);

	return 0;
}

/*
 * Update page table according to isp virtual address and page physical
 * address
 */
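/*
 * The L1 page directory is allocated lazily on the first mapping; its
 * physical address is handed to the hardware through the driver's
 * set_pd_base() hook before any PTE is filled in.
 */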
static int mmu_map(struct isp_mmu *mmu, unsigned int isp_virt,
		   phys_addr_t phys, unsigned int pgnr)
{
	unsigned int start, end;
	phys_addr_t l1_pt;
	int ret;

	mutex_lock(&mmu->pt_mutex);
	if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {
		/* allocate 1 new page for the L1 page table */
		l1_pt = alloc_page_table(mmu);
		if (l1_pt == NULL_PAGE) {
			dev_err(atomisp_dev, "alloc page table failed.\n");
			mutex_unlock(&mmu->pt_mutex);
			return -ENOMEM;
		}

		/* set the L1 page table physical addr in the MMU */
		ret = mmu->driver->set_pd_base(mmu, l1_pt);
		if (ret) {
			dev_err(atomisp_dev,
				"set page directory base address failed.\n");
			mutex_unlock(&mmu->pt_mutex);
			return ret;
		}

		mmu->base_address = l1_pt;
		mmu->l1_pte = isp_pgaddr_to_pte_valid(mmu, l1_pt);
		memset(mmu->l2_pgt_refcount, 0, sizeof(int) * ISP_L1PT_PTES);
	}

	l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte);

	start = isp_virt & ISP_PAGE_MASK;
	end = start + (pgnr << ISP_PAGE_OFFSET);
	phys &= ISP_PAGE_MASK;

	ret = mmu_l1_map(mmu, l1_pt, start, end, phys);
	if (ret)
		dev_err(atomisp_dev, "setup mapping in L1PT failed.\n");

	mutex_unlock(&mmu->pt_mutex);
	return ret;
}

/*
 * Free L2 page table according to isp virtual address and page physical
 * address
 */
static void mmu_l2_unmap(struct isp_mmu *mmu, phys_addr_t l1_pt,
			 unsigned int l1_idx, phys_addr_t l2_pt,
			 unsigned int start, unsigned int end)
{
	unsigned int ptr;
	unsigned int idx;
	unsigned int pte;

	l2_pt &= ISP_PAGE_MASK;
	start = start & ISP_PAGE_MASK;
	end = ISP_PAGE_ALIGN(end);

	ptr = start;
	do {
		idx = ISP_PTR_TO_L2_IDX(ptr);
		pte = atomisp_get_pte(l2_pt, idx);
		if (!ISP_PTE_VALID(mmu, pte))
			mmu_unmap_l2_pte_error(mmu, l1_pt, l1_idx,
					       l2_pt, idx, ptr, pte);

		atomisp_set_pte(l2_pt, idx, mmu->driver->null_pte);
		mmu->l2_pgt_refcount[l1_idx]--;
		ptr += (1U << ISP_L2PT_OFFSET);
	} while (ptr < end && idx < ISP_L2PT_PTES - 1);
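
	/*
	 * A refcount of zero means every entry in this L2 page table is
	 * the null PTE again, so the table can be freed and its slot in
	 * the L1 page table cleared.
	 */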
	if (mmu->l2_pgt_refcount[l1_idx] == 0) {
		free_page_table(mmu, l2_pt);
		atomisp_set_pte(l1_pt, l1_idx, mmu->driver->null_pte);
	}
}

/*
 * Free L1 page table according to isp virtual address and page physical
 * address
 */
static void mmu_l1_unmap(struct isp_mmu *mmu, phys_addr_t l1_pt,
			 unsigned int start, unsigned int end)
{
	phys_addr_t l2_pt;
	unsigned int ptr, l1_aligned;
	unsigned int idx, l2_pte;

	l1_pt &= ISP_PAGE_MASK;
	start = start & ISP_PAGE_MASK;
	end = ISP_PAGE_ALIGN(end);

	ptr = start;
	do {
		idx = ISP_PTR_TO_L1_IDX(ptr);
		l2_pte = atomisp_get_pte(l1_pt, idx);
		if (!ISP_PTE_VALID(mmu, l2_pte)) {
			mmu_unmap_l1_pte_error(mmu, l1_pt, idx, ptr, l2_pte);
			/* step past this L1 entry so we don't loop forever */
			ptr = (ptr & ISP_PAGE_MASK) + (1U << ISP_L1PT_OFFSET);
			continue;
		}

		l2_pt = isp_pte_to_pgaddr(mmu, l2_pte);
		l1_aligned = (ptr & ISP_PAGE_MASK) + (1U << ISP_L1PT_OFFSET);
		if (l1_aligned < end) {
			mmu_l2_unmap(mmu, l1_pt, idx, l2_pt, ptr, l1_aligned);
			ptr = l1_aligned;
		} else {
			mmu_l2_unmap(mmu, l1_pt, idx, l2_pt, ptr, end);
			ptr = end;
		}
		/* reuse the same L2 PT next time; no need to invalidate/free it */
		/* atomisp_set_pte(l1_pt, idx, NULL_PTE); */
	} while (ptr < end && idx < ISP_L1PT_PTES);
}

/*
 * Free page table according to isp virtual address and page physical
 * address
 */
static void mmu_unmap(struct isp_mmu *mmu, unsigned int isp_virt,
		      unsigned int pgnr)
{
	unsigned int start, end;
	phys_addr_t l1_pt;

	mutex_lock(&mmu->pt_mutex);
	if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {
		mmu_unmap_l1_pt_error(mmu, mmu->l1_pte);
		mutex_unlock(&mmu->pt_mutex);
		return;
	}

	l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte);
	start = isp_virt & ISP_PAGE_MASK;
	end = start + (pgnr << ISP_PAGE_OFFSET);

	mmu_l1_unmap(mmu, l1_pt, start, end);
	mutex_unlock(&mmu->pt_mutex);
}

/*
 * Free page tables according to isp start virtual address and end virtual
 * address.
 */
static void free_mmu_map(struct isp_mmu *mmu, unsigned int start_isp_virt,
			 unsigned int end_isp_virt)
{
	unsigned int pgnr;
	unsigned int start, end;

	start = start_isp_virt & ISP_PAGE_MASK;
	end = end_isp_virt & ISP_PAGE_MASK;
	pgnr = (end - start) >> ISP_PAGE_OFFSET;
	mmu_unmap(mmu, start, pgnr);
}
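
/*
 * Example (hypothetical caller, for illustration only): mapping a
 * physically contiguous 16-page buffer at ISP virtual address 0x10000000
 * and tearing it down again would be
 *
 *	ret = isp_mmu_map(mmu, 0x10000000, buf_phys, 16);
 *	...
 *	isp_mmu_unmap(mmu, 0x10000000, 16);
 *
 * followed by a TLB flush (e.g. isp_mmu_flush_tlb()) so the ISP sees the
 * updated translations.
 */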
int isp_mmu_map(struct isp_mmu *mmu, unsigned int isp_virt,
		phys_addr_t phys, unsigned int pgnr)
{
	return mmu_map(mmu, isp_virt, phys, pgnr);
}

void isp_mmu_unmap(struct isp_mmu *mmu, unsigned int isp_virt,
		   unsigned int pgnr)
{
	mmu_unmap(mmu, isp_virt, pgnr);
}
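
/*
 * Default .tlb_flush_range implementation for drivers that only provide
 * tlb_flush_all: a range flush simply degrades to a full TLB flush.
 */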
static void isp_mmu_flush_tlb_range_default(struct isp_mmu *mmu,
					    unsigned int start,
					    unsigned int size)
{
	isp_mmu_flush_tlb(mmu);
}

/* MMU init for internal structure */
int isp_mmu_init(struct isp_mmu *mmu, struct isp_mmu_client *driver)
{
	if (!mmu)		/* error */
		return -EINVAL;
	if (!driver)		/* error */
		return -EINVAL;
	if (!driver->name)
		dev_warn(atomisp_dev, "NULL name for MMU driver...\n");

	mmu->driver = driver;

	if (!driver->set_pd_base || !driver->tlb_flush_all) {
		dev_err(atomisp_dev,
			"set_pd_base or tlb_flush_all operation not provided.\n");
		return -EINVAL;
	}
	if (!driver->tlb_flush_range)
		driver->tlb_flush_range = isp_mmu_flush_tlb_range_default;
	if (!driver->pte_valid_mask) {
		dev_err(atomisp_dev, "PTE_MASK is missing from mmu driver\n");
		return -EINVAL;
	}

	mmu->l1_pte = driver->null_pte;
	mutex_init(&mmu->pt_mutex);
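
	/*
	 * Each page table occupies exactly one ISP_PAGE_SIZE page; the
	 * cache is sized and aligned to ISP_PAGE_SIZE so a table never
	 * straddles a page boundary.
	 */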
	mmu->tbl_cache = kmem_cache_create("iopte_cache", ISP_PAGE_SIZE,
					   ISP_PAGE_SIZE, SLAB_HWCACHE_ALIGN,
					   NULL);
	if (!mmu->tbl_cache)
		return -ENOMEM;

	return 0;
}

/* Free L1 and L2 page table */
void isp_mmu_exit(struct isp_mmu *mmu)
{
	unsigned int idx, pte;
	phys_addr_t l1_pt, l2_pt;

	if (!mmu)
		return;
	if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {
		dev_warn(atomisp_dev, "invalid L1PT: pte = 0x%x\n",
			 (unsigned int)mmu->l1_pte);
		return;
	}

	l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte);

	for (idx = 0; idx < ISP_L1PT_PTES; idx++) {
		pte = atomisp_get_pte(l1_pt, idx);
		if (ISP_PTE_VALID(mmu, pte)) {
			l2_pt = isp_pte_to_pgaddr(mmu, pte);
			free_page_table(mmu, l2_pt);
		}
	}

	free_page_table(mmu, l1_pt);
	kmem_cache_destroy(mmu->tbl_cache);
}