/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>

#define K(x) ((x) << (PAGE_SHIFT-10))

/*
 * The normal show_free_areas() is too verbose on Tile, with dozens
 * of processors and often four NUMA zones each with high and lowmem.
 */
void show_mem(unsigned int filter)
{
	struct zone *zone;

	pr_err("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu"
	       " free:%lu\n slab:%lu mapped:%lu pagetables:%lu bounce:%lu"
	       " pagecache:%lu swap:%lu\n",
	       (global_page_state(NR_ACTIVE_ANON) +
		global_page_state(NR_ACTIVE_FILE)),
	       (global_page_state(NR_INACTIVE_ANON) +
		global_page_state(NR_INACTIVE_FILE)),
	       global_page_state(NR_FILE_DIRTY),
	       global_page_state(NR_WRITEBACK),
	       global_page_state(NR_UNSTABLE_NFS),
	       global_page_state(NR_FREE_PAGES),
	       (global_page_state(NR_SLAB_RECLAIMABLE) +
		global_page_state(NR_SLAB_UNRECLAIMABLE)),
	       global_page_state(NR_FILE_MAPPED),
	       global_page_state(NR_PAGETABLE),
	       global_page_state(NR_BOUNCE),
	       global_page_state(NR_FILE_PAGES),
	       get_nr_swap_pages());

	for_each_zone(zone) {
		unsigned long flags, order, total = 0, largest_order = -1;

		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++) {
			int nr = zone->free_area[order].nr_free;
			total += nr << order;
			if (nr)
				largest_order = order;
		}
		spin_unlock_irqrestore(&zone->lock, flags);
		pr_err("Node %d %7s: %lukB (largest %lukB)\n",
		       zone_to_nid(zone), zone->name, K(total),
		       largest_order != -1 ? K(1UL) << largest_order : 0);
	}
}
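
/*
 * With the per-zone format above, each line comes out like, e.g.
 * (values illustrative only):
 *
 *	Node 0  Normal: 163840kB (largest 4096kB)
 */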

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	/* <pfn,flags> stored as-is, to permit clearing entries */
	set_pte(pte, pfn_pte(pfn, flags));

	/*
	 * It's enough to flush this one mapping.
	 * This appears conservative since it is only called
	 * from __set_fixmap.
	 */
	local_flush_tlb_page(NULL, vaddr, PAGE_SIZE);
}

void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
}
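
/*
 * Illustrative use of __set_fixmap() (a sketch, not from this file);
 * "idx" stands in for whatever fixmap slot the caller owns:
 *
 *	__set_fixmap(idx, page_to_phys(page), PAGE_KERNEL);
 *	...access the mapping through fix_to_virt(idx)...
 *	__set_fixmap(idx, 0, __pgprot(0));	then clears the slot again
 */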

#if defined(CONFIG_HIGHPTE)
pte_t *_pte_offset_map(pmd_t *dir, unsigned long address)
{
	/* Byte offset of the L2 page table within its (kmapped) page. */
	unsigned long offset =
		(pmd_ptfn(*dir) << HV_LOG2_PAGE_TABLE_ALIGN) & ~PAGE_MASK;
	pte_t *pte = kmap_atomic(pmd_page(*dir)) + offset;
	return &pte[pte_index(address)];
}
#endif

/**
 * shatter_huge_page() - ensure a given address is mapped by a small page.
 *
 * This function converts a huge PTE mapping kernel LOWMEM into a bunch
 * of small PTEs with the same caching.  No cache flush required, but we
 * must do a global TLB flush.
 *
 * Any caller that wishes to modify a kernel mapping that might
 * have been made with a huge page should call this function,
 * since doing so properly avoids race conditions with installing the
 * newly-shattered page and then flushing all the TLB entries.
 *
 * @addr: Address at which to shatter any existing huge page.
 */
void shatter_huge_page(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long flags = 0;  /* happy compiler */
#ifdef __PAGETABLE_PMD_FOLDED
	struct list_head *pos;
#endif

	/* Get a pointer to the pmd entry that we need to change. */
	addr &= HPAGE_MASK;
	BUG_ON(pgd_addr_invalid(addr));
	BUG_ON(addr < PAGE_OFFSET);  /* only for kernel LOWMEM */
	pgd = swapper_pg_dir + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	BUG_ON(!pud_present(*pud));
	pmd = pmd_offset(pud, addr);
	BUG_ON(!pmd_present(*pmd));
	if (!pmd_huge_page(*pmd))
		return;

	/*
	 * Grab the pgd_lock, since we may need it to walk the pgd_list,
	 * and since we need some kind of lock here to avoid races.
	 */
	spin_lock_irqsave(&pgd_lock, flags);
	if (!pmd_huge_page(*pmd)) {
		/* Lost the race to convert the huge page. */
		spin_unlock_irqrestore(&pgd_lock, flags);
		return;
	}

	/* Shatter the huge page into the preallocated L2 page table. */
	pmd_populate_kernel(&init_mm, pmd,
			    get_prealloc_pte(pte_pfn(*(pte_t *)pmd)));

#ifdef __PAGETABLE_PMD_FOLDED
	/* Walk every pgd on the system and update the pmd there. */
	list_for_each(pos, &pgd_list) {
		pmd_t *copy_pmd;
		pgd = list_to_pgd(pos) + pgd_index(addr);
		pud = pud_offset(pgd, addr);
		copy_pmd = pmd_offset(pud, addr);
		__set_pmd(copy_pmd, *pmd);
	}
#endif

	/* Tell every cpu to notice the change. */
	flush_remote(0, 0, NULL, addr, HPAGE_SIZE, HPAGE_SIZE,
		     cpu_possible_mask, NULL, 0);

	/* Hold the lock until the TLB flush is finished to avoid races. */
	spin_unlock_irqrestore(&pgd_lock, flags);
}
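
/*
 * Illustrative call site for shatter_huge_page() (a sketch, not from this
 * file): before changing the protections of one small kernel page, make
 * sure it is no longer covered by a huge mapping, then edit its own PTE:
 *
 *	shatter_huge_page(addr);
 *	ptep = virt_to_pte(NULL, addr);
 *	set_pte(ptep, pte_modify(*ptep, prot));
 */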

/*
 * List of all pgd's needed so it can invalidate entries in both cached
 * and uncached pgd's. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 *
 * The locking scheme was chosen on the basis of manfred's
 * recommendations and having no core impact whatsoever.
 */
DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

static inline void pgd_list_add(pgd_t *pgd)
{
	list_add(pgd_to_list(pgd), &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	list_del(pgd_to_list(pgd));
}

#define KERNEL_PGD_INDEX_START pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_INDEX_START)

static void pgd_ctor(pgd_t *pgd)
{
	unsigned long flags;

	memset(pgd, 0, KERNEL_PGD_INDEX_START*sizeof(pgd_t));
	spin_lock_irqsave(&pgd_lock, flags);

#ifndef __tilegx__
	/*
	 * Check that the user interrupt vector has no L2.
	 * It never should for the swapper, and new page tables
	 * should always start with an empty user interrupt vector.
	 */
	BUG_ON(((u64 *)swapper_pg_dir)[pgd_index(MEM_USER_INTRPT)] != 0);
#endif

	memcpy(pgd + KERNEL_PGD_INDEX_START,
	       swapper_pg_dir + KERNEL_PGD_INDEX_START,
	       KERNEL_PGD_PTRS * sizeof(pgd_t));

	pgd_list_add(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

static void pgd_dtor(pgd_t *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
	if (pgd)
		pgd_ctor(pgd);
	return pgd;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_dtor(pgd);
	kmem_cache_free(pgd_cache, pgd);
}

#define L2_USER_PGTABLE_PAGES (1 << L2_USER_PGTABLE_ORDER)

struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	gfp_t flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO;
	struct page *p;
#if L2_USER_PGTABLE_ORDER > 0
	int i;
#endif

#ifdef CONFIG_HIGHPTE
	flags |= __GFP_HIGHMEM;
#endif

	p = alloc_pages(flags, L2_USER_PGTABLE_ORDER);
	if (p == NULL)
		return NULL;

#if L2_USER_PGTABLE_ORDER > 0
	/*
	 * Make every page have a page_count() of one, not just the first.
	 * We don't use __GFP_COMP since it doesn't look like it works
	 * correctly with tlb_remove_page().
	 */
	for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) {
		init_page_count(p+i);
		inc_zone_page_state(p+i, NR_PAGETABLE);
	}
#endif

	pgtable_page_ctor(p);
	return p;
}

/*
 * Free page immediately (used in __pte_alloc if we raced with another
 * process).  We have to correct whatever pte_alloc_one() did before
 * returning the pages to the allocator.
 */
void pte_free(struct mm_struct *mm, struct page *p)
{
	int i;

	pgtable_page_dtor(p);
	__free_page(p);
	for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) {
		__free_page(p+i);
		dec_zone_page_state(p+i, NR_PAGETABLE);
	}
}

void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
		    unsigned long address)
{
	int i;

	pgtable_page_dtor(pte);
	tlb_remove_page(tlb, pte);
	for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) {
		tlb_remove_page(tlb, pte + i);
		dec_zone_page_state(pte + i, NR_PAGETABLE);
	}
}

#ifndef __tilegx__

/*
 * FIXME: needs to be atomic vs hypervisor writes.  For now we make the
 * window of vulnerability a bit smaller by doing an unlocked 8-bit update.
 */
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
#if HV_PTE_INDEX_ACCESSED < 8 || HV_PTE_INDEX_ACCESSED >= 16
# error Code assumes HV_PTE "accessed" bit in second byte
#endif
	u8 *tmp = (u8 *)ptep;
	u8 second_byte = tmp[1];
	if (!(second_byte & (1 << (HV_PTE_INDEX_ACCESSED - 8))))
		return 0;
	tmp[1] = second_byte & ~(1 << (HV_PTE_INDEX_ACCESSED - 8));
	return 1;
}
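
/*
 * Layout behind the 8-bit update above: the build-time check pins
 * HV_PTE_INDEX_ACCESSED into bits 8..15, i.e. byte 1 of the little-endian
 * PTE, so within tmp[1] the accessed bit is (HV_PTE_INDEX_ACCESSED - 8)
 * and a single-byte store clears it without disturbing the flags that
 * live in the neighboring bytes.
 */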

/*
 * This implementation is atomic vs hypervisor writes, since the hypervisor
 * always writes the low word (where "accessed" and "dirty" are) and this
 * routine only writes the high word.
 */
void ptep_set_wrprotect(struct mm_struct *mm,
			unsigned long addr, pte_t *ptep)
{
#if HV_PTE_INDEX_WRITABLE < 32
# error Code assumes HV_PTE "writable" bit in high word
#endif
	u32 *tmp = (u32 *)ptep;
	tmp[1] = tmp[1] & ~(1 << (HV_PTE_INDEX_WRITABLE - 32));
}

#endif /* !__tilegx__ */

pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_addr_invalid(addr))
		return NULL;

	pgd = mm ? pgd_offset(mm, addr) : swapper_pg_dir + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_huge_page(*pmd))
		return (pte_t *)pmd;
	if (!pmd_present(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);
}
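
/*
 * Illustrative use of virt_to_pte() (a sketch): with a NULL mm the walk
 * uses swapper_pg_dir, so a kernel lowmem address can be probed like:
 *
 *	pte_t *ptep = virt_to_pte(NULL, (unsigned long)va);
 *	if (ptep != NULL && pte_present(*ptep))
 *		...the address is mapped...
 */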

pgprot_t set_remote_cache_cpu(pgprot_t prot, int cpu)
{
	unsigned int width = smp_width;
	int x = cpu % width;
	int y = cpu / width;
	BUG_ON(y >= smp_height);
	BUG_ON(hv_pte_get_mode(prot) != HV_PTE_MODE_CACHE_TILE_L3);
	BUG_ON(cpu < 0 || cpu >= NR_CPUS);
	BUG_ON(!cpu_is_valid_lotar(cpu));
	return hv_pte_set_lotar(prot, HV_XY_TO_LOTAR(x, y));
}

int get_remote_cache_cpu(pgprot_t prot)
{
	HV_LOTAR lotar = hv_pte_get_lotar(prot);
	int x = HV_LOTAR_X(lotar);
	int y = HV_LOTAR_Y(lotar);
	BUG_ON(hv_pte_get_mode(prot) != HV_PTE_MODE_CACHE_TILE_L3);
	return x + y * smp_width;
}
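
/*
 * The two routines above are inverses: cpus are numbered x + y * smp_width
 * across the grid, so e.g. on an 8x8 mesh cpu 10 maps to (x, y) == (2, 1),
 * which is exactly the pair the lotar encodes and decodes.
 */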

/*
 * Convert a kernel VA to a PA and homing information.
 */
int va_to_cpa_and_pte(void *va, unsigned long long *cpa, pte_t *pte)
{
	struct page *page = virt_to_page(va);
	pte_t null_pte = { 0 };

	*cpa = __pa(va);

	/* Note that this is not writing a page table, just returning a pte. */
	*pte = pte_set_home(null_pte, page_home(page));

	return 0; /* return non-zero if not hfh? */
}
EXPORT_SYMBOL(va_to_cpa_and_pte);
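
/*
 * Illustrative use of va_to_cpa_and_pte() (a sketch): fetching the client
 * physical address and homing PTE for a kernel buffer, e.g. before
 * describing that buffer to the hypervisor:
 *
 *	unsigned long long cpa;
 *	pte_t pte;
 *	va_to_cpa_and_pte(buf, &cpa, &pte);
 */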

void __set_pte(pte_t *ptep, pte_t pte)
{
#ifdef __tilegx__
	*ptep = pte;
#else
# if HV_PTE_INDEX_PRESENT >= 32 || HV_PTE_INDEX_MIGRATING >= 32
#  error Must write the present and migrating bits last
# endif
	if (pte_present(pte)) {
		((u32 *)ptep)[1] = (u32)(pte_val(pte) >> 32);
		barrier();
		((u32 *)ptep)[0] = (u32)(pte_val(pte));
	} else {
		((u32 *)ptep)[0] = (u32)(pte_val(pte));
		barrier();
		((u32 *)ptep)[1] = (u32)(pte_val(pte) >> 32);
	}
#endif /* __tilegx__ */
}
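
/*
 * The store ordering in __set_pte() is what makes the two 32-bit halves
 * safe on tilepro: when a PTE becomes valid, the high word lands before
 * the low word that carries the present and migrating bits; when it is
 * torn down, those bits are cleared first.  A concurrent walker therefore
 * never observes present set alongside a half-written frame number.
 */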

void set_pte(pte_t *ptep, pte_t pte)
{
	struct page *page = pfn_to_page(pte_pfn(pte));

	/* Update the home of a PTE if necessary */
	pte = pte_set_home(pte, page_home(page));

	__set_pte(ptep, pte);
}

/* Can this mm load a PTE with cached_priority set? */
static inline int mm_is_priority_cached(struct mm_struct *mm)
{
	return mm->context.priority_cached;
}

/*
 * Add a priority mapping to an mm_context and
 * notify the hypervisor if this is the first one.
 */
void start_mm_caching(struct mm_struct *mm)
{
	if (!mm_is_priority_cached(mm)) {
		mm->context.priority_cached = -1U;
		hv_set_caching(-1U);
	}
}

/*
 * Validate and return the priority_cached flag.  We know if it's zero
 * that we don't need to scan, since we immediately set it non-zero
 * when we first consider a MAP_CACHE_PRIORITY mapping.
 *
 * We only _try_ to acquire the mmap_sem semaphore; if we can't acquire it,
 * since we're in an interrupt context (servicing switch_mm) we don't
 * worry about it and don't unset the "priority_cached" field.
 * Presumably we'll come back later and have more luck and clear
 * the value then; for now we'll just keep the cache marked for priority.
 */
static unsigned int update_priority_cached(struct mm_struct *mm)
{
	if (mm->context.priority_cached && down_write_trylock(&mm->mmap_sem)) {
		struct vm_area_struct *vm;
		for (vm = mm->mmap; vm; vm = vm->vm_next) {
			if (hv_pte_get_cached_priority(vm->vm_page_prot))
				break;
		}
		if (vm == NULL)
			mm->context.priority_cached = 0;
		up_write(&mm->mmap_sem);
	}
	return mm->context.priority_cached;
}

/* Set caching correctly for an mm that we are switching to. */
void check_mm_caching(struct mm_struct *prev, struct mm_struct *next)
{
	if (!mm_is_priority_cached(next)) {
		/*
		 * If the new mm doesn't use priority caching, just see if we
		 * need the hv_set_caching(), or can assume it's already zero.
		 */
		if (mm_is_priority_cached(prev))
			hv_set_caching(0);
	} else {
		hv_set_caching(update_priority_cached(next));
	}
}
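
/*
 * Taken together: start_mm_caching() pessimistically marks both the mm
 * and the hypervisor as priority-cached, and update_priority_cached()
 * later rescans the VMAs and clears the mark once no MAP_CACHE_PRIORITY
 * mapping remains.
 */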

#if CHIP_HAS_MMIO()

/* Map an arbitrary MMIO address, homed according to pgprot, into VA space. */
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   pgprot_t home)
{
	void *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t pgprot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/* Create a read/write, MMIO VA mapping homed at the requested shim. */
	pgprot = PAGE_KERNEL;
	pgprot = hv_pte_set_mode(pgprot, HV_PTE_MODE_MMIO);
	pgprot = hv_pte_set_lotar(pgprot, hv_pte_get_lotar(home));

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP /* | other flags? */);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = area->addr;
	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
			       phys_addr, pgprot)) {
		remove_vm_area((void *)(PAGE_MASK & (unsigned long)addr));
		return NULL;
	}
	return (__force void __iomem *) (offset + (char *)addr);
}
EXPORT_SYMBOL(ioremap_prot);
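
/*
 * Illustrative use of ioremap_prot() (a sketch; "regs_pa" and "home" are
 * stand-ins for a real shim address and homing pgprot):
 *
 *	void __iomem *regs = ioremap_prot(regs_pa, 0x1000, home);
 *	if (regs != NULL) {
 *		...readl()/writel() against regs...
 *		iounmap(regs);
 *	}
 */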

/* Map a PCI MMIO bus address into VA space. */
void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
{
	panic("ioremap for PCI MMIO is not supported");
}
EXPORT_SYMBOL(ioremap);

/* Unmap an MMIO VA mapping. */
void iounmap(volatile void __iomem *addr_in)
{
	volatile void __iomem *addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr_in);
#if 1
	vunmap((void * __force)addr);
#else
	/* x86 uses this complicated flow instead of vunmap().  Is
	 * there any particular reason we should do the same? */
	struct vm_struct *p, *o;

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		pr_err("iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
#endif
}
EXPORT_SYMBOL(iounmap);

#endif /* CHIP_HAS_MMIO() */