/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>
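
/*
 * Scale a count of pages to kB: e.g. with 4 kB pages (PAGE_SHIFT == 12)
 * this is a shift by 2, so K(25) == 100; with 64 kB pages it would be a
 * shift by 6.
 */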
#define K(x) ((x) << (PAGE_SHIFT-10))

/*
 * The normal show_free_areas() is too verbose on Tile, with dozens
 * of processors and often four NUMA zones each with high and lowmem.
 */
void show_mem(unsigned int filter)
{
	struct zone *zone;

	pr_err("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu"
	       " free:%lu\n slab:%lu mapped:%lu pagetables:%lu bounce:%lu"
	       " pagecache:%lu swap:%lu\n",
	       (global_page_state(NR_ACTIVE_ANON) +
		global_page_state(NR_ACTIVE_FILE)),
	       (global_page_state(NR_INACTIVE_ANON) +
		global_page_state(NR_INACTIVE_FILE)),
	       global_page_state(NR_FILE_DIRTY),
	       global_page_state(NR_WRITEBACK),
	       global_page_state(NR_UNSTABLE_NFS),
	       global_page_state(NR_FREE_PAGES),
	       (global_page_state(NR_SLAB_RECLAIMABLE) +
		global_page_state(NR_SLAB_UNRECLAIMABLE)),
	       global_page_state(NR_FILE_MAPPED),
	       global_page_state(NR_PAGETABLE),
	       global_page_state(NR_BOUNCE),
	       global_page_state(NR_FILE_PAGES),
	       nr_swap_pages);

	for_each_zone(zone) {
		unsigned long flags, order, total = 0, largest_order = -1;

		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++) {
			int nr = zone->free_area[order].nr_free;
			total += nr << order;
			if (nr)
				largest_order = order;
		}
		spin_unlock_irqrestore(&zone->lock, flags);
		pr_err("Node %d %7s: %lukB (largest %luKb)\n",
		       zone_to_nid(zone), zone->name,
		       K(total), largest_order ? K(1UL) << largest_order : 0);
	}
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	/* <pfn,flags> stored as-is, to permit clearing entries */
	set_pte(pte, pfn_pte(pfn, flags));

	/*
	 * It's enough to flush this one mapping.
	 * This appears conservative since it is only called
	 * from a single place, __set_fixmap().
	 */
	local_flush_tlb_page(NULL, vaddr, PAGE_SIZE);
}

void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
}

#if defined(CONFIG_HIGHPTE)
pte_t *_pte_offset_map(pmd_t *dir, unsigned long address)
{
	pte_t *pte = kmap_atomic(pmd_page(*dir)) +
		(pmd_ptfn(*dir) << HV_LOG2_PAGE_TABLE_ALIGN) & ~PAGE_MASK;
	return &pte[pte_index(address)];
}
#endif

/**
 * shatter_huge_page() - ensure a given address is mapped by a small page.
 *
 * This function converts a huge PTE mapping kernel LOWMEM into a bunch
 * of small PTEs with the same caching.  No cache flush required, but we
 * must do a global TLB flush.
 *
 * Any caller that wishes to modify a kernel mapping that might
 * have been made with a huge page should call this function,
 * since doing so properly avoids race conditions with installing the
 * newly-shattered page and then flushing all the TLB entries.
 *
 * @addr: Address at which to shatter any existing huge page.
 */
void shatter_huge_page(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long flags = 0;  /* happy compiler */
#ifdef __PAGETABLE_PMD_FOLDED
	struct list_head *pos;
#endif

	/* Get a pointer to the pmd entry that we need to change. */
	addr &= HPAGE_MASK;
	BUG_ON(pgd_addr_invalid(addr));
	BUG_ON(addr < PAGE_OFFSET);  /* only for kernel LOWMEM */
	pgd = swapper_pg_dir + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	BUG_ON(!pud_present(*pud));
	pmd = pmd_offset(pud, addr);
	BUG_ON(!pmd_present(*pmd));
	if (!pmd_huge_page(*pmd))
		return;

	spin_lock_irqsave(&init_mm.page_table_lock, flags);
	if (!pmd_huge_page(*pmd)) {
		/* Lost the race to convert the huge page. */
		spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
		return;
	}

	/* Shatter the huge page into the preallocated L2 page table. */
	pmd_populate_kernel(&init_mm, pmd,
			    get_prealloc_pte(pte_pfn(*(pte_t *)pmd)));

#ifdef __PAGETABLE_PMD_FOLDED
	/* Walk every pgd on the system and update the pmd there. */
	spin_lock(&pgd_lock);
	list_for_each(pos, &pgd_list) {
		pmd_t *copy_pmd;
		pgd = list_to_pgd(pos) + pgd_index(addr);
		pud = pud_offset(pgd, addr);
		copy_pmd = pmd_offset(pud, addr);
		__set_pmd(copy_pmd, *pmd);
	}
	spin_unlock(&pgd_lock);
#endif

	/* Tell every cpu to notice the change. */
	flush_remote(0, 0, NULL, addr, HPAGE_SIZE, HPAGE_SIZE,
		     cpu_possible_mask, NULL, 0);

	/* Hold the lock until the TLB flush is finished to avoid races. */
	spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
}
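
/*
 * Illustrative call sequence (a sketch, not taken verbatim from a caller):
 * code that wants to retarget the caching of one small page inside a
 * region that LOWMEM maps with huge pages would do roughly
 *
 *	shatter_huge_page(kva);
 *	pte = virt_to_pte(NULL, kva);
 *	set_pte(pte, pte_set_home(*pte, new_home));
 *
 * i.e. shatter first, then rewrite the now-small-page PTE; "kva" and
 * "new_home" are hypothetical locals of the caller.
 */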

/*
 * A list of all pgd's is needed so we can invalidate entries in both
 * cached and uncached pgd's. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 *
 * The lock is always taken with interrupts disabled, unlike on x86
 * and other platforms, because we need to take the lock in
 * shatter_huge_page(), which may be called from an interrupt context.
 * We are not at risk from the tlbflush IPI deadlock that was seen on
 * x86, since we use the flush_remote() API to have the hypervisor do
 * the TLB flushes regardless of irq disabling.
 */
DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

static inline void pgd_list_add(pgd_t *pgd)
{
	list_add(pgd_to_list(pgd), &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	list_del(pgd_to_list(pgd));
}

#define KERNEL_PGD_INDEX_START pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_INDEX_START)

static void pgd_ctor(pgd_t *pgd)
{
	unsigned long flags;

	memset(pgd, 0, KERNEL_PGD_INDEX_START*sizeof(pgd_t));
	spin_lock_irqsave(&pgd_lock, flags);

#ifndef __tilegx__
	/*
	 * Check that the user interrupt vector has no L2.
	 * It never should for the swapper, and new page tables
	 * should always start with an empty user interrupt vector.
	 */
	BUG_ON(((u64 *)swapper_pg_dir)[pgd_index(MEM_USER_INTRPT)] != 0);
#endif

	memcpy(pgd + KERNEL_PGD_INDEX_START,
	       swapper_pg_dir + KERNEL_PGD_INDEX_START,
	       KERNEL_PGD_PTRS * sizeof(pgd_t));

	pgd_list_add(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

static void pgd_dtor(pgd_t *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
	if (pgd)
		pgd_ctor(pgd);
	return pgd;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_dtor(pgd);
	kmem_cache_free(pgd_cache, pgd);
}

#define L2_USER_PGTABLE_PAGES (1 << L2_USER_PGTABLE_ORDER)
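
/*
 * Note: if L2_USER_PGTABLE_ORDER is, say, 1, a user L2 page table is an
 * order-1 (two-page) allocation, so the helpers below must also fix up the
 * page counts and NR_PAGETABLE accounting for each page after the first.
 */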

struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	gfp_t flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO;
	struct page *p;
#if L2_USER_PGTABLE_ORDER > 0
	int i;
#endif

#ifdef CONFIG_HIGHPTE
	flags |= __GFP_HIGHMEM;
#endif

	p = alloc_pages(flags, L2_USER_PGTABLE_ORDER);
	if (p == NULL)
		return NULL;

#if L2_USER_PGTABLE_ORDER > 0
	/*
	 * Make every page have a page_count() of one, not just the first.
	 * We don't use __GFP_COMP since it doesn't look like it works
	 * correctly with tlb_remove_page().
	 */
	for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) {
		init_page_count(p+i);
		inc_zone_page_state(p+i, NR_PAGETABLE);
	}
#endif

	pgtable_page_ctor(p);
	return p;
}

/*
 * Free page immediately (used in __pte_alloc if we raced with another
 * process).  We have to correct whatever pte_alloc_one() did before
 * returning the pages to the allocator.
 */
void pte_free(struct mm_struct *mm, struct page *p)
{
	int i;

	pgtable_page_dtor(p);
	__free_page(p);

	for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) {
		__free_page(p+i);
		dec_zone_page_state(p+i, NR_PAGETABLE);
	}
}

void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
		    unsigned long address)
{
	int i;

	pgtable_page_dtor(pte);
	tlb_remove_page(tlb, pte);

	for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) {
		tlb_remove_page(tlb, pte + i);
		dec_zone_page_state(pte + i, NR_PAGETABLE);
	}
}

#ifndef __tilegx__

/*
 * FIXME: needs to be atomic vs hypervisor writes.  For now we make the
 * window of vulnerability a bit smaller by doing an unlocked 8-bit update.
 */
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
#if HV_PTE_INDEX_ACCESSED < 8 || HV_PTE_INDEX_ACCESSED >= 16
# error Code assumes HV_PTE "accessed" bit in second byte
#endif
	u8 *tmp = (u8 *)ptep;
	u8 second_byte = tmp[1];
	if (!(second_byte & (1 << (HV_PTE_INDEX_ACCESSED - 8))))
		return 0;
	tmp[1] = second_byte & ~(1 << (HV_PTE_INDEX_ACCESSED - 8));
	return 1;
}

/*
 * This implementation is atomic vs hypervisor writes, since the hypervisor
 * always writes the low word (where "accessed" and "dirty" are) and this
 * routine only writes the high word.
 */
void ptep_set_wrprotect(struct mm_struct *mm,
			unsigned long addr, pte_t *ptep)
{
#if HV_PTE_INDEX_WRITABLE < 32
# error Code assumes HV_PTE "writable" bit in high word
#endif
	u32 *tmp = (u32 *)ptep;
	tmp[1] = tmp[1] & ~(1 << (HV_PTE_INDEX_WRITABLE - 32));
}

#endif /* !__tilegx__ */

pte_t *virt_to_pte(struct mm_struct* mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_addr_invalid(addr))
		return NULL;

	pgd = mm ? pgd_offset(mm, addr) : swapper_pg_dir + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_huge_page(*pmd))
		return (pte_t *)pmd;
	if (!pmd_present(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);
}

pgprot_t set_remote_cache_cpu(pgprot_t prot, int cpu)
{
	unsigned int width = smp_width;
	int x = cpu % width;
	int y = cpu / width;
	BUG_ON(y >= smp_height);
	BUG_ON(hv_pte_get_mode(prot) != HV_PTE_MODE_CACHE_TILE_L3);
	BUG_ON(cpu < 0 || cpu >= NR_CPUS);
	BUG_ON(!cpu_is_valid_lotar(cpu));
	return hv_pte_set_lotar(prot, HV_XY_TO_LOTAR(x, y));
}
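
/*
 * Worked example for the lotar math above (illustrative numbers, assuming
 * an 8x8 mesh so smp_width == 8): cpu 10 gives x = 10 % 8 = 2 and
 * y = 10 / 8 = 1, so the PTE is homed on the tile at mesh coordinates
 * (2, 1), and get_remote_cache_cpu() below recovers 2 + 1 * 8 == 10.
 */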

int get_remote_cache_cpu(pgprot_t prot)
{
	HV_LOTAR lotar = hv_pte_get_lotar(prot);
	int x = HV_LOTAR_X(lotar);
	int y = HV_LOTAR_Y(lotar);
	BUG_ON(hv_pte_get_mode(prot) != HV_PTE_MODE_CACHE_TILE_L3);
	return x + y * smp_width;
}

/*
 * Convert a kernel VA to a PA and homing information.
 */
int va_to_cpa_and_pte(void *va, unsigned long long *cpa, pte_t *pte)
{
	struct page *page = virt_to_page(va);
	pte_t null_pte = { 0 };

	*cpa = __pa(va);

	/* Note that this is not writing a page table, just returning a pte. */
	*pte = pte_set_home(null_pte, page_home(page));

	return 0; /* return non-zero if not hfh? */
}
EXPORT_SYMBOL(va_to_cpa_and_pte);
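
/*
 * On the 32-bit chips a 64-bit PTE cannot be written with one store, so
 * __set_pte() below orders the two word stores: the word that holds the
 * "present" and "migrating" bits is written last when installing a valid
 * PTE, and first when writing a non-present one, so a concurrent walker
 * never sees a half-written entry that looks valid.
 */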

void __set_pte(pte_t *ptep, pte_t pte)
{
#ifdef __tilegx__
	*ptep = pte;
#else
# if HV_PTE_INDEX_PRESENT >= 32 || HV_PTE_INDEX_MIGRATING >= 32
# error Must write the present and migrating bits last
# endif
	if (pte_present(pte)) {
		((u32 *)ptep)[1] = (u32)(pte_val(pte) >> 32);
		barrier();
		((u32 *)ptep)[0] = (u32)(pte_val(pte));
	} else {
		((u32 *)ptep)[0] = (u32)(pte_val(pte));
		barrier();
		((u32 *)ptep)[1] = (u32)(pte_val(pte) >> 32);
	}
#endif /* __tilegx__ */
}

void set_pte(pte_t *ptep, pte_t pte)
{
	if (pte_present(pte) &&
	    (!CHIP_HAS_MMIO() || hv_pte_get_mode(pte) != HV_PTE_MODE_MMIO)) {
		/* The PTE actually references physical memory. */
		unsigned long pfn = pte_pfn(pte);
		if (pfn_valid(pfn)) {
			/* Update the home of the PTE from the struct page. */
			pte = pte_set_home(pte, page_home(pfn_to_page(pfn)));
		} else if (hv_pte_get_mode(pte) == 0) {
			/* remap_pfn_range(), etc, must supply PTE mode. */
			panic("set_pte(): out-of-range PFN and mode 0\n");
		}
	}

	__set_pte(ptep, pte);
}

/* Can this mm load a PTE with cached_priority set? */
static inline int mm_is_priority_cached(struct mm_struct *mm)
{
	return mm->context.priority_cached;
}

/*
 * Add a priority mapping to an mm_context and
 * notify the hypervisor if this is the first one.
 */
void start_mm_caching(struct mm_struct *mm)
{
	if (!mm_is_priority_cached(mm)) {
		mm->context.priority_cached = -1U;
		hv_set_caching(-1U);
	}
}

/*
 * Validate and return the priority_cached flag.  We know if it's zero
 * that we don't need to scan, since we immediately set it non-zero
 * when we first consider a MAP_CACHE_PRIORITY mapping.
 *
 * We only _try_ to acquire the mmap_sem; if we can't acquire it,
 * since we're in an interrupt context (servicing switch_mm) we don't
 * worry about it and don't unset the "priority_cached" field.
 * Presumably we'll come back later and have more luck and clear
 * the value then; for now we'll just keep the cache marked for priority.
 */
static unsigned int update_priority_cached(struct mm_struct *mm)
{
	if (mm->context.priority_cached && down_write_trylock(&mm->mmap_sem)) {
		struct vm_area_struct *vm;
		for (vm = mm->mmap; vm; vm = vm->vm_next) {
			if (hv_pte_get_cached_priority(vm->vm_page_prot))
				break;
		}
		if (vm == NULL)
			mm->context.priority_cached = 0;
		up_write(&mm->mmap_sem);
	}
	return mm->context.priority_cached;
}

/* Set caching correctly for an mm that we are switching to. */
void check_mm_caching(struct mm_struct *prev, struct mm_struct *next)
{
	if (!mm_is_priority_cached(next)) {
		/*
		 * If the new mm doesn't use priority caching, just see if we
		 * need the hv_set_caching(), or can assume it's already zero.
		 */
		if (mm_is_priority_cached(prev))
			hv_set_caching(0);
	} else {
		hv_set_caching(update_priority_cached(next));
	}
}
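
/*
 * In short: start_mm_caching() marks an mm (and the hypervisor) as using
 * priority caching the first time a MAP_CACHE_PRIORITY mapping appears,
 * and on each mm switch check_mm_caching() either turns caching off again
 * (the incoming mm never used it) or re-validates the flag through
 * update_priority_cached(), which clears it once no VMA still has the
 * cached-priority bit set.
 */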

#if CHIP_HAS_MMIO()

/* Map an arbitrary MMIO address, homed according to pgprot, into VA space. */
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   pgprot_t home)
{
	void *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t pgprot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/* Create a read/write, MMIO VA mapping homed at the requested shim. */
	pgprot = PAGE_KERNEL;
	pgprot = hv_pte_set_mode(pgprot, HV_PTE_MODE_MMIO);
	pgprot = hv_pte_set_lotar(pgprot, hv_pte_get_lotar(home));

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP /* | other flags? */);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = area->addr;
	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
			       phys_addr, pgprot)) {
		remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
		return NULL;
	}
	return (__force void __iomem *) (offset + (char *)addr);
}
EXPORT_SYMBOL(ioremap_prot);
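
/*
 * Typical use of ioremap_prot() (an illustrative sketch; the bus address,
 * size, and register offset below are made-up values, and "home" would
 * come from the device/hypervisor configuration):
 *
 *	void __iomem *regs = ioremap_prot(bus_addr, 0x1000, home);
 *	if (regs == NULL)
 *		return -ENOMEM;
 *	writel(val, regs + REG_CTRL_OFFSET);
 *	...
 *	iounmap(regs);
 */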

/* Map a PCI MMIO bus address into VA space. */
void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
{
	panic("ioremap for PCI MMIO is not supported");
}
EXPORT_SYMBOL(ioremap);

/* Unmap an MMIO VA mapping. */
void iounmap(volatile void __iomem *addr_in)
{
	volatile void __iomem *addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr_in);
#if 1
	vunmap((void * __force)addr);
#else
	/* x86 uses this complicated flow instead of vunmap().  Is
	 * there any particular reason we should do the same? */
	struct vm_struct *p, *o;

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (p == NULL) {
		pr_err("iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
#endif
}
EXPORT_SYMBOL(iounmap);

#endif /* CHIP_HAS_MMIO() */