smaller kernel memory footprint results from using a smaller
value on chips with fewer tiles.
+if TILEGX
+
+choice
+ prompt "Kernel page size"
+ default PAGE_SIZE_64KB
+ help
+ This lets you select the page size of the kernel. For best
+ performance on memory-intensive applications, a page size of 64KB
+ is recommended. For workloads involving many small files, many
+ connections, etc., it may be better to select 16KB, which uses
+ memory more efficiently at some cost in TLB performance.
+
+ Note that this option is TILE-Gx specific; currently
+ TILEPro page size is set by rebuilding the hypervisor.
+
+config PAGE_SIZE_16KB
+ bool "16KB"
+
+config PAGE_SIZE_64KB
+ bool "64KB"
+
+endchoice
+
+endif
+
source "kernel/time/Kconfig"
source "kernel/Kconfig.hz"
generic-y += irq_regs.h
generic-y += kdebug.h
generic-y += local.h
-generic-y += module.h
generic-y += msgbuf.h
generic-y += mutex.h
generic-y += param.h
* Written under the mmap_sem semaphore; read atomically without
* the semaphore, but the value is set conservatively.
*/
- unsigned int priority_cached;
+ unsigned long priority_cached;
};
typedef struct mm_context mm_context_t;
return 0;
}
-/* Note that arch/tile/kernel/head.S also calls hv_install_context() */
+/*
+ * Note that arch/tile/kernel/head_NN.S and arch/tile/mm/migrate_NN.S
+ * also call hv_install_context().
+ */
static inline void __install_page_table(pgd_t *pgdir, int asid, pgprot_t prot)
{
/* FIXME: DIRECTIO should not always be set. */
- int rc = hv_install_context(__pa(pgdir), prot, asid, HV_CTX_DIRECTIO);
+ int rc = hv_install_context(__pa(pgdir), prot, asid,
+ HV_CTX_DIRECTIO | CTX_PAGE_FLAG);
if (rc < 0)
panic("hv_install_context failed: %d", rc);
}
--- /dev/null
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _ASM_TILE_MODULE_H
+#define _ASM_TILE_MODULE_H
+
+#include <arch/chip.h>
+
+#include <asm-generic/module.h>
+
+/* We can't use modules built with different page sizes. */
+#if defined(CONFIG_PAGE_SIZE_16KB)
+# define MODULE_PGSZ " 16KB"
+#elif defined(CONFIG_PAGE_SIZE_64KB)
+# define MODULE_PGSZ " 64KB"
+#else
+# define MODULE_PGSZ ""
+#endif
+
+/* We don't really support non-SMP builds, so tag the vermagic if someone tries. */
+#ifdef CONFIG_SMP
+#define MODULE_NOSMP ""
+#else
+#define MODULE_NOSMP " nosmp"
+#endif
+
+#define MODULE_ARCH_VERMAGIC CHIP_ARCH_NAME MODULE_PGSZ MODULE_NOSMP
+
+#endif /* _ASM_TILE_MODULE_H */
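For illustration (not part of the patch), here is how the arch vermagic component composed above plays out; the "tilegx" string is an assumption about what CHIP_ARCH_NAME expands to:

/*
 * Illustrative only, assuming CHIP_ARCH_NAME expands to "tilegx":
 *
 *   CONFIG_PAGE_SIZE_64KB + CONFIG_SMP  ->  "tilegx 64KB"
 *   CONFIG_PAGE_SIZE_16KB + CONFIG_SMP  ->  "tilegx 16KB"
 *   default page size, !CONFIG_SMP      ->  "tilegx nosmp"
 *
 * The generic module loader folds MODULE_ARCH_VERMAGIC into the
 * kernel's vermagic string and rejects any module whose string does
 * not match, so a module built for one page size simply fails to
 * load on a kernel built for another.
 */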
#include <arch/chip.h>
/* PAGE_SHIFT and HPAGE_SHIFT determine the page sizes. */
-#define PAGE_SHIFT HV_LOG2_PAGE_SIZE_SMALL
-#define HPAGE_SHIFT HV_LOG2_PAGE_SIZE_LARGE
+#if defined(CONFIG_PAGE_SIZE_16KB)
+#define PAGE_SHIFT 14
+#define CTX_PAGE_FLAG HV_CTX_PG_SM_16K
+#elif defined(CONFIG_PAGE_SIZE_64KB)
+#define PAGE_SHIFT 16
+#define CTX_PAGE_FLAG HV_CTX_PG_SM_64K
+#else
+#define PAGE_SHIFT HV_LOG2_DEFAULT_PAGE_SIZE_SMALL
+#define CTX_PAGE_FLAG 0
+#endif
+#define HPAGE_SHIFT HV_LOG2_DEFAULT_PAGE_SIZE_LARGE
#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/fixmap.h>
+#include <asm/page.h>
#include <hv/hypervisor.h>
/* Bits for the size of the second-level page table. */
-#define L2_KERNEL_PGTABLE_SHIFT \
- (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL + HV_LOG2_PTE_SIZE)
+#define L2_KERNEL_PGTABLE_SHIFT _HV_LOG2_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)
+
+/* How big is a kernel L2 page table? */
+#define L2_KERNEL_PGTABLE_SIZE (1UL << L2_KERNEL_PGTABLE_SHIFT)
/* We currently allocate user L2 page tables by page (unlike kernel L2s). */
-#if L2_KERNEL_PGTABLE_SHIFT < HV_LOG2_PAGE_SIZE_SMALL
-#define L2_USER_PGTABLE_SHIFT HV_LOG2_PAGE_SIZE_SMALL
+#if L2_KERNEL_PGTABLE_SHIFT < PAGE_SHIFT
+#define L2_USER_PGTABLE_SHIFT PAGE_SHIFT
#else
#define L2_USER_PGTABLE_SHIFT L2_KERNEL_PGTABLE_SHIFT
#endif
/* How many pages do we need, as an "order", for a user L2 page table? */
-#define L2_USER_PGTABLE_ORDER (L2_USER_PGTABLE_SHIFT - HV_LOG2_PAGE_SIZE_SMALL)
-
-/* How big is a kernel L2 page table? */
-#define L2_KERNEL_PGTABLE_SIZE (1 << L2_KERNEL_PGTABLE_SHIFT)
+#define L2_USER_PGTABLE_ORDER (L2_USER_PGTABLE_SHIFT - PAGE_SHIFT)
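For concreteness, a worked example (not part of the patch) of what these macros produce, assuming the 16MB huge page size and 8-byte PTEs used elsewhere in this series:

/*
 * Worked example, assuming HPAGE_SHIFT == 24 and HV_LOG2_PTE_SIZE == 3:
 *
 *   64KB pages: L2_KERNEL_PGTABLE_SHIFT = 3 + (24 - 16) = 11  ->  2KB
 *   16KB pages: L2_KERNEL_PGTABLE_SHIFT = 3 + (24 - 14) = 13  ->  8KB
 *
 * In both cases the kernel L2 table is smaller than a page, so
 * L2_USER_PGTABLE_SHIFT falls back to PAGE_SHIFT and
 * L2_USER_PGTABLE_ORDER is 0, i.e. one page per user L2 table.
 * The tilegx L1 tables introduced further down follow the same pattern.
 */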
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
static inline void pmd_populate_kernel(struct mm_struct *mm,
pmd_t *pmd, pte_t *ptep)
{
- set_pmd(pmd, ptfn_pmd(__pa(ptep) >> HV_LOG2_PAGE_TABLE_ALIGN,
+ set_pmd(pmd, ptfn_pmd(HV_CPA_TO_PTFN(__pa(ptep)),
__pgprot(_PAGE_PRESENT)));
}
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
pgtable_t page)
{
- set_pmd(pmd, ptfn_pmd(HV_PFN_TO_PTFN(page_to_pfn(page)),
+ set_pmd(pmd, ptfn_pmd(HV_CPA_TO_PTFN(PFN_PHYS(page_to_pfn(page))),
__pgprot(_PAGE_PRESENT)));
}
extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
-extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address);
-extern void pte_free(struct mm_struct *mm, struct page *pte);
+extern pgtable_t pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
+ int order);
+extern void pgtable_free(struct mm_struct *mm, struct page *pte, int order);
+
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
+ unsigned long address)
+{
+ return pgtable_alloc_one(mm, address, L2_USER_PGTABLE_ORDER);
+}
+
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
+{
+ pgtable_free(mm, pte, L2_USER_PGTABLE_ORDER);
+}
#define pmd_pgtable(pmd) pmd_page(pmd)
pte_free(mm, virt_to_page(pte));
}
-extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
- unsigned long address);
+extern void __pgtable_free_tlb(struct mmu_gather *tlb, struct page *pte,
+ unsigned long address, int order);
+static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
+ unsigned long address)
+{
+ __pgtable_free_tlb(tlb, pte, address, L2_USER_PGTABLE_ORDER);
+}
#define check_pgt_cache() do { } while (0)
void shatter_huge_page(unsigned long addr);
#ifdef __tilegx__
-/* We share a single page allocator for both L1 and L2 page tables. */
-#if HV_L1_SIZE != HV_L2_SIZE
-# error Rework assumption that L1 and L2 page tables are same size.
-#endif
-#define L1_USER_PGTABLE_ORDER L2_USER_PGTABLE_ORDER
+
#define pud_populate(mm, pud, pmd) \
pmd_populate_kernel((mm), (pmd_t *)(pud), (pte_t *)(pmd))
-#define pmd_alloc_one(mm, addr) \
- ((pmd_t *)page_to_virt(pte_alloc_one((mm), (addr))))
-#define pmd_free(mm, pmdp) \
- pte_free((mm), virt_to_page(pmdp))
-#define __pmd_free_tlb(tlb, pmdp, address) \
- __pte_free_tlb((tlb), virt_to_page(pmdp), (address))
+
+/* Bits for the size of the L1 (intermediate) page table. */
+#define L1_KERNEL_PGTABLE_SHIFT _HV_LOG2_L1_SIZE(HPAGE_SHIFT)
+
+/* How big is a kernel L1 page table? */
+#define L1_KERNEL_PGTABLE_SIZE (1UL << L1_KERNEL_PGTABLE_SHIFT)
+
+/* We currently allocate L1 page tables by page. */
+#if L1_KERNEL_PGTABLE_SHIFT < PAGE_SHIFT
+#define L1_USER_PGTABLE_SHIFT PAGE_SHIFT
+#else
+#define L1_USER_PGTABLE_SHIFT L1_KERNEL_PGTABLE_SHIFT
#endif
+/* How many pages do we need, as an "order", for an L1 page table? */
+#define L1_USER_PGTABLE_ORDER (L1_USER_PGTABLE_SHIFT - PAGE_SHIFT)
+
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+ struct page *p = pgtable_alloc_one(mm, address, L1_USER_PGTABLE_ORDER);
+ return (pmd_t *)page_to_virt(p);
+}
+
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmdp)
+{
+ pgtable_free(mm, virt_to_page(pmdp), L1_USER_PGTABLE_ORDER);
+}
+
+static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
+ unsigned long address)
+{
+ __pgtable_free_tlb(tlb, virt_to_page(pmdp), address,
+ L1_USER_PGTABLE_ORDER);
+}
+
+#endif /* __tilegx__ */
+
#endif /* _ASM_TILE_PGALLOC_H */
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
+#include <linux/pfn.h>
#include <asm/processor.h>
#include <asm/fixmap.h>
+#include <asm/page.h>
struct mm_struct;
struct vm_area_struct;
(pgprot_t) { ((oldprot).val & ~_PAGE_ALL) | (newprot).val }
/* Just setting the PFN to zero suffices. */
-#define pte_pgprot(x) hv_pte_set_pfn((x), 0)
+#define pte_pgprot(x) hv_pte_set_pa((x), 0)
/*
* For PTEs and PDEs, we must clear the Present bit first when
static inline unsigned long pte_pfn(pte_t pte)
{
- return hv_pte_get_pfn(pte);
+ return PFN_DOWN(hv_pte_get_pa(pte));
}
/* Set or get the remote cache cpu in a pgprot with remote caching. */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
- return hv_pte_set_pfn(prot, pfn);
+ return hv_pte_set_pa(prot, PFN_PHYS(pfn));
}
/* Support for priority mappings. */
* OK for pte_lockptr(), since we just end up with potentially one
* lock being used for several pte_t arrays.
*/
-#define pmd_page(pmd) pfn_to_page(HV_PTFN_TO_PFN(pmd_ptfn(pmd)))
+#define pmd_page(pmd) pfn_to_page(PFN_DOWN(HV_PTFN_TO_CPA(pmd_ptfn(pmd))))
static inline void pmd_clear(pmd_t *pmdp)
{
* The level-1 index is defined by the huge page size. A PGD is composed
* of PTRS_PER_PGD pgd_t's and is the top level of the page table.
*/
-#define PGDIR_SHIFT HV_LOG2_PAGE_SIZE_LARGE
-#define PGDIR_SIZE HV_PAGE_SIZE_LARGE
+#define PGDIR_SHIFT HPAGE_SHIFT
+#define PGDIR_SIZE HPAGE_SIZE
#define PGDIR_MASK (~(PGDIR_SIZE-1))
-#define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT))
-#define SIZEOF_PGD (PTRS_PER_PGD * sizeof(pgd_t))
+#define PTRS_PER_PGD _HV_L1_ENTRIES(HPAGE_SHIFT)
+#define PGD_INDEX(va) _HV_L1_INDEX(va, HPAGE_SHIFT)
+#define SIZEOF_PGD _HV_L1_SIZE(HPAGE_SHIFT)
/*
* The level-2 index is defined by the difference between the huge
* Note that the hypervisor docs use PTE for what we call pte_t, so
* this nomenclature is somewhat confusing.
*/
-#define PTRS_PER_PTE (1 << (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL))
-#define SIZEOF_PTE (PTRS_PER_PTE * sizeof(pte_t))
+#define PTRS_PER_PTE _HV_L2_ENTRIES(HPAGE_SHIFT, PAGE_SHIFT)
+#define PTE_INDEX(va) _HV_L2_INDEX(va, HPAGE_SHIFT, PAGE_SHIFT)
+#define SIZEOF_PTE _HV_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)
#ifndef __ASSEMBLY__
#define PGDIR_SIZE HV_L1_SPAN
#define PGDIR_MASK (~(PGDIR_SIZE-1))
#define PTRS_PER_PGD HV_L0_ENTRIES
-#define SIZEOF_PGD (PTRS_PER_PGD * sizeof(pgd_t))
+#define PGD_INDEX(va) HV_L0_INDEX(va)
+#define SIZEOF_PGD HV_L0_SIZE
/*
* The level-1 index is defined by the huge page size. A PMD is composed
* of PTRS_PER_PMD pmd_t's and is the middle level of the page table.
*/
-#define PMD_SHIFT HV_LOG2_PAGE_SIZE_LARGE
-#define PMD_SIZE HV_PAGE_SIZE_LARGE
+#define PMD_SHIFT HPAGE_SHIFT
+#define PMD_SIZE HPAGE_SIZE
#define PMD_MASK (~(PMD_SIZE-1))
-#define PTRS_PER_PMD (1 << (PGDIR_SHIFT - PMD_SHIFT))
-#define SIZEOF_PMD (PTRS_PER_PMD * sizeof(pmd_t))
+#define PTRS_PER_PMD _HV_L1_ENTRIES(HPAGE_SHIFT)
+#define PMD_INDEX(va) _HV_L1_INDEX(va, HPAGE_SHIFT)
+#define SIZEOF_PMD _HV_L1_SIZE(HPAGE_SHIFT)
/*
* The level-2 index is defined by the difference between the huge
* Note that the hypervisor docs use PTE for what we call pte_t, so
* this nomenclature is somewhat confusing.
*/
-#define PTRS_PER_PTE (1 << (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL))
-#define SIZEOF_PTE (PTRS_PER_PTE * sizeof(pte_t))
+#define PTRS_PER_PTE _HV_L2_ENTRIES(HPAGE_SHIFT, PAGE_SHIFT)
+#define PTE_INDEX(va) _HV_L2_INDEX(va, HPAGE_SHIFT, PAGE_SHIFT)
+#define SIZEOF_PTE _HV_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)
/*
- * Align the vmalloc area to an L2 page table, and leave a guard page
- * at the beginning and end. The vmalloc code also puts in an internal
+ * Align the vmalloc area to an L2 page table. Omit guard pages at
+ * the beginning and end for simplicity (particularly in the per-cpu
+ * memory allocation code). The vmalloc code puts in an internal
* guard page between each allocation.
*/
#define _VMALLOC_END HUGE_VMAP_BASE
-#define VMALLOC_END (_VMALLOC_END - PAGE_SIZE)
-#define VMALLOC_START (_VMALLOC_START + PAGE_SIZE)
+#define VMALLOC_END _VMALLOC_END
+#define VMALLOC_START _VMALLOC_START
#define HUGE_VMAP_END (HUGE_VMAP_BASE + PGDIR_SIZE)
* A pud_t points to a pmd_t array. Since we can have multiple per
* page, we don't have a one-to-one mapping of pud_t's to pages.
*/
-#define pud_page(pud) pfn_to_page(HV_PTFN_TO_PFN(pud_ptfn(pud)))
+#define pud_page(pud) pfn_to_page(PFN_DOWN(HV_PTFN_TO_CPA(pud_ptfn(pud))))
static inline unsigned long pud_index(unsigned long address)
{
* linux's "MAX_SKB_FRAGS", and presumably over-estimates by one, for
* our page size of exactly 65536. We add one for a "body" fragment.
*/
-#define LEPP_MAX_FRAGS (65536 / HV_PAGE_SIZE_SMALL + 2 + 1)
+#define LEPP_MAX_FRAGS (65536 / HV_DEFAULT_PAGE_SIZE_SMALL + 2 + 1)
/** Total number of bytes needed for an lepp_tso_cmd_t. */
#define LEPP_TSO_CMD_SIZE(num_frags, header_size) \
* The hypervisor's public API.
*/
-#ifndef _TILE_HV_H
-#define _TILE_HV_H
+#ifndef _HV_HV_H
+#define _HV_HV_H
#include <arch/chip.h>
*/
#define HV_L1_SPAN (__HV_SIZE_ONE << HV_LOG2_L1_SPAN)
-/** The log2 of the size of small pages, in bytes. This value should
- * be verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL).
+/** The log2 of the initial size of small pages, in bytes.
+ * See HV_DEFAULT_PAGE_SIZE_SMALL.
*/
-#define HV_LOG2_PAGE_SIZE_SMALL 16
+#define HV_LOG2_DEFAULT_PAGE_SIZE_SMALL 16
-/** The size of small pages, in bytes. This value should be verified
+/** The initial size of small pages, in bytes. This value should be verified
* at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL).
+ * It may also be modified when installing a new context.
*/
-#define HV_PAGE_SIZE_SMALL (__HV_SIZE_ONE << HV_LOG2_PAGE_SIZE_SMALL)
+#define HV_DEFAULT_PAGE_SIZE_SMALL \
+ (__HV_SIZE_ONE << HV_LOG2_DEFAULT_PAGE_SIZE_SMALL)
-/** The log2 of the size of large pages, in bytes. This value should be
- * verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE).
+/** The log2 of the initial size of large pages, in bytes.
+ * See HV_DEFAULT_PAGE_SIZE_LARGE.
*/
-#define HV_LOG2_PAGE_SIZE_LARGE 24
+#define HV_LOG2_DEFAULT_PAGE_SIZE_LARGE 24
-/** The size of large pages, in bytes. This value should be verified
+/** The initial size of large pages, in bytes. This value should be verified
* at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE).
+ * It may also be modified when installing a new context.
*/
-#define HV_PAGE_SIZE_LARGE (__HV_SIZE_ONE << HV_LOG2_PAGE_SIZE_LARGE)
+#define HV_DEFAULT_PAGE_SIZE_LARGE \
+ (__HV_SIZE_ONE << HV_LOG2_DEFAULT_PAGE_SIZE_LARGE)
/** The log2 of the granularity at which page tables must be aligned;
* in other words, the CPA for a page table must have this many zero
* that the temperature has hit an upper limit and is no longer being
* accurately tracked.
*/
- HV_SYSCONF_BOARD_TEMP = 6
+ HV_SYSCONF_BOARD_TEMP = 6,
+
+ /** Legal page size bitmask for hv_install_context().
+ * For example, if 16KB and 64KB small pages are supported,
+ * it would return "HV_CTX_PG_SM_16K | HV_CTX_PG_SM_64K".
+ */
+ HV_SYSCONF_VALID_PAGE_SIZES = 7,
} HV_SysconfQuery;
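A minimal sketch (not part of the patch) of how a kernel could consume this query at boot; validate_page_size() is a made-up name, CTX_PAGE_FLAG is the constant defined in the <asm/page.h> hunk above, and hv_sysconf() is assumed to return the bitmask as a long:

/* Illustrative sketch only. */
static void __init validate_page_size(void)
{
	/* Bitmask of the HV_CTX_PG_SM_* sizes this hypervisor accepts. */
	long valid_sizes = hv_sysconf(HV_SYSCONF_VALID_PAGE_SIZES);

	/* CTX_PAGE_FLAG is zero when the default page size is in use. */
	if (CTX_PAGE_FLAG != 0 && !(valid_sizes & CTX_PAGE_FLAG))
		panic("Hypervisor does not support PAGE_SIZE of %lu bytes",
		      PAGE_SIZE);
}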
* new page table does not need to contain any mapping for the
* hv_install_context address itself.
*
+ * At most one HV_CTX_PG_SM_* flag may be specified in "flags";
+ * if multiple flags are specified, HV_EINVAL is returned.
+ * Specifying none of the flags results in using the default page size.
+ * All cores participating in a given client must request the same
+ * page size, or the results are undefined.
+ *
* @param page_table Root of the page table.
* @param access PTE providing info on how to read the page table. This
* value must be consistent between multiple tiles sharing a page table,
#define HV_CTX_DIRECTIO 0x1 /**< Direct I/O requests are accepted from
PL0. */
+#define HV_CTX_PG_SM_4K 0x10 /**< Use 4K small pages, if available. */
+#define HV_CTX_PG_SM_16K 0x20 /**< Use 16K small pages, if available. */
+#define HV_CTX_PG_SM_64K 0x40 /**< Use 64K small pages, if available. */
+#define HV_CTX_PG_SM_MASK 0xf0 /**< Mask of all possible small pages. */
+
#ifndef __ASSEMBLER__
/** Value returned from hv_inquire_context(). */
* with the existing priority pages) or "red/black" (if they don't).
* The bitmask provides information on which parts of the cache
* have been used for pinned pages so far on this tile; if (1 << N)
- * appears in the bitmask, that indicates that a page has been marked
- * "priority" whose PFN equals N, mod 8.
+ * appears in the bitmask, that indicates that a 4KB region of the
+ * cache starting at (N * 4KB) is in use by a "priority" page.
+ * The portion of cache used by a particular page can be computed
+ * by taking the page's PA, modulo CHIP_L2_CACHE_SIZE(), and setting
+ * all the "4KB" bits corresponding to the actual page size.
* @param bitmask A bitmap of priority page set values
*/
-void hv_set_caching(unsigned int bitmask);
+void hv_set_caching(unsigned long bitmask);
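To make the 4KB-granule rule above concrete, a hedged sketch (priority_page_cache_bits() is a made-up helper, not part of the patch or of the hypervisor API; it assumes "pa" is aligned to "page_size" and that both sizes are powers of two, so the granule range never wraps around the mask):

static unsigned long priority_page_cache_bits(unsigned long pa,
					      unsigned long page_size)
{
	unsigned long cache_size = CHIP_L2_CACHE_SIZE();
	unsigned long nbits = cache_size >> 12;	/* 4KB granules in the cache */
	unsigned long first = (pa & (cache_size - 1)) >> 12;
	unsigned long count = page_size >> 12;

	if (count >= nbits)	/* e.g. a huge page: covers the whole cache */
		return nbits >= 8 * sizeof(long) ? -1UL : (1UL << nbits) - 1;
	return ((1UL << count) - 1) << first;
}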
/** Zero out a specified number of pages.
of word */
#define HV_PTE_PTFN_BITS 29 /**< Number of bits in a PTFN */
-/** Position of the PFN field within the PTE (subset of the PTFN). */
-#define HV_PTE_INDEX_PFN (HV_PTE_INDEX_PTFN + (HV_LOG2_PAGE_SIZE_SMALL - \
- HV_LOG2_PAGE_TABLE_ALIGN))
-
-/** Length of the PFN field within the PTE (subset of the PTFN). */
-#define HV_PTE_INDEX_PFN_BITS (HV_PTE_INDEX_PTFN_BITS - \
- (HV_LOG2_PAGE_SIZE_SMALL - \
- HV_LOG2_PAGE_TABLE_ALIGN))
-
/*
* Legal values for the PTE's mode field
*/
*
* This field contains the upper bits of the CPA (client physical
* address) of the target page; the complete CPA is this field with
- * HV_LOG2_PAGE_SIZE_SMALL zero bits appended to it.
+ * HV_LOG2_PAGE_TABLE_ALIGN zero bits appended to it.
*
- * For PTEs in a level-1 page table where the Page bit is set, the
- * CPA must be aligned modulo the large page size.
- */
-static __inline unsigned int
-hv_pte_get_pfn(const HV_PTE pte)
-{
- return pte.val >> HV_PTE_INDEX_PFN;
-}
-
-
-/** Set the page frame number into a PTE. See hv_pte_get_pfn. */
-static __inline HV_PTE
-hv_pte_set_pfn(HV_PTE pte, unsigned int val)
-{
- /*
- * Note that the use of "PTFN" in the next line is intentional; we
- * don't want any garbage lower bits left in that field.
- */
- pte.val &= ~(((1ULL << HV_PTE_PTFN_BITS) - 1) << HV_PTE_INDEX_PTFN);
- pte.val |= (__hv64) val << HV_PTE_INDEX_PFN;
- return pte;
-}
-
-/** Get the page table frame number from the PTE.
- *
- * This field contains the upper bits of the CPA (client physical
- * address) of the target page table; the complete CPA is this field with
- * with HV_PAGE_TABLE_ALIGN zero bits appended to it.
- *
- * For PTEs in a level-1 page table when the Page bit is not set, the
- * CPA must be aligned modulo the sticter of HV_PAGE_TABLE_ALIGN and
- * the level-2 page table size.
+ * For all PTEs in the lowest-level page table, and for all PTEs with
+ * the Page bit set in all page tables, the CPA must be aligned modulo
+ * the relevant page size.
*/
static __inline unsigned long
hv_pte_get_ptfn(const HV_PTE pte)
return pte.val >> HV_PTE_INDEX_PTFN;
}
-
/** Set the page table frame number into a PTE. See hv_pte_get_ptfn. */
static __inline HV_PTE
hv_pte_set_ptfn(HV_PTE pte, unsigned long val)
return pte;
}
+/** Get the client physical address from the PTE. See hv_pte_set_ptfn. */
+static __inline HV_PhysAddr
+hv_pte_get_pa(const HV_PTE pte)
+{
+ return (__hv64) hv_pte_get_ptfn(pte) << HV_LOG2_PAGE_TABLE_ALIGN;
+}
+
+/** Set the client physical address into a PTE. See hv_pte_get_ptfn. */
+static __inline HV_PTE
+hv_pte_set_pa(HV_PTE pte, HV_PhysAddr pa)
+{
+ return hv_pte_set_ptfn(pte, pa >> HV_LOG2_PAGE_TABLE_ALIGN);
+}
+
/** Get the remote tile caching this page.
*
#endif /* !__ASSEMBLER__ */
-/** Converts a client physical address to a pfn. */
-#define HV_CPA_TO_PFN(p) ((p) >> HV_LOG2_PAGE_SIZE_SMALL)
-
-/** Converts a pfn to a client physical address. */
-#define HV_PFN_TO_CPA(p) (((HV_PhysAddr)(p)) << HV_LOG2_PAGE_SIZE_SMALL)
-
/** Converts a client physical address to a ptfn. */
#define HV_CPA_TO_PTFN(p) ((p) >> HV_LOG2_PAGE_TABLE_ALIGN)
/** Converts a ptfn to a client physical address. */
#define HV_PTFN_TO_CPA(p) (((HV_PhysAddr)(p)) << HV_LOG2_PAGE_TABLE_ALIGN)
-/** Converts a ptfn to a pfn. */
-#define HV_PTFN_TO_PFN(p) \
- ((p) >> (HV_LOG2_PAGE_SIZE_SMALL - HV_LOG2_PAGE_TABLE_ALIGN))
-
-/** Converts a pfn to a ptfn. */
-#define HV_PFN_TO_PTFN(p) \
- ((p) << (HV_LOG2_PAGE_SIZE_SMALL - HV_LOG2_PAGE_TABLE_ALIGN))
-
#if CHIP_VA_WIDTH() > 32
+/*
+ * Note that we currently do not allow customizing the page size
+ * of the L0 pages, but fix them at 4GB, so we do not use the
+ * "_HV_xxx" nomenclature for the L0 macros.
+ */
+
/** Log number of HV_PTE entries in L0 page table */
#define HV_LOG2_L0_ENTRIES (CHIP_VA_WIDTH() - HV_LOG2_L1_SPAN)
#endif /* CHIP_VA_WIDTH() > 32 */
/** Log number of HV_PTE entries in L1 page table */
-#define HV_LOG2_L1_ENTRIES (HV_LOG2_L1_SPAN - HV_LOG2_PAGE_SIZE_LARGE)
+#define _HV_LOG2_L1_ENTRIES(log2_page_size_large) \
+ (HV_LOG2_L1_SPAN - log2_page_size_large)
/** Number of HV_PTE entries in L1 page table */
-#define HV_L1_ENTRIES (1 << HV_LOG2_L1_ENTRIES)
+#define _HV_L1_ENTRIES(log2_page_size_large) \
+ (1 << _HV_LOG2_L1_ENTRIES(log2_page_size_large))
/** Log size of L1 page table in bytes */
-#define HV_LOG2_L1_SIZE (HV_LOG2_PTE_SIZE + HV_LOG2_L1_ENTRIES)
+#define _HV_LOG2_L1_SIZE(log2_page_size_large) \
+ (HV_LOG2_PTE_SIZE + _HV_LOG2_L1_ENTRIES(log2_page_size_large))
/** Size of L1 page table in bytes */
-#define HV_L1_SIZE (1 << HV_LOG2_L1_SIZE)
+#define _HV_L1_SIZE(log2_page_size_large) \
+ (1 << _HV_LOG2_L1_SIZE(log2_page_size_large))
/** Log number of HV_PTE entries in level-2 page table */
-#define HV_LOG2_L2_ENTRIES (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL)
+#define _HV_LOG2_L2_ENTRIES(log2_page_size_large, log2_page_size_small) \
+ (log2_page_size_large - log2_page_size_small)
/** Number of HV_PTE entries in level-2 page table */
-#define HV_L2_ENTRIES (1 << HV_LOG2_L2_ENTRIES)
+#define _HV_L2_ENTRIES(log2_page_size_large, log2_page_size_small) \
+ (1 << _HV_LOG2_L2_ENTRIES(log2_page_size_large, log2_page_size_small))
/** Log size of level-2 page table in bytes */
-#define HV_LOG2_L2_SIZE (HV_LOG2_PTE_SIZE + HV_LOG2_L2_ENTRIES)
+#define _HV_LOG2_L2_SIZE(log2_page_size_large, log2_page_size_small) \
+ (HV_LOG2_PTE_SIZE + \
+ _HV_LOG2_L2_ENTRIES(log2_page_size_large, log2_page_size_small))
/** Size of level-2 page table in bytes */
-#define HV_L2_SIZE (1 << HV_LOG2_L2_SIZE)
+#define _HV_L2_SIZE(log2_page_size_large, log2_page_size_small) \
+ (1 << _HV_LOG2_L2_SIZE(log2_page_size_large, log2_page_size_small))
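For reference (not part of the patch), with the default 16MB/64KB geometry these parameterized macros reproduce the values the old fixed-size macros had:

/*
 * Worked expansion, assuming HV_LOG2_L1_SPAN == 32 and
 * HV_LOG2_PTE_SIZE == 3 (8-byte PTEs):
 *
 *   _HV_L1_ENTRIES(24)      == 1 << (32 - 24)  == 256
 *   _HV_L1_SIZE(24)         == 256 * 8         == 2048 bytes
 *   _HV_L2_ENTRIES(24, 16)  == 1 << (24 - 16)  == 256
 *   _HV_L2_SIZE(24, 16)     == 256 * 8         == 2048 bytes
 *
 * With 16KB small pages, _HV_L2_ENTRIES(24, 14) == 1024 and
 * _HV_L2_SIZE(24, 14) == 8192 bytes, which is why the kernel-side
 * L2_USER_PGTABLE_ORDER computation rounds up to a full page.
 */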
#ifdef __ASSEMBLER__
#if CHIP_VA_WIDTH() > 32
/** Index in L1 for a specific VA */
-#define HV_L1_INDEX(va) \
- (((va) >> HV_LOG2_PAGE_SIZE_LARGE) & (HV_L1_ENTRIES - 1))
+#define _HV_L1_INDEX(va, log2_page_size_large) \
+ (((va) >> log2_page_size_large) & (_HV_L1_ENTRIES(log2_page_size_large) - 1))
#else /* CHIP_VA_WIDTH() > 32 */
/** Index in L1 for a specific VA */
-#define HV_L1_INDEX(va) \
- (((va) >> HV_LOG2_PAGE_SIZE_LARGE))
+#define _HV_L1_INDEX(va, log2_page_size_large) \
+ (((va) >> log2_page_size_large))
#endif /* CHIP_VA_WIDTH() > 32 */
/** Index in level-2 page table for a specific VA */
-#define HV_L2_INDEX(va) \
- (((va) >> HV_LOG2_PAGE_SIZE_SMALL) & (HV_L2_ENTRIES - 1))
+#define _HV_L2_INDEX(va, log2_page_size_large, log2_page_size_small) \
+ (((va) >> log2_page_size_small) & \
+ (_HV_L2_ENTRIES(log2_page_size_large, log2_page_size_small) - 1))
#else /* __ASSEMBLER__ */
#if CHIP_VA_WIDTH() > 32
/** Index in L1 for a specific VA */
-#define HV_L1_INDEX(va) \
- (((HV_VirtAddr)(va) >> HV_LOG2_PAGE_SIZE_LARGE) & (HV_L1_ENTRIES - 1))
+#define _HV_L1_INDEX(va, log2_page_size_large) \
+ (((HV_VirtAddr)(va) >> log2_page_size_large) & \
+ (_HV_L1_ENTRIES(log2_page_size_large) - 1))
#else /* CHIP_VA_WIDTH() > 32 */
/** Index in L1 for a specific VA */
-#define HV_L1_INDEX(va) \
- (((HV_VirtAddr)(va) >> HV_LOG2_PAGE_SIZE_LARGE))
+#define _HV_L1_INDEX(va, log2_page_size_large) \
+ (((HV_VirtAddr)(va) >> log2_page_size_large))
#endif /* CHIP_VA_WIDTH() > 32 */
/** Index in level-2 page table for a specific VA */
-#define HV_L2_INDEX(va) \
- (((HV_VirtAddr)(va) >> HV_LOG2_PAGE_SIZE_SMALL) & (HV_L2_ENTRIES - 1))
+#define _HV_L2_INDEX(va, log2_page_size_large, log2_page_size_small) \
+ (((HV_VirtAddr)(va) >> log2_page_size_small) & \
+ (_HV_L2_ENTRIES(log2_page_size_large, log2_page_size_small) - 1))
#endif /* __ASSEMBLER__ */
-#endif /* _TILE_HV_H */
+/** Position of the PFN field within the PTE (subset of the PTFN). */
+#define _HV_PTE_INDEX_PFN(log2_page_size) \
+ (HV_PTE_INDEX_PTFN + (log2_page_size - HV_LOG2_PAGE_TABLE_ALIGN))
+
+/** Length of the PFN field within the PTE (subset of the PTFN). */
+#define _HV_PTE_INDEX_PFN_BITS(log2_page_size) \
+ (HV_PTE_INDEX_PTFN_BITS - (log2_page_size - HV_LOG2_PAGE_TABLE_ALIGN))
+
+/** Converts a client physical address to a pfn. */
+#define _HV_CPA_TO_PFN(p, log2_page_size) ((p) >> log2_page_size)
+
+/** Converts a pfn to a client physical address. */
+#define _HV_PFN_TO_CPA(p, log2_page_size) \
+ (((HV_PhysAddr)(p)) << log2_page_size)
+
+/** Converts a ptfn to a pfn. */
+#define _HV_PTFN_TO_PFN(p, log2_page_size) \
+ ((p) >> (log2_page_size - HV_LOG2_PAGE_TABLE_ALIGN))
+
+/** Converts a pfn to a ptfn. */
+#define _HV_PFN_TO_PTFN(p, log2_page_size) \
+ ((p) << (log2_page_size - HV_LOG2_PAGE_TABLE_ALIGN))
+
+#endif /* _HV_HV_H */
}
{
moveli lr, lo16(1f)
- move r5, zero
+ moveli r5, CTX_PAGE_FLAG
}
{
auli lr, lr, ha16(1f)
.macro PTE va, cpa, bits1, no_org=0
.ifeq \no_org
- .org swapper_pg_dir + HV_L1_INDEX(\va) * HV_PTE_SIZE
+ .org swapper_pg_dir + PGD_INDEX(\va) * HV_PTE_SIZE
.endif
.word HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED | \
(HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE)
- .word (\bits1) | (HV_CPA_TO_PFN(\cpa) << (HV_PTE_INDEX_PFN - 32))
+ .word (\bits1) | (HV_CPA_TO_PTFN(\cpa) << (HV_PTE_INDEX_PTFN - 32))
.endm
__PAGE_ALIGNED_DATA
/* The true text VAs are mapped as VA = PA + MEM_SV_INTRPT */
PTE MEM_SV_INTRPT, 0, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
(1 << (HV_PTE_INDEX_EXECUTABLE - 32))
- .org swapper_pg_dir + HV_L1_SIZE
+ .org swapper_pg_dir + SIZEOF_PGD
END(swapper_pg_dir)
/*
shl16insli r0, r0, hw0(swapper_pg_dir - PAGE_OFFSET)
}
{
- move r3, zero
+ moveli r3, CTX_PAGE_FLAG
j hv_install_context
}
1:
.macro PTE cpa, bits1
.quad HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED |\
HV_PTE_GLOBAL | (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE) |\
- (\bits1) | (HV_CPA_TO_PFN(\cpa) << HV_PTE_INDEX_PFN)
+ (\bits1) | (HV_CPA_TO_PTFN(\cpa) << HV_PTE_INDEX_PTFN)
.endm
__PAGE_ALIGNED_DATA
.align PAGE_SIZE
ENTRY(swapper_pg_dir)
- .org swapper_pg_dir + HV_L0_INDEX(PAGE_OFFSET) * HV_PTE_SIZE
+ .org swapper_pg_dir + PGD_INDEX(PAGE_OFFSET) * HV_PTE_SIZE
.Lsv_data_pmd:
.quad 0 /* PTE temp_data_pmd - PAGE_OFFSET, 0 */
- .org swapper_pg_dir + HV_L0_INDEX(MEM_SV_START) * HV_PTE_SIZE
+ .org swapper_pg_dir + PGD_INDEX(MEM_SV_START) * HV_PTE_SIZE
.Lsv_code_pmd:
.quad 0 /* PTE temp_code_pmd - PAGE_OFFSET, 0 */
- .org swapper_pg_dir + HV_L0_SIZE
+ .org swapper_pg_dir + SIZEOF_PGD
END(swapper_pg_dir)
.align HV_PAGE_TABLE_ALIGN
* permissions later.
*/
.set addr, 0
- .rept HV_L1_ENTRIES
+ .rept PTRS_PER_PMD
PTE addr, HV_PTE_READABLE | HV_PTE_WRITABLE
- .set addr, addr + HV_PAGE_SIZE_LARGE
+ .set addr, addr + HPAGE_SIZE
.endr
- .org temp_data_pmd + HV_L1_SIZE
+ .org temp_data_pmd + SIZEOF_PMD
END(temp_data_pmd)
.align HV_PAGE_TABLE_ALIGN
* permissions later.
*/
.set addr, 0
- .rept HV_L1_ENTRIES
+ .rept PTRS_PER_PMD
PTE addr, HV_PTE_READABLE | HV_PTE_EXECUTABLE
- .set addr, addr + HV_PAGE_SIZE_LARGE
+ .set addr, addr + HPAGE_SIZE
.endr
- .org temp_code_pmd + HV_L1_SIZE
+ .org temp_code_pmd + SIZEOF_PMD
END(temp_code_pmd)
/*
void machine_kexec(struct kimage *image)
{
void *reboot_code_buffer;
+ pte_t *ptep;
void (*rnk)(unsigned long, void *, unsigned long)
__noreturn;
*/
homecache_change_page_home(image->control_code_page, 0,
smp_processor_id());
- reboot_code_buffer = vmap(&image->control_code_page, 1, 0,
- __pgprot(_PAGE_KERNEL | _PAGE_EXECUTABLE));
+ reboot_code_buffer = page_address(image->control_code_page);
+ BUG_ON(reboot_code_buffer == NULL);
+ ptep = virt_to_pte(NULL, (unsigned long)reboot_code_buffer);
+ __set_pte(ptep, pte_mkexec(*ptep));
memcpy(reboot_code_buffer, relocate_new_kernel,
relocate_new_kernel_size);
__flush_icache_range(
for (i = 0; i < size; i += PAGE_SIZE, ++pfn, ++pg) {
/* Update the vmalloc mapping and page home. */
- pte_t *ptep =
- virt_to_pte(NULL, (unsigned long)ptr + i);
+ unsigned long addr = (unsigned long)ptr + i;
+ pte_t *ptep = virt_to_pte(NULL, addr);
pte_t pte = *ptep;
BUG_ON(pfn != pte_pfn(pte));
pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
pte = set_remote_cache_cpu(pte, cpu);
- set_pte(ptep, pte);
+ set_pte_at(&init_mm, addr, ptep, pte);
/* Update the lowmem mapping for consistency. */
lowmem_va = (unsigned long)pfn_to_kaddr(pfn);
BUG_ON(pte_huge(*ptep));
}
BUG_ON(pfn != pte_pfn(*ptep));
- set_pte(ptep, pte);
+ set_pte_at(&init_mm, lowmem_va, ptep, pte);
}
}
if (hv_get_ipi_pte(tile, KERNEL_PL, &pte) != 0)
panic("Failed to initialize IPI for cpu %d\n", cpu);
- offset = hv_pte_get_pfn(pte) << PAGE_SHIFT;
+ offset = PFN_PHYS(pte_pfn(pte));
ipi_mappings[cpu] = ioremap_prot(offset, PAGE_SIZE, pte);
}
#endif
if (l1_pgtable == NULL)
return 0; /* can't read user space in other tasks */
+ pte = l1_pgtable[PGD_INDEX(address)];
#ifdef CONFIG_64BIT
/* Find the real l1_pgtable by looking in the l0_pgtable. */
- pte = l1_pgtable[HV_L0_INDEX(address)];
if (!hv_pte_get_present(pte))
return 0;
- pfn = hv_pte_get_pfn(pte);
+ pfn = pte_pfn(pte);
if (pte_huge(pte)) {
if (!pfn_valid(pfn)) {
pr_err("L0 huge page has bad pfn %#lx\n", pfn);
page = pfn_to_page(pfn);
BUG_ON(PageHighMem(page)); /* No HIGHMEM on 64-bit. */
l1_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
+ pte = l1_pgtable[PMD_INDEX(address)];
#endif
- pte = l1_pgtable[HV_L1_INDEX(address)];
if (!hv_pte_get_present(pte))
return 0;
- pfn = hv_pte_get_pfn(pte);
+ pfn = pte_pfn(pte);
if (pte_huge(pte)) {
if (!pfn_valid(pfn)) {
pr_err("huge page has bad pfn %#lx\n", pfn);
page = pfn_to_page(pfn);
if (PageHighMem(page)) {
- pr_err("L2 page table not in LOWMEM (%#llx)\n",
- HV_PFN_TO_CPA(pfn));
+ pr_err("L2 page table not in LOWMEM (%#llx)\n", PFN_PHYS(pfn));
return 0;
}
l2_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
- pte = l2_pgtable[HV_L2_INDEX(address)];
+ pte = l2_pgtable[PTE_INDEX(address)];
return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
}
break;
if (get_remote_cache_cpu(src_pte) == smp_processor_id())
break;
- src_page = pfn_to_page(hv_pte_get_pfn(src_pte));
+ src_page = pfn_to_page(pte_pfn(src_pte));
get_page(src_page);
if (pte_val(src_pte) != pte_val(*src_ptep)) {
put_page(src_page);
}
if (pte_huge(src_pte)) {
/* Adjust the PTE to correspond to a small page */
- int pfn = hv_pte_get_pfn(src_pte);
+ int pfn = pte_pfn(src_pte);
pfn += (((unsigned long)source & (HPAGE_SIZE-1))
>> PAGE_SHIFT);
src_pte = pfn_pte(pfn, src_pte);
put_page(src_page);
break;
}
- dst_page = pfn_to_page(hv_pte_get_pfn(dst_pte));
+ dst_page = pfn_to_page(pte_pfn(dst_pte));
if (dst_page == src_page) {
/*
* Source and dest are on the same page; this
}
if (pte_huge(dst_pte)) {
/* Adjust the PTE to correspond to a small page */
- int pfn = hv_pte_get_pfn(dst_pte);
+ int pfn = pte_pfn(dst_pte);
pfn += (((unsigned long)dest & (HPAGE_SIZE-1))
>> PAGE_SHIFT);
dst_pte = pfn_pte(pfn, dst_pte);
static void init_prealloc_ptes(int node, int pages)
{
- BUG_ON(pages & (HV_L2_ENTRIES-1));
+ BUG_ON(pages & (PTRS_PER_PTE - 1));
if (pages) {
num_l2_ptes[node] = pages;
l2_ptes[node] = __alloc_bootmem(pages * sizeof(pte_t),
#ifdef __tilegx__
-#if HV_L1_SIZE != HV_L2_SIZE
-# error Rework assumption that L1 and L2 page tables are same size.
-#endif
-
-/* Since pmd_t arrays and pte_t arrays are the same size, just use casts. */
static inline pmd_t *alloc_pmd(void)
{
- return (pmd_t *)alloc_pte();
+ return __alloc_bootmem(L1_KERNEL_PGTABLE_SIZE, HV_PAGE_TABLE_ALIGN, 0);
}
static inline void assign_pmd(pud_t *pud, pmd_t *pmd)
* changing init_mm once we get up and running, and there's no
* need for e.g. vmalloc_sync_all().
*/
- BUILD_BUG_ON(pgd_index(VMALLOC_START) != pgd_index(VMALLOC_END));
+ BUILD_BUG_ON(pgd_index(VMALLOC_START) != pgd_index(VMALLOC_END - 1));
pud = pud_offset(pgd_base + pgd_index(VMALLOC_START), VMALLOC_START);
assign_pmd(pud, alloc_pmd());
#endif
#define L2_USER_PGTABLE_PAGES (1 << L2_USER_PGTABLE_ORDER)
-struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+struct page *pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
+ int order)
{
gfp_t flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO;
struct page *p;
-#if L2_USER_PGTABLE_ORDER > 0
int i;
-#endif
#ifdef CONFIG_HIGHPTE
flags |= __GFP_HIGHMEM;
if (p == NULL)
return NULL;
-#if L2_USER_PGTABLE_ORDER > 0
/*
* Make every page have a page_count() of one, not just the first.
* We don't use __GFP_COMP since it doesn't look like it works
* correctly with tlb_remove_page().
*/
- for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) {
+ for (i = 1; i < (1 << order); ++i) {
init_page_count(p+i);
inc_zone_page_state(p+i, NR_PAGETABLE);
}
-#endif
pgtable_page_ctor(p);
return p;
* process). We have to correct whatever pte_alloc_one() did before
* returning the pages to the allocator.
*/
-void pte_free(struct mm_struct *mm, struct page *p)
+void pgtable_free(struct mm_struct *mm, struct page *p, int order)
{
int i;
pgtable_page_dtor(p);
__free_page(p);
- for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) {
+ for (i = 1; i < (1 << order); ++i) {
__free_page(p+i);
dec_zone_page_state(p+i, NR_PAGETABLE);
}
}
-void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
- unsigned long address)
+void __pgtable_free_tlb(struct mmu_gather *tlb, struct page *pte,
+ unsigned long address, int order)
{
int i;
pgtable_page_dtor(pte);
tlb_remove_page(tlb, pte);
- for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) {
+ for (i = 1; i < (1 << order); ++i) {
tlb_remove_page(tlb, pte + i);
dec_zone_page_state(pte + i, NR_PAGETABLE);
}
/* Can this mm load a PTE with cached_priority set? */
static inline int mm_is_priority_cached(struct mm_struct *mm)
{
- return mm->context.priority_cached;
+ return mm->context.priority_cached != 0;
}
/*
void start_mm_caching(struct mm_struct *mm)
{
if (!mm_is_priority_cached(mm)) {
- mm->context.priority_cached = -1U;
- hv_set_caching(-1U);
+ mm->context.priority_cached = -1UL;
+ hv_set_caching(-1UL);
}
}
* Presumably we'll come back later and have more luck and clear
* the value then; for now we'll just keep the cache marked for priority.
*/
-static unsigned int update_priority_cached(struct mm_struct *mm)
+static unsigned long update_priority_cached(struct mm_struct *mm)
{
if (mm->context.priority_cached && down_write_trylock(&mm->mmap_sem)) {
struct vm_area_struct *vm;