From cf627156c450cd5a0741b31f55181db3400d4887 Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Sun, 12 Feb 2006 21:10:07 -0800
Subject: [PATCH] [SPARC64]: Use inline patching for critical PTE operations.

This handles the SUN4U vs SUN4V PTE layout differences
with near zero performance cost.

Signed-off-by: David S. Miller
---
 arch/sparc64/mm/init.c        | 211 +--------------
 include/asm-sparc64/pgtable.h | 488 +++++++++++++++++++++++++++++++++-
 2 files changed, 488 insertions(+), 211 deletions(-)

diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 81f9f4bffaff..6f860c39db82 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -1502,217 +1502,12 @@ unsigned long pte_sz_bits(unsigned long sz)
 pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
 {
 	pte_t pte;
-	if (tlb_type == hypervisor) {
-		pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E_4V) &
-				~(unsigned long)_PAGE_CACHE_4V);
-	} else {
-		pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E_4U) &
-				~(unsigned long)_PAGE_CACHE_4U);
-	}
+
+	pte_val(pte) = page | pgprot_val(pgprot_noncached(prot));
 	pte_val(pte) |= (((unsigned long)space) << 32);
 	pte_val(pte) |= pte_sz_bits(page_size);
-	return pte;
-}
-
-unsigned long pte_present(pte_t pte)
-{
-	return (pte_val(pte) &
-		((tlb_type == hypervisor) ?
-		 _PAGE_PRESENT_4V : _PAGE_PRESENT_4U));
-}
-
-unsigned long pte_file(pte_t pte)
-{
-	return (pte_val(pte) &
-		((tlb_type == hypervisor) ?
-		 _PAGE_FILE_4V : _PAGE_FILE_4U));
-}
-
-unsigned long pte_read(pte_t pte)
-{
-	return (pte_val(pte) &
-		((tlb_type == hypervisor) ?
-		 _PAGE_READ_4V : _PAGE_READ_4U));
-}
-
-unsigned long pte_exec(pte_t pte)
-{
-	return (pte_val(pte) &
-		((tlb_type == hypervisor) ?
-		 _PAGE_EXEC_4V : _PAGE_EXEC_4U));
-}
-
-unsigned long pte_write(pte_t pte)
-{
-	return (pte_val(pte) &
-		((tlb_type == hypervisor) ?
-		 _PAGE_WRITE_4V : _PAGE_WRITE_4U));
-}
-
-unsigned long pte_dirty(pte_t pte)
-{
-	return (pte_val(pte) &
-		((tlb_type == hypervisor) ?
-		 _PAGE_MODIFIED_4V : _PAGE_MODIFIED_4U));
-}
-
-unsigned long pte_young(pte_t pte)
-{
-	return (pte_val(pte) &
-		((tlb_type == hypervisor) ?
-		 _PAGE_ACCESSED_4V : _PAGE_ACCESSED_4U));
-}
-pte_t pte_wrprotect(pte_t pte)
-{
-	unsigned long mask = _PAGE_WRITE_4U | _PAGE_W_4U;
-
-	if (tlb_type == hypervisor)
-		mask = _PAGE_WRITE_4V | _PAGE_W_4V;
-
-	return __pte(pte_val(pte) & ~mask);
-}
-
-pte_t pte_rdprotect(pte_t pte)
-{
-	unsigned long mask = _PAGE_R | _PAGE_READ_4U;
-
-	if (tlb_type == hypervisor)
-		mask = _PAGE_R | _PAGE_READ_4V;
-
-	return __pte(pte_val(pte) & ~mask);
-}
-
-pte_t pte_mkclean(pte_t pte)
-{
-	unsigned long mask = _PAGE_MODIFIED_4U | _PAGE_W_4U;
-
-	if (tlb_type == hypervisor)
-		mask = _PAGE_MODIFIED_4V | _PAGE_W_4V;
-
-	return __pte(pte_val(pte) & ~mask);
-}
-
-pte_t pte_mkold(pte_t pte)
-{
-	unsigned long mask = _PAGE_R | _PAGE_ACCESSED_4U;
-
-	if (tlb_type == hypervisor)
-		mask = _PAGE_R | _PAGE_ACCESSED_4V;
-
-	return __pte(pte_val(pte) & ~mask);
-}
-
-pte_t pte_mkyoung(pte_t pte)
-{
-	unsigned long mask = _PAGE_R | _PAGE_ACCESSED_4U;
-
-	if (tlb_type == hypervisor)
-		mask = _PAGE_R | _PAGE_ACCESSED_4V;
-
-	return __pte(pte_val(pte) | mask);
-}
-
-pte_t pte_mkwrite(pte_t pte)
-{
-	unsigned long mask = _PAGE_WRITE_4U;
-
-	if (tlb_type == hypervisor)
-		mask = _PAGE_WRITE_4V;
-
-	return __pte(pte_val(pte) | mask);
-}
-
-pte_t pte_mkdirty(pte_t pte)
-{
-	unsigned long mask = _PAGE_MODIFIED_4U | _PAGE_W_4U;
-
-	if (tlb_type == hypervisor)
-		mask = _PAGE_MODIFIED_4V | _PAGE_W_4V;
-
-	return __pte(pte_val(pte) | mask);
-}
-
-pte_t pte_mkhuge(pte_t pte)
-{
-	unsigned long mask = _PAGE_SZHUGE_4U;
-
-	if (tlb_type == hypervisor)
-		mask = _PAGE_SZHUGE_4V;
-
-	return __pte(pte_val(pte) | mask);
-}
-
-pte_t pgoff_to_pte(unsigned long off)
-{
-	unsigned long bit = _PAGE_FILE_4U;
-
-	if (tlb_type == hypervisor)
-		bit = _PAGE_FILE_4V;
-
-	return __pte((off << PAGE_SHIFT) | bit);
-}
-
-pgprot_t pgprot_noncached(pgprot_t prot)
-{
-	unsigned long val = pgprot_val(prot);
-	unsigned long off = _PAGE_CP_4U | _PAGE_CV_4U;
-	unsigned long on = _PAGE_E_4U;
-
-	if (tlb_type == hypervisor) {
-		off = _PAGE_CP_4V | _PAGE_CV_4V;
-		on = _PAGE_E_4V;
-	}
-
-	return __pgprot((val & ~off) | on);
-}
-
-pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
-{
-	unsigned long sz_bits = _PAGE_SZBITS_4U;
-
-	if (tlb_type == hypervisor)
-		sz_bits = _PAGE_SZBITS_4V;
-
-	return __pte((pfn << PAGE_SHIFT) | pgprot_val(prot) | sz_bits);
-}
-
-unsigned long pte_pfn(pte_t pte)
-{
-	unsigned long mask = _PAGE_PADDR_4U;
-
-	if (tlb_type == hypervisor)
-		mask = _PAGE_PADDR_4V;
-
-	return (pte_val(pte) & mask) >> PAGE_SHIFT;
-}
-
-pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
-{
-	unsigned long preserve_mask;
-	unsigned long val;
-
-	preserve_mask = (_PAGE_PADDR_4U |
-			 _PAGE_MODIFIED_4U |
-			 _PAGE_ACCESSED_4U |
-			 _PAGE_CP_4U |
-			 _PAGE_CV_4U |
-			 _PAGE_E_4U |
-			 _PAGE_PRESENT_4U |
-			 _PAGE_SZBITS_4U);
-	if (tlb_type == hypervisor)
-		preserve_mask = (_PAGE_PADDR_4V |
-				 _PAGE_MODIFIED_4V |
-				 _PAGE_ACCESSED_4V |
-				 _PAGE_CP_4V |
-				 _PAGE_CV_4V |
-				 _PAGE_E_4V |
-				 _PAGE_PRESENT_4V |
-				 _PAGE_SZBITS_4V);
-
-	val = (pte_val(orig_pte) & preserve_mask);
-
-	return __pte(val | (pgprot_val(new_prot) & ~preserve_mask));
+	return pte;
 }
 
 static unsigned long kern_large_tte(unsigned long paddr)
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index 3c02d5d9a533..00eecbb52f95 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -227,11 +227,493 @@ extern struct page *mem_map_zero;
  * the first physical page in the machine is at some huge physical address,
  * such as 4GB. This is common on a partitioned E10000, for example.
  */
-extern pte_t pfn_pte(unsigned long, pgprot_t);
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
+{
+	unsigned long paddr = pfn << PAGE_SHIFT;
+	unsigned long sz_bits;
+
+	BUILD_BUG_ON(!__builtin_constant_p(_PAGE_SZBITS_4U) ||
+		     !__builtin_constant_p(_PAGE_SZBITS_4V));
+
+	sz_bits = 0UL;
+	if (_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL) {
+		BUILD_BUG_ON((_PAGE_SZBITS_4U & ~(0xfffffc0000000000UL)) ||
+			     (_PAGE_SZBITS_4V & ~(0x0000000000000fffUL)));
+		__asm__ __volatile__(
+		"\n661:	sethi		%%uhi(%1), %0\n"
+		"	sllx		%0, 32, %0\n"
+		"	.section	.sun4v_2insn_patch, \"ax\"\n"
+		"	.word		661b\n"
+		"	mov		%2, %0\n"
+		"	nop\n"
+		"	.previous\n"
+		: "=r" (sz_bits)
+		: "i" (_PAGE_SZBITS_4U), "i" (_PAGE_SZBITS_4V));
+	}
+	return __pte(paddr | sz_bits | pgprot_val(prot));
+}
 #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
-extern unsigned long pte_pfn(pte_t);
+
+/* This one can be done with two shifts. */
+static inline unsigned long pte_pfn(pte_t pte)
+{
+	const unsigned long pte_paddr_shl_sun4u = 21;
+	const unsigned long pte_paddr_shr_sun4u = 21 + PAGE_SHIFT;
+	const unsigned long pte_paddr_shl_sun4v = 8;
+	const unsigned long pte_paddr_shr_sun4v = 8 + PAGE_SHIFT;
+	unsigned long ret;
+
+	__asm__ __volatile__(
+	"\n661:	sllx		%1, %2, %0\n"
+	"	srlx		%0, %3, %0\n"
+	"	.section	.sun4v_2insn_patch, \"ax\"\n"
+	"	.word		661b\n"
+	"	sllx		%1, %4, %0\n"
+	"	srlx		%0, %5, %0\n"
+	"	.previous\n"
+	: "=r" (ret)
+	: "r" (pte_val(pte)),
+	  "i" (pte_paddr_shl_sun4u), "i" (pte_paddr_shr_sun4u),
+	  "i" (pte_paddr_shl_sun4v), "i" (pte_paddr_shr_sun4v));
+
+	return ret;
+}
 #define pte_page(x) pfn_to_page(pte_pfn(x))
-extern pte_t pte_modify(pte_t, pgprot_t);
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
+{
+	const unsigned long preserve_mask_sun4u = (_PAGE_PADDR_4U |
+						   _PAGE_MODIFIED_4U |
+						   _PAGE_ACCESSED_4U |
+						   _PAGE_CP_4U |
+						   _PAGE_CV_4U |
+						   _PAGE_E_4U |
+						   _PAGE_PRESENT_4U |
+						   _PAGE_SZBITS_4U);
+	const unsigned long preserve_mask_sun4v = (_PAGE_PADDR_4V |
+						   _PAGE_MODIFIED_4V |
+						   _PAGE_ACCESSED_4V |
+						   _PAGE_CP_4V |
+						   _PAGE_CV_4V |
+						   _PAGE_E_4V |
+						   _PAGE_PRESENT_4V |
+						   _PAGE_SZBITS_4V);
+	unsigned long mask, tmp;
+
+	/* SUN4U: 0x600307ffffffecb8 (negated == 0x9ffcf80000001347)
+	 * SUN4V: 0x30ffffffffffee17 (negated == 0xcf000000000011e8)
+	 *
+	 * Even if we use negation tricks the result is still a 6
+	 * instruction sequence, so don't try to play fancy and just
+	 * do the most straightforward implementation.
+	 *
+	 * Note: We encode this into 3 sun4v 2-insn patch sequences.
+	 */
+
+	__asm__ __volatile__(
+	"\n661:	sethi		%%uhi(%2), %1\n"
+	"	sethi		%%hi(%2), %0\n"
+	"\n662:	or		%1, %%ulo(%2), %1\n"
+	"	or		%0, %%lo(%2), %0\n"
+	"\n663:	sllx		%1, 32, %1\n"
+	"	or		%0, %1, %0\n"
+	"	.section	.sun4v_2insn_patch, \"ax\"\n"
+	"	.word		661b\n"
+	"	sethi		%%uhi(%3), %1\n"
+	"	sethi		%%hi(%3), %0\n"
+	"	.word		662b\n"
+	"	or		%1, %%ulo(%3), %1\n"
+	"	or		%0, %%lo(%3), %0\n"
+	"	.word		663b\n"
+	"	sllx		%1, 32, %1\n"
+	"	or		%0, %1, %0\n"
+	"	.previous\n"
+	: "=r" (mask), "=r" (tmp)
+	: "i" (preserve_mask_sun4u), "i" (preserve_mask_sun4v));
+
+	return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
+}
+
+static inline pte_t pgoff_to_pte(unsigned long off)
+{
+	off <<= PAGE_SHIFT;
+
+	BUILD_BUG_ON((_PAGE_FILE_4U & ~0xfffUL) ||
+		     (_PAGE_FILE_4V & ~0xfffUL));
+
+	__asm__ __volatile__(
+	"\n661:	or		%0, %2, %0\n"
+	"	.section	.sun4v_1insn_patch, \"ax\"\n"
+	"	.word		661b\n"
+	"	or		%0, %3, %0\n"
+	"	.previous\n"
+	: "=r" (off)
+	: "0" (off), "i" (_PAGE_FILE_4U), "i" (_PAGE_FILE_4V));
+
+	return __pte(off);
+}
+
+static inline pgprot_t pgprot_noncached(pgprot_t prot)
+{
+	unsigned long val = pgprot_val(prot);
+
+	BUILD_BUG_ON(((_PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U) & ~(0xfffUL)) ||
+		     ((_PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V) & ~(0xfffUL)));
+
+	__asm__ __volatile__(
+	"\n661:	andn		%0, %2, %0\n"
+	"	or		%0, %3, %0\n"
+	"	.section	.sun4v_2insn_patch, \"ax\"\n"
+	"	.word		661b\n"
+	"	andn		%0, %4, %0\n"
+	"	or		%0, %5, %0\n"
+	"	.previous\n"
+	: "=r" (val)
+	: "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U),
+	  "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V));
+
+	return __pgprot(val);
+}
+/* Various pieces of code check for platform support by ifdef testing
+ * on "pgprot_noncached".  That's broken and should be fixed, but for
+ * now...
+ */
+#define pgprot_noncached pgprot_noncached
+
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+	const unsigned long mask_4u = _PAGE_SZHUGE_4U;
+	const unsigned long mask_4v = _PAGE_SZHUGE_4V;
+	unsigned long mask;
+
+	BUILD_BUG_ON((mask_4u & ~(0xfffffc0000000000UL)) ||
+		     (mask_4v & ~(0xfffUL)));
+
+	__asm__ __volatile__(
+	"\n661:	sethi		%%uhi(%1), %0\n"
+	"	sllx		%0, 32, %0\n"
+	"	.section	.sun4v_2insn_patch, \"ax\"\n"
+	"	.word		661b\n"
+	"	mov		%2, %0\n"
+	"	nop\n"
+	"	.previous\n"
+	: "=r" (mask)
+	: "i" (mask_4u), "i" (mask_4v));
+
+	return __pte(pte_val(pte) | mask);
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+	const unsigned long mask_4u = _PAGE_MODIFIED_4U | _PAGE_W_4U;
+	const unsigned long mask_4v = _PAGE_MODIFIED_4V | _PAGE_W_4V;
+	unsigned long val = pte_val(pte), tmp;
+
+	BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) ||
+		     (mask_4v & ~(0xfffffc0000000fffUL)));
+
+	__asm__ __volatile__(
+	"\n661:	or		%0, %3, %0\n"
+	"	nop\n"
+	"\n662:	nop\n"
+	"	nop\n"
+	"	.section	.sun4v_2insn_patch, \"ax\"\n"
+	"	.word		661b\n"
+	"	sethi		%%uhi(%4), %1\n"
+	"	sllx		%1, 32, %1\n"
+	"	.word		662b\n"
+	"	or		%1, %%lo(%4), %1\n"
+	"	or		%0, %1, %0\n"
+	"	.previous\n"
+	: "=r" (val), "=r" (tmp)
+	: "0" (val), "i" (mask_4u), "i" (mask_4v));
+
+	return __pte(val);
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+	const unsigned long mask_4u = _PAGE_MODIFIED_4U | _PAGE_W_4U;
+	const unsigned long mask_4v = _PAGE_MODIFIED_4V | _PAGE_W_4V;
+	unsigned long val = pte_val(pte), tmp;
+
+	BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) ||
+		     (mask_4v & ~(0xfffffc0000000fffUL)));
+
+	__asm__ __volatile__(
+	"\n661:	andn		%0, %3, %0\n"
+	"	nop\n"
+	"\n662:	nop\n"
+	"	nop\n"
+	"	.section	.sun4v_2insn_patch, \"ax\"\n"
+	"	.word		661b\n"
+	"	sethi		%%uhi(%4), %1\n"
+	"	sllx		%1, 32, %1\n"
+	"	.word		662b\n"
+	"	or		%1, %%lo(%4), %1\n"
+	"	andn		%0, %1, %0\n"
+	"	.previous\n"
+	: "=r" (val), "=r" (tmp)
+	: "0" (val), "i" (mask_4u), "i" (mask_4v));
+
+	return __pte(val);
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+	const unsigned long mask_4u = _PAGE_WRITE_4U;
+	const unsigned long mask_4v = _PAGE_WRITE_4V;
+	unsigned long val = pte_val(pte), mask;
+
+	BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) ||
+		     (mask_4v & ~(0xfffffc0000000000UL)));
+
+	__asm__ __volatile__(
+	"\n661:	mov		%1, %0\n"
+	"	nop\n"
+	"	.section	.sun4v_2insn_patch, \"ax\"\n"
+	"	.word		661b\n"
+	"	sethi		%%uhi(%2), %0\n"
+	"	sllx		%0, 32, %0\n"
+	"	.previous\n"
+	: "=r" (mask)
+	: "i" (mask_4u), "i" (mask_4v));
+
+	return __pte(val | mask);
+}
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+	const unsigned long mask_4u = _PAGE_WRITE_4U | _PAGE_W_4U;
+	const unsigned long mask_4v = _PAGE_WRITE_4V | _PAGE_W_4V;
+	unsigned long val = pte_val(pte), tmp;
+
+	BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) ||
+		     (mask_4v & ~(0xfffffc0000000fffUL)));
+
+	__asm__ __volatile__(
+	"\n661:	andn		%0, %3, %0\n"
+	"	nop\n"
+	"\n662:	nop\n"
+	"	nop\n"
+	"	.section	.sun4v_2insn_patch, \"ax\"\n"
+	"	.word		661b\n"
+	"	sethi		%%uhi(%4), %1\n"
+	"	sllx		%1, 32, %1\n"
+	"	.word		662b\n"
+	"	or		%1, %%lo(%4), %1\n"
+	"	andn		%0, %1, %0\n"
+	"	.previous\n"
+	: "=r" (val), "=r" (tmp)
+	: "0" (val), "i" (mask_4u), "i" (mask_4v));
+
+	return __pte(val);
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+	const unsigned long mask_4u = _PAGE_ACCESSED_4U;
+	const unsigned long mask_4v = _PAGE_ACCESSED_4V;
+	unsigned long mask;
+
+	BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) ||
+		     (mask_4v & ~(0xfffffc0000000000UL)));
+
+	__asm__ __volatile__(
+	"\n661:	mov		%1, %0\n"
+	"	nop\n"
+	"	.section	.sun4v_2insn_patch, \"ax\"\n"
+	"	.word		661b\n"
+	"	sethi		%%uhi(%2), %0\n"
+	"	sllx		%0, 32, %0\n"
+	"	.previous\n"
+	: "=r" (mask)
+	: "i" (mask_4u), "i" (mask_4v));
+
+	mask |= _PAGE_R;
+
+	return __pte(pte_val(pte) & ~mask);
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+	const unsigned long mask_4u = _PAGE_ACCESSED_4U;
+	const unsigned long mask_4v = _PAGE_ACCESSED_4V;
+	unsigned long mask;
+
+	BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) ||
+		     (mask_4v & ~(0xfffffc0000000000UL)));
+
+	__asm__ __volatile__(
+	"\n661:	mov		%1, %0\n"
+	"	nop\n"
+	"	.section	.sun4v_2insn_patch, \"ax\"\n"
+	"	.word		661b\n"
+	"	sethi		%%uhi(%2), %0\n"
+	"	sllx		%0, 32, %0\n"
+	"	.previous\n"
+	: "=r" (mask)
+	: "i" (mask_4u), "i" (mask_4v));
+
+	mask |= _PAGE_R;
+
+	return __pte(pte_val(pte) | mask);
+}
+
+static inline unsigned long pte_young(pte_t pte)
+{
+	const unsigned long mask_4u = _PAGE_ACCESSED_4U;
+	const unsigned long mask_4v = _PAGE_ACCESSED_4V;
+	unsigned long mask;
+
+	BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) ||
+		     (mask_4v & ~(0xfffffc0000000000UL)));
+
+	__asm__ __volatile__(
+	"\n661:	mov		%1, %0\n"
+	"	nop\n"
+	"	.section	.sun4v_2insn_patch, \"ax\"\n"
+	"	.word		661b\n"
+	"	sethi		%%uhi(%2), %0\n"
+	"	sllx		%0, 32, %0\n"
+	"	.previous\n"
+	: "=r" (mask)
+	: "i" (mask_4u), "i" (mask_4v));
+
+	return (pte_val(pte) & mask);
+}
+
+static inline unsigned long pte_dirty(pte_t pte)
+{
+	const unsigned long mask_4u = _PAGE_MODIFIED_4U;
+	const unsigned long mask_4v = _PAGE_MODIFIED_4V;
+	unsigned long mask;
+
+	BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) ||
+		     (mask_4v & ~(0xfffffc0000000000UL)));
+
+	__asm__ __volatile__(
+	"\n661:	mov		%1, %0\n"
+	"	nop\n"
+	"	.section	.sun4v_2insn_patch, \"ax\"\n"
+	"	.word		661b\n"
+	"	sethi		%%uhi(%2), %0\n"
+	"	sllx		%0, 32, %0\n"
+	"	.previous\n"
+	: "=r" (mask)
+	: "i" (mask_4u), "i" (mask_4v));
+
+	return (pte_val(pte) & mask);
+}
+
+static inline unsigned long pte_write(pte_t pte)
+{
+	const unsigned long mask_4u = _PAGE_WRITE_4U;
+	const unsigned long mask_4v = _PAGE_WRITE_4V;
+	unsigned long mask;
+
+	BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) ||
+		     (mask_4v & ~(0xfffffc0000000000UL)));
+
+	__asm__ __volatile__(
+	"\n661:	mov		%1, %0\n"
+	"	nop\n"
+	"	.section	.sun4v_2insn_patch, \"ax\"\n"
+	"	.word		661b\n"
+	"	sethi		%%uhi(%2), %0\n"
+	"	sllx		%0, 32, %0\n"
+	"	.previous\n"
+	: "=r" (mask)
+	: "i" (mask_4u), "i" (mask_4v));
+
+	return (pte_val(pte) & mask);
+}
+
+static inline unsigned long pte_exec(pte_t pte)
+{
+	const unsigned long mask_4u = _PAGE_EXEC_4U;
+	const unsigned long mask_4v = _PAGE_EXEC_4V;
+	unsigned long mask;
+
+	BUILD_BUG_ON((mask_4u & ~(0x00000000fffffc00UL)) ||
+		     (mask_4v & ~(0x0000000000000fffUL)));
+
+	__asm__ __volatile__(
+	"\n661:	sethi		%%hi(%1), %0\n"
+	"	.section	.sun4v_1insn_patch, \"ax\"\n"
+	"	.word		661b\n"
+	"	mov		%2, %0\n"
+	"	.previous\n"
+	: "=r" (mask)
+	: "i" (mask_4u), "i" (mask_4v));
+
+	return (pte_val(pte) & mask);
+}
+
+static inline unsigned long pte_read(pte_t pte)
+{
+	const unsigned long mask_4u = _PAGE_READ_4U;
+	const unsigned long mask_4v = _PAGE_READ_4V;
+	unsigned long mask;
+
+	BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) ||
+		     (mask_4v & ~(0xfffffc0000000000UL)));
+
+	__asm__ __volatile__(
+	"\n661:	mov		%1, %0\n"
+	"	nop\n"
+	"	.section	.sun4v_2insn_patch, \"ax\"\n"
+	"	.word		661b\n"
+	"	sethi		%%uhi(%2), %0\n"
+	"	sllx		%0, 32, %0\n"
+	"	.previous\n"
+	: "=r" (mask)
+	: "i" (mask_4u), "i" (mask_4v));
+
+	return (pte_val(pte) & mask);
+}
+
+static inline unsigned long pte_file(pte_t pte)
+{
+	const unsigned long mask_4u = _PAGE_FILE_4U;
+	const unsigned long mask_4v = _PAGE_FILE_4V;
+	unsigned long val = pte_val(pte);
+
+	BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) ||
+		     (mask_4v & ~(0x0000000000000fffUL)));
+
+	__asm__ __volatile__(
+	"\n661:	and		%0, %2, %0\n"
+	"	.section	.sun4v_1insn_patch, \"ax\"\n"
+	"	.word		661b\n"
+	"	and		%0, %3, %0\n"
+	"	.previous\n"
+	: "=r" (val)
+	: "0" (val), "i" (mask_4u), "i" (mask_4v));
+
+	return val;
+}
+
+static inline unsigned long pte_present(pte_t pte)
+{
+	const unsigned long mask_4u = _PAGE_PRESENT_4U;
+	const unsigned long mask_4v = _PAGE_PRESENT_4V;
+	unsigned long val = pte_val(pte);
+
+	BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) ||
+		     (mask_4v & ~(0x0000000000000fffUL)));
+
+	__asm__ __volatile__(
+	"\n661:	and		%0, %2, %0\n"
+	"	.section	.sun4v_1insn_patch, \"ax\"\n"
+	"	.word		661b\n"
+	"	and		%0, %3, %0\n"
+	"	.previous\n"
+	: "=r" (val)
+	: "0" (val), "i" (mask_4u), "i" (mask_4v));
+
+	return val;
+}
 #define pmd_set(pmdp, ptep)	\
 	(pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> 11UL))
-- 
2.39.5
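
Background note on the mechanism used above: the inline asm only emits records into the .sun4v_1insn_patch and .sun4v_2insn_patch sections; each record is the address of the 661: instruction group (".word 661b") followed by the sun4v replacement instruction word(s). Early in boot, a kernel that detects it is running under the sun4v hypervisor walks these tables and rewrites the sun4u instructions in place, which is why the per-operation cost is near zero once the system is up. The sketch below illustrates that boot-time pass only; the entry layout mirrors what the asm emits, but the struct and function names are placeholders, not the kernel's actual identifiers.

/* Illustrative only: applying a .sun4v_2insn_patch table at boot,
 * assuming each entry is one 32-bit address word followed by the two
 * replacement instruction words, matching the section data emitted by
 * the inline asm in the patch above.  Names are hypothetical.
 */
struct sun4v_2insn_patch_entry {
	unsigned int	addr;		/* address of the 661: sun4u instruction pair */
	unsigned int	insns[2];	/* sun4v replacement instructions */
};

static void patch_2insn_range(struct sun4v_2insn_patch_entry *p,
			      struct sun4v_2insn_patch_entry *end)
{
	while (p < end) {
		unsigned int *insn = (unsigned int *) (unsigned long) p->addr;

		/* Overwrite the sun4u code in place, flushing each
		 * modified word out of the instruction cache.
		 */
		insn[0] = p->insns[0];
		__asm__ __volatile__("flush	%0" : : "r" (&insn[0]));
		insn[1] = p->insns[1];
		__asm__ __volatile__("flush	%0" : : "r" (&insn[1]));

		p++;
	}
}

Such a routine would only be invoked when the platform is sun4v (tlb_type == hypervisor); on sun4u hardware the original instruction sequences are left untouched.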