/*
 * PPC64 Huge TLB Page Support for hash based MMUs (POWER4 and later)
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/machdep.h>

extern long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
				  unsigned long pa, unsigned long rflags,
				  unsigned long vflags, int psize, int ssize);

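/*
 * Hash one hugepage PTE into the hardware hash table. The return values
 * mirror the paths below: 0 when the HPTE is in place (or the racing
 * fault should simply be retried), 1 when the access does not match the
 * PTE permissions and a full page fault must be taken, and -1 when the
 * hypervisor rejected the insertion.
 */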
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
		     pte_t *ptep, unsigned long trap, unsigned long flags,
		     int ssize, unsigned int shift, unsigned int mmu_psize)
{
	unsigned long vpn;
	unsigned long old_pte, new_pte;
	unsigned long rflags, pa, sz;
	long slot;

	BUG_ON(shift != mmu_psize_defs[mmu_psize].shift);

	/* Search the Linux page table for a match with va */
	vpn = hpt_vpn(ea, vsid, ssize);

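	/*
	 * vpn is the virtual page number: the VSID combined with the page
	 * offset of ea within its segment. All hashing and HPTE matching
	 * below works on vpn, never on the raw effective address.
	 */
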
	/* At this point, we have a pte (old_pte) which can be used to build
	 * or update an HPTE. There are 2 cases:
	 *
	 * 1. There is a valid (present) pte with no associated HPTE (this is
	 *	the most common case)
	 * 2. There is a valid (present) pte with an associated HPTE. The
	 *	current values of the pp bits in the HPTE prevent access
	 *	because we are doing software DIRTY bit management and the
	 *	page is currently not DIRTY.
	 */

	do {
		old_pte = pte_val(*ptep);
		/* If PTE busy, retry the access */
		if (unlikely(old_pte & H_PAGE_BUSY))
			return 0;
		/* If PTE permissions don't match, take page fault */
		if (unlikely(!check_pte_access(access, old_pte)))
			return 1;

		/* Try to lock the PTE, add ACCESSED and DIRTY if it was
		 * a write access */
		new_pte = old_pte | H_PAGE_BUSY | _PAGE_ACCESSED;
		if (access & _PAGE_WRITE)
			new_pte |= _PAGE_DIRTY;
	} while(!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));

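	/*
	 * The loop above is a lock-free cmpxchg: pte_xchg() only succeeds
	 * if the PTE still holds old_pte, so once it returns true we have
	 * atomically set H_PAGE_BUSY (the software lock bit) together with
	 * the ACCESSED/DIRTY updates, and concurrent hash faults on this
	 * PTE will see the busy bit and retry.
	 */
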
	rflags = htab_convert_pte_flags(new_pte);

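	/*
	 * rflags now holds the HPTE flag bits (the pp protection bits and
	 * friends) derived from the Linux protection bits in new_pte.
	 */
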
	sz = ((1UL) << shift);
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		/* No CPU has hugepages but lacks no execute, so we
		 * don't need to worry about that case */
		rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);

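	/*
	 * hash_page_do_lazy_icache() handles CPUs whose icache does not
	 * snoop the dcache: depending on the trap it either flushes the
	 * icache for this page now or strips execute permission from
	 * rflags so the flush can happen lazily on a later execute fault.
	 */
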
	/* Check if pte already has an hpte (case 2) */
	if (unlikely(old_pte & H_PAGE_HASHPTE)) {
		/* There MIGHT be an HPTE for this pte */
		unsigned long hash, slot;

		hash = hpt_hash(vpn, shift, ssize);
		if (old_pte & H_PAGE_F_SECOND)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += (old_pte & H_PAGE_F_GIX) >> H_PAGE_F_GIX_SHIFT;

		if (mmu_hash_ops.hpte_updatepp(slot, rflags, vpn, mmu_psize,
					       mmu_psize, ssize, flags) == -1)
			old_pte &= ~_PAGE_HPTEFLAGS;
	}

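	/*
	 * If the update failed, the HPTE was evicted from the hash table
	 * behind our back. Clearing _PAGE_HPTEFLAGS in old_pte makes the
	 * block below treat this like case 1 and insert a fresh HPTE.
	 */
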
	if (likely(!(old_pte & H_PAGE_HASHPTE))) {
		unsigned long hash = hpt_hash(vpn, shift, ssize);

		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;

		/* clear HPTE slot information in new PTE */
		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | H_PAGE_HASHPTE;

		slot = hpte_insert_repeating(hash, vpn, pa, rflags, 0,
					     mmu_psize, ssize);

		/*
		 * Hypervisor failure. Restore old pte and return -1
		 * similar to __hash_page_*
		 */
		if (unlikely(slot == -2)) {
			*ptep = __pte(old_pte);
			hash_failure_debug(ea, access, vsid, trap, ssize,
					   mmu_psize, mmu_psize, old_pte);
			return -1;
		}

		new_pte |= (slot << H_PAGE_F_GIX_SHIFT) &
			(H_PAGE_F_SECOND | H_PAGE_F_GIX);
	}

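	/*
	 * Whichever path we took, new_pte now has H_PAGE_HASHPTE set plus
	 * the slot cookie (H_PAGE_F_SECOND | H_PAGE_F_GIX) that lets later
	 * updates and invalidates find the HPTE without searching.
	 */
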
	/*
	 * No need to use ldarx/stdcx here: we hold H_PAGE_BUSY, so a plain
	 * store that also clears the busy bit is safe.
	 */
	*ptep = __pte(new_pte & ~H_PAGE_BUSY);
	return 0;
}