/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/hash.h>
#include <linux/slab.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>

#define PTE_SIZE        12

/* #define DEBUG_MMU */

#ifdef DEBUG_MMU
#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_mmu(a, ...) do { } while(0)
#endif

static struct kmem_cache *hpte_cache;
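
/*
 * Each shadow PTE sits on four hash lists at once, so it can be found
 * (and invalidated) by effective address, by effective-address range,
 * by virtual page number, and by virtual-page range. The helpers below
 * compute the bucket index into the corresponding per-vcpu hash table.
 */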
static inline u64 kvmppc_mmu_hash_pte(u64 eaddr)
{
        return hash_64(eaddr >> PTE_SIZE, HPTEG_HASH_BITS_PTE);
}

static inline u64 kvmppc_mmu_hash_pte_long(u64 eaddr)
{
        return hash_64((eaddr & 0x0ffff000) >> PTE_SIZE,
                       HPTEG_HASH_BITS_PTE_LONG);
}

static inline u64 kvmppc_mmu_hash_vpte(u64 vpage)
{
        return hash_64(vpage & 0xfffffffffULL, HPTEG_HASH_BITS_VPTE);
}

static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)
{
        return hash_64((vpage & 0xffffff000ULL) >> 12,
                       HPTEG_HASH_BITS_VPTE_LONG);
}
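
/*
 * Link a newly created shadow PTE into all four lookup lists. Writers
 * are serialized by the vcpu's mmu_lock; lookups walk the lists under
 * RCU, hence the _rcu list primitives.
 */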
void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
        u64 index;

        spin_lock(&vcpu->arch.mmu_lock);

        /* Add to ePTE list */
        index = kvmppc_mmu_hash_pte(pte->pte.eaddr);
        hlist_add_head_rcu(&pte->list_pte, &vcpu->arch.hpte_hash_pte[index]);

        /* Add to ePTE_long list */
        index = kvmppc_mmu_hash_pte_long(pte->pte.eaddr);
        hlist_add_head_rcu(&pte->list_pte_long,
                           &vcpu->arch.hpte_hash_pte_long[index]);

        /* Add to vPTE list */
        index = kvmppc_mmu_hash_vpte(pte->pte.vpage);
        hlist_add_head_rcu(&pte->list_vpte, &vcpu->arch.hpte_hash_vpte[index]);

        /* Add to vPTE_long list */
        index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage);
        hlist_add_head_rcu(&pte->list_vpte_long,
                           &vcpu->arch.hpte_hash_vpte_long[index]);

        spin_unlock(&vcpu->arch.mmu_lock);
}

static void free_pte_rcu(struct rcu_head *head)
{
        struct hpte_cache *pte = container_of(head, struct hpte_cache, rcu_head);
        kmem_cache_free(hpte_cache, pte);
}
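
/*
 * Tear down a single shadow PTE: remove the host mapping, unhash the
 * entry from all four lists and release the backing page. Freeing the
 * entry itself is deferred via RCU so that concurrent lookups walking
 * the lists never see freed memory.
 */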
static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
        /* pte already invalidated? */
        if (hlist_unhashed(&pte->list_pte))
                return;

        dprintk_mmu("KVM: Flushing SPT: 0x%lx (0x%llx) -> 0x%llx\n",
                    pte->pte.eaddr, pte->pte.vpage, pte->host_va);

        /* Different for 32 and 64 bit */
        kvmppc_mmu_invalidate_pte(vcpu, pte);

        spin_lock(&vcpu->arch.mmu_lock);

        hlist_del_init_rcu(&pte->list_pte);
        hlist_del_init_rcu(&pte->list_pte_long);
        hlist_del_init_rcu(&pte->list_vpte);
        hlist_del_init_rcu(&pte->list_vpte_long);

        spin_unlock(&vcpu->arch.mmu_lock);

        if (pte->pte.may_write)
                kvm_release_pfn_dirty(pte->pfn);
        else
                kvm_release_pfn_clean(pte->pfn);

        vcpu->arch.hpte_cache_count--;
        call_rcu(&pte->rcu_head, free_pte_rcu);
}
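
/*
 * A complete flush only needs to walk one of the four tables: every
 * entry is on some vPTE_long list, so iterating all vPTE_long buckets
 * visits each shadow PTE exactly once.
 */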
static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
{
        struct hpte_cache *pte;
        struct hlist_node *node;
        int i;

        rcu_read_lock();

        for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
                struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i];

                hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
                        invalidate_pte(vcpu, pte);
        }

        rcu_read_unlock();
}
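
/*
 * Flush the shadow PTEs for a single page, identified by its
 * page-aligned effective address.
 */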
static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
{
        struct hlist_head *list;
        struct hlist_node *node;
        struct hpte_cache *pte;

        /* Find the list of entries in the map */
        list = &vcpu->arch.hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];

        rcu_read_lock();

        /* Check the list for matching entries and invalidate */
        hlist_for_each_entry_rcu(pte, node, list, list_pte)
                if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
                        invalidate_pte(vcpu, pte);

        rcu_read_unlock();
}

static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
{
        struct hlist_head *list;
        struct hlist_node *node;
        struct hpte_cache *pte;

        /* Find the list of entries in the map */
        list = &vcpu->arch.hpte_hash_pte_long[
                        kvmppc_mmu_hash_pte_long(guest_ea)];

        rcu_read_lock();

        /* Check the list for matching entries and invalidate */
        hlist_for_each_entry_rcu(pte, node, list, list_pte_long)
                if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea)
                        invalidate_pte(vcpu, pte);

        rcu_read_unlock();
}
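
/*
 * Flush shadow PTEs by effective address. Three masks are supported:
 * ~0xfffUL flushes a single page, 0x0ffff000 matches the page index
 * within any 256MB segment (for when the full effective address is
 * not known), and 0 flushes everything. Other masks are a bug.
 */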
void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
{
        dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%lx & 0x%lx\n",
                    vcpu->arch.hpte_cache_count, guest_ea, ea_mask);

        guest_ea &= ea_mask;

        switch (ea_mask) {
        case ~0xfffUL:
                kvmppc_mmu_pte_flush_page(vcpu, guest_ea);
                break;
        case 0x0ffff000:
                kvmppc_mmu_pte_flush_long(vcpu, guest_ea);
                break;
        case 0:
                /* Doing a complete flush -> start from scratch */
                kvmppc_mmu_pte_flush_all(vcpu);
                break;
        default:
                WARN_ON(1);
                break;
        }
}

/* Flush with mask 0xfffffffff */
static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
{
        struct hlist_head *list;
        struct hlist_node *node;
        struct hpte_cache *pte;
        u64 vp_mask = 0xfffffffffULL;

        list = &vcpu->arch.hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];

        rcu_read_lock();

        /* Check the list for matching entries and invalidate */
        hlist_for_each_entry_rcu(pte, node, list, list_vpte)
                if ((pte->pte.vpage & vp_mask) == guest_vp)
                        invalidate_pte(vcpu, pte);

        rcu_read_unlock();
}

/* Flush with mask 0xffffff000 */
static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
{
        struct hlist_head *list;
        struct hlist_node *node;
        struct hpte_cache *pte;
        u64 vp_mask = 0xffffff000ULL;

        list = &vcpu->arch.hpte_hash_vpte_long[
                kvmppc_mmu_hash_vpte_long(guest_vp)];

        rcu_read_lock();

        /* Check the list for matching entries and invalidate */
        hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
                if ((pte->pte.vpage & vp_mask) == guest_vp)
                        invalidate_pte(vcpu, pte);

        rcu_read_unlock();
}

void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
{
        dprintk_mmu("KVM: Flushing %d Shadow vPTEs: 0x%llx & 0x%llx\n",
                    vcpu->arch.hpte_cache_count, guest_vp, vp_mask);
        guest_vp &= vp_mask;

        switch(vp_mask) {
        case 0xfffffffffULL:
                kvmppc_mmu_pte_vflush_short(vcpu, guest_vp);
                break;
        case 0xffffff000ULL:
                kvmppc_mmu_pte_vflush_long(vcpu, guest_vp);
                break;
        default:
                WARN_ON(1);
                return;
        }
}
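
/*
 * Flush by guest physical address range. There is no hash keyed on
 * raddr, so walk every vPTE_long bucket and filter on the range.
 */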
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
        struct hlist_node *node;
        struct hpte_cache *pte;
        int i;

        dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%lx - 0x%lx\n",
                    vcpu->arch.hpte_cache_count, pa_start, pa_end);

        rcu_read_lock();

        for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
                struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i];

                hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
                        if ((pte->pte.raddr >= pa_start) &&
                            (pte->pte.raddr < pa_end))
                                invalidate_pte(vcpu, pte);
        }

        rcu_read_unlock();
}
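
/*
 * Get a free shadow PTE slot. When the cache fills up to
 * HPTEG_CACHE_NUM entries, all shadow PTEs are flushed and the cache
 * is refilled from scratch instead of evicting individual entries.
 */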
struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
{
        struct hpte_cache *pte;

        pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);
        vcpu->arch.hpte_cache_count++;

        if (vcpu->arch.hpte_cache_count == HPTEG_CACHE_NUM)
                kvmppc_mmu_pte_flush_all(vcpu);

        return pte;
}

void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu)
{
        kvmppc_mmu_pte_flush(vcpu, 0, 0);
}

static void kvmppc_mmu_hpte_init_hash(struct hlist_head *hash_list, int len)
{
        int i;

        for (i = 0; i < len; i++)
                INIT_HLIST_HEAD(&hash_list[i]);
}

int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
{
        /* init hpte lookup hashes */
        kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte,
                                  ARRAY_SIZE(vcpu->arch.hpte_hash_pte));
        kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte_long,
                                  ARRAY_SIZE(vcpu->arch.hpte_hash_pte_long));
        kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte,
                                  ARRAY_SIZE(vcpu->arch.hpte_hash_vpte));
        kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte_long,
                                  ARRAY_SIZE(vcpu->arch.hpte_hash_vpte_long));

        spin_lock_init(&vcpu->arch.mmu_lock);

        return 0;
}

int kvmppc_mmu_hpte_sysinit(void)
{
        /* init hpte slab cache */
        hpte_cache = kmem_cache_create("kvm-spt", sizeof(struct hpte_cache),
                                       sizeof(struct hpte_cache), 0, NULL);

        return 0;
}

void kvmppc_mmu_hpte_sysexit(void)
{
        kmem_cache_destroy(hpte_cache);
}