/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */
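
/*
 * Host MMU handling for 32-bit Book3S PR KVM: this file maintains the
 * shadow segment registers and installs guest mappings into the host
 * hashed page table (HTAB).
 */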
#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/32/mmu-hash.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>

#include "book3s.h"
/* #define DEBUG_MMU */
/* #define DEBUG_SR */

#ifdef DEBUG_MMU
#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_mmu(a, ...) do { } while(0)
#endif

#ifdef DEBUG_SR
#define dprintk_sr(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_sr(a, ...) do { } while(0)
#endif
#if PAGE_SHIFT != 12
#error Unknown page size
#endif

#ifdef CONFIG_SMP
#error XXX need to grab mmu_hash_lock
#endif

#ifdef CONFIG_PTE_64BIT
#error Only 32 bit pages are supported for now
#endif
static ulong htab;
static u32 htabmask;

void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
        volatile u32 *pteg;

        /* Remove from host HTAB */
        pteg = (u32*)pte->slot;
        pteg[0] = 0;

        /* And make sure it's gone from the TLB too */
        asm volatile ("sync");
        asm volatile ("tlbie %0" : : "r" (pte->pte.eaddr) : "memory");
        asm volatile ("sync");
        asm volatile ("tlbsync");
}
/* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
 * a hash, so we don't waste cycles on looping */
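/*
 * The fold below XORs eight SID_MAP_BITS-wide slices of the 64-bit guest
 * VSID together (with SID_MAP_BITS == 9, as the 512-entry note above
 * implies, 8 * 9 bits covers the whole value), so every bit of the guest
 * VSID influences the resulting array index.
 */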
static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
{
        return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
}
static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
{
        struct kvmppc_sid_map *map;
        u16 sid_map_mask;

        if (kvmppc_get_msr(vcpu) & MSR_PR)
                gvsid |= VSID_PR;

        sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
        map = &to_book3s(vcpu)->sid_map[sid_map_mask];
        if (map->guest_vsid == gvsid) {
                dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
                            gvsid, map->host_vsid);
                return map;
        }

        /* Collision fallback: the entry may live in the mirrored slot */
        map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
        if (map->guest_vsid == gvsid) {
                dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
                            gvsid, map->host_vsid);
                return map;
        }

        dprintk_sr("SR: Searching 0x%llx -> not found\n", gvsid);
        return NULL;
}
static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,
                                bool primary)
{
        u32 page, hash;
        ulong pteg = htab;

        page = (eaddr & ~ESID_MASK) >> 12;
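
        /*
         * Primary hash as architected for the 32-bit HTAB: vsid ^ page
         * index. The << 6 turns a PTEG number into a byte offset, since
         * each PTEG holds 8 HPTEs of 8 bytes (64 bytes); the secondary
         * PTEG is addressed via the complemented hash below.
         */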
        hash = ((vsid ^ page) << 6);
        if (!primary)
                hash = ~hash;

        hash &= htabmask;

        pteg |= hash;

        dprintk_mmu("htab: %lx | hash: %x | htabmask: %x | pteg: %lx\n",
                htab, hash, htabmask, pteg);

        return (u32*)pteg;
}
int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
                        bool iswrite)
{
        struct kvmppc_sid_map *map;
        struct hpte_cache *pte;
        volatile u32 *pteg;
        kvm_pfn_t hpaddr;
        u32 eaddr = orig_pte->eaddr;
        u32 pteg0, pteg1;
        u64 vpn, vsid;
        register int rr = 0;
        bool primary = false;
        bool evict = false;
        bool writable;
        int r = 0;

        /* Get host physical address for gpa */
        hpaddr = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable);
        if (is_error_noslot_pfn(hpaddr)) {
                printk(KERN_INFO "Couldn't get guest page for gpa %lx!\n",
                                 orig_pte->raddr);
                r = -EINVAL;
                goto out;
        }
        hpaddr <<= PAGE_SHIFT;
        /* and write the mapping ea -> hpa into the pt */
        vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
        map = find_sid_vsid(vcpu, vsid);
        if (!map) {
                kvmppc_mmu_map_segment(vcpu, eaddr);
                map = find_sid_vsid(vcpu, vsid);
        }
        BUG_ON(!map);

        vsid = map->host_vsid;
        vpn = (vsid << (SID_SHIFT - VPN_SHIFT)) |
                ((eaddr & ~ESID_MASK) >> VPN_SHIFT);

next_pteg:
        if (rr == 16) {
                primary = !primary;
                evict = true;
                rr = 0;
        }

        pteg = kvmppc_mmu_get_pteg(vcpu, vsid, eaddr, primary);
        /* not evicting yet */
        if (!evict && (pteg[rr] & PTE_V)) {
                rr += 2;
                goto next_pteg;
        }
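
        /*
         * pteg[rr] now indexes either a free slot or, once the first
         * PTEG filled up and rr wrapped, slot 0 of the alternate PTEG,
         * whose previous occupant we simply evict.
         */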
        dprintk_mmu("KVM: old PTEG: %p (%d)\n", pteg, rr);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);
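
        /*
         * Assemble the two HPTE words as architected for 32-bit hash
         * MMUs: word 0 carries V, the VSID (<< 7), the secondary-hash
         * flag and the abbreviated page index from the effective
         * address; word 1 carries the real page number plus the R/C/M
         * status bits and the PP protection bits set below.
         */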
        pteg0 = ((eaddr & 0x0fffffff) >> 22) | (vsid << 7) | PTE_V |
                (primary ? 0 : PTE_SEC);
        pteg1 = hpaddr | PTE_M | PTE_R | PTE_C;

        if (orig_pte->may_write && writable) {
                pteg1 |= PP_RWRW;
                mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
        } else {
                pteg1 |= PP_RWRX;
        }

        if (orig_pte->may_execute)
                kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);
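
        /*
         * Publish the entry with interrupts off and in the architected
         * order: invalidate the old slot, sync, write the second word,
         * then the valid first word, then sync again, so the hardware
         * table walker never sees a half-written but valid entry.
         */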
        local_irq_disable();

        if (pteg[rr]) {
                pteg[rr] = 0;
                asm volatile ("sync");
        }
        pteg[rr + 1] = pteg1;
        pteg[rr] = pteg0;
        asm volatile ("sync");

        local_irq_enable();
        dprintk_mmu("KVM: new PTEG: %p\n", pteg);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);
        /* Now tell our Shadow PTE code about the new page */
        pte = kvmppc_mmu_hpte_cache_next(vcpu);
        if (!pte) {
                kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
                r = -EAGAIN;
                goto out;
        }
        dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n",
                    orig_pte->may_write ? 'w' : '-',
                    orig_pte->may_execute ? 'x' : '-',
                    orig_pte->eaddr, (ulong)pteg, vpn,
                    orig_pte->vpage, hpaddr);
        pte->slot = (ulong)&pteg[rr];
        pte->host_vpn = vpn;
        pte->pte = *orig_pte;
        pte->pfn = hpaddr >> PAGE_SHIFT;

        kvmppc_mmu_hpte_cache_map(vcpu, pte);

        kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
out:
        return r;
}
void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
        /* Flush every shadow PTE that maps this guest page */
        kvmppc_mmu_pte_vflush(vcpu, pte->vpage, 0xfffffffffULL);
}
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
        struct kvmppc_sid_map *map;
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
        u16 sid_map_mask;
        static int backwards_map = 0;

        if (kvmppc_get_msr(vcpu) & MSR_PR)
                gvsid |= VSID_PR;
        /* We might get collisions that trap in preceding order, so let's
           map them differently */

        sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
        if (backwards_map)
                sid_map_mask = SID_MAP_MASK - sid_map_mask;

        map = &to_book3s(vcpu)->sid_map[sid_map_mask];
        /* Make sure we're taking the other map next time */
        backwards_map = !backwards_map;
        /* Uh-oh ... out of mappings. Let's flush! */
        if (vcpu_book3s->vsid_next >= VSID_POOL_SIZE) {
                vcpu_book3s->vsid_next = 0;
                memset(vcpu_book3s->sid_map, 0,
                       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
                kvmppc_mmu_pte_flush(vcpu, 0, 0);
                kvmppc_mmu_flush_segments(vcpu);
        }
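
        /*
         * Recycling the whole pool this way is cheap: the flushed shadow
         * segments and HPTEs are rebuilt lazily on their next fault, and
         * with VSID_POOL_SIZE (SID_CONTEXTS * 16) entries exhaustion is
         * rare anyway.
         */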
        map->host_vsid = vcpu_book3s->vsid_pool[vcpu_book3s->vsid_next];
        vcpu_book3s->vsid_next++;

        map->guest_vsid = gvsid;
        map->valid = true;

        return map;
}
int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
        u32 esid = eaddr >> SID_SHIFT;
        u64 gvsid;
        u32 sr;
        struct kvmppc_sid_map *map;
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        int r = 0;

        if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
                /* Invalidate an entry */
                svcpu->sr[esid] = SR_INVALID;
                r = -ENOENT;
                goto out;
        }
        map = find_sid_vsid(vcpu, gvsid);
        if (!map)
                map = create_sid_map(vcpu, gvsid);

        map->guest_esid = esid;
        sr = map->host_vsid | SR_KP;
        svcpu->sr[esid] = sr;

        dprintk_sr("MMU: mtsr %d, 0x%x\n", esid, sr);

out:
        svcpu_put(svcpu);
        return r;
}
void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);

        dprintk_sr("MMU: flushing all segments (%d)\n", ARRAY_SIZE(svcpu->sr));
        for (i = 0; i < ARRAY_SIZE(svcpu->sr); i++)
                svcpu->sr[i] = SR_INVALID;

        svcpu_put(svcpu);
}
void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
{
        int i;

        kvmppc_mmu_hpte_destroy(vcpu);
        preempt_disable();
        for (i = 0; i < SID_CONTEXTS; i++)
                __destroy_context(to_book3s(vcpu)->context_id[i]);
        preempt_enable();
}
/* From mm/mmu_context_hash32.c */
#define CTX_TO_VSID(c, id)	((((c) * (897 * 16)) + (id * 0x111)) & 0xffffff)
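
/*
 * Each host MMU context supplies 16 VSIDs, one per 256 MiB segment
 * register; e.g. context 1 yields VSIDs 0x3810, 0x3921, 0x3A32, ...
 * Reusing the host's formula guarantees the pooled shadow VSIDs never
 * collide with VSIDs the host allocator hands out.
 */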
int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        int err;
        ulong sdr1;
        int i;
        int j;

        for (i = 0; i < SID_CONTEXTS; i++) {
                err = __init_new_context();
                if (err < 0)
                        goto init_fail;
                vcpu3s->context_id[i] = err;

                /* Remember context id for this combination */
                for (j = 0; j < 16; j++)
                        vcpu3s->vsid_pool[(i * 16) + j] = CTX_TO_VSID(err, j);
        }
        vcpu3s->vsid_next = 0;

        /* Remember where the HTAB is */
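        /*
         * On 32-bit hash MMUs, SDR1 holds HTABORG (the physical HTAB
         * base) in its upper 16 bits and HTABMASK in its low 9 bits.
         * The mask built below is pre-shifted for kvmppc_mmu_get_pteg():
         * 0xFFC0 keeps the ten low-order hash bits that always apply,
         * while HTABMASK << 16 selects how many high-order hash bits
         * participate for this HTAB size.
         */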
        asm ( "mfsdr1 %0" : "=r"(sdr1) );
        htabmask = ((sdr1 & 0x1FF) << 16) | 0xFFC0;
        htab = (ulong)__va(sdr1 & 0xffff0000);
        kvmppc_mmu_hpte_init(vcpu);

        return 0;
init_fail:
        for (j = 0; j < i; j++) {
                if (!vcpu3s->context_id[j])
                        continue;

                __destroy_context(to_book3s(vcpu)->context_id[j]);
        }

        return -1;
}