/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>

/* #define DEBUG_MMU */

#ifdef DEBUG_MMU
#define dprintk(X...) printk(KERN_INFO X)
#else
#define dprintk(X...) do { } while(0)
#endif

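/*
 * Reset the guest MSR to a bare 64-bit state: only MSR_SF is left set,
 * so instruction and data translation stay off until the guest enables
 * them again.
 */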
static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu)
{
        kvmppc_set_msr(vcpu, MSR_SF);
}

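/*
 * Walk the shadow copy of the guest SLB and return the entry whose ESID
 * matches @eaddr, honouring 256MB vs. 1TB segment size per entry.
 */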
static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
                                struct kvm_vcpu *vcpu,
                                gva_t eaddr)
{
        int i;
        u64 esid = GET_ESID(eaddr);
        u64 esid_1t = GET_ESID_1T(eaddr);

        for (i = 0; i < vcpu->arch.slb_nr; i++) {
                u64 cmp_esid = esid;

                if (!vcpu->arch.slb[i].valid)
                        continue;

                if (vcpu->arch.slb[i].tb)
                        cmp_esid = esid_1t;

                if (vcpu->arch.slb[i].esid == cmp_esid)
                        return &vcpu->arch.slb[i];
        }

        dprintk("KVM: No SLB entry found for 0x%lx [%llx | %llx]\n",
                eaddr, esid, esid_1t);
        for (i = 0; i < vcpu->arch.slb_nr; i++) {
                if (vcpu->arch.slb[i].vsid)
                        dprintk("  %d: %c%c%c %llx %llx\n", i,
                                vcpu->arch.slb[i].valid ? 'v' : ' ',
                                vcpu->arch.slb[i].large ? 'l' : ' ',
                                vcpu->arch.slb[i].tb    ? 't' : ' ',
                                vcpu->arch.slb[i].esid,
                                vcpu->arch.slb[i].vsid);
        }

        return NULL;
}

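/*
 * Segment geometry helpers: SID shift, offset mask and virtual page
 * number for a 256MB (SID_SHIFT) or 1TB (SID_SHIFT_1T) segment.
 */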
static int kvmppc_slb_sid_shift(struct kvmppc_slb *slbe)
{
        return slbe->tb ? SID_SHIFT_1T : SID_SHIFT;
}

static u64 kvmppc_slb_offset_mask(struct kvmppc_slb *slbe)
{
        return (1ul << kvmppc_slb_sid_shift(slbe)) - 1;
}

static u64 kvmppc_slb_calc_vpn(struct kvmppc_slb *slb, gva_t eaddr)
{
        eaddr &= kvmppc_slb_offset_mask(slb);

        return (eaddr >> VPN_SHIFT) |
                ((slb->vsid) << (kvmppc_slb_sid_shift(slb) - VPN_SHIFT));
}

static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
                                         bool data)
{
        struct kvmppc_slb *slb;

        slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
        if (!slb)
                return 0;

        return kvmppc_slb_calc_vpn(slb, eaddr);
}

static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe)
{
        return slbe->large ? 24 : 12;
}

static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr)
{
        int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);

        return ((eaddr & kvmppc_slb_offset_mask(slbe)) >> p);
}

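/*
 * Compute the host virtual address of the guest hash PTE group (PTEG)
 * that covers @eaddr.  The hash is derived from the VPN and the
 * segment/page size, inverted for the secondary PTEG, masked with the
 * HTAB size encoded in SDR1 and added to the HTAB origin.  For PAPR
 * guests SDR1 already holds a host virtual address; otherwise the
 * resulting guest physical address is translated with gfn_to_hva().
 */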
static hva_t kvmppc_mmu_book3s_64_get_pteg(
                                struct kvmppc_vcpu_book3s *vcpu_book3s,
                                struct kvmppc_slb *slbe, gva_t eaddr,
                                bool second)
{
        u64 hash, pteg, htabsize;
        u32 ssize;
        hva_t r;
        u64 vpn;

        htabsize = ((1 << ((vcpu_book3s->sdr1 & 0x1f) + 11)) - 1);

        vpn = kvmppc_slb_calc_vpn(slbe, eaddr);
        ssize = slbe->tb ? MMU_SEGSIZE_1T : MMU_SEGSIZE_256M;
        hash = hpt_hash(vpn, kvmppc_mmu_book3s_64_get_pagesize(slbe), ssize);
        if (second)
                hash = ~hash;
        hash &= ((1ULL << 39ULL) - 1ULL);
        hash &= htabsize;
        hash <<= 7ULL;

        pteg = vcpu_book3s->sdr1 & 0xfffffffffffc0000ULL;
        pteg |= hash;

        dprintk("MMU: page=0x%x sdr1=0x%llx pteg=0x%llx vsid=0x%llx\n",
                kvmppc_mmu_book3s_64_get_page(slbe, eaddr),
                vcpu_book3s->sdr1, pteg, slbe->vsid);

        /* When running a PAPR guest, SDR1 contains a HVA address instead
           of a GPA */
        if (vcpu_book3s->vcpu.arch.papr_enabled)
                r = pteg;
        else
                r = gfn_to_hva(vcpu_book3s->vcpu.kvm, pteg >> PAGE_SHIFT);

        if (kvm_is_error_hva(r))
                return r;
        return r | (pteg & ~PAGE_MASK);
}

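/*
 * Build the abbreviated virtual page number (AVPN) for @eaddr as it
 * appears in the first doubleword of a hashed PTE, shifted so that it
 * can be masked with HPTE_V_AVPN and compared directly.
 */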
static u64 kvmppc_mmu_book3s_64_get_avpn(struct kvmppc_slb *slbe, gva_t eaddr)
{
        int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);
        u64 avpn;

        avpn = kvmppc_mmu_book3s_64_get_page(slbe, eaddr);
        avpn |= slbe->vsid << (kvmppc_slb_sid_shift(slbe) - p);

        if (p < 24)
                avpn >>= ((80 - p) - 56) - 8;
        else
                avpn <<= 8;

        return avpn;
}

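/*
 * Translate a guest effective address into a kvmppc_pte by walking the
 * guest hash page table: find the SLB entry for @eaddr, compute the
 * primary (and, if necessary, secondary) PTEG, match the AVPN and derive
 * the real address and access permissions from the PTE that was found.
 */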
static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
                                struct kvmppc_pte *gpte, bool data)
{
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
        struct kvmppc_slb *slbe;
        hva_t ptegp;
        u64 pteg[16];
        u64 avpn = 0;
        u64 v, r;
        u64 v_val, v_mask;
        u64 eaddr_mask;
        int i;
        u8 pp, key = 0;
        bool found = false;
        bool second = false;
        ulong mp_ea = vcpu->arch.magic_page_ea;

        /* Magic page override */
        if (unlikely(mp_ea) &&
            unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) &&
            !(vcpu->arch.shared->msr & MSR_PR)) {
                gpte->eaddr = eaddr;
                gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
                gpte->raddr = vcpu->arch.magic_page_pa | (gpte->raddr & 0xfff);
                gpte->raddr &= KVM_PAM;
                gpte->may_execute = true;
                gpte->may_read = true;
                gpte->may_write = true;

                return 0;
        }

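        /*
         * Look up the guest SLB entry for this address and build the
         * HPTE first-doubleword match value (AVPN, segment size, L and
         * V bits) together with the mask it will be compared under.
         */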
        slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
        if (!slbe)
                goto no_seg_found;

        avpn = kvmppc_mmu_book3s_64_get_avpn(slbe, eaddr);
        v_val = avpn & HPTE_V_AVPN;

        if (slbe->tb)
                v_val |= SLB_VSID_B_1T;
        if (slbe->large)
                v_val |= HPTE_V_LARGE;
        v_val |= HPTE_V_VALID;

        v_mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_LARGE | HPTE_V_VALID |
                HPTE_V_SECONDARY;

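        /*
         * Search the primary PTEG first; if no PTE matches, flip the
         * hash and retry once with HPTE_V_SECONDARY set to search the
         * secondary PTEG.
         */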
do_second:
        ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu_book3s, slbe, eaddr, second);
        if (kvm_is_error_hva(ptegp))
                goto no_page_found;

        if (copy_from_user(pteg, (void __user *)ptegp, sizeof(pteg))) {
                printk(KERN_ERR "KVM can't copy data from 0x%lx!\n", ptegp);
                goto no_page_found;
        }

        if ((vcpu->arch.shared->msr & MSR_PR) && slbe->Kp)
                key = 4;
        else if (!(vcpu->arch.shared->msr & MSR_PR) && slbe->Ks)
                key = 4;

        for (i = 0; i < 16; i += 2) {
                /* Check all relevant fields of 1st dword */
                if ((pteg[i] & v_mask) == v_val) {
                        found = true;
                        break;
                }
        }

        if (!found) {
                if (second)
                        goto no_page_found;
                v_val |= HPTE_V_SECONDARY;
                second = true;
                goto do_second;
        }

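        /*
         * A matching PTE was found: decode the real address and the
         * access permissions from the PP bits combined with the key
         * derived from Ks/Kp above.
         */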
        v = pteg[i];
        r = pteg[i+1];
        pp = (r & HPTE_R_PP) | key;
        eaddr_mask = 0xFFF;

        gpte->eaddr = eaddr;
        gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
        if (slbe->large)
                eaddr_mask = 0xFFFFFF;
        gpte->raddr = (r & HPTE_R_RPN & ~eaddr_mask) | (eaddr & eaddr_mask);
        gpte->may_execute = ((r & HPTE_R_N) ? false : true);
        gpte->may_read = false;
        gpte->may_write = false;

        switch (pp) {
        case 0:
        case 1:
        case 2:
        case 6:
                gpte->may_write = true;
                /* fall through */
        case 3:
        case 5:
        case 7:
                gpte->may_read = true;
                break;
        }

        dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx "
                "-> 0x%lx\n",
                eaddr, avpn, gpte->vpage, gpte->raddr);

        /* Update PTE R and C bits, so the guest's swapper knows we used the
         * page */
        if (gpte->may_read) {
                /* Set the accessed flag */
                r |= HPTE_R_R;
        }
        if (data && gpte->may_write) {
                /* Set the dirty flag -- XXX even if not writing */
                r |= HPTE_R_C;
        }

        /* Write back into the PTEG */
        if (pteg[i+1] != r) {
                pteg[i+1] = r;
                copy_to_user((void __user *)ptegp, pteg, sizeof(pteg));
        }

        if (!gpte->may_read)
                return -EPERM;
        return 0;

no_page_found:
        return -ENOENT;

no_seg_found:

        dprintk("KVM MMU: Trigger segment fault\n");
        return -EINVAL;
}

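/*
 * Emulate slbmte: decode RS/RB into the shadow SLB entry selected by the
 * low bits of RB and map the new segment on the host side.
 */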
static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
{
        struct kvmppc_vcpu_book3s *vcpu_book3s;
        u64 esid, esid_1t;
        int slb_nr;
        struct kvmppc_slb *slbe;

        dprintk("KVM MMU: slbmte(0x%llx, 0x%llx)\n", rs, rb);

        vcpu_book3s = to_book3s(vcpu);

        esid = GET_ESID(rb);
        esid_1t = GET_ESID_1T(rb);
        slb_nr = rb & 0xfff;

        if (slb_nr >= vcpu->arch.slb_nr)
                return;

        slbe = &vcpu->arch.slb[slb_nr];

        slbe->large = (rs & SLB_VSID_L) ? 1 : 0;
        slbe->tb    = (rs & SLB_VSID_B_1T) ? 1 : 0;
        slbe->esid  = slbe->tb ? esid_1t : esid;
        slbe->vsid  = (rs & ~SLB_VSID_B) >> (kvmppc_slb_sid_shift(slbe) - 16);
        slbe->valid = (rb & SLB_ESID_V) ? 1 : 0;
        slbe->Ks    = (rs & SLB_VSID_KS) ? 1 : 0;
        slbe->Kp    = (rs & SLB_VSID_KP) ? 1 : 0;
        slbe->nx    = (rs & SLB_VSID_N) ? 1 : 0;
        slbe->class = (rs & SLB_VSID_C) ? 1 : 0;

        slbe->orige = rb & (ESID_MASK | SLB_ESID_V);
        slbe->origv = rs;

        /* Map the new segment */
        kvmppc_mmu_map_segment(vcpu, esid << SID_SHIFT);
}

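/*
 * Emulate slbmfee/slbmfev: return the raw ESID/VSID doublewords that were
 * saved when the entry was written with slbmte.
 */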
static u64 kvmppc_mmu_book3s_64_slbmfee(struct kvm_vcpu *vcpu, u64 slb_nr)
{
        struct kvmppc_slb *slbe;

        if (slb_nr >= vcpu->arch.slb_nr)
                return 0;

        slbe = &vcpu->arch.slb[slb_nr];

        return slbe->orige;
}

static u64 kvmppc_mmu_book3s_64_slbmfev(struct kvm_vcpu *vcpu, u64 slb_nr)
{
        struct kvmppc_slb *slbe;

        if (slb_nr >= vcpu->arch.slb_nr)
                return 0;

        slbe = &vcpu->arch.slb[slb_nr];

        return slbe->origv;
}

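/*
 * Emulate slbie: invalidate the shadow SLB entry matching the effective
 * address and flush the corresponding host segment mapping.
 */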
static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea)
{
        struct kvmppc_slb *slbe;
        u64 seg_size;

        dprintk("KVM MMU: slbie(0x%llx)\n", ea);

        slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);

        if (!slbe)
                return;

        dprintk("KVM MMU: slbie(0x%llx, 0x%llx)\n", ea, slbe->esid);

        slbe->valid = false;
        slbe->orige = 0;
        slbe->origv = 0;

        seg_size = 1ull << kvmppc_slb_sid_shift(slbe);
        kvmppc_mmu_flush_segment(vcpu, ea & ~(seg_size - 1), seg_size);
}

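/*
 * Emulate slbia: invalidate every shadow SLB entry except entry 0 and,
 * if instruction relocation is enabled, re-map the segment the guest is
 * currently executing from.
 */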
static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
{
        int i;

        dprintk("KVM MMU: slbia()\n");

        for (i = 1; i < vcpu->arch.slb_nr; i++) {
                vcpu->arch.slb[i].valid = false;
                vcpu->arch.slb[i].orige = 0;
                vcpu->arch.slb[i].origv = 0;
        }

        if (vcpu->arch.shared->msr & MSR_IR) {
                kvmppc_mmu_flush_segments(vcpu);
                kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
        }
}

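/*
 * Emulate mtsrin by building RB/RS images for an equivalent slbmte (see
 * the Book3 mapping quoted below) and handing them to the slbmte handler.
 */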
static void kvmppc_mmu_book3s_64_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
                                        ulong value)
{
        u64 rb = 0, rs = 0;

        /*
         * According to Book3 2.01 mtsrin is implemented as:
         *
         * The SLB entry specified by (RB)32:35 is loaded from register
         * RS, as follows.
         *
         * SLBE Bit     Source                  SLB Field
         *
         * 0:31         0x0000_0000             ESID-0:31
         * 32:35        (RB)32:35               ESID-32:35
         * 36           0b1                     V
         * 37:61        0x00_0000|| 0b0         VSID-0:24
         * 62:88        (RS)37:63               VSID-25:51
         * 89:91        (RS)33:35               Ks Kp N
         * 92           (RS)36                  L ((RS)36 must be 0b0)
         * 93           0b0                     C
         */

        dprintk("KVM MMU: mtsrin(0x%x, 0x%lx)\n", srnum, value);

        /* ESID = srnum */
        rb |= (srnum & 0xf) << 28;
        /* Set the valid bit */
        rb |= 1 << 27;
        /* Index = ESID */
        rb |= srnum;

        /* VSID = VSID */
        rs |= (value & 0xfffffff) << 12;
        /* flags = flags */
        rs |= ((value >> 28) & 0x7) << 9;

        kvmppc_mmu_book3s_64_slbmte(vcpu, rs, rb);
}

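/*
 * Emulate tlbie: flush shadow mappings for the given virtual page; for
 * large (16MB) pages, mask off the low bits of the page number so the
 * whole large page range is flushed.
 */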
static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
                                       bool large)
{
        u64 mask = 0xFFFFFFFFFULL;

        dprintk("KVM MMU: tlbie(0x%lx)\n", va);

        if (large)
                mask = 0xFFFFFF000ULL;
        kvmppc_mmu_pte_vflush(vcpu, va >> 12, mask);
}

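/*
 * Convert a guest ESID into the VSID used for host-side shadow mappings.
 * With translation partially or fully disabled a pseudo-VSID (VSID_REAL,
 * VSID_REAL_IR or VSID_REAL_DR) is generated so that real-mode accesses
 * get their own shadow segment space.
 */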
static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
                                             u64 *vsid)
{
        ulong ea = esid << SID_SHIFT;
        struct kvmppc_slb *slb;
        u64 gvsid = esid;
        ulong mp_ea = vcpu->arch.magic_page_ea;

        if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
                slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
                if (slb) {
                        gvsid = slb->vsid;
                        if (slb->tb) {
                                gvsid <<= SID_SHIFT_1T - SID_SHIFT;
                                gvsid |= esid & ((1ul << (SID_SHIFT_1T - SID_SHIFT)) - 1);
                                gvsid |= VSID_1T;
                        }
                }
        }

        switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
        case 0:
                *vsid = VSID_REAL | esid;
                break;
        case MSR_IR:
                *vsid = VSID_REAL_IR | gvsid;
                break;
        case MSR_DR:
                *vsid = VSID_REAL_DR | gvsid;
                break;
        case MSR_DR|MSR_IR:
                if (!slb)
                        goto no_slb;

                *vsid = gvsid;
                break;
        default:
                BUG();
                break;
        }

        if (vcpu->arch.shared->msr & MSR_PR)
                *vsid |= VSID_PR;

        return 0;

no_slb:
        /* Catch magic page case */
        if (unlikely(mp_ea) &&
            unlikely(esid == (mp_ea >> SID_SHIFT)) &&
            !(vcpu->arch.shared->msr & MSR_PR)) {
                *vsid = VSID_REAL | esid;
                return 0;
        }

        return -EINVAL;
}

static bool kvmppc_mmu_book3s_64_is_dcbz32(struct kvm_vcpu *vcpu)
{
        return (to_book3s(vcpu)->hid[5] & 0x80);
}

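/* Install the 64-bit Book3S software MMU callbacks for this vcpu. */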
void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu)
{
        struct kvmppc_mmu *mmu = &vcpu->arch.mmu;

        mmu->mfsrin = NULL;
        mmu->mtsrin = kvmppc_mmu_book3s_64_mtsrin;
        mmu->slbmte = kvmppc_mmu_book3s_64_slbmte;
        mmu->slbmfee = kvmppc_mmu_book3s_64_slbmfee;
        mmu->slbmfev = kvmppc_mmu_book3s_64_slbmfev;
        mmu->slbie = kvmppc_mmu_book3s_64_slbie;
        mmu->slbia = kvmppc_mmu_book3s_64_slbia;
        mmu->xlate = kvmppc_mmu_book3s_64_xlate;
        mmu->reset_msr = kvmppc_mmu_book3s_64_reset_msr;
        mmu->tlbie = kvmppc_mmu_book3s_64_tlbie;
        mmu->esid_to_vsid = kvmppc_mmu_book3s_64_esid_to_vsid;
        mmu->ea_to_vp = kvmppc_mmu_book3s_64_ea_to_vp;
        mmu->is_dcbz32 = kvmppc_mmu_book3s_64_is_dcbz32;

        vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
}