arch/s390/kvm/intercept.c
/*
 * in-kernel handling for sie intercepts
 *
 * Copyright IBM Corp. 2008, 2014
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm_host.h>
#include <linux/errno.h>
#include <linux/pagemap.h>

#include <asm/kvm_host.h>
#include <asm/asm-offsets.h>
#include <asm/irq.h>

#include "kvm-s390.h"
#include "gaccess.h"
#include "trace.h"
#include "trace-s390.h"
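/*
 * Handlers for intercepted instructions, indexed by the first opcode byte
 * (sie_block->ipa >> 8, see handle_instruction() below).  The 0xb2, 0xb9,
 * 0xe5 and 0xeb entries cover two-byte opcode families whose handlers do
 * their own sub-dispatching on the remaining opcode bits.
 */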
static const intercept_handler_t instruction_handlers[256] = {
        [0x01] = kvm_s390_handle_01,
        [0x82] = kvm_s390_handle_lpsw,
        [0x83] = kvm_s390_handle_diag,
        [0xae] = kvm_s390_handle_sigp,
        [0xb2] = kvm_s390_handle_b2,
        [0xb6] = kvm_s390_handle_stctl,
        [0xb7] = kvm_s390_handle_lctl,
        [0xb9] = kvm_s390_handle_b9,
        [0xe5] = kvm_s390_handle_e5,
        [0xeb] = kvm_s390_handle_eb,
};
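/*
 * Intercept codes 0x00, 0x10 and 0x18 (see intercept_funcs below) need no
 * in-kernel handling beyond updating the exit statistics; returning 0 lets
 * the vcpu re-enter SIE.
 */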
static int handle_noop(struct kvm_vcpu *vcpu)
{
        switch (vcpu->arch.sie_block->icptcode) {
        case 0x0:
                vcpu->stat.exit_null++;
                break;
        case 0x10:
                vcpu->stat.exit_external_request++;
                break;
        default:
                break; /* nothing */
        }
        return 0;
}
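/*
 * A stop and/or store-status request has been queued in
 * local_int.action_bits (typically by SIGP handling).  Returning
 * -EOPNOTSUPP causes the intercept to be passed to userspace so that it
 * can observe the stopped vcpu.
 */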
static int handle_stop(struct kvm_vcpu *vcpu)
{
        int rc = 0;

        vcpu->stat.exit_stop_request++;
        spin_lock_bh(&vcpu->arch.local_int.lock);

        trace_kvm_s390_stop_request(vcpu->arch.local_int.action_bits);

        if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) {
                kvm_s390_vcpu_stop(vcpu);
                vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP;
                VCPU_EVENT(vcpu, 3, "%s", "cpu stopped");
                rc = -EOPNOTSUPP;
        }

        if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) {
                vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP;
                /* store status must be called unlocked. Since local_int.lock
                 * only protects local_int.* and not guest memory we can give
                 * up the lock here */
                spin_unlock_bh(&vcpu->arch.local_int.lock);
                rc = kvm_s390_vcpu_store_status(vcpu,
                                                KVM_S390_STORE_STATUS_NOADDR);
                if (rc >= 0)
                        rc = -EOPNOTSUPP;
        } else
                spin_unlock_bh(&vcpu->arch.local_int.lock);
        return rc;
}

static int handle_validity(struct kvm_vcpu *vcpu)
{
        int viwhy = vcpu->arch.sie_block->ipb >> 16;

        vcpu->stat.exit_validity++;
        trace_kvm_s390_intercept_validity(vcpu, viwhy);
        WARN_ONCE(true, "kvm: unhandled validity intercept 0x%x\n", viwhy);
        return -EOPNOTSUPP;
}

static int handle_instruction(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        vcpu->stat.exit_instruction++;
        trace_kvm_s390_intercept_instruction(vcpu,
                                             vcpu->arch.sie_block->ipa,
                                             vcpu->arch.sie_block->ipb);
        handler = instruction_handlers[vcpu->arch.sie_block->ipa >> 8];
        if (handler)
                return handler(vcpu);
        return -EOPNOTSUPP;
}
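/*
 * Copy the program-interruption information that SIE left in the control
 * block (iprcc plus the per-exception fields) into a kvm_s390_pgm_info so
 * that the interruption can be reinjected into the guest.
 */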
static void __extract_prog_irq(struct kvm_vcpu *vcpu,
                               struct kvm_s390_pgm_info *pgm_info)
{
        memset(pgm_info, 0, sizeof(struct kvm_s390_pgm_info));
        pgm_info->code = vcpu->arch.sie_block->iprcc;

        switch (vcpu->arch.sie_block->iprcc & ~PGM_PER) {
        case PGM_AFX_TRANSLATION:
        case PGM_ASX_TRANSLATION:
        case PGM_EX_TRANSLATION:
        case PGM_LFX_TRANSLATION:
        case PGM_LSTE_SEQUENCE:
        case PGM_LSX_TRANSLATION:
        case PGM_LX_TRANSLATION:
        case PGM_PRIMARY_AUTHORITY:
        case PGM_SECONDARY_AUTHORITY:
        case PGM_SPACE_SWITCH:
                pgm_info->trans_exc_code = vcpu->arch.sie_block->tecmc;
                break;
        case PGM_ALEN_TRANSLATION:
        case PGM_ALE_SEQUENCE:
        case PGM_ASTE_INSTANCE:
        case PGM_ASTE_SEQUENCE:
        case PGM_ASTE_VALIDITY:
        case PGM_EXTENDED_AUTHORITY:
                pgm_info->exc_access_id = vcpu->arch.sie_block->eai;
                break;
        case PGM_ASCE_TYPE:
        case PGM_PAGE_TRANSLATION:
        case PGM_REGION_FIRST_TRANS:
        case PGM_REGION_SECOND_TRANS:
        case PGM_REGION_THIRD_TRANS:
        case PGM_SEGMENT_TRANSLATION:
                pgm_info->trans_exc_code = vcpu->arch.sie_block->tecmc;
                pgm_info->exc_access_id  = vcpu->arch.sie_block->eai;
                pgm_info->op_access_id  = vcpu->arch.sie_block->oai;
                break;
        case PGM_MONITOR:
                pgm_info->mon_class_nr = vcpu->arch.sie_block->mcn;
                pgm_info->mon_code = vcpu->arch.sie_block->tecmc;
                break;
        case PGM_DATA:
                pgm_info->data_exc_code = vcpu->arch.sie_block->dxc;
                break;
        case PGM_PROTECTION:
                pgm_info->trans_exc_code = vcpu->arch.sie_block->tecmc;
                pgm_info->exc_access_id  = vcpu->arch.sie_block->eai;
                break;
        default:
                break;
        }

        if (vcpu->arch.sie_block->iprcc & PGM_PER) {
                pgm_info->per_code = vcpu->arch.sie_block->perc;
                pgm_info->per_atmid = vcpu->arch.sie_block->peratmid;
                pgm_info->per_address = vcpu->arch.sie_block->peraddr;
                pgm_info->per_access_id = vcpu->arch.sie_block->peraid;
        }
}

/*
 * restore ITDB to program-interruption TDB in guest lowcore
 * and set TX abort indication if required
 */
static int handle_itdb(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_itdb *itdb;
        int rc;

        if (!IS_TE_ENABLED(vcpu) || !IS_ITDB_VALID(vcpu))
                return 0;
        if (current->thread.per_flags & PER_FLAG_NO_TE)
                return 0;
        itdb = (struct kvm_s390_itdb *)vcpu->arch.sie_block->itdba;
        rc = write_guest_lc(vcpu, __LC_PGM_TDB, itdb, sizeof(*itdb));
        if (rc)
                return rc;
        memset(itdb, 0, sizeof(*itdb));

        return 0;
}

#define per_event(vcpu) (vcpu->arch.sie_block->iprcc & PGM_PER)
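/*
 * A program interruption occurred while the guest was running under SIE.
 * Filter PER events if guest debugging is active, guard against endless
 * specification-exception loops, copy the transaction diagnostic block to
 * the guest's lowcore and finally reinject the interruption into the guest.
 */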
static int handle_prog(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_pgm_info pgm_info;
        psw_t psw;
        int rc;

        vcpu->stat.exit_program_interruption++;

        if (guestdbg_enabled(vcpu) && per_event(vcpu)) {
                kvm_s390_handle_per_event(vcpu);
                /* the interrupt might have been filtered out completely */
                if (vcpu->arch.sie_block->iprcc == 0)
                        return 0;
        }

        trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc);
        if (vcpu->arch.sie_block->iprcc == PGM_SPECIFICATION) {
                rc = read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &psw, sizeof(psw_t));
                if (rc)
                        return rc;
                /* Avoid endless loops of specification exceptions */
                if (!is_valid_psw(&psw))
                        return -EOPNOTSUPP;
        }
        rc = handle_itdb(vcpu);
        if (rc)
                return rc;

        __extract_prog_irq(vcpu, &pgm_info);
        return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}

static int handle_instruction_and_prog(struct kvm_vcpu *vcpu)
{
        int rc, rc2;

        vcpu->stat.exit_instr_and_program++;
        rc = handle_instruction(vcpu);
        rc2 = handle_prog(vcpu);
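        /*
         * If the instruction part could not be handled in the kernel,
         * reflect an instruction intercept (0x04) so that userspace sees
         * it as such.
         */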
        if (rc == -EOPNOTSUPP)
                vcpu->arch.sie_block->icptcode = 0x04;
        if (rc)
                return rc;
        return rc2;
}

/**
 * handle_external_interrupt - used for external interruption interceptions
 *
 * This interception only occurs if the CPUSTAT_EXT_INT bit was set, or if
 * the new PSW does not have external interrupts disabled. In the first case,
 * we've got to deliver the interrupt manually, and in the second case, we
 * drop to userspace to handle the situation there.
 */
static int handle_external_interrupt(struct kvm_vcpu *vcpu)
{
        u16 eic = vcpu->arch.sie_block->eic;
        struct kvm_s390_interrupt irq;
        psw_t newpsw;
        int rc;

        vcpu->stat.exit_external_interrupt++;

        rc = read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &newpsw, sizeof(psw_t));
        if (rc)
                return rc;
        /*
         * We cannot handle clock comparator or CPU timer interrupts if the
         * external new PSW would leave external interrupts enabled.
         */
        if ((eic == EXT_IRQ_CLK_COMP || eic == EXT_IRQ_CPU_TIMER) &&
            (newpsw.mask & PSW_MASK_EXT))
                return -EOPNOTSUPP;
        switch (eic) {
        case EXT_IRQ_CLK_COMP:
                irq.type = KVM_S390_INT_CLOCK_COMP;
                break;
        case EXT_IRQ_CPU_TIMER:
                irq.type = KVM_S390_INT_CPU_TIMER;
                break;
        case EXT_IRQ_EXTERNAL_CALL:
                if (kvm_s390_si_ext_call_pending(vcpu))
                        return 0;
                irq.type = KVM_S390_INT_EXTERNAL_CALL;
                irq.parm = vcpu->arch.sie_block->extcpuaddr;
                break;
        default:
                return -EOPNOTSUPP;
        }

        return kvm_s390_inject_vcpu(vcpu, &irq);
}

/**
 * Handle MOVE PAGE partial execution interception.
 *
 * This interception can only happen for guests with DAT disabled and
 * addresses that are currently not mapped in the host. Thus we try to
 * set up the mappings for the corresponding user pages here (or throw
 * addressing exceptions in case of illegal guest addresses).
 */
static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
{
        psw_t *psw = &vcpu->arch.sie_block->gpsw;
        unsigned long srcaddr, dstaddr;
        int reg1, reg2, rc;

        kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

        /* Make sure that the source is paged-in */
        srcaddr = kvm_s390_real_to_abs(vcpu, vcpu->run->s.regs.gprs[reg2]);
        if (kvm_is_error_gpa(vcpu->kvm, srcaddr))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0);
        if (rc != 0)
                return rc;

        /* Make sure that the destination is paged-in */
        dstaddr = kvm_s390_real_to_abs(vcpu, vcpu->run->s.regs.gprs[reg1]);
        if (kvm_is_error_gpa(vcpu->kvm, dstaddr))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1);
        if (rc != 0)
                return rc;
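        /*
         * MVPG is a 4-byte RRE instruction; rewind the PSW so that the
         * guest re-executes it now that both pages are accessible.
         */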
        psw->addr = __rewind_psw(*psw, 4);

        return 0;
}

static int handle_partial_execution(struct kvm_vcpu *vcpu)
{
        if (vcpu->arch.sie_block->ipa == 0xb254)        /* MVPG */
                return handle_mvpg_pei(vcpu);
        if (vcpu->arch.sie_block->ipa >> 8 == 0xae)     /* SIGP */
                return kvm_s390_handle_sigp_pei(vcpu);

        return -EOPNOTSUPP;
}

static const intercept_handler_t intercept_funcs[] = {
        [0x00 >> 2] = handle_noop,
        [0x04 >> 2] = handle_instruction,
        [0x08 >> 2] = handle_prog,
        [0x0C >> 2] = handle_instruction_and_prog,
        [0x10 >> 2] = handle_noop,
        [0x14 >> 2] = handle_external_interrupt,
        [0x18 >> 2] = handle_noop,
        [0x1C >> 2] = kvm_s390_handle_wait,
        [0x20 >> 2] = handle_validity,
        [0x28 >> 2] = handle_stop,
        [0x38 >> 2] = handle_partial_execution,
};
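/*
 * Intercept codes are multiples of four, hence the code >> 2 indexing into
 * intercept_funcs and the code & 3 sanity check below.
 */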
int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
{
        intercept_handler_t func;
        u8 code = vcpu->arch.sie_block->icptcode;

        if (code & 3 || (code >> 2) >= ARRAY_SIZE(intercept_funcs))
                return -EOPNOTSUPP;
        func = intercept_funcs[code >> 2];
        if (func)
                return func(vcpu);
        return -EOPNOTSUPP;
}