/*
 * kvm guest debug support
 *
 * Copyright IBM Corp. 2014
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
 */

#include <linux/kvm_host.h>
#include <linux/errno.h>
#include "kvm-s390.h"
#include "gaccess.h"

/*
 * Extends the address range given by *start and *stop to include the address
 * range starting with estart and the length len. Takes care of overflowing
 * intervals and tries to minimize the overall interval size.
 */
static void extend_address_range(u64 *start, u64 *stop, u64 estart, int len)
{
	/* estop is the last address covered by the new range */
	u64 estop = estart + (len > 0 ? len - 1 : 0);

	/* 0-0 range represents "not set" */
	if ((*start == 0) && (*stop == 0)) {
		*start = estart;
		*stop = estop;
	} else if (*start <= *stop) {
		/* increase the existing range */
		if (estart < *start)
			*start = estart;
		if (estop > *stop)
			*stop = estop;
	} else {
		/* "overflowing" interval, whereby *start > *stop */
		if (estart <= *stop) {
			if (estop > *stop)
				*stop = estop;
		} else if (estop > *start) {
			if (estart < *start)
				*start = estart;
		}
		/* minimize the range */
		else if ((estop - *stop) < (*start - estart))
			*stop = estop;
		else
			*start = estart;
	}
}
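
/*
 * Example of extend_address_range(): with an existing range *start = 0x1000,
 * *stop = 0x2000, a new breakpoint at estart = 0x3000 with len = 4 yields
 * estop = 0x3003; the range is not wrapped and estop > *stop, so the merged
 * range becomes 0x1000 - 0x3003.
 */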

#define MAX_INST_SIZE 6
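
/*
 * CR9 holds the PER event masks and controls, while CR10 and CR11 hold the
 * PER starting and ending address. All hardware breakpoints and watchpoints
 * therefore have to share a single address range; the two helpers below merge
 * the individual breakpoints/watchpoints into that range and leave it to the
 * exit handling to sort out the exact hit afterwards.
 */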

static void enable_all_hw_bp(struct kvm_vcpu *vcpu)
{
	unsigned long start, len;
	u64 *cr9 = &vcpu->arch.sie_block->gcr[9];
	u64 *cr10 = &vcpu->arch.sie_block->gcr[10];
	u64 *cr11 = &vcpu->arch.sie_block->gcr[11];
	int i;

	if (vcpu->arch.guestdbg.nr_hw_bp <= 0 ||
	    vcpu->arch.guestdbg.hw_bp_info == NULL)
		return;

	/*
	 * If the guest is not interested in branching events, we can safely
	 * limit them to the PER address range.
	 */
	if (!(*cr9 & PER_EVENT_BRANCH))
		*cr9 |= PER_CONTROL_BRANCH_ADDRESS;
	*cr9 |= PER_EVENT_IFETCH | PER_EVENT_BRANCH;

	for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) {
		start = vcpu->arch.guestdbg.hw_bp_info[i].addr;
		len = vcpu->arch.guestdbg.hw_bp_info[i].len;

		/*
		 * The instruction in front of the desired bp has to
		 * report instruction-fetching events
		 */
		if (start < MAX_INST_SIZE) {
			len += start;
			start = 0;
		} else {
			start -= MAX_INST_SIZE;
			len += MAX_INST_SIZE;
		}

		extend_address_range(cr10, cr11, start, len);
	}
}

static void enable_all_hw_wp(struct kvm_vcpu *vcpu)
{
	unsigned long start, len;
	u64 *cr9 = &vcpu->arch.sie_block->gcr[9];
	u64 *cr10 = &vcpu->arch.sie_block->gcr[10];
	u64 *cr11 = &vcpu->arch.sie_block->gcr[11];
	int i;

	if (vcpu->arch.guestdbg.nr_hw_wp <= 0 ||
	    vcpu->arch.guestdbg.hw_wp_info == NULL)
		return;

	/*
	 * If the host uses storage alternation for special address spaces,
	 * enable all events and give all of them to the guest.
	 */
	if (*cr9 & PER_EVENT_STORE && *cr9 & PER_CONTROL_ALTERATION) {
		*cr9 &= ~PER_CONTROL_ALTERATION;
		*cr10 = 0;
		*cr11 = -1UL;
	} else {
		*cr9 &= ~PER_CONTROL_ALTERATION;
		*cr9 |= PER_EVENT_STORE;

		for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) {
			start = vcpu->arch.guestdbg.hw_wp_info[i].addr;
			len = vcpu->arch.guestdbg.hw_wp_info[i].len;

			extend_address_range(cr10, cr11, start, len);
		}
	}
}

void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu)
{
	vcpu->arch.guestdbg.cr0 = vcpu->arch.sie_block->gcr[0];
	vcpu->arch.guestdbg.cr9 = vcpu->arch.sie_block->gcr[9];
	vcpu->arch.guestdbg.cr10 = vcpu->arch.sie_block->gcr[10];
	vcpu->arch.guestdbg.cr11 = vcpu->arch.sie_block->gcr[11];
}

void kvm_s390_restore_guest_per_regs(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->gcr[0] = vcpu->arch.guestdbg.cr0;
	vcpu->arch.sie_block->gcr[9] = vcpu->arch.guestdbg.cr9;
	vcpu->arch.sie_block->gcr[10] = vcpu->arch.guestdbg.cr10;
	vcpu->arch.sie_block->gcr[11] = vcpu->arch.guestdbg.cr11;
}

void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu)
{
	/*
	 * TODO: if guest psw has per enabled, otherwise 0s!
	 * This reduces the amount of reported events.
	 * Need to intercept all psw changes!
	 */

	if (guestdbg_sstep_enabled(vcpu)) {
		/* disable timer (clock-comparator) interrupts */
		vcpu->arch.sie_block->gcr[0] &= ~0x800ul;
		vcpu->arch.sie_block->gcr[9] |= PER_EVENT_IFETCH;
		vcpu->arch.sie_block->gcr[10] = 0;
		vcpu->arch.sie_block->gcr[11] = -1UL;
	}

	if (guestdbg_hw_bp_enabled(vcpu)) {
		enable_all_hw_bp(vcpu);
		enable_all_hw_wp(vcpu);
	}

	/* TODO: Instruction-fetching-nullification not allowed for now */
	if (vcpu->arch.sie_block->gcr[9] & PER_EVENT_NULLIFICATION)
		vcpu->arch.sie_block->gcr[9] &= ~PER_EVENT_NULLIFICATION;
}
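
/*
 * The backup/patch/restore trio above is meant to bracket guest execution:
 * the guest's own CR0 and CR9-CR11 are saved and overlaid with the debugging
 * setup before entering SIE, and put back afterwards, so the guest never
 * observes the modified PER configuration. The actual call sites live in the
 * generic s390 KVM run loop outside of this file.
 */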

#define MAX_WP_SIZE 100

static int __import_wp_info(struct kvm_vcpu *vcpu,
			    struct kvm_hw_breakpoint *bp_data,
			    struct kvm_hw_wp_info_arch *wp_info)
{
	int ret = 0;

	wp_info->len = bp_data->len;
	wp_info->addr = bp_data->addr;
	wp_info->phys_addr = bp_data->phys_addr;
	wp_info->old_data = NULL;

	if (wp_info->len < 0 || wp_info->len > MAX_WP_SIZE)
		return -EINVAL;

	wp_info->old_data = kmalloc(bp_data->len, GFP_KERNEL);
	if (!wp_info->old_data)
		return -ENOMEM;
	/* try to backup the original value */
	ret = read_guest_abs(vcpu, wp_info->phys_addr, wp_info->old_data,
			     wp_info->len);
	if (ret) {
		kfree(wp_info->old_data);
		wp_info->old_data = NULL;
	}

	return ret;
}

#define MAX_BP_COUNT 50

int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
			    struct kvm_guest_debug *dbg)
{
	int ret = 0, nr_wp = 0, nr_bp = 0, i;
	struct kvm_hw_breakpoint *bp_data = NULL;
	struct kvm_hw_wp_info_arch *wp_info = NULL;
	struct kvm_hw_bp_info_arch *bp_info = NULL;

	if (dbg->arch.nr_hw_bp <= 0 || !dbg->arch.hw_bp)
		return 0;
	else if (dbg->arch.nr_hw_bp > MAX_BP_COUNT)
		return -EINVAL;

	bp_data = kmalloc_array(dbg->arch.nr_hw_bp, sizeof(*bp_data),
				GFP_KERNEL);
	if (!bp_data)
		return -ENOMEM;

	if (copy_from_user(bp_data, dbg->arch.hw_bp,
			   sizeof(*bp_data) * dbg->arch.nr_hw_bp)) {
		ret = -EFAULT;
		goto error;
	}

	/* count breakpoints and watchpoints to size the arrays below */
	for (i = 0; i < dbg->arch.nr_hw_bp; i++) {
		switch (bp_data[i].type) {
		case KVM_HW_WP_WRITE:
			nr_wp++;
			break;
		case KVM_HW_BP:
			nr_bp++;
			break;
		}
	}

	if (nr_wp > 0) {
		wp_info = kmalloc_array(nr_wp, sizeof(*wp_info), GFP_KERNEL);
		if (!wp_info) {
			ret = -ENOMEM;
			goto error;
		}
	}
	if (nr_bp > 0) {
		bp_info = kmalloc_array(nr_bp, sizeof(*bp_info), GFP_KERNEL);
		if (!bp_info) {
			ret = -ENOMEM;
			goto error;
		}
	}

	for (nr_wp = 0, nr_bp = 0, i = 0; i < dbg->arch.nr_hw_bp; i++) {
		switch (bp_data[i].type) {
		case KVM_HW_WP_WRITE:
			ret = __import_wp_info(vcpu, &bp_data[i],
					       &wp_info[nr_wp]);
			if (ret)
				goto error;
			nr_wp++;
			break;
		case KVM_HW_BP:
			bp_info[nr_bp].len = bp_data[i].len;
			bp_info[nr_bp].addr = bp_data[i].addr;
			nr_bp++;
			break;
		}
	}

	vcpu->arch.guestdbg.nr_hw_bp = nr_bp;
	vcpu->arch.guestdbg.hw_bp_info = bp_info;
	vcpu->arch.guestdbg.nr_hw_wp = nr_wp;
	vcpu->arch.guestdbg.hw_wp_info = wp_info;
	kfree(bp_data);
	return 0;

error:
	kfree(bp_data);
	kfree(wp_info);
	kfree(bp_info);
	return ret;
}
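
/*
 * Illustrative sketch (not part of this file) of the userspace side that
 * feeds kvm_s390_import_bp_data(); guest_addr and vcpu_fd are placeholders,
 * the remaining names follow the uapi definitions used above:
 *
 *	struct kvm_hw_breakpoint bp = {
 *		.type = KVM_HW_BP,
 *		.addr = guest_addr,
 *		.len  = 1,
 *	};
 *	struct kvm_guest_debug dbg = {
 *		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP,
 *	};
 *
 *	dbg.arch.nr_hw_bp = 1;
 *	dbg.arch.hw_bp = &bp;
 *	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
 *
 * For KVM_HW_WP_WRITE entries, phys_addr has to name the guest absolute
 * address that read_guest_abs() should snapshot.
 */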

void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu)
{
	struct kvm_hw_wp_info_arch *hw_wp_info = NULL;
	int i;

	for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) {
		hw_wp_info = &vcpu->arch.guestdbg.hw_wp_info[i];
		kfree(hw_wp_info->old_data);
		hw_wp_info->old_data = NULL;
	}
	kfree(vcpu->arch.guestdbg.hw_wp_info);
	vcpu->arch.guestdbg.hw_wp_info = NULL;

	kfree(vcpu->arch.guestdbg.hw_bp_info);
	vcpu->arch.guestdbg.hw_bp_info = NULL;

	vcpu->arch.guestdbg.nr_hw_wp = 0;
	vcpu->arch.guestdbg.nr_hw_bp = 0;
}

static inline int in_addr_range(u64 addr, u64 a, u64 b)
{
	if (a <= b)
		return (addr >= a) && (addr <= b);
	else
		/* "overflowing" interval, i.e. the range wraps past the end */
		return (addr >= a) || (addr <= b);
}
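
/*
 * For example, the wrapped range a = 0xfffffffffffff000 .. b = 0x0fff
 * contains 0xfffffffffffffff0 (>= a) as well as 0x0010 (<= b), but not
 * 0x8000000000000000, which lies in the hole between b and a.
 */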

#define end_of_range(bp_info) (bp_info->addr + bp_info->len - 1)

static struct kvm_hw_bp_info_arch *find_hw_bp(struct kvm_vcpu *vcpu,
					      unsigned long addr)
{
	struct kvm_hw_bp_info_arch *bp_info = vcpu->arch.guestdbg.hw_bp_info;
	int i;

	if (vcpu->arch.guestdbg.nr_hw_bp == 0)
		return NULL;

	for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) {
		/* addr is directly the start or in the range of a bp */
		if (addr == bp_info->addr)
			return bp_info;
		if (bp_info->len > 0 &&
		    in_addr_range(addr, bp_info->addr, end_of_range(bp_info)))
			return bp_info;

		bp_info++;
	}

	return NULL;
}

static struct kvm_hw_wp_info_arch *any_wp_changed(struct kvm_vcpu *vcpu)
{
	struct kvm_hw_wp_info_arch *wp_info = NULL;
	void *temp = NULL;
	int i;

	if (vcpu->arch.guestdbg.nr_hw_wp == 0)
		return NULL;

	for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) {
		wp_info = &vcpu->arch.guestdbg.hw_wp_info[i];
		if (!wp_info || !wp_info->old_data || wp_info->len <= 0)
			continue;

		temp = kmalloc(wp_info->len, GFP_KERNEL);
		if (!temp)
			continue;

		/* refetch the wp data and compare it to the old value */
		if (!read_guest_abs(vcpu, wp_info->phys_addr, temp,
				    wp_info->len)) {
			if (memcmp(temp, wp_info->old_data, wp_info->len)) {
				kfree(temp);
				return wp_info;
			}
		}
		kfree(temp);
		temp = NULL;
	}

	return NULL;
}

void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu)
{
	vcpu->run->exit_reason = KVM_EXIT_DEBUG;
	vcpu->guest_debug &= ~KVM_GUESTDBG_EXIT_PENDING;
}

#define PER_CODE_MASK		(PER_EVENT_MASK >> 24)
#define PER_CODE_BRANCH		(PER_EVENT_BRANCH >> 24)
#define PER_CODE_IFETCH		(PER_EVENT_IFETCH >> 24)
#define PER_CODE_STORE		(PER_EVENT_STORE >> 24)
#define PER_CODE_STORE_REAL	(PER_EVENT_STORE_REAL >> 24)

#define per_bp_event(code) \
		(code & (PER_CODE_IFETCH | PER_CODE_BRANCH))
#define per_write_wp_event(code) \
		(code & (PER_CODE_STORE | PER_CODE_STORE_REAL))
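
/*
 * The PER code byte reported by the SIE block (perc) uses the same bit
 * layout as the event-mask byte of CR9, just shifted down by 24 bits; that
 * is why the PER_CODE_* values above are derived from the PER_EVENT_*
 * constants and why filter_guest_per_event() masks perc with (cr9 >> 24).
 */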

static int debug_exit_required(struct kvm_vcpu *vcpu)
{
	u8 perc = vcpu->arch.sie_block->perc;
	struct kvm_debug_exit_arch *debug_exit = &vcpu->run->debug.arch;
	struct kvm_hw_wp_info_arch *wp_info = NULL;
	struct kvm_hw_bp_info_arch *bp_info = NULL;
	unsigned long addr = vcpu->arch.sie_block->gpsw.addr;
	unsigned long peraddr = vcpu->arch.sie_block->peraddr;

	if (guestdbg_hw_bp_enabled(vcpu)) {
		if (per_write_wp_event(perc) &&
		    vcpu->arch.guestdbg.nr_hw_wp > 0) {
			wp_info = any_wp_changed(vcpu);
			if (wp_info) {
				debug_exit->addr = wp_info->addr;
				debug_exit->type = KVM_HW_WP_WRITE;
				goto exit_required;
			}
		}
		if (per_bp_event(perc) &&
		    vcpu->arch.guestdbg.nr_hw_bp > 0) {
			bp_info = find_hw_bp(vcpu, addr);
			/* remove duplicate events if PC==PER address */
			if (bp_info && (addr != peraddr)) {
				debug_exit->addr = addr;
				debug_exit->type = KVM_HW_BP;
				vcpu->arch.guestdbg.last_bp = addr;
				goto exit_required;
			}
			/* breakpoint missed */
			bp_info = find_hw_bp(vcpu, peraddr);
			if (bp_info && vcpu->arch.guestdbg.last_bp != peraddr) {
				debug_exit->addr = peraddr;
				debug_exit->type = KVM_HW_BP;
				goto exit_required;
			}
		}
	}
	if (guestdbg_sstep_enabled(vcpu) && per_bp_event(perc)) {
		debug_exit->addr = addr;
		debug_exit->type = KVM_SINGLESTEP;
		goto exit_required;
	}

	return 0;

exit_required:
	return 1;
}

#define guest_per_enabled(vcpu) \
		(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER)

int kvm_s390_handle_per_ifetch_icpt(struct kvm_vcpu *vcpu)
{
	const u8 ilen = kvm_s390_get_ilen(vcpu);
	struct kvm_s390_pgm_info pgm_info = {
		.code = PGM_PER,
		.per_code = PER_CODE_IFETCH,
		.per_address = __rewind_psw(vcpu->arch.sie_block->gpsw, ilen),
	};

	/*
	 * The PSW points to the next instruction, therefore the intercepted
	 * instruction generated a PER i-fetch event. PER address therefore
	 * points at the previous PSW address (could be an EXECUTE function).
	 */
	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}

static void filter_guest_per_event(struct kvm_vcpu *vcpu)
{
	const u8 perc = vcpu->arch.sie_block->perc;
	u64 peraddr = vcpu->arch.sie_block->peraddr;
	u64 addr = vcpu->arch.sie_block->gpsw.addr;
	u64 cr9 = vcpu->arch.sie_block->gcr[9];
	u64 cr10 = vcpu->arch.sie_block->gcr[10];
	u64 cr11 = vcpu->arch.sie_block->gcr[11];
	/* filter for the PER events demanded by the guest */
	u8 guest_perc = perc & (cr9 >> 24) & PER_CODE_MASK;

	if (!guest_per_enabled(vcpu))
		guest_perc = 0;

	/* filter "successful-branching" events */
	if (guest_perc & PER_CODE_BRANCH &&
	    cr9 & PER_CONTROL_BRANCH_ADDRESS &&
	    !in_addr_range(addr, cr10, cr11))
		guest_perc &= ~PER_CODE_BRANCH;

	/* filter "instruction-fetching" events */
	if (guest_perc & PER_CODE_IFETCH &&
	    !in_addr_range(peraddr, cr10, cr11))
		guest_perc &= ~PER_CODE_IFETCH;

	/* All other PER events will be given to the guest */
	/* TODO: Check altered address/address space */

	vcpu->arch.sie_block->perc = guest_perc;

	/* with no guest PER event left, drop the PER program interruption */
	if (!guest_perc)
		vcpu->arch.sie_block->iprcc &= ~PGM_PER;
}

#define pssec(vcpu)		(vcpu->arch.sie_block->gcr[1] & _ASCE_SPACE_SWITCH)
#define hssec(vcpu)		(vcpu->arch.sie_block->gcr[13] & _ASCE_SPACE_SWITCH)
#define old_ssec(vcpu)		((vcpu->arch.sie_block->tecmc >> 31) & 0x1)
#define old_as_is_home(vcpu)	!(vcpu->arch.sie_block->tecmc & 0xffff)
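
/*
 * CR1 holds the primary ASCE and CR13 the home ASCE; pssec()/hssec() test
 * their space-switch-event control bits. old_ssec() and old_as_is_home()
 * reconstruct the situation before the space switch from the tecmc field
 * of the SIE block.
 */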

void kvm_s390_handle_per_event(struct kvm_vcpu *vcpu)
{
	int new_as;

	if (debug_exit_required(vcpu))
		vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING;

	filter_guest_per_event(vcpu);

	/*
	 * Only RP, SAC, SACF, PT, PTI, PR, PC instructions can trigger
	 * a space-switch event. PER events enforce space-switch events
	 * for these instructions. So if no PER event for the guest is left,
	 * we might have to filter the space-switch element out, too.
	 */
	if (vcpu->arch.sie_block->iprcc == PGM_SPACE_SWITCH) {
		vcpu->arch.sie_block->iprcc = 0;
		new_as = psw_bits(vcpu->arch.sie_block->gpsw).as;

		/*
		 * If the AS changed from / to home, we had an RP, SAC or SACF
		 * instruction. Check the primary and home space-switch-event
		 * controls. (theoretically home -> home produced no event)
		 */
		if (((new_as == PSW_AS_HOME) ^ old_as_is_home(vcpu)) &&
		    (pssec(vcpu) || hssec(vcpu)))
			vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH;

		/*
		 * PT, PTI, PR and PC instructions operate on the primary AS
		 * only. Check if the primary-space-switch-event control was
		 * or got set.
		 */
		if (new_as == PSW_AS_PRIMARY && !old_as_is_home(vcpu) &&
		    (pssec(vcpu) || old_ssec(vcpu)))
			vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH;
	}
}