KVM: Convert kvm_lock to raw_spinlock

Code under this lock requires non-preemptibility. Ensure this also holds
over -rt by converting kvm_lock to a raw spinlock.
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
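---

Background for the conversion: on PREEMPT_RT, spinlock_t is backed by an
rt_mutex, so lock holders can sleep and be preempted; raw_spinlock_t keeps
the classic behavior of spinning with preemption disabled. Code that runs
in atomic context (IPIs, CPU hotplug callbacks, the cpufreq notifier) must
therefore use the raw variant. A minimal sketch of the pattern this patch
applies, with a hypothetical example_lock standing in for kvm_lock:

#include <linux/spinlock.h>

/* was: static DEFINE_SPINLOCK(example_lock); */
static DEFINE_RAW_SPINLOCK(example_lock);

static void example_critical_section(void)
{
	/* raw_spin_lock() disables preemption even on -rt */
	raw_spin_lock(&example_lock);
	/* ...work that must neither sleep nor be preempted... */
	raw_spin_unlock(&example_lock);
}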
#define ASYNC_PF_PER_VCPU 64
-extern spinlock_t kvm_lock;
+extern raw_spinlock_t kvm_lock;
extern struct list_head vm_list;
struct kvm_vcpu;
if (nr_to_scan == 0)
goto out;
- spin_lock(&kvm_lock);
+ raw_spin_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list) {
int idx, freed_pages;
if (kvm_freed)
list_move_tail(&kvm_freed->vm_list, &vm_list);
- spin_unlock(&kvm_lock);
+ raw_spin_unlock(&kvm_lock);
out:
return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
- spin_lock(&kvm_lock);
+ raw_spin_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list) {
kvm_for_each_vcpu(i, vcpu, kvm) {
if (vcpu->cpu != freq->cpu)
- spin_unlock(&kvm_lock);
+ raw_spin_unlock(&kvm_lock);
if (freq->old < freq->new && send_ipi) {
/*
* kvm->lock --> kvm->slots_lock --> kvm->irq_lock
*/
-DEFINE_SPINLOCK(kvm_lock);
+DEFINE_RAW_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);
static cpumask_var_t cpus_hardware_enabled;
mutex_init(&kvm->irq_lock);
mutex_init(&kvm->slots_lock);
atomic_set(&kvm->users_count, 1);
- spin_lock(&kvm_lock);
+ raw_spin_lock(&kvm_lock);
list_add(&kvm->vm_list, &vm_list);
- spin_unlock(&kvm_lock);
+ raw_spin_unlock(&kvm_lock);
struct mm_struct *mm = kvm->mm;
kvm_arch_sync_events(kvm);
- spin_lock(&kvm_lock);
+ raw_spin_lock(&kvm_lock);
list_del(&kvm->vm_list);
- spin_unlock(&kvm_lock);
+ raw_spin_unlock(&kvm_lock);
kvm_free_irq_routing(kvm);
for (i = 0; i < KVM_NR_BUSES; i++)
kvm_io_bus_destroy(kvm->buses[i]);
static void hardware_enable(void *junk)
{
- spin_lock(&kvm_lock);
+ raw_spin_lock(&kvm_lock);
hardware_enable_nolock(junk);
- spin_unlock(&kvm_lock);
+ raw_spin_unlock(&kvm_lock);
}
static void hardware_disable_nolock(void *junk)
static void hardware_disable(void *junk)
{
- spin_lock(&kvm_lock);
+ raw_spin_lock(&kvm_lock);
hardware_disable_nolock(junk);
- spin_unlock(&kvm_lock);
+ raw_spin_unlock(&kvm_lock);
}
static void hardware_disable_all_nolock(void)
static void hardware_disable_all(void)
{
- spin_lock(&kvm_lock);
+ raw_spin_lock(&kvm_lock);
hardware_disable_all_nolock();
- spin_unlock(&kvm_lock);
+ raw_spin_unlock(&kvm_lock);
}
static int hardware_enable_all(void)
{
int r = 0;
- spin_lock(&kvm_lock);
+ raw_spin_lock(&kvm_lock);
kvm_usage_count++;
if (kvm_usage_count == 1) {
- spin_unlock(&kvm_lock);
+ raw_spin_unlock(&kvm_lock);
struct kvm *kvm;
*val = 0;
- spin_lock(&kvm_lock);
+ raw_spin_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list)
*val += *(u32 *)((void *)kvm + offset);
- spin_unlock(&kvm_lock);
+ raw_spin_unlock(&kvm_lock);
- spin_lock(&kvm_lock);
+ raw_spin_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list)
kvm_for_each_vcpu(i, vcpu, kvm)
*val += *(u32 *)((void *)vcpu + offset);
- spin_unlock(&kvm_lock);
+ raw_spin_unlock(&kvm_lock);
static int kvm_resume(struct sys_device *dev)
{
if (kvm_usage_count) {
- WARN_ON(spin_is_locked(&kvm_lock));
+ WARN_ON(raw_spin_is_locked(&kvm_lock));
hardware_enable_nolock(NULL);
}
return 0;
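A closing note on the kvm_resume() hunk: sysdev resume callbacks run with
interrupts disabled, and hardware_enable() may be invoked from atomic
context (CPU bring-up, IPIs), so on -rt a sleeping spinlock_t would be
invalid on these paths. The WARN_ON keeps its meaning because
raw_spin_is_locked() is the raw counterpart of spin_is_locked(). A minimal
sketch, using a hypothetical my_lock and my_resume() rather than the real
KVM symbols:

#include <linux/spinlock.h>
#include <linux/bug.h>

static DEFINE_RAW_SPINLOCK(my_lock);	/* hypothetical lock for illustration */

static int my_resume(void)
{
	/* Resume runs with interrupts off; the lock must be free here. */
	WARN_ON(raw_spin_is_locked(&my_lock));
	return 0;
}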