
KVM: arm/arm64: vgic: Make vgic_dist->lpi_list_lock a raw_spinlock

vgic_dist->lpi_list_lock must always be taken with interrupts disabled as
it is used in interrupt context.

For configurations such as PREEMPT_RT_FULL, this means it should be a
raw_spinlock, since RT spinlocks become sleeping locks and cannot be taken
with interrupts disabled or from hard interrupt context.
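
To illustrate the pattern the message describes, here is a hypothetical
sketch (not code from this patch; the demo_* names are invented): a list
accessed from both task and hard-IRQ context is protected with a
raw_spinlock_t and the _irqsave variants, since on PREEMPT_RT a plain
spinlock_t becomes a sleeping lock.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* raw lock: stays a spinning lock even on PREEMPT_RT */
static DEFINE_RAW_SPINLOCK(demo_list_lock);
static LIST_HEAD(demo_list_head);

struct demo_entry {
	struct list_head node;
	u32 intid;
};

/* Task context: add an entry with IRQs masked while holding the lock. */
static void demo_add(struct demo_entry *e)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&demo_list_lock, flags);
	list_add_tail(&e->node, &demo_list_head);
	raw_spin_unlock_irqrestore(&demo_list_lock, flags);
}

/* May run from an interrupt handler, which is why spinlock_t would break on RT. */
static struct demo_entry *demo_find(u32 intid)
{
	struct demo_entry *e, *found = NULL;
	unsigned long flags;

	raw_spin_lock_irqsave(&demo_list_lock, flags);
	list_for_each_entry(e, &demo_list_head, node) {
		if (e->intid == intid) {
			found = e;
			break;
		}
	}
	raw_spin_unlock_irqrestore(&demo_list_lock, flags);

	return found;
}

Because a raw_spinlock_t keeps spinning even on PREEMPT_RT, the critical
sections it protects should stay short.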

Signed-off-by: Julien Thierry <julien.thierry@arm.com>
Acked-by: Christoffer Dall <christoffer.dall@arm.com>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
Julien Thierry 2019-01-07 15:06:16 +00:00 committed by Christoffer Dall
parent 8fa3adb8c6
commit fc3bc47523
4 changed files with 11 additions and 11 deletions

include/kvm/arm_vgic.h

@@ -256,7 +256,7 @@ struct vgic_dist {
 	u64 propbaser;
 
 	/* Protects the lpi_list and the count value below. */
-	spinlock_t lpi_list_lock;
+	raw_spinlock_t lpi_list_lock;
 	struct list_head lpi_list_head;
 	int lpi_list_count;

virt/kvm/arm/vgic/vgic-init.c

@@ -64,7 +64,7 @@ void kvm_vgic_early_init(struct kvm *kvm)
 	struct vgic_dist *dist = &kvm->arch.vgic;
 
 	INIT_LIST_HEAD(&dist->lpi_list_head);
-	spin_lock_init(&dist->lpi_list_lock);
+	raw_spin_lock_init(&dist->lpi_list_lock);
 }
 
 /* CREATION */

virt/kvm/arm/vgic/vgic-its.c

@@ -73,7 +73,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
 	irq->target_vcpu = vcpu;
 	irq->group = 1;
 
-	spin_lock_irqsave(&dist->lpi_list_lock, flags);
+	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
 
 	/*
 	 * There could be a race with another vgic_add_lpi(), so we need to
@@ -101,7 +101,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
 	dist->lpi_list_count++;
 
 out_unlock:
-	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 
 	/*
 	 * We "cache" the configuration table entries in our struct vgic_irq's.
@@ -332,7 +332,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
 	if (!intids)
 		return -ENOMEM;
 
-	spin_lock_irqsave(&dist->lpi_list_lock, flags);
+	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
 	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
 		if (i == irq_count)
 			break;
@@ -341,7 +341,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
 			continue;
 		intids[i++] = irq->intid;
 	}
-	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 
 	*intid_ptr = intids;
 	return i;

virt/kvm/arm/vgic/vgic.c

@@ -72,7 +72,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
 	struct vgic_irq *irq = NULL;
 	unsigned long flags;
 
-	spin_lock_irqsave(&dist->lpi_list_lock, flags);
+	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
 
 	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
 		if (irq->intid != intid)
@@ -88,7 +88,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
 	irq = NULL;
 
 out_unlock:
-	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 
 	return irq;
 }
@@ -138,15 +138,15 @@ void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
 	if (irq->intid < VGIC_MIN_LPI)
 		return;
 
-	spin_lock_irqsave(&dist->lpi_list_lock, flags);
+	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
 
 	if (!kref_put(&irq->refcount, vgic_irq_release)) {
-		spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+		raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 		return;
 	};
 
 	list_del(&irq->lpi_list);
 	dist->lpi_list_count--;
-	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 
 	kfree(irq);
 }