author	Jintack Lim	2017-03-06 07:42:37 -0600
committer	Marc Zyngier	2017-03-07 08:48:16 -0600
commit	370a0ec1819990f8e2a93df7cc9c0146980ed45f (patch)
tree	c511955854887f5b6d7eb93b2d966d28951f2a2e /virt
parent	4dfc050571523ac2bc02cbf948dd47621f7dd83f (diff)
KVM: arm/arm64: Let vcpu thread modify its own active state
Currently, if a vcpu thread tries to change the active state of an
interrupt which is already on the same vcpu's AP list, it will loop
forever. Since the VGIC mmio handler is called after a vcpu has
already synced back the LR state to the struct vgic_irq, we can just
let it proceed safely.

Cc: stable@vger.kernel.org
Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Jintack Lim <jintack@cs.columbia.edu>
Signed-off-by: Christoffer Dall <cdall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
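To make the livelock concrete, below is a minimal stand-alone sketch of
the wait condition before and after this patch. The sketch_* types and
must_wait_* helpers are hypothetical illustrations, not kernel code;
they only mirror the two fields the real loop in the diff below tests.

#include <stdbool.h>

struct sketch_vcpu { int cpu; };                  /* -1 when not running */
struct sketch_irq  { struct sketch_vcpu *vcpu; }; /* vcpu owning LR state */

/* Pre-patch condition: spins forever when the requester itself owns the
 * IRQ, because a running requester's cpu field is never -1. */
static bool must_wait_old(struct sketch_irq *irq)
{
	return irq->vcpu && irq->vcpu->cpu != -1;
}

/* Post-patch condition: a vcpu may modify its own active state, since
 * its LR state is already synced back by the time the MMIO handler runs. */
static bool must_wait_new(struct sketch_irq *irq,
			  struct sketch_vcpu *requester)
{
	return irq->vcpu && irq->vcpu != requester && irq->vcpu->cpu != -1;
}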
Diffstat (limited to 'virt')
-rw-r--r--	virt/kvm/arm/vgic/vgic-mmio.c | 32 ++++++++++++++++++++--------
1 file changed, 24 insertions(+), 8 deletions(-)
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index 3654b4c835ef..2a5db1352722 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -180,21 +180,37 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
 static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
 				    bool new_active_state)
 {
+	struct kvm_vcpu *requester_vcpu;
 	spin_lock(&irq->irq_lock);
+
+	/*
+	 * The vcpu parameter here can mean multiple things depending on how
+	 * this function is called; when handling a trap from the kernel it
+	 * depends on the GIC version, and these functions are also called as
+	 * part of save/restore from userspace.
+	 *
+	 * Therefore, we have to figure out the requester in a reliable way.
+	 *
+	 * When accessing VGIC state from user space, the requester_vcpu is
+	 * NULL, which is fine, because we guarantee that no VCPUs are running
+	 * when accessing VGIC state from user space so irq->vcpu->cpu is
+	 * always -1.
+	 */
+	requester_vcpu = kvm_arm_get_running_vcpu();
+
 	/*
 	 * If this virtual IRQ was written into a list register, we
 	 * have to make sure the CPU that runs the VCPU thread has
-	 * synced back LR state to the struct vgic_irq.  We can only
-	 * know this for sure, when either this irq is not assigned to
-	 * anyone's AP list anymore, or the VCPU thread is not
-	 * running on any CPUs.
+	 * synced back the LR state to the struct vgic_irq.
 	 *
-	 * In the opposite case, we know the VCPU thread may be on its
-	 * way back from the guest and still has to sync back this
-	 * IRQ, so we release and re-acquire the spin_lock to let the
-	 * other thread sync back the IRQ.
+	 * As long as the conditions below are true, we know the VCPU thread
+	 * may be on its way back from the guest (we kicked the VCPU thread in
+	 * vgic_change_active_prepare) and still has to sync back this IRQ,
+	 * so we release and re-acquire the spin_lock to let the other thread
+	 * sync back the IRQ.
 	 */
 	while (irq->vcpu && /* IRQ may have state in an LR somewhere */
+	       irq->vcpu != requester_vcpu && /* Current thread is not the VCPU thread */
 	       irq->vcpu->cpu != -1) /* VCPU thread is running */
 		cond_resched_lock(&irq->irq_lock);
 
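To see the new condition cover the three requester cases the comment in
the hunk above describes, here is a hedged walk-through reusing the
hypothetical sketch_* types and must_wait_new() helper from the sketch
after the commit message (again an illustration, not kernel code):

#include <assert.h>
#include <stddef.h>

int main(void)
{
	struct sketch_vcpu owner = { .cpu = 2 };      /* VCPU thread running */
	struct sketch_irq  irq   = { .vcpu = &owner };
	struct sketch_vcpu other = { .cpu = 3 };

	/* 1. A different thread requests: wait until the owner has synced
	 *    back its LR state (or stops running). */
	assert(must_wait_new(&irq, &other));

	/* 2. The owner changes its own active state: proceed immediately;
	 *    this is exactly the case that previously looped forever. */
	assert(!must_wait_new(&irq, &owner));

	/* 3. Userspace access: requester is NULL, but no VCPUs run during
	 *    userspace VGIC access, so irq->vcpu->cpu is -1 and we proceed. */
	owner.cpu = -1;
	assert(!must_wait_new(&irq, NULL));
	return 0;
}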