about summary refs log tree commit diff stats
path: root/virt
diff options
context:
space:
mode:
authorPaolo Bonzini2017-04-27 07:33:43 -0500
committerPaolo Bonzini2017-04-27 07:36:44 -0500
commit7a97cec26b94c909f4cbad2dc3186af3e457a522 (patch)
treece711b80c36bb3c2828cc63b9f65187fa95e2305 /virt
parent178f02ffafafc59d4d4b135242e5cc1515743680 (diff)
downloadkernel-7a97cec26b94c909f4cbad2dc3186af3e457a522.tar.gz
kernel-7a97cec26b94c909f4cbad2dc3186af3e457a522.tar.xz
kernel-7a97cec26b94c909f4cbad2dc3186af3e457a522.zip
KVM: mark requests that need synchronization
kvm_make_all_requests() provides a synchronization that waits until all kicked VCPUs have acknowledged the kick. This is important for KVM_REQ_MMU_RELOAD as it prevents freeing while lockless paging is underway.

This patch adds the synchronization property into all requests that are currently being used with kvm_make_all_requests() in order to preserve the current behavior and only introduce a new framework. Removing it from requests where it is not necessary is left for future patches.

Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'virt')
-rw-r--r--virt/kvm/kvm_main.c25
1 file changed, 22 insertions, 3 deletions
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 632f7b3e198c..035bc51f656f 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -165,6 +165,24 @@ void vcpu_put(struct kvm_vcpu *vcpu)
165} 165}
166EXPORT_SYMBOL_GPL(vcpu_put); 166EXPORT_SYMBOL_GPL(vcpu_put);
167 167
168/* TODO: merge with kvm_arch_vcpu_should_kick */
169static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
170{
171 int mode = kvm_vcpu_exiting_guest_mode(vcpu);
172
173 /*
174 * We need to wait for the VCPU to reenable interrupts and get out of
175 * READING_SHADOW_PAGE_TABLES mode.
176 */
177 if (req & KVM_REQUEST_WAIT)
178 return mode != OUTSIDE_GUEST_MODE;
179
180 /*
181 * Need to kick a running VCPU, but otherwise there is nothing to do.
182 */
183 return mode == IN_GUEST_MODE;
184}
185
168static void ack_flush(void *_completed) 186static void ack_flush(void *_completed)
169{ 187{
170} 188}
@@ -174,6 +192,7 @@ bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
174 int i, cpu, me; 192 int i, cpu, me;
175 cpumask_var_t cpus; 193 cpumask_var_t cpus;
176 bool called = true; 194 bool called = true;
195 bool wait = req & KVM_REQUEST_WAIT;
177 struct kvm_vcpu *vcpu; 196 struct kvm_vcpu *vcpu;
178 197
179 zalloc_cpumask_var(&cpus, GFP_ATOMIC); 198 zalloc_cpumask_var(&cpus, GFP_ATOMIC);
@@ -187,13 +206,13 @@ bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
187 continue; 206 continue;
188 207
189 if (cpus != NULL && cpu != -1 && cpu != me && 208 if (cpus != NULL && cpu != -1 && cpu != me &&
190 kvm_vcpu_exiting_guest_mode(vcpu) != OUTSIDE_GUEST_MODE) 209 kvm_request_needs_ipi(vcpu, req))
191 cpumask_set_cpu(cpu, cpus); 210 cpumask_set_cpu(cpu, cpus);
192 } 211 }
193 if (unlikely(cpus == NULL)) 212 if (unlikely(cpus == NULL))
194 smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1); 213 smp_call_function_many(cpu_online_mask, ack_flush, NULL, wait);
195 else if (!cpumask_empty(cpus)) 214 else if (!cpumask_empty(cpus))
196 smp_call_function_many(cpus, ack_flush, NULL, 1); 215 smp_call_function_many(cpus, ack_flush, NULL, wait);
197 else 216 else
198 called = false; 217 called = false;
199 put_cpu(); 218 put_cpu();