about summary refs log tree commit diff stats
path: root/virt
diff options
context:
space:
mode:
Diffstat (limited to 'virt')
-rw-r--r--virt/kvm/eventfd.c3
-rw-r--r--virt/kvm/kvm_main.c40
-rw-r--r--virt/kvm/vfio.c27
3 files changed, 49 insertions, 21 deletions
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 46dbc0a7dfc1..49001fa84ead 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -868,7 +868,8 @@ kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
 			continue;
 
 		kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
-		kvm->buses[bus_idx]->ioeventfd_count--;
+		if (kvm->buses[bus_idx])
+			kvm->buses[bus_idx]->ioeventfd_count--;
 		ioeventfd_release(p);
 		ret = 0;
 		break;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 336ed267c407..cb092bd9965b 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -654,8 +654,11 @@ static void kvm_destroy_vm(struct kvm *kvm)
 	list_del(&kvm->vm_list);
 	spin_unlock(&kvm_lock);
 	kvm_free_irq_routing(kvm);
-	for (i = 0; i < KVM_NR_BUSES; i++)
-		kvm_io_bus_destroy(kvm->buses[i]);
+	for (i = 0; i < KVM_NR_BUSES; i++) {
+		if (kvm->buses[i])
+			kvm_io_bus_destroy(kvm->buses[i]);
+		kvm->buses[i] = NULL;
+	}
 	kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
 	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
@@ -3271,6 +3274,8 @@ int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
 	};
 
 	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
+	if (!bus)
+		return -ENOMEM;
 	r = __kvm_io_bus_write(vcpu, bus, &range, val);
 	return r < 0 ? r : 0;
 }
@@ -3288,6 +3293,8 @@ int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
 	};
 
 	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
+	if (!bus)
+		return -ENOMEM;
 
 	/* First try the device referenced by cookie. */
 	if ((cookie >= 0) && (cookie < bus->dev_count) &&
@@ -3338,6 +3345,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
 	};
 
 	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
+	if (!bus)
+		return -ENOMEM;
 	r = __kvm_io_bus_read(vcpu, bus, &range, val);
 	return r < 0 ? r : 0;
 }
@@ -3350,6 +3359,9 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 	struct kvm_io_bus *new_bus, *bus;
 
 	bus = kvm->buses[bus_idx];
+	if (!bus)
+		return -ENOMEM;
+
 	/* exclude ioeventfd which is limited by maximum fd */
 	if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
 		return -ENOSPC;
@@ -3369,37 +3381,41 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 }
 
 /* Caller must hold slots_lock. */
-int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
-			      struct kvm_io_device *dev)
+void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+			       struct kvm_io_device *dev)
 {
-	int i, r;
+	int i;
 	struct kvm_io_bus *new_bus, *bus;
 
 	bus = kvm->buses[bus_idx];
-	r = -ENOENT;
+	if (!bus)
+		return;
+
 	for (i = 0; i < bus->dev_count; i++)
 		if (bus->range[i].dev == dev) {
-			r = 0;
 			break;
 		}
 
-	if (r)
-		return r;
+	if (i == bus->dev_count)
+		return;
 
 	new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) *
 			  sizeof(struct kvm_io_range)), GFP_KERNEL);
-	if (!new_bus)
-		return -ENOMEM;
+	if (!new_bus) {
+		pr_err("kvm: failed to shrink bus, removing it completely\n");
+		goto broken;
+	}
 
 	memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
 	new_bus->dev_count--;
 	memcpy(new_bus->range + i, bus->range + i + 1,
 	       (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
 
+broken:
 	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
 	synchronize_srcu_expedited(&kvm->srcu);
 	kfree(bus);
-	return r;
+	return;
 }
 
 static struct notifier_block kvm_cpu_notifier = {
diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
index 1dd087da6f31..111e09c3f4bf 100644
--- a/virt/kvm/vfio.c
+++ b/virt/kvm/vfio.c
@@ -47,6 +47,22 @@ static struct vfio_group *kvm_vfio_group_get_external_user(struct file *filep)
 	return vfio_group;
 }
 
+static bool kvm_vfio_external_group_match_file(struct vfio_group *group,
+					       struct file *filep)
+{
+	bool ret, (*fn)(struct vfio_group *, struct file *);
+
+	fn = symbol_get(vfio_external_group_match_file);
+	if (!fn)
+		return false;
+
+	ret = fn(group, filep);
+
+	symbol_put(vfio_external_group_match_file);
+
+	return ret;
+}
+
 static void kvm_vfio_group_put_external_user(struct vfio_group *vfio_group)
 {
 	void (*fn)(struct vfio_group *);
@@ -171,18 +187,13 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
 	if (!f.file)
 		return -EBADF;
 
-	vfio_group = kvm_vfio_group_get_external_user(f.file);
-	fdput(f);
-
-	if (IS_ERR(vfio_group))
-		return PTR_ERR(vfio_group);
-
 	ret = -ENOENT;
 
 	mutex_lock(&kv->lock);
 
 	list_for_each_entry(kvg, &kv->group_list, node) {
-		if (kvg->vfio_group != vfio_group)
+		if (!kvm_vfio_external_group_match_file(kvg->vfio_group,
+							f.file))
 			continue;
 
 		list_del(&kvg->node);
@@ -196,7 +207,7 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
 
 	mutex_unlock(&kv->lock);
 
-	kvm_vfio_group_put_external_user(vfio_group);
+	fdput(f);
 
 	kvm_vfio_update_coherency(dev);
 