author    Maarten Lankhorst  2015-02-26 04:54:03 -0600
committer Maarten Lankhorst  2015-03-13 14:26:33 -0500
commit    5ea6f1c32628887c9df0c53bc8c199eb12633fec (patch)
tree      f466d5ed08d547ebb20da6ccc6fa1cbb2efc3785 /nouveau
parent    7caa442e761ab5e48698c937aea9ce18f4522ecb (diff)
nouveau: make importing global buffers into nouveau completely thread-safe, with tests
While I've closed off most races in a previous patch, a small race still
existed where importing and then unreffing could cause an invalid bo.
Add a test for this case.

Racing sequence fixed:
- thread 1 releases the bo, refcount drops to zero, blocks on acquiring
  nvdev->lock.
- thread 2 increases the refcount to 1.
- thread 2 decreases the refcount to zero, blocks on acquiring
  nvdev->lock.

At this point the 2 threads would clean up the same bo.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@ubuntu.com>
Reviewed-by: Emil Velikov <emil.l.velikov@gmail.com>
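The tests this commit mentions are not shown below, since the diffstat is
filtered to the nouveau/ directory. As a rough illustration only, the kind
of two-thread import/unref hammer that exercises this race might look like
the following sketch. Device setup and the flink name are assumed to exist
and the surrounding names are hypothetical, but nouveau_bo_name_ref() and
nouveau_bo_ref() are the real libdrm entry points the race runs through:

/*
 * Illustration only, not the test from this commit: two threads
 * repeatedly import the same flink name and immediately drop the
 * reference, so one thread's nouveau_bo_del() constantly races
 * another thread's nouveau_bo_name_ref().
 */
#include <pthread.h>
#include <stdint.h>
#include <nouveau.h>

static struct nouveau_device *dev;  /* assumed opened elsewhere */
static uint32_t bo_name;            /* flink name of a shared bo */

static void *hammer(void *arg)
{
	struct nouveau_bo *bo;
	int i;

	for (i = 0; i < 100000; i++) {
		/* import: bumps refcnt, possibly reviving a dying bo */
		if (nouveau_bo_name_ref(dev, bo_name, &bo) == 0)
			/* unref: dropping to zero enters nouveau_bo_del */
			nouveau_bo_ref(NULL, &bo);
	}
	return NULL;
}

int main(void)
{
	pthread_t t[2];

	/* ... open dev and obtain bo_name before this point ... */
	pthread_create(&t[0], NULL, hammer, NULL);
	pthread_create(&t[1], NULL, hammer, NULL);
	pthread_join(t[0], NULL);
	pthread_join(t[1], NULL);
	return 0;
}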
Diffstat (limited to 'nouveau')
-rw-r--r--  nouveau/nouveau.c | 69
1 file changed, 32 insertions(+), 37 deletions(-)
diff --git a/nouveau/nouveau.c b/nouveau/nouveau.c
index c6c153a9..1c723b9e 100644
--- a/nouveau/nouveau.c
+++ b/nouveau/nouveau.c
@@ -351,29 +351,18 @@ nouveau_bo_del(struct nouveau_bo *bo)
 
 	pthread_mutex_lock(&nvdev->lock);
 	if (nvbo->name) {
-		if (atomic_read(&nvbo->refcnt)) {
+		if (atomic_read(&nvbo->refcnt) == 0) {
+			DRMLISTDEL(&nvbo->head);
 			/*
-			 * bo has been revived by a race with
-			 * nouveau_bo_prime_handle_ref, or nouveau_bo_name_ref.
-			 *
-			 * In theory there's still a race possible with
-			 * nouveau_bo_wrap, but when using this function
-			 * the lifetime of the handle is probably already
-			 * handled in another way. If there are races
-			 * you're probably using nouveau_bo_wrap wrong.
+			 * This bo has to be closed with the lock held because
+			 * gem handles are not refcounted. If a shared bo is
+			 * closed and re-opened in another thread a race
+			 * against DRM_IOCTL_GEM_OPEN or drmPrimeFDToHandle
+			 * might cause the bo to be closed accidentally while
+			 * re-importing.
 			 */
-			pthread_mutex_unlock(&nvdev->lock);
-			return;
+			drmIoctl(bo->device->fd, DRM_IOCTL_GEM_CLOSE, &req);
 		}
-		DRMLISTDEL(&nvbo->head);
-		/*
-		 * This bo has to be closed with the lock held because gem
-		 * handles are not refcounted. If a shared bo is closed and
-		 * re-opened in another thread a race against
-		 * DRM_IOCTL_GEM_OPEN or drmPrimeFDToHandle might cause the
-		 * bo to be closed accidentally while re-importing.
-		 */
-		drmIoctl(bo->device->fd, DRM_IOCTL_GEM_CLOSE, &req);
 		pthread_mutex_unlock(&nvdev->lock);
 	} else {
 		DRMLISTDEL(&nvbo->head);
@@ -418,7 +407,7 @@ nouveau_bo_new(struct nouveau_device *dev, uint32_t flags, uint32_t align,
 
 static int
 nouveau_bo_wrap_locked(struct nouveau_device *dev, uint32_t handle,
-		       struct nouveau_bo **pbo)
+		       struct nouveau_bo **pbo, int name)
 {
 	struct nouveau_device_priv *nvdev = nouveau_device(dev);
 	struct drm_nouveau_gem_info req = { .handle = handle };
@@ -427,8 +416,24 @@ nouveau_bo_wrap_locked(struct nouveau_device *dev, uint32_t handle,
 
 	DRMLISTFOREACHENTRY(nvbo, &nvdev->bo_list, head) {
 		if (nvbo->base.handle == handle) {
-			*pbo = NULL;
-			nouveau_bo_ref(&nvbo->base, pbo);
+			if (atomic_inc_return(&nvbo->refcnt) == 1) {
+				/*
+				 * Uh oh, this bo is dead and someone else
+				 * will free it, but because refcnt is
+				 * now non-zero fortunately they won't
+				 * call the ioctl to close the bo.
+				 *
+				 * Remove this bo from the list so other
+				 * calls to nouveau_bo_wrap_locked will
+				 * see our replacement nvbo.
+				 */
+				DRMLISTDEL(&nvbo->head);
+				if (!name)
+					name = nvbo->name;
+				break;
+			}
+
+			*pbo = &nvbo->base;
 			return 0;
 		}
 	}
@@ -443,6 +448,7 @@ nouveau_bo_wrap_locked(struct nouveau_device *dev, uint32_t handle,
 	atomic_set(&nvbo->refcnt, 1);
 	nvbo->base.device = dev;
 	abi16_bo_info(&nvbo->base, &req);
+	nvbo->name = name;
 	DRMLISTADD(&nvbo->head, &nvdev->bo_list);
 	*pbo = &nvbo->base;
 	return 0;
@@ -458,7 +464,7 @@ nouveau_bo_wrap(struct nouveau_device *dev, uint32_t handle,
 	struct nouveau_device_priv *nvdev = nouveau_device(dev);
 	int ret;
 	pthread_mutex_lock(&nvdev->lock);
-	ret = nouveau_bo_wrap_locked(dev, handle, pbo);
+	ret = nouveau_bo_wrap_locked(dev, handle, pbo, 0);
 	pthread_mutex_unlock(&nvdev->lock);
 	return ret;
 }
@@ -468,24 +474,13 @@ nouveau_bo_name_ref(struct nouveau_device *dev, uint32_t name,
 		    struct nouveau_bo **pbo)
 {
 	struct nouveau_device_priv *nvdev = nouveau_device(dev);
-	struct nouveau_bo_priv *nvbo;
 	struct drm_gem_open req = { .name = name };
 	int ret;
 
 	pthread_mutex_lock(&nvdev->lock);
-	DRMLISTFOREACHENTRY(nvbo, &nvdev->bo_list, head) {
-		if (nvbo->name == name) {
-			*pbo = NULL;
-			nouveau_bo_ref(&nvbo->base, pbo);
-			pthread_mutex_unlock(&nvdev->lock);
-			return 0;
-		}
-	}
-
 	ret = drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req);
 	if (ret == 0) {
-		ret = nouveau_bo_wrap_locked(dev, req.handle, pbo);
-		nouveau_bo((*pbo))->name = name;
+		ret = nouveau_bo_wrap_locked(dev, req.handle, pbo, name);
 	}
 
 	pthread_mutex_unlock(&nvdev->lock);
@@ -537,7 +532,7 @@ nouveau_bo_prime_handle_ref(struct nouveau_device *dev, int prime_fd,
 	pthread_mutex_lock(&nvdev->lock);
 	ret = drmPrimeFDToHandle(dev->fd, prime_fd, &handle);
 	if (ret == 0) {
-		ret = nouveau_bo_wrap_locked(dev, handle, bo);
+		ret = nouveau_bo_wrap_locked(dev, handle, bo, 0);
 		if (!ret) {
 			struct nouveau_bo_priv *nvbo = nouveau_bo(*bo);
 			if (!nvbo->name) {
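Taken together, the two sides of the fix cooperate: nouveau_bo_del()
re-checks the refcount under nvdev->lock before issuing GEM_CLOSE, and
nouveau_bo_wrap_locked() treats an atomic_inc_return() result of 1 as proof
that it revived a dying bo, so it unlinks the stale wrapper and builds a
fresh one against the still-open handle. A toy model of that pattern, not
libdrm code, using C11 atomics in place of libdrm's
atomic_inc_return()/atomic_read() wrappers:

/*
 * Toy model of the revive-on-import pattern. "on_list" stands in for
 * DRMLISTADD/DRMLISTDEL, and the GEM_CLOSE ioctl is only a comment.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_int refcnt;
	bool on_list;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* release path, modelled on the new nouveau_bo_del() */
static void obj_release(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcnt, 1) != 1)
		return;                 /* not the last reference */

	pthread_mutex_lock(&lock);
	/*
	 * Re-check under the lock: an importer may have revived the
	 * object while we were blocked. Only if refcnt is still zero
	 * do we own the teardown and may close the handle.
	 */
	if (atomic_load(&o->refcnt) == 0)
		o->on_list = false;     /* DRMLISTDEL + GEM_CLOSE here */
	pthread_mutex_unlock(&lock);
}

/* import path, modelled on the new nouveau_bo_wrap_locked() */
static bool obj_import_locked(struct obj *o)
{
	if (atomic_fetch_add(&o->refcnt, 1) == 0) {
		/*
		 * Revived a dying object: its owner will still free the
		 * wrapper but, seeing a non-zero refcnt, will skip the
		 * close. Unlink it so later lookups build a fresh one.
		 */
		o->on_list = false;
		return false;           /* caller allocates a new wrapper */
	}
	return true;                    /* ordinary extra reference */
}

int main(void)
{
	struct obj o = { .refcnt = 1, .on_list = true };
	bool reusable;

	/* importer bumps 1 -> 2: an ordinary shared reference */
	pthread_mutex_lock(&lock);
	reusable = obj_import_locked(&o);
	pthread_mutex_unlock(&lock);
	printf("import while alive: reusable=%d\n", reusable);

	obj_release(&o);                /* 2 -> 1: not last, no teardown */
	obj_release(&o);                /* 1 -> 0: teardown, handle closed */
	printf("on_list after final release: %d\n", o.on_list);
	return 0;
}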