author    Maarten Lankhorst    2015-02-26 04:54:04 -0600
committer Maarten Lankhorst    2015-03-13 14:28:16 -0500
commit    ba5a0b6274ad9c493ed3ddaf4e13559b9ff55ac1 (patch)
tree      f0d862ee9204bb1316779726c251c93c9acdb9d2 /nouveau
parent    5ea6f1c32628887c9df0c53bc8c199eb12633fec (diff)
nouveau: Do not add most bo's to the global bo list.
Only add wrapped bo's and bo's that have been exported through flink or dma-buf. This avoids taking the lock in the common case and shortens the list traversal needed when importing a dma-buf or flink name.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@ubuntu.com>
Tested-By: Emil Velikov <emil.l.velikov@gmail.com>
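For context, here is a minimal, self-contained sketch of the pattern this patch relies on: an object goes onto the shared list only the first time it is exported, with the check repeated under the lock, so the plain allocate/free path never touches the mutex. This is illustrative C, not the libdrm code itself; the names (struct obj, list_head, global_list, make_global, destroy) are hypothetical stand-ins for nvbo->head, nvdev->bo_list and friends.

#include <pthread.h>
#include <stddef.h>

/* Hypothetical intrusive list node, standing in for nvbo->head. */
struct list_head {
	struct list_head *next, *prev;
};

struct obj {
	struct list_head head;	/* head.next == NULL means "not on the global list" */
	int handle;
};

static struct list_head global_list = { &global_list, &global_list };
static pthread_mutex_t  global_lock = PTHREAD_MUTEX_INITIALIZER;

/* Called only on export (flink name or dma-buf fd handed out). */
static void make_global(struct obj *o)
{
	if (!o->head.next) {			/* cheap unlocked check */
		pthread_mutex_lock(&global_lock);
		if (!o->head.next) {		/* re-check under the lock */
			o->head.next = global_list.next;
			o->head.prev = &global_list;
			global_list.next->prev = &o->head;
			global_list.next = &o->head;
		}
		pthread_mutex_unlock(&global_lock);
	}
}

/* Only exported objects pay for the lock on destruction. */
static void destroy(struct obj *o)
{
	if (o->head.next) {
		pthread_mutex_lock(&global_lock);
		o->head.prev->next = o->head.next;
		o->head.next->prev = o->head.prev;
		pthread_mutex_unlock(&global_lock);
	}
	/* close the GEM handle, free o, etc. */
}

In the patch this corresponds to the new nouveau_bo_make_global(), called from nouveau_bo_name_get() and nouveau_bo_set_prime(); nouveau_bo_del() and nouveau_bo_wait() now test nvbo->head.next instead of nvbo->name to tell exported bo's apart from private ones.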
Diffstat (limited to 'nouveau')
 nouveau/nouveau.c | 46 ++++++++++++++++++++++++------------------------
 1 file changed, 22 insertions(+), 24 deletions(-)
diff --git a/nouveau/nouveau.c b/nouveau/nouveau.c
index 1c723b9e..2d95b74b 100644
--- a/nouveau/nouveau.c
+++ b/nouveau/nouveau.c
@@ -349,8 +349,8 @@ nouveau_bo_del(struct nouveau_bo *bo)
 	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
 	struct drm_gem_close req = { bo->handle };
 
-	pthread_mutex_lock(&nvdev->lock);
-	if (nvbo->name) {
+	if (nvbo->head.next) {
+		pthread_mutex_lock(&nvdev->lock);
 		if (atomic_read(&nvbo->refcnt) == 0) {
 			DRMLISTDEL(&nvbo->head);
 			/*
@@ -365,8 +365,6 @@ nouveau_bo_del(struct nouveau_bo *bo)
 		}
 		pthread_mutex_unlock(&nvdev->lock);
 	} else {
-		DRMLISTDEL(&nvbo->head);
-		pthread_mutex_unlock(&nvdev->lock);
 		drmIoctl(bo->device->fd, DRM_IOCTL_GEM_CLOSE, &req);
 	}
 	if (bo->map)
@@ -379,7 +377,6 @@ nouveau_bo_new(struct nouveau_device *dev, uint32_t flags, uint32_t align,
 	       uint64_t size, union nouveau_bo_config *config,
 	       struct nouveau_bo **pbo)
 {
-	struct nouveau_device_priv *nvdev = nouveau_device(dev);
 	struct nouveau_bo_priv *nvbo = calloc(1, sizeof(*nvbo));
 	struct nouveau_bo *bo = &nvbo->base;
 	int ret;
@@ -397,10 +394,6 @@ nouveau_bo_new(struct nouveau_device *dev, uint32_t flags, uint32_t align,
 		return ret;
 	}
 
-	pthread_mutex_lock(&nvdev->lock);
-	DRMLISTADD(&nvbo->head, &nvdev->bo_list);
-	pthread_mutex_unlock(&nvdev->lock);
-
 	*pbo = bo;
 	return 0;
 }
@@ -457,6 +450,18 @@ nouveau_bo_wrap_locked(struct nouveau_device *dev, uint32_t handle,
 	return -ENOMEM;
 }
 
+static void
+nouveau_bo_make_global(struct nouveau_bo_priv *nvbo)
+{
+	if (!nvbo->head.next) {
+		struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
+		pthread_mutex_lock(&nvdev->lock);
+		if (!nvbo->head.next)
+			DRMLISTADD(&nvbo->head, &nvdev->bo_list);
+		pthread_mutex_unlock(&nvdev->lock);
+	}
+}
+
 drm_public int
 nouveau_bo_wrap(struct nouveau_device *dev, uint32_t handle,
 		struct nouveau_bo **pbo)
@@ -494,13 +499,16 @@ nouveau_bo_name_get(struct nouveau_bo *bo, uint32_t *name)
 	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
 
 	*name = nvbo->name;
-	if (!*name || *name == ~0U) {
+	if (!*name) {
 		int ret = drmIoctl(bo->device->fd, DRM_IOCTL_GEM_FLINK, &req);
+
 		if (ret) {
 			*name = 0;
 			return ret;
 		}
 		nvbo->name = *name = req.name;
+
+		nouveau_bo_make_global(nvbo);
 	}
 	return 0;
 }
@@ -533,16 +541,6 @@ nouveau_bo_prime_handle_ref(struct nouveau_device *dev, int prime_fd,
 	ret = drmPrimeFDToHandle(dev->fd, prime_fd, &handle);
 	if (ret == 0) {
 		ret = nouveau_bo_wrap_locked(dev, handle, bo, 0);
-		if (!ret) {
-			struct nouveau_bo_priv *nvbo = nouveau_bo(*bo);
-			if (!nvbo->name) {
-				/*
-				 * XXX: Force locked DRM_IOCTL_GEM_CLOSE
-				 * to rule out race conditions
-				 */
-				nvbo->name = ~0;
-			}
-		}
 	}
 	pthread_mutex_unlock(&nvdev->lock);
 	return ret;
@@ -557,8 +555,8 @@ nouveau_bo_set_prime(struct nouveau_bo *bo, int *prime_fd)
 	ret = drmPrimeHandleToFD(bo->device->fd, nvbo->base.handle, DRM_CLOEXEC, prime_fd);
 	if (ret)
 		return ret;
-	if (!nvbo->name)
-		nvbo->name = ~0;
+
+	nouveau_bo_make_global(nvbo);
 	return 0;
 }
 
@@ -578,8 +576,8 @@ nouveau_bo_wait(struct nouveau_bo *bo, uint32_t access,
 	if (push && push->channel)
 		nouveau_pushbuf_kick(push, push->channel);
 
-	if (!nvbo->name && !(nvbo->access & NOUVEAU_BO_WR) &&
-	    !( access & NOUVEAU_BO_WR))
+	if (!nvbo->head.next && !(nvbo->access & NOUVEAU_BO_WR) &&
+	    !(access & NOUVEAU_BO_WR))
 		return 0;
 
 	req.handle = bo->handle;