aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorRob Clark2016-06-28 12:33:07 -0500
committerRob Clark2016-07-20 18:42:21 -0500
commit6a23bd4b3c52fcd7529062b31c36dd03ae0cdd75 (patch)
tree2bbeb14cd7be273fe2718bf3f9b61ed9131a3e22 /freedreno
parent419a154dbef839b920689bea72aa9af41b2b114f (diff)
downloadexternal-libgbm-6a23bd4b3c52fcd7529062b31c36dd03ae0cdd75.tar.gz
external-libgbm-6a23bd4b3c52fcd7529062b31c36dd03ae0cdd75.tar.xz
external-libgbm-6a23bd4b3c52fcd7529062b31c36dd03ae0cdd75.zip
freedreno/msm: use hashtable to track bo idx
Note: cache the last ring the bo was emitted on, to avoid excess hashtable lookups. We do this by tracking the ring seqno, to avoid problems with dangling pointers.

Signed-off-by: Rob Clark <robclark@freedesktop.org>
Diffstat (limited to 'freedreno')
-rw-r--r--freedreno/msm/msm_priv.h16
-rw-r--r--freedreno/msm/msm_ringbuffer.c38
2 files changed, 31 insertions, 23 deletions
diff --git a/freedreno/msm/msm_priv.h b/freedreno/msm/msm_priv.h
index 1f44398d..6d670aab 100644
--- a/freedreno/msm/msm_priv.h
+++ b/freedreno/msm/msm_priv.h
@@ -40,6 +40,7 @@
 struct msm_device {
 	struct fd_device base;
 	struct fd_bo_cache ring_cache;
+	unsigned ring_cnt;
 };
 
 static inline struct msm_device * to_msm_device(struct fd_device *x)
@@ -72,18 +73,11 @@ struct msm_bo {
 	struct fd_bo base;
 	uint64_t offset;
 	uint64_t presumed;
-	/* in the common case, a bo won't be referenced by more than a single
-	 * (parent) ring[*].  So to avoid looping over all the bo's in the
-	 * reloc table to find the idx of a bo that might already be in the
-	 * table, we cache the idx in the bo.  But in order to detect the
-	 * slow-path where bo is ref'd in multiple rb's, we also must track
-	 * the current_ring for which the idx is valid.  See bo2idx().
-	 *
-	 * [*] in case multiple ringbuffers, ie. one toplevel and other rb(s)
-	 * used for IB target(s), the toplevel rb is the parent which is
-	 * tracking bo's for the submit
+	/* to avoid excess hashtable lookups, cache the ring this bo was
+	 * last emitted on (since that will probably also be the next ring
+	 * it is emitted on)
 	 */
-	struct fd_ringbuffer *current_ring;
+	unsigned current_ring_seqno;
 	uint32_t idx;
 };
 
diff --git a/freedreno/msm/msm_ringbuffer.c b/freedreno/msm/msm_ringbuffer.c
index 86fc83e0..fbfaefae 100644
--- a/freedreno/msm/msm_ringbuffer.c
+++ b/freedreno/msm/msm_ringbuffer.c
@@ -92,6 +92,11 @@ struct msm_ringbuffer {
 
 	int is_growable;
 	unsigned cmd_count;
+
+	unsigned seqno;
+
+	/* maps fd_bo to idx: */
+	void *bo_table;
 };
 
 static inline struct msm_ringbuffer * to_msm_ringbuffer(struct fd_ringbuffer *x)
@@ -217,21 +222,24 @@ static uint32_t bo2idx(struct fd_ringbuffer *ring, struct fd_bo *bo, uint32_t fl
 	struct msm_bo *msm_bo = to_msm_bo(bo);
 	uint32_t idx;
 	pthread_mutex_lock(&idx_lock);
-	if (!msm_bo->current_ring) {
-		idx = append_bo(ring, bo);
-		msm_bo->current_ring = ring;
-		msm_bo->idx = idx;
-	} else if (msm_bo->current_ring == ring) {
+	if (msm_bo->current_ring_seqno == msm_ring->seqno) {
 		idx = msm_bo->idx;
 	} else {
-		/* slow-path: */
-		for (idx = 0; idx < msm_ring->nr_bos; idx++)
-			if (msm_ring->bos[idx] == bo)
-				break;
-		if (idx == msm_ring->nr_bos) {
-			/* not found */
+		void *val;
+
+		if (!msm_ring->bo_table)
+			msm_ring->bo_table = drmHashCreate();
+
+		if (!drmHashLookup(msm_ring->bo_table, bo->handle, &val)) {
+			/* found */
+			idx = (uint32_t)val;
+		} else {
 			idx = append_bo(ring, bo);
+			val = (void *)idx;
+			drmHashInsert(msm_ring->bo_table, bo->handle, val);
 		}
+		msm_bo->current_ring_seqno = msm_ring->seqno;
+		msm_bo->idx = idx;
 	}
 	pthread_mutex_unlock(&idx_lock);
 	if (flags & FD_RELOC_READ)
@@ -318,7 +326,7 @@ static void flush_reset(struct fd_ringbuffer *ring)
 
 	for (i = 0; i < msm_ring->nr_bos; i++) {
 		struct msm_bo *msm_bo = to_msm_bo(msm_ring->bos[i]);
-		msm_bo->current_ring = NULL;
+		msm_bo->current_ring_seqno = 0;
 		fd_bo_del(&msm_bo->base);
 	}
 
@@ -333,6 +341,11 @@ static void flush_reset(struct fd_ringbuffer *ring)
 	msm_ring->nr_cmds = 0;
 	msm_ring->nr_bos = 0;
 
+	if (msm_ring->bo_table) {
+		drmHashDestroy(msm_ring->bo_table);
+		msm_ring->bo_table = NULL;
+	}
+
 	if (msm_ring->is_growable) {
 		delete_cmds(msm_ring);
 	} else {
@@ -551,6 +564,7 @@ drm_private struct fd_ringbuffer * msm_ringbuffer_new(struct fd_pipe *pipe,
 	}
 
 	list_inithead(&msm_ring->cmd_list);
+	msm_ring->seqno = ++to_msm_device(pipe->dev)->ring_cnt;
 
 	ring = &msm_ring->base;
 	ring->funcs = &funcs;