author    Michel Dänzer  2018-02-08 02:50:53 -0600
committer Michel Dänzer  2018-02-08 02:50:53 -0600
commit    fa35b51f6366bd44185177f0a66e02191905d774 (patch)
tree      5cc6562e9d4061120d0a77297d81ec062f84ced0 /amdgpu
parent    09642c073e8af71127cf98b48fe1b2a376c606cf (diff)
Revert "amdgpu: clean up non list code path for vamgr"
This reverts commit 41b94a3fb6e87d057fad78568d920d29489e5060. It caused crashes with radeonsi in at least glxgears and Xorg.
Diffstat (limited to 'amdgpu')
-rw-r--r--  amdgpu/amdgpu_internal.h |   2
-rw-r--r--  amdgpu/amdgpu_vamgr.c    | 121
2 files changed, 80 insertions(+), 43 deletions(-)
diff --git a/amdgpu/amdgpu_internal.h b/amdgpu/amdgpu_internal.h
index 75276a99..3e044f11 100644
--- a/amdgpu/amdgpu_internal.h
+++ b/amdgpu/amdgpu_internal.h
@@ -53,6 +53,8 @@ struct amdgpu_bo_va_hole {
 };
 
 struct amdgpu_bo_va_mgr {
+	/* the start virtual address */
+	uint64_t va_offset;
 	uint64_t va_max;
 	struct list_head va_holes;
 	pthread_mutex_t bo_va_mutex;
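
The restored field is the crux of the revert: va_offset is a top-of-range
bump pointer, and va_holes only tracks gaps freed below it. A minimal
sketch of that state and its invariant, using simplified standalone types
rather than libdrm's list_head (illustrative only, not the real layout):

/* Hedged sketch of the restored allocator state; simplified types,
 * not libdrm's actual struct amdgpu_bo_va_mgr. */
#include <stdint.h>

struct hole {                  /* a gap freed below the bump pointer */
	uint64_t offset;
	uint64_t size;
	struct hole *next;     /* stand-in for libdrm's list_head */
};

struct va_mgr_sketch {
	uint64_t va_offset;    /* bump pointer: lowest never-allocated VA */
	uint64_t va_max;       /* end of the managed range */
	struct hole *holes;    /* gaps in [start, va_offset), uppermost first */
};

/* Invariant: hole->offset + hole->size <= va_offset for every hole,
 * so fresh allocations bumped from va_offset never overlap a hole. */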
diff --git a/amdgpu/amdgpu_vamgr.c b/amdgpu/amdgpu_vamgr.c
index 2311e5eb..a2852b55 100644
--- a/amdgpu/amdgpu_vamgr.c
+++ b/amdgpu/amdgpu_vamgr.c
@@ -48,19 +48,12 @@ int amdgpu_va_range_query(amdgpu_device_handle dev,
 drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
 				   uint64_t max, uint64_t alignment)
 {
-	struct amdgpu_bo_va_hole *n;
-
+	mgr->va_offset = start;
 	mgr->va_max = max;
 	mgr->va_alignment = alignment;
 
 	list_inithead(&mgr->va_holes);
 	pthread_mutex_init(&mgr->bo_va_mutex, NULL);
-	pthread_mutex_lock(&mgr->bo_va_mutex);
-	n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
-	n->size = mgr->va_max;
-	n->offset = start;
-	list_add(&n->list, &mgr->va_holes);
-	pthread_mutex_unlock(&mgr->bo_va_mutex);
 }
 
 drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr)
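
With the seeded hole gone, the restored amdgpu_vamgr_init leaves the hole
list empty and stores the range start in va_offset. A hedged usage sketch
(the manager is internal to libdrm; the start/max/alignment values here
are made up for illustration):

struct amdgpu_bo_va_mgr mgr;

/* Restored behaviour: after this call mgr.va_offset == 0x200000 and
 * mgr.va_holes is empty; the first allocation simply bumps va_offset.
 * The reverted cleanup instead seeded va_holes with one hole covering
 * the whole range and never consulted va_offset. */
amdgpu_vamgr_init(&mgr, 0x200000 /* start */, 1ULL << 40 /* max */,
		  0x1000 /* alignment */);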
@@ -129,14 +122,41 @@ amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
 		}
 	}
 
+	if (base_required) {
+		if (base_required < mgr->va_offset) {
+			pthread_mutex_unlock(&mgr->bo_va_mutex);
+			return AMDGPU_INVALID_VA_ADDRESS;
+		}
+		offset = mgr->va_offset;
+		waste = base_required - mgr->va_offset;
+	} else {
+		offset = mgr->va_offset;
+		waste = offset % alignment;
+		waste = waste ? alignment - waste : 0;
+	}
+
+	if (offset + waste + size > mgr->va_max) {
+		pthread_mutex_unlock(&mgr->bo_va_mutex);
+		return AMDGPU_INVALID_VA_ADDRESS;
+	}
+
+	if (waste) {
+		n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
+		n->size = waste;
+		n->offset = offset;
+		list_add(&n->list, &mgr->va_holes);
+	}
+
+	offset += waste;
+	mgr->va_offset += size + waste;
 	pthread_mutex_unlock(&mgr->bo_va_mutex);
-	return AMDGPU_INVALID_VA_ADDRESS;
+	return offset;
 }
 
 static drm_private void
 amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size)
 {
-	struct amdgpu_bo_va_hole *hole, *next;
+	struct amdgpu_bo_va_hole *hole;
 
 	if (va == AMDGPU_INVALID_VA_ADDRESS)
 		return;
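
The block restored above is the fallback taken when no existing hole
satisfies the request: allocate from the bump pointer, and record any
alignment or base_required padding ("waste") as a new hole so it can be
recycled. A sketch of just that arithmetic, reusing the simplified types
from the first sketch (no locking, no hole recording; assumptions noted
in comments):

#define INVALID_VA (~0ULL)   /* stand-in for AMDGPU_INVALID_VA_ADDRESS */

/* Hedged sketch of the restored bump-pointer tail of find_va. */
static uint64_t bump_alloc(struct va_mgr_sketch *m, uint64_t size,
			   uint64_t alignment, uint64_t base_required)
{
	uint64_t offset = m->va_offset;
	uint64_t waste;

	if (base_required) {
		/* A fixed address below the bump pointer is unreachable
		 * on this path; the real code then fails the request. */
		if (base_required < m->va_offset)
			return INVALID_VA;
		waste = base_required - m->va_offset;
	} else {
		/* Pad up to the next aligned address (alignment != 0). */
		waste = offset % alignment;
		waste = waste ? alignment - waste : 0;
	}

	if (offset + waste + size > m->va_max)
		return INVALID_VA;   /* range exhausted */

	/* The real code also records 'waste' as a hole for reuse. */
	m->va_offset += waste + size;
	return offset + waste;
}

Worked example: with va_offset = 0x201800, alignment = 0x1000 and
size = 0x2000, waste comes out to 0x800, the call returns 0x202000,
va_offset advances to 0x204000, and the real code turns
[0x201800, 0x202000) into a recyclable hole.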
@@ -144,46 +164,61 @@ amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size)
 	size = ALIGN(size, mgr->va_alignment);
 
 	pthread_mutex_lock(&mgr->bo_va_mutex);
-	hole = container_of(&mgr->va_holes, hole, list);
-	LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
-		if (next->offset < va)
-			break;
-		hole = next;
-	}
-
-	if (&hole->list != &mgr->va_holes) {
-		/* Grow upper hole if it's adjacent */
-		if (hole->offset == (va + size)) {
-			hole->offset = va;
-			hole->size += size;
-			/* Merge lower hole if it's adjacent */
-			if (next != hole &&
-			    &next->list != &mgr->va_holes &&
-			    (next->offset + next->size) == va) {
-				next->size += hole->size;
-				list_del(&hole->list);
-				free(hole);
-			}
-		}
-	}
-
-	/* Grow lower hole if it's adjacent */
-	if (next != hole && &next->list != &mgr->va_holes &&
-	    (next->offset + next->size) == va) {
-		next->size += size;
-		goto out;
-	}
-
-	/* FIXME on allocation failure we just lose virtual address space
-	 * maybe print a warning
-	 */
-	next = calloc(1, sizeof(struct amdgpu_bo_va_hole));
-	if (next) {
-		next->size = size;
-		next->offset = va;
-		list_add(&next->list, &hole->list);
-	}
-
+	if ((va + size) == mgr->va_offset) {
+		mgr->va_offset = va;
+		/* Delete uppermost hole if it reaches the new top */
+		if (!LIST_IS_EMPTY(&mgr->va_holes)) {
+			hole = container_of(mgr->va_holes.next, hole, list);
+			if ((hole->offset + hole->size) == va) {
+				mgr->va_offset = hole->offset;
+				list_del(&hole->list);
+				free(hole);
+			}
+		}
+	} else {
+		struct amdgpu_bo_va_hole *next;
+
+		hole = container_of(&mgr->va_holes, hole, list);
+		LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
+			if (next->offset < va)
+				break;
+			hole = next;
+		}
+
+		if (&hole->list != &mgr->va_holes) {
+			/* Grow upper hole if it's adjacent */
+			if (hole->offset == (va + size)) {
+				hole->offset = va;
+				hole->size += size;
+				/* Merge lower hole if it's adjacent */
+				if (next != hole &&
+				    &next->list != &mgr->va_holes &&
+				    (next->offset + next->size) == va) {
+					next->size += hole->size;
+					list_del(&hole->list);
+					free(hole);
+				}
+				goto out;
+			}
+		}
+
+		/* Grow lower hole if it's adjacent */
+		if (next != hole && &next->list != &mgr->va_holes &&
+		    (next->offset + next->size) == va) {
+			next->size += size;
+			goto out;
+		}
+
+		/* FIXME on allocation failure we just lose virtual address space
+		 * maybe print a warning
+		 */
+		next = calloc(1, sizeof(struct amdgpu_bo_va_hole));
+		if (next) {
+			next->size = size;
+			next->offset = va;
+			list_add(&next->list, &hole->list);
+		}
+	}
 out:
 	pthread_mutex_unlock(&mgr->bo_va_mutex);
 }