aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChunming Zhou2018-02-08 00:52:11 -0600
committerChunming Zhou2018-02-08 00:52:23 -0600
commit41b94a3fb6e87d057fad78568d920d29489e5060 (patch)
treeffd03a70bdbfb85a452fcfa8807524447df5bff9 /amdgpu/amdgpu_vamgr.c
parentd07be74a4afe9d22f987aca7e8e84cccaa210248 (diff)
downloadexternal-libgbm-41b94a3fb6e87d057fad78568d920d29489e5060.tar.gz
external-libgbm-41b94a3fb6e87d057fad78568d920d29489e5060.tar.xz
external-libgbm-41b94a3fb6e87d057fad78568d920d29489e5060.zip
amdgpu: clean up non list code path for vamgr
Signed-off-by: Chunming Zhou <david1.zhou@amd.com> Reviewed-by: Christian König <christian.koenig@amd.com>
Diffstat (limited to 'amdgpu/amdgpu_vamgr.c')
-rw-r--r--amdgpu/amdgpu_vamgr.c121
1 files changed, 43 insertions, 78 deletions
diff --git a/amdgpu/amdgpu_vamgr.c b/amdgpu/amdgpu_vamgr.c
index a2852b55..2311e5eb 100644
--- a/amdgpu/amdgpu_vamgr.c
+++ b/amdgpu/amdgpu_vamgr.c
@@ -48,12 +48,19 @@ int amdgpu_va_range_query(amdgpu_device_handle dev,
48drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start, 48drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
49 uint64_t max, uint64_t alignment) 49 uint64_t max, uint64_t alignment)
50{ 50{
51 mgr->va_offset = start; 51 struct amdgpu_bo_va_hole *n;
52
52 mgr->va_max = max; 53 mgr->va_max = max;
53 mgr->va_alignment = alignment; 54 mgr->va_alignment = alignment;
54 55
55 list_inithead(&mgr->va_holes); 56 list_inithead(&mgr->va_holes);
56 pthread_mutex_init(&mgr->bo_va_mutex, NULL); 57 pthread_mutex_init(&mgr->bo_va_mutex, NULL);
58 pthread_mutex_lock(&mgr->bo_va_mutex);
59 n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
60 n->size = mgr->va_max;
61 n->offset = start;
62 list_add(&n->list, &mgr->va_holes);
63 pthread_mutex_unlock(&mgr->bo_va_mutex);
57} 64}
58 65
59drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr) 66drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr)
@@ -122,41 +129,14 @@ amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
122 } 129 }
123 } 130 }
124 131
125 if (base_required) {
126 if (base_required < mgr->va_offset) {
127 pthread_mutex_unlock(&mgr->bo_va_mutex);
128 return AMDGPU_INVALID_VA_ADDRESS;
129 }
130 offset = mgr->va_offset;
131 waste = base_required - mgr->va_offset;
132 } else {
133 offset = mgr->va_offset;
134 waste = offset % alignment;
135 waste = waste ? alignment - waste : 0;
136 }
137
138 if (offset + waste + size > mgr->va_max) {
139 pthread_mutex_unlock(&mgr->bo_va_mutex);
140 return AMDGPU_INVALID_VA_ADDRESS;
141 }
142
143 if (waste) {
144 n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
145 n->size = waste;
146 n->offset = offset;
147 list_add(&n->list, &mgr->va_holes);
148 }
149
150 offset += waste;
151 mgr->va_offset += size + waste;
152 pthread_mutex_unlock(&mgr->bo_va_mutex); 132 pthread_mutex_unlock(&mgr->bo_va_mutex);
153 return offset; 133 return AMDGPU_INVALID_VA_ADDRESS;
154} 134}
155 135
156static drm_private void 136static drm_private void
157amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size) 137amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size)
158{ 138{
159 struct amdgpu_bo_va_hole *hole; 139 struct amdgpu_bo_va_hole *hole, *next;
160 140
161 if (va == AMDGPU_INVALID_VA_ADDRESS) 141 if (va == AMDGPU_INVALID_VA_ADDRESS)
162 return; 142 return;
@@ -164,61 +144,46 @@ amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size)
164 size = ALIGN(size, mgr->va_alignment); 144 size = ALIGN(size, mgr->va_alignment);
165 145
166 pthread_mutex_lock(&mgr->bo_va_mutex); 146 pthread_mutex_lock(&mgr->bo_va_mutex);
167 if ((va + size) == mgr->va_offset) { 147 hole = container_of(&mgr->va_holes, hole, list);
168 mgr->va_offset = va; 148 LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
169 /* Delete uppermost hole if it reaches the new top */ 149 if (next->offset < va)
170 if (!LIST_IS_EMPTY(&mgr->va_holes)) { 150 break;
171 hole = container_of(mgr->va_holes.next, hole, list); 151 hole = next;
172 if ((hole->offset + hole->size) == va) { 152 }
173 mgr->va_offset = hole->offset; 153
154 if (&hole->list != &mgr->va_holes) {
155 /* Grow upper hole if it's adjacent */
156 if (hole->offset == (va + size)) {
157 hole->offset = va;
158 hole->size += size;
159 /* Merge lower hole if it's adjacent */
160 if (next != hole &&
161 &next->list != &mgr->va_holes &&
162 (next->offset + next->size) == va) {
163 next->size += hole->size;
174 list_del(&hole->list); 164 list_del(&hole->list);
175 free(hole); 165 free(hole);
176 } 166 }
177 } 167 }
178 } else { 168 }
179 struct amdgpu_bo_va_hole *next;
180
181 hole = container_of(&mgr->va_holes, hole, list);
182 LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
183 if (next->offset < va)
184 break;
185 hole = next;
186 }
187
188 if (&hole->list != &mgr->va_holes) {
189 /* Grow upper hole if it's adjacent */
190 if (hole->offset == (va + size)) {
191 hole->offset = va;
192 hole->size += size;
193 /* Merge lower hole if it's adjacent */
194 if (next != hole &&
195 &next->list != &mgr->va_holes &&
196 (next->offset + next->size) == va) {
197 next->size += hole->size;
198 list_del(&hole->list);
199 free(hole);
200 }
201 goto out;
202 }
203 }
204 169
205 /* Grow lower hole if it's adjacent */ 170 /* Grow lower hole if it's adjacent */
206 if (next != hole && &next->list != &mgr->va_holes && 171 if (next != hole && &next->list != &mgr->va_holes &&
207 (next->offset + next->size) == va) { 172 (next->offset + next->size) == va) {
208 next->size += size; 173 next->size += size;
209 goto out; 174 goto out;
210 } 175 }
211 176
212 /* FIXME on allocation failure we just lose virtual address space 177 /* FIXME on allocation failure we just lose virtual address space
213 * maybe print a warning 178 * maybe print a warning
214 */ 179 */
215 next = calloc(1, sizeof(struct amdgpu_bo_va_hole)); 180 next = calloc(1, sizeof(struct amdgpu_bo_va_hole));
216 if (next) { 181 if (next) {
217 next->size = size; 182 next->size = size;
218 next->offset = va; 183 next->offset = va;
219 list_add(&next->list, &hole->list); 184 list_add(&next->list, &hole->list);
220 }
221 } 185 }
186
222out: 187out:
223 pthread_mutex_unlock(&mgr->bo_va_mutex); 188 pthread_mutex_unlock(&mgr->bo_va_mutex);
224} 189}