-rw-r--r--	amdgpu/amdgpu_vamgr.c	236

1 file changed, 118 insertions(+), 118 deletions(-)
diff --git a/amdgpu/amdgpu_vamgr.c b/amdgpu/amdgpu_vamgr.c
index 070ecc47..877e0baa 100644
--- a/amdgpu/amdgpu_vamgr.c
+++ b/amdgpu/amdgpu_vamgr.c
@@ -43,134 +43,134 @@ void amdgpu_vamgr_init(struct amdgpu_device *dev)
uint64_t amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr,
                              uint64_t size, uint64_t alignment)
{
        struct amdgpu_bo_va_hole *hole, *n;
        uint64_t offset = 0, waste = 0;

        alignment = MAX2(alignment, mgr->va_alignment);
        size = ALIGN(size, mgr->va_alignment);

        pthread_mutex_lock(&mgr->bo_va_mutex);
        /* TODO: using more appropriate way to track the holes */
        /* first look for a hole */
        LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
                offset = hole->offset;
                waste = offset % alignment;
                waste = waste ? alignment - waste : 0;
                offset += waste;
                if (offset >= (hole->offset + hole->size)) {
                        continue;
                }
                if (!waste && hole->size == size) {
                        offset = hole->offset;
                        list_del(&hole->list);
                        free(hole);
                        pthread_mutex_unlock(&mgr->bo_va_mutex);
                        return offset;
                }
                if ((hole->size - waste) > size) {
                        if (waste) {
                                n = calloc(1,
                                           sizeof(struct amdgpu_bo_va_hole));
                                n->size = waste;
                                n->offset = hole->offset;
                                list_add(&n->list, &hole->list);
                        }
                        hole->size -= (size + waste);
                        hole->offset += size + waste;
                        pthread_mutex_unlock(&mgr->bo_va_mutex);
                        return offset;
                }
                if ((hole->size - waste) == size) {
                        hole->size = waste;
                        pthread_mutex_unlock(&mgr->bo_va_mutex);
                        return offset;
                }
        }

        offset = mgr->va_offset;
        waste = offset % alignment;
        waste = waste ? alignment - waste : 0;

        if (offset + waste + size > mgr->va_max) {
                pthread_mutex_unlock(&mgr->bo_va_mutex);
                return AMDGPU_INVALID_VA_ADDRESS;
        }

        if (waste) {
                n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
                n->size = waste;
                n->offset = offset;
                list_add(&n->list, &mgr->va_holes);
        }
        offset += waste;
        mgr->va_offset += size + waste;
        pthread_mutex_unlock(&mgr->bo_va_mutex);
        return offset;
}
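The heart of amdgpu_vamgr_find_va() is the waste computation: each candidate offset is rounded up to the requested alignment, and the skipped bytes are tracked as "waste" so they can be split off into a new hole rather than lost. A minimal standalone sketch of that round-up step (illustrative only, not libdrm code; align_up is a hypothetical helper):

#include <inttypes.h>
#include <stdio.h>

/* Round `offset` up to the next multiple of `alignment` and report the
 * skipped bytes, mirroring the waste computation in the allocator above.
 * Assumes `alignment` is nonzero, as mgr->va_alignment guarantees there. */
static uint64_t align_up(uint64_t offset, uint64_t alignment, uint64_t *waste)
{
        uint64_t rem = offset % alignment;
        *waste = rem ? alignment - rem : 0;
        return offset + *waste;
}

int main(void)
{
        uint64_t waste;
        /* A hole starting at 0x1003, aligned to 0x1000: 0xffd bytes of waste. */
        uint64_t start = align_up(0x1003, 0x1000, &waste);
        /* Prints: start=0x2000 waste=0xffd */
        printf("start=0x%" PRIx64 " waste=0x%" PRIx64 "\n", start, waste);
        return 0;
}
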
void amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va,
                          uint64_t size)
{
        struct amdgpu_bo_va_hole *hole;

        size = ALIGN(size, mgr->va_alignment);

        pthread_mutex_lock(&mgr->bo_va_mutex);
        if ((va + size) == mgr->va_offset) {
                mgr->va_offset = va;
                /* Delete uppermost hole if it reaches the new top */
                if (!LIST_IS_EMPTY(&mgr->va_holes)) {
                        hole = container_of(mgr->va_holes.next, hole, list);
                        if ((hole->offset + hole->size) == va) {
                                mgr->va_offset = hole->offset;
                                list_del(&hole->list);
                                free(hole);
                        }
                }
        } else {
                struct amdgpu_bo_va_hole *next;

                hole = container_of(&mgr->va_holes, hole, list);
                LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
                        if (next->offset < va)
                                break;
                        hole = next;
                }

                if (&hole->list != &mgr->va_holes) {
                        /* Grow upper hole if it's adjacent */
                        if (hole->offset == (va + size)) {
                                hole->offset = va;
                                hole->size += size;
                                /* Merge lower hole if it's adjacent */
                                if (next != hole
                                    && &next->list != &mgr->va_holes
                                    && (next->offset + next->size) == va) {
                                        next->size += hole->size;
                                        list_del(&hole->list);
                                        free(hole);
                                }
                                goto out;
                        }
                }

                /* Grow lower hole if it's adjacent */
                if (next != hole && &next->list != &mgr->va_holes &&
                    (next->offset + next->size) == va) {
                        next->size += size;
                        goto out;
                }

                /* FIXME on allocation failure we just lose virtual address space
                 * maybe print a warning
                 */
                next = calloc(1, sizeof(struct amdgpu_bo_va_hole));
                if (next) {
                        next->size = size;
                        next->offset = va;
                        list_add(&next->list, &hole->list);
                }
        }
out:
        pthread_mutex_unlock(&mgr->bo_va_mutex);
}
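
Taken together, the two functions form a simple bump allocator over [va_offset, va_max) with a free-hole list kept sorted by offset (highest first): amdgpu_vamgr_find_va() first tries to reuse a hole (first fit, splitting off any alignment waste) before bumping va_offset, and amdgpu_vamgr_free_va() either lowers the top of the range or merges the released span into adjacent holes. A hypothetical call sequence, assuming `mgr` points to an amdgpu_bo_va_mgr already set up by amdgpu_vamgr_init():

        /* Sketch only: reserve 4 KiB of GPU virtual address space with 4 KiB
         * alignment, use it, then return it to the manager. */
        uint64_t va = amdgpu_vamgr_find_va(mgr, 4096, 4096);
        if (va != AMDGPU_INVALID_VA_ADDRESS) {
                /* ... map a buffer object at `va` ... */
                amdgpu_vamgr_free_va(mgr, va, 4096);
        }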