1 /*
2 * Copyright (c) 2010, Texas Instruments Incorporated
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * * Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 *
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * * Neither the name of Texas Instruments Incorporated nor the names of
17 * its contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
24 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
25 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
26 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
27 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
28 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
29 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
30 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 * */
32 /*
33 * tmm_pat.c
34 *
35 * DMM driver support functions for TI OMAP processors.
36 */
37 #include "proto.h"
38 #include "tmm.h"
39 #include "tiler.h"
40 #include <dlfcn.h>
/**
 * Number of pages to allocate when
 * refilling the free page stack.
 */
#define TILER_GROW_SIZE 16
/* Size of one DMM page in bytes (4 KiB) */
#define DMM_PAGE 0x1000
/* Pages allocated per refill; defaults to TILER_GROW_SIZE, may be
 * overridden by the 'size' argument of tmm_pat_init(). */
static unsigned long grow_size = TILER_GROW_SIZE;
/* Max pages in free page stack */
#define PAGE_CAP (256 * 128)
/* Number of pages currently allocated */
static unsigned long count;
/* PAT driver entry points, resolved at runtime from tiler_pat_lib
 * via dlsym() in tmm_pat_init(). */
struct dmm *(*dmm_pat_init_sym) (u32);
void (*dmm_pat_release_sym)(struct dmm *);
s32 (*dmm_pat_refill_sym)(struct dmm *, struct pat *, enum pat_mode);
/* Shared-library handle; defined elsewhere — presumably the result of
 * dlopen() on the PAT library (TODO confirm against its definition). */
extern void * tiler_pat_lib;
/**
 * Used to keep track of mem per
 * dmm_get_pages call.
 */
struct fast {
	struct list_head list;	/* node in dmm_mem.fast_list */
	struct mem **mem;	/* array of 'num' page descriptors */
	u32 *pa;		/* array of 'num' physical addresses (handed to caller) */
	u32 num;		/* number of pages in this allocation */
};
/**
 * Used to keep track of the page struct ptrs
 * and physical addresses of each page.
 */
struct mem {
	struct list_head list;	/* node in the free or used stack */
	//struct page *pg;
	void *pg;	/* virtual address of the mapped page */
	u32 pa;		/* physical address of the page */
};
/**
 * TMM PAT private structure
 */
struct dmm_mem {
	struct fast fast_list;	/* per-get_pages bookkeeping entries */
	struct mem free_list;	/* stack of mapped but unused pages */
	struct mem used_list;	/* NOTE(review): only ever freed at deinit in this file */
	//struct mutex mtx;
	pthread_mutex_t mtx;	/* guards the three lists and 'count' */
	struct dmm *dmm;	/* handle returned by dmm_pat_init_sym */
};
95 static void dmm_free_fast_list(struct fast *fast)
96 {
97 struct list_head *pos = NULL, *q = NULL;
98 struct fast *f = NULL;
99 s32 i = 0;
101 /* mutex is locked */
102 list_for_each_safe(pos, q, &fast->list) {
103 f = list_entry(pos, struct fast, list);
104 for (i = 0; i < f->num; i++) {
105 munmap(f->mem[i]->pg, PAGE_SIZE);
106 kfree(f->mem[i]);
107 }
108 kfree(f->pa);
109 kfree(f->mem);
110 list_del(pos);
111 kfree(f);
112 }
113 }
115 static u32 fill_page_stack(struct mem *mem, pthread_mutex_t *mtx, int npages)
116 {
117 s32 i = 0;
118 struct mem *m = NULL;
119 s32 ret = 0;
120 u32 len = 0;
121 s64 offset = 0;
122 void *chunk_va = NULL;
123 u32 chunk_pa = 0;
124 u32 page_size = PAGE_SIZE;
125 u32 bytes = npages * page_size;
127 chunk_va = mmap64(NULL, bytes, PROT_NOCACHE | PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, NOFD, 0);
128 if (chunk_va == MAP_FAILED) {
129 return -ENOMEM;
130 }
132 do {
133 ret = mem_offset64(chunk_va, NOFD, bytes, &offset, &len);
134 if (ret) {
135 fprintf(stderr, "Tiler: Error from mem_offset [%d]\n", errno);
136 return -ENOMEM;
137 }
138 else {
139 chunk_pa = (u32)offset;
140 }
141 bytes -= len;
143 for (i = 0; i < (len/page_size); i++) {
144 m = kmalloc(sizeof(*m), GFP_KERNEL);
145 if (!m)
146 return -ENOMEM;
147 memset(m, 0x0, sizeof(*m));
149 m->pg = chunk_va;
150 m->pa = chunk_pa;
152 mutex_lock(mtx);
153 count++;
154 list_add(&m->list, &mem->list);
155 mutex_unlock(mtx);
157 chunk_va += page_size;
158 chunk_pa += page_size;
159 }
160 } while(bytes > 0);
162 return 0x0;
163 }
165 static void dmm_free_page_stack(struct mem *mem)
166 {
167 struct list_head *pos = NULL, *q = NULL;
168 struct mem *m = NULL;
170 /* mutex is locked */
171 list_for_each_safe(pos, q, &mem->list) {
172 m = list_entry(pos, struct mem, list);
173 munmap(m->pg, PAGE_SIZE);
174 list_del(pos);
175 kfree(m);
176 count--;
177 }
178 }
180 static void tmm_pat_deinit(struct tmm *tmm)
181 {
182 struct dmm_mem *pvt = (struct dmm_mem *) tmm->pvt;
184 mutex_lock(&pvt->mtx);
185 dmm_free_fast_list(&pvt->fast_list);
186 dmm_free_page_stack(&pvt->free_list);
187 dmm_free_page_stack(&pvt->used_list);
188 mutex_destroy(&pvt->mtx);
189 }
191 /* free up memory that is currently only on the free lists */
192 static void tmm_pat_purge(struct tmm *tmm)
193 {
194 struct dmm_mem *pvt = (struct dmm_mem *) tmm->pvt;
196 mutex_lock(&pvt->mtx);
198 dmm_free_page_stack(&pvt->free_list);
200 mutex_unlock(&pvt->mtx);
201 }
203 static u32 *tmm_pat_get_pages(struct tmm *tmm, s32 n)
204 {
205 s32 i = 0;
206 struct list_head *pos = NULL, *q = NULL;
207 struct mem *m = NULL;
208 struct fast *f = NULL;
209 struct dmm_mem *pvt = (struct dmm_mem *) tmm->pvt;
211 if (n <= 0 || n > 0x8000)
212 return NULL;
214 if (list_empty_careful(&pvt->free_list.list))
215 if (fill_page_stack(&pvt->free_list, &pvt->mtx, n))
216 return NULL;
218 f = kmalloc(sizeof(*f), GFP_KERNEL);
219 if (!f)
220 return NULL;
221 memset(f, 0x0, sizeof(*f));
223 /* array of mem struct pointers */
224 f->mem = kmalloc(n * sizeof(*f->mem), GFP_KERNEL);
225 if (!f->mem) {
226 kfree(f); return NULL;
227 }
228 memset(f->mem, 0x0, n * sizeof(*f->mem));
230 /* array of physical addresses */
231 f->pa = kmalloc(n * sizeof(*f->pa), GFP_KERNEL);
232 if (!f->pa) {
233 kfree(f->mem); kfree(f); return NULL;
234 }
235 memset(f->pa, 0x0, n * sizeof(*f->pa));
237 /*
238 * store the number of mem structs so that we
239 * know how many to free later.
240 */
241 f->num = n;
243 for (i = 0; i < n; i++) {
244 if (list_empty_careful(&pvt->free_list.list))
245 if (fill_page_stack(&pvt->free_list, &pvt->mtx, (n - i)))
246 goto cleanup;
248 mutex_lock(&pvt->mtx);
249 pos = NULL;
250 q = NULL;
251 m = NULL;
253 /*
254 * remove one mem struct from the free list and
255 * add the address to the fast struct mem array
256 */
257 list_for_each_safe(pos, q, &pvt->free_list.list) {
258 m = list_entry(pos, struct mem, list);
259 list_del(pos);
260 break;
261 }
262 mutex_unlock(&pvt->mtx);
264 if (m != NULL) {
265 f->mem[i] = m;
266 f->pa[i] = m->pa;
267 }
268 else {
269 goto cleanup;
270 }
271 }
273 mutex_lock(&pvt->mtx);
274 list_add(&f->list, &pvt->fast_list.list);
275 mutex_unlock(&pvt->mtx);
277 if (f != NULL)
278 return f->pa;
279 cleanup:
280 for (; i > 0; i--) {
281 mutex_lock(&pvt->mtx);
282 list_add(&f->mem[i - 1]->list, &pvt->free_list.list);
283 mutex_unlock(&pvt->mtx);
284 }
285 kfree(f->pa);
286 kfree(f->mem);
287 kfree(f);
288 return NULL;
289 }
291 static void tmm_pat_free_pages(struct tmm *tmm, u32 *list)
292 {
293 struct dmm_mem *pvt = (struct dmm_mem *) tmm->pvt;
294 struct list_head *pos = NULL, *q = NULL;
295 struct fast *f = NULL;
296 s32 i = 0;
298 mutex_lock(&pvt->mtx);
299 pos = NULL;
300 q = NULL;
301 list_for_each_safe(pos, q, &pvt->fast_list.list) {
302 f = list_entry(pos, struct fast, list);
303 if (f->pa[0] == list[0]) {
304 for (i = 0; i < f->num; i++) {
305 if (count < PAGE_CAP && !tiler_islowmem()) {
306 memset(((struct mem *)f->mem[i])->pg, 0x0, PAGE_SIZE);
307 list_add(
308 &((struct mem *)f->mem[i])->list,
309 &pvt->free_list.list);
310 } else {
311 munmap(
312 ((struct mem *)f->mem[i])->pg, PAGE_SIZE);
313 kfree(f->mem[i]);
314 count--;
315 }
316 }
317 list_del(pos);
318 kfree(f->pa);
319 kfree(f->mem);
320 kfree(f);
321 break;
322 }
323 }
324 mutex_unlock(&pvt->mtx);
325 }
327 static s32 tmm_pat_map(struct tmm *tmm, struct pat_area area, u32 page_pa)
328 {
329 struct dmm_mem *pvt = (struct dmm_mem *) tmm->pvt;
330 struct pat pat_desc = {0};
332 /* send pat descriptor to dmm driver */
333 pat_desc.ctrl.dir = 0;
334 pat_desc.ctrl.ini = 0;
335 pat_desc.ctrl.lut_id = 0;
336 pat_desc.ctrl.start = 1;
337 pat_desc.ctrl.sync = 0;
338 pat_desc.area = area;
339 pat_desc.next = NULL;
341 /* must be a 16-byte aligned physical address */
342 pat_desc.data = page_pa;
343 return (*dmm_pat_refill_sym)(pvt->dmm, &pat_desc, MANUAL);
344 }
346 struct tmm *tmm_pat_init(u32 pat_id, u32 size)
347 {
348 struct tmm *tmm = NULL;
349 struct dmm_mem *pvt = NULL;
350 struct dmm *dmm = NULL;
352 dmm_pat_init_sym = dlsym(tiler_pat_lib, "dmm_pat_init");
353 if (dmm_pat_init_sym == NULL) {
354 fprintf(stderr, "tmm_pat_init: Error getting shared lib sym [%d]\n", errno);
355 goto error;
356 }
358 dmm_pat_release_sym = dlsym(tiler_pat_lib, "dmm_pat_release");
359 if (dmm_pat_release_sym == NULL) {
360 fprintf(stderr, "tmm_pat_init: Error getting shared lib sym [%d]\n", errno);
361 goto error;
362 }
364 dmm_pat_refill_sym = dlsym(tiler_pat_lib, "dmm_pat_refill");
365 if (dmm_pat_refill_sym == NULL) {
366 fprintf(stderr, "tmm_pat_init: Error getting shared lib sym [%d]\n", errno);
367 goto error;
368 }
370 dmm = (*dmm_pat_init_sym)(pat_id);
371 if (dmm)
372 tmm = kmalloc(sizeof(*tmm), GFP_KERNEL);
373 if (tmm)
374 pvt = kmalloc(sizeof(*pvt), GFP_KERNEL);
375 if (pvt) {
376 /* private data */
377 pvt->dmm = dmm;
378 INIT_LIST_HEAD(&pvt->free_list.list);
379 INIT_LIST_HEAD(&pvt->used_list.list);
380 INIT_LIST_HEAD(&pvt->fast_list.list);
381 mutex_init(&pvt->mtx);
383 if (size)
384 grow_size = size;
386 fprintf(stderr, "configured grow size is %d\n", (int)grow_size);
388 count = 0;
389 if (list_empty_careful(&pvt->free_list.list))
390 if (fill_page_stack(&pvt->free_list, &pvt->mtx, grow_size))
391 goto error;
393 /* public data */
394 tmm->pvt = pvt;
395 tmm->deinit = tmm_pat_deinit;
396 tmm->purge = tmm_pat_purge;
397 tmm->get = tmm_pat_get_pages;
398 tmm->free = tmm_pat_free_pages;
399 tmm->map = tmm_pat_map;
401 return tmm;
402 }
404 error:
405 kfree(pvt);
406 kfree(tmm);
407 if (dmm_pat_release_sym)
408 (*dmm_pat_release_sym)(dmm);
409 return NULL;
410 }