f3e41a71063df33842d979f302f79463ea529244
1 /*
2 * drivers/video/omap2/dsscomp/gralloc.c
3 *
4 * DSS Composition gralloc file
5 *
6 * Copyright (C) 2011 Texas Instruments, Inc
7 * Author: Lajos Molnar <molnar@ti.com>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License version 2 as published by
11 * the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program. If not, see <http://www.gnu.org/licenses/>.
20 */
22 #include <linux/kernel.h>
23 #include <linux/slab.h>
24 #include <linux/sched.h>
25 #include <linux/vmalloc.h>
26 #include "../../../drivers/gpu/drm/omapdrm/omap_dmm_tiler.h"
27 #include <video/dsscomp.h>
28 #include <plat/dsscomp.h>
29 #include "dsscomp.h"
30 #include "tiler-utils.h"
32 #ifdef CONFIG_HAS_EARLYSUSPEND
33 #include <linux/earlysuspend.h>
34 #endif
/* true while displays are blanked (early suspend); queued frames are dropped */
static bool blanked;
/* true while composing only to the TV manager (HDMI presentation mode) */
static bool presentation_mode;

/* slots reserved up front; maximum count after a slot has been split in two */
#define NUM_TILER1D_SLOTS 2
#define MAX_NUM_TILER1D_SLOTS 4

/*
 * A reserved TILER 1D container used to temporarily map non-TILER buffers
 * for DSS.  Slots live either on free_slots or on a composition's slot list.
 */
static struct tiler1d_slot {
	struct list_head q;		/* link in free_slots or gsync->slots */
	struct tiler_block *block_handle; /* TILER 1D reservation handle */
	u32 phys;			/* system-space base address of block */
	u32 size;			/* slot capacity in pages */
	u32 *page_map;			/* scratch array of page addrs to pin */
	short id;			/* slot index, or -1 when unused */
} slots[MAX_NUM_TILER1D_SLOTS];
static struct list_head free_slots;	/* slots not held by any composition */
static struct dsscomp_dev *cdev;	/* saved by dsscomp_gralloc_init() */
static DEFINE_MUTEX(mtx);		/* guards flip_queue, slots, flags */
/* counts available slots; upped as each slot is populated in init */
static struct semaphore free_slots_sem =
		__SEMAPHORE_INITIALIZER(free_slots_sem, 0);

/* gralloc composition sync object */
struct dsscomp_gralloc_t {
	void (*cb_fn)(void *, int);	/* completion callback (may be NULL) */
	void *cb_arg;			/* opaque argument passed to cb_fn */
	struct list_head q;		/* link in flip_queue (FIFO order) */
	struct list_head slots;		/* tiler1d slots held by this flip */
	atomic_t refs;			/* one per queued dsscomp + 1 temp ref */
	bool early_callback;		/* call back on PROGRAMMED, not later */
	bool programmed;		/* set once composition is programmed */
};

/* queued gralloc compositions */
static LIST_HEAD(flip_queue);

/* per-manager bitmask of overlays used by the previous applied frame */
static u32 ovl_use_mask[MAX_MANAGERS];
71 static inline bool needs_split(struct tiler1d_slot *slot)
72 {
73 return slot->size == tiler1d_slot_size(cdev) >> PAGE_SHIFT;
74 }
76 static struct tiler1d_slot *split_slots(struct tiler1d_slot *slot);
77 static struct tiler1d_slot *merge_slots(struct tiler1d_slot *slot);
78 static struct tiler1d_slot *alloc_tiler_slot(void);
/*
 * Recycle the tiler1d slots held by a finished composition.
 * Caller must hold mtx.  Each slot's backing pages are unpinned and the
 * free-slot semaphore is upped, then the whole list is spliced back onto
 * free_slots in one operation.
 */
static void unpin_tiler_blocks(struct list_head *slots)
{
	struct tiler1d_slot *slot;

	/* unpin any tiler memory */
	list_for_each_entry(slot, slots, q) {
		tiler_unpin(slot->block_handle);
		up(&free_slots_sem);
	}

	/* free tiler slots */
	list_splice_init(slots, &free_slots);
}
/*
 * Completion callback shared by every dsscomp object of a gralloc flip.
 *
 * @data:   the dsscomp_gralloc_t sync object
 * @status: DSS completion status bits for one composition
 *
 * Drops a reference when a composition is RELEASED; the last reference
 * recycles the flip's tiler slots.  Then walks flip_queue in order, firing
 * "early" callbacks for programmed compositions and moving fully-released
 * entries to a local list so their final callbacks and kfree() happen with
 * mtx dropped.  Note that the gsync parameter variable is deliberately
 * reused as the list cursor after the initial ref handling.
 */
static void dsscomp_gralloc_cb(void *data, int status)
{
	struct dsscomp_gralloc_t *gsync = data, *gsync_;
	bool early_cbs = true;
	LIST_HEAD(done);

	mutex_lock(&mtx);
	if (gsync->early_callback && status == DSS_COMPLETION_PROGRAMMED)
		gsync->programmed = true;

	if (status & DSS_COMPLETION_RELEASED) {
		/* last ref gone: return this flip's tiler slots to the pool */
		if (atomic_dec_and_test(&gsync->refs))
			unpin_tiler_blocks(&gsync->slots);

		log_event(0, 0, gsync, "--refs=%d on %s",
				atomic_read(&gsync->refs),
				(u32) log_status_str(status));
	}

	/* get completed list items in order, if any */
	list_for_each_entry_safe(gsync, gsync_, &flip_queue, q) {
		if (gsync->cb_fn) {
			/* early callbacks must stay in queue order: stop
			 * firing as soon as one entry is not yet programmed */
			early_cbs &= gsync->early_callback && gsync->programmed;
			if (early_cbs) {
				gsync->cb_fn(gsync->cb_arg, 1);
				gsync->cb_fn = NULL;
			}
		}
		/* stop at the first entry still referenced and un-called */
		if (gsync->refs.counter && gsync->cb_fn)
			break;
		if (gsync->refs.counter == 0)
			list_move_tail(&gsync->q, &done);
	}
	mutex_unlock(&mtx);

	/* call back for completed composition with mutex unlocked */
	list_for_each_entry_safe(gsync, gsync_, &done, q) {
		if (debug & DEBUG_GRALLOC_PHASES)
			dev_info(DEV(cdev), "[%p] completed flip\n", gsync);

		log_event(0, 0, gsync, "calling %pf [%p]", (u32)gsync->cb_fn,
				(u32)gsync->cb_arg);

		if (gsync->cb_fn)
			gsync->cb_fn(gsync->cb_arg, 1);
		kfree(gsync);
	}
}
143 /* This is just test code for now that does the setup + apply.
144 It still uses userspace virtual addresses, but maps non
145 TILER buffers into 1D */
146 int dsscomp_gralloc_queue_ioctl(struct dsscomp_setup_dispc_data *d)
147 {
148 struct tiler_pa_info *pas[MAX_OVERLAYS];
149 s32 ret;
150 u32 i;
152 /* convert virtual addresses to physical and get tiler pa infos */
153 for (i = 0; i < d->num_ovls; i++) {
154 struct dss2_ovl_info *oi = d->ovls + i;
155 u32 addr = (u32) oi->address;
157 pas[i] = NULL;
159 /* only supporting DIRECT buffer types */
161 /* assume virtual NV12 for now */
162 if (oi->cfg.color_mode == OMAP_DSS_COLOR_NV12)
163 oi->uv = tiler_virt2phys(addr +
164 oi->cfg.height * oi->cfg.stride);
165 else
166 oi->uv = 0;
167 oi->ba = tiler_virt2phys(addr);
169 /* map non-TILER buffers to 1D */
170 if (oi->ba && !is_tiler_addr(oi->ba))
171 pas[i] = user_block_to_pa(addr & PAGE_MASK,
172 PAGE_ALIGN(oi->cfg.height * oi->cfg.stride +
173 (addr & ~PAGE_MASK)) >> PAGE_SHIFT);
174 }
175 ret = dsscomp_gralloc_queue(d, pas, false, NULL, NULL);
176 for (i = 0; i < d->num_ovls; i++)
177 tiler_pa_free(pas[i]);
178 return ret;
179 }
181 static bool dsscomp_is_any_device_active(void)
182 {
183 struct omap_dss_device *dssdev;
184 u32 display_ix;
186 /* We have to also check for device active in case HWC logic has
187 * not yet started for boot logo.
188 */
189 for (display_ix = 0 ; display_ix < cdev->num_displays ; display_ix++) {
190 dssdev = cdev->displays[display_ix];
191 if (dssdev && dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
192 return true;
193 }
194 return false;
195 }
197 int dsscomp_gralloc_queue(struct dsscomp_setup_dispc_data *d,
198 struct tiler_pa_info **pas,
199 bool early_callback,
200 void (*cb_fn)(void *, int), void *cb_arg)
201 {
202 u32 i;
203 int r = 0;
204 struct omap_dss_device *dev;
205 struct omap_overlay_manager *mgr;
206 static DEFINE_MUTEX(local_mtx);
207 struct dsscomp *comp[MAX_MANAGERS];
208 u32 ovl_new_use_mask[MAX_MANAGERS];
209 u32 mgr_set_mask = 0;
210 u32 ovl_set_mask = 0;
211 struct tiler1d_slot *slot = NULL;
212 u32 slot_used = 0;
213 #ifdef CONFIG_DEBUG_FS
214 u32 ms = ktime_to_ms(ktime_get());
215 #endif
216 u32 channels[MAX_MANAGERS], ch;
217 int skip;
218 struct dsscomp_gralloc_t *gsync;
219 struct dss2_rect_t win = { .w = 0 };
221 /* reserve tiler areas if not already done so */
222 dsscomp_gralloc_init(cdev);
224 dump_total_comp_info(cdev, d, "queue");
225 for (i = 0; i < d->num_ovls; i++)
226 dump_ovl_info(cdev, d->ovls + i);
228 mutex_lock(&local_mtx);
230 mutex_lock(&mtx);
232 /* create sync object with 1 temporary ref */
233 gsync = kzalloc(sizeof(*gsync), GFP_KERNEL);
234 gsync->cb_arg = cb_arg;
235 gsync->cb_fn = cb_fn;
236 gsync->refs.counter = 1;
237 gsync->early_callback = early_callback;
238 INIT_LIST_HEAD(&gsync->slots);
239 list_add_tail(&gsync->q, &flip_queue);
240 if (debug & DEBUG_GRALLOC_PHASES)
241 dev_info(DEV(cdev), "[%p] queuing flip\n", gsync);
243 log_event(0, ms, gsync, "new in %pf (refs=1)",
244 (u32)dsscomp_gralloc_queue, 0);
246 /* ignore frames while we are blanked */
247 skip = blanked;
248 if (skip && (debug & DEBUG_PHASES))
249 dev_info(DEV(cdev), "[%p,%08x] ignored\n", gsync, d->sync_id);
251 /* mark blank frame by NULL tiler pa pointer */
252 if (!skip && pas == NULL)
253 blanked = true;
255 mutex_unlock(&mtx);
257 d->num_mgrs = min_t(u16, d->num_mgrs, ARRAY_SIZE(d->mgrs));
258 d->num_ovls = min_t(u16, d->num_ovls, ARRAY_SIZE(d->ovls));
260 memset(comp, 0, sizeof(comp));
261 memset(ovl_new_use_mask, 0, sizeof(ovl_new_use_mask));
263 if (skip || !dsscomp_is_any_device_active())
264 goto skip_comp;
266 d->mode = DSSCOMP_SETUP_DISPLAY;
268 /* mark managers we are using */
269 for (i = 0; i < d->num_mgrs; i++) {
270 /* verify display is valid & connected, ignore if not */
271 if (d->mgrs[i].ix >= cdev->num_displays)
272 continue;
273 dev = cdev->displays[d->mgrs[i].ix];
274 if (!dev) {
275 dev_warn(DEV(cdev), "failed to get display%d\n",
276 d->mgrs[i].ix);
277 continue;
278 }
279 mgr = dev->output->manager;
280 if (!mgr) {
281 dev_warn(DEV(cdev), "no manager for display%d\n",
282 d->mgrs[i].ix);
283 continue;
284 }
285 ch = mgr->id;
286 channels[i] = ch;
287 mgr_set_mask |= 1 << ch;
289 /* swap red & blue if requested */
290 if (d->mgrs[i].swap_rb)
291 swap_rb_in_mgr_info(d->mgrs + i);
292 }
294 /* create dsscomp objects for set managers (including active ones) */
295 for (ch = 0; ch < MAX_MANAGERS; ch++) {
296 if (!(mgr_set_mask & (1 << ch)))
297 continue;
299 mgr = cdev->mgrs[ch];
301 comp[ch] = dsscomp_new(mgr);
302 if (IS_ERR(comp[ch])) {
303 comp[ch] = NULL;
304 dev_warn(DEV(cdev), "failed to get composition on %s\n",
305 mgr->name);
306 continue;
307 }
309 comp[ch]->must_apply = true;
310 r = dsscomp_setup(comp[ch], d->mode, win);
311 if (r)
312 dev_err(DEV(cdev), "failed to setup comp (%d)\n", r);
313 }
315 /* configure manager data from gralloc composition */
316 for (i = 0; i < d->num_mgrs; i++) {
317 ch = channels[i];
318 r = dsscomp_set_mgr(comp[ch], d->mgrs + i);
319 if (r)
320 dev_err(DEV(cdev), "failed to set mgr%d (%d)\n", ch, r);
321 }
323 /* NOTE: none of the dsscomp sets should fail as composition is new */
324 for (i = 0; i < d->num_ovls; i++) {
325 struct dss2_ovl_info *oi = d->ovls + i;
326 u32 size;
327 int j;
328 for (j = 0; j < d->num_mgrs; j++)
329 if (d->mgrs[j].ix == oi->cfg.mgr_ix) {
330 /* swap red & blue if requested */
331 if (d->mgrs[j].swap_rb)
332 swap_rb_in_ovl_info(d->ovls + i);
333 break;
334 }
336 if (j == d->num_mgrs) {
337 dev_err(DEV(cdev), "invalid manager %d for ovl%d\n",
338 ch, oi->cfg.ix);
339 continue;
340 }
342 /* skip overlays on compositions we could not create */
343 ch = channels[j];
344 if (!comp[ch])
345 continue;
346 /* copy prior overlay to avoid mapping layers twice to 1D */
347 if (oi->addressing == OMAP_DSS_BUFADDR_OVL_IX) {
348 unsigned int j = oi->ba;
349 if (j >= i || !d->ovls[j].cfg.enabled) {
350 WARN(1, "Invalid clone layer (%u)", j);
351 goto skip_buffer;
352 }
354 oi->ba = d->ovls[j].ba;
355 oi->uv = d->ovls[j].uv;
356 goto skip_map1d;
357 } else if (oi->addressing == OMAP_DSS_BUFADDR_FB) {
358 /* get fb */
359 int fb_ix = (oi->ba >> 28);
360 int fb_uv_ix = (oi->uv >> 28);
361 struct fb_info *fbi = NULL, *fbi_uv = NULL;
362 size_t hs_size = oi->cfg.height * oi->cfg.stride;
363 if (fb_ix >= num_registered_fb ||
364 (oi->cfg.color_mode == OMAP_DSS_COLOR_NV12 &&
365 fb_uv_ix >= num_registered_fb)) {
366 WARN(1, "display has no framebuffer");
367 goto skip_buffer;
368 }
370 fbi_uv = registered_fb[fb_ix];
371 fbi = fbi_uv;
372 if (oi->cfg.color_mode == OMAP_DSS_COLOR_NV12)
373 fbi_uv = registered_fb[fb_uv_ix];
375 if (hs_size + oi->ba > fbi->fix.smem_len ||
376 (oi->cfg.color_mode == OMAP_DSS_COLOR_NV12 &&
377 (hs_size >> 1) + oi->uv > fbi_uv->fix.smem_len)) {
378 WARN(1, "image outside of framebuffer memory");
379 goto skip_buffer;
380 }
382 oi->ba += fbi->fix.smem_start;
383 oi->uv += fbi_uv->fix.smem_start;
384 goto skip_map1d;
385 } else if (oi->addressing != OMAP_DSS_BUFADDR_DIRECT) {
386 goto skip_buffer;
387 }
389 /* map non-TILER buffers to 1D */
391 /* skip 2D and disabled layers */
392 if (!pas[i] || !oi->cfg.enabled)
393 goto skip_map1d;
395 if (!slot) {
396 mutex_lock(&mtx);
397 /* separate comp for tv means presentation mode */
398 if (d->num_mgrs == 1 && d->mgrs[0].ix == 1)
399 presentation_mode = true;
400 else if (d->num_mgrs == 2 ||
401 cdev->mgrs[1]->output->device->state !=
402 OMAP_DSS_DISPLAY_ACTIVE)
403 presentation_mode = false;
405 slot = alloc_tiler_slot();
406 if (IS_ERR_OR_NULL(slot)) {
407 dev_warn(DEV(cdev), "could not obtain "
408 "tiler slot");
409 slot = NULL;
410 mutex_unlock(&mtx);
411 goto skip_buffer;
412 }
413 list_move(&slot->q, &gsync->slots);
414 mutex_unlock(&mtx);
415 }
417 size = oi->cfg.stride * oi->cfg.height;
418 if (oi->cfg.color_mode == OMAP_DSS_COLOR_NV12)
419 size += size >> 2;
420 size = DIV_ROUND_UP(size, PAGE_SIZE);
422 if (slot_used + size > slot->size) {
423 dev_err(DEV(cdev), "tiler slot not big enough for "
424 "frame %d + %d > %d", slot_used, size,
425 slot->size);
426 goto skip_buffer;
427 }
429 /* "map" into TILER 1D - will happen after loop */
430 oi->ba = slot->phys + (slot_used << PAGE_SHIFT) +
431 (oi->ba & ~PAGE_MASK);
432 if (oi->cfg.color_mode == OMAP_DSS_COLOR_NV12)
433 oi->uv = oi->ba + oi->cfg.stride * oi->cfg.height;
434 memcpy(slot->page_map + slot_used, pas[i]->mem,
435 sizeof(*slot->page_map) * size);
436 slot_used += size;
437 goto skip_map1d;
439 skip_buffer:
440 oi->cfg.enabled = false;
441 skip_map1d:
443 if (oi->cfg.enabled)
444 ovl_new_use_mask[ch] |= 1 << oi->cfg.ix;
446 r = dsscomp_set_ovl(comp[ch], oi);
447 if (r)
448 dev_err(DEV(cdev), "failed to set ovl%d (%d)\n",
449 oi->cfg.ix, r);
450 else
451 ovl_set_mask |= 1 << oi->cfg.ix;
452 }
454 if (slot && slot_used) {
455 r = tiler_pin_phys(slot->block_handle, slot->page_map,
456 slot_used);
457 if (r)
458 dev_err(DEV(cdev), "failed to pin %d pages into"
459 " %d-pg slots (%d)\n", slot_used,
460 tiler1d_slot_size(cdev) >> PAGE_SHIFT, r);
461 }
463 for (ch = 0; ch < MAX_MANAGERS; ch++) {
464 /* disable all overlays not specifically set from prior frame */
465 u32 mask = ovl_use_mask[ch] & ~ovl_set_mask;
467 if (!comp[ch])
468 continue;
470 while (mask) {
471 struct dss2_ovl_info oi = {
472 .cfg.zonly = true,
473 .cfg.enabled = false,
474 .cfg.ix = fls(mask) - 1,
475 };
476 dsscomp_set_ovl(comp[ch], &oi);
477 mask &= ~(1 << oi.cfg.ix);
478 }
480 /* associate dsscomp objects with this gralloc composition */
481 comp[ch]->extra_cb = dsscomp_gralloc_cb;
482 comp[ch]->extra_cb_data = gsync;
483 atomic_inc(&gsync->refs);
484 log_event(0, ms, gsync, "++refs=%d for [%p]",
485 atomic_read(&gsync->refs), (u32) comp[ch]);
487 r = dsscomp_delayed_apply(comp[ch]);
488 if (r)
489 dev_err(DEV(cdev), "failed to apply comp (%d)\n", r);
490 else
491 ovl_use_mask[ch] = ovl_new_use_mask[ch];
492 }
493 skip_comp:
494 /* release sync object ref - this completes unapplied compositions */
495 dsscomp_gralloc_cb(gsync, DSS_COMPLETION_RELEASED);
497 mutex_unlock(&local_mtx);
499 return r;
500 }
501 EXPORT_SYMBOL(dsscomp_gralloc_queue);
/*
 * Guard fixed: this section was under "#ifdef CONFIG_EARLYSUSPEND" while the
 * include (top of file), the register_early_suspend() call in
 * dsscomp_gralloc_init() and the unregister in dsscomp_gralloc_exit() are all
 * guarded by CONFIG_HAS_EARLYSUSPEND.  The mismatch left early_suspend_info
 * undefined and broke the build whenever early suspend was enabled.
 */
#ifdef CONFIG_HAS_EARLYSUSPEND
/* set by the blanking composition's callback; waited on below */
static int blank_complete;
static DECLARE_WAIT_QUEUE_HEAD(early_suspend_wq);

/* completion callback for the blanking flip: flag done and wake waiter */
static void dsscomp_early_suspend_cb(void *data, int status)
{
	blank_complete = true;
	wake_up(&early_suspend_wq);
}

/* Blank all active displays via a zero-overlay gralloc composition and wait
 * (bounded) for it to be displayed. */
static void dsscomp_early_suspend(struct early_suspend *h)
{
	struct dsscomp_setup_dispc_data d = {
		.num_mgrs = 0,
	};

	int err, mgr_ix;

	pr_info("DSSCOMP: %s\n", __func__);

	/*dsscomp_gralloc_queue() expects all blanking mgrs set up in comp */
	for (mgr_ix = 0 ; mgr_ix < cdev->num_mgrs ; mgr_ix++) {
		struct omap_dss_device *dssdev = cdev->mgrs[mgr_ix]->device;
		if (dssdev && dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
			d.num_mgrs++;
			d.mgrs[mgr_ix].ix = mgr_ix;
			d.mgrs[mgr_ix].alpha_blending = true;
		}
	}

	/* use gralloc queue as we need to blank all screens */
	blank_complete = false;
	dsscomp_gralloc_queue(&d, NULL, false, dsscomp_early_suspend_cb, NULL);

	/* wait until composition is displayed */
	err = wait_event_timeout(early_suspend_wq, blank_complete,
				 msecs_to_jiffies(500));

	if (err == 0)
		pr_warn("DSSCOMP: timeout blanking screen\n");
	else
		pr_info("DSSCOMP: blanked screen\n");
}

/* Resume path: just unblock frame queuing again. */
static void dsscomp_late_resume(struct early_suspend *h)
{
	pr_info("DSSCOMP: %s\n", __func__);
	blanked = false;
}

static struct early_suspend early_suspend_info = {
	.suspend = dsscomp_early_suspend,
	.resume = dsscomp_late_resume,
	.level = EARLY_SUSPEND_LEVEL_DISABLE_FB,
};
#endif
/*
 * Dump active gralloc flips and their associated dsscomp objects to a
 * debugfs seq_file: per flip, the held tiler slots, the pending callback,
 * the managers involved and (if enabled) the per-composition state log.
 */
void dsscomp_dbg_gralloc(struct seq_file *s)
{
#ifdef CONFIG_DEBUG_FS
	struct dsscomp_gralloc_t *g;
	struct tiler1d_slot *t;
	struct dsscomp *c;
#ifdef CONFIG_DSSCOMP_DEBUG_LOG
	int i;
#endif

	mutex_lock(&dbg_mtx);
	seq_printf(s, "ACTIVE GRALLOC FLIPS\n\n");
	list_for_each_entry(g, &flip_queue, q) {
		char *sep = "";
		seq_printf(s, "  [%p] (refs=%d)\n"
			   "    slots=[", g, atomic_read(&g->refs));
		list_for_each_entry(t, &g->slots, q) {
			seq_printf(s, "%s%08x", sep, t->phys);
			sep = ", ";
		}
		seq_printf(s, "]\n    cmdcb=[%08x] ", (u32) g->cb_arg);
		if (g->cb_fn)
			seq_printf(s, "%pf\n\n  ", g->cb_fn);
		else
			seq_printf(s, "(called)\n\n  ");

		/* managers whose compositions belong to this flip */
		list_for_each_entry(c, &dbg_comps, dbg_q) {
			if (c->extra_cb && c->extra_cb_data == g)
				seq_printf(s, "|      %8s  ",
					   cdev->mgrs[c->ix]->name);
		}
		seq_printf(s, "\n  ");
		list_for_each_entry(c, &dbg_comps, dbg_q) {
			if (c->extra_cb && c->extra_cb_data == g)
				seq_printf(s, "| [%08x] %7s ", (u32) c,
					   log_state_str(c->state));
		}
#ifdef CONFIG_DSSCOMP_DEBUG_LOG
		/* print the state-transition log, one row per log index,
		 * one column per composition of this flip */
		for (i = 0; i < ARRAY_SIZE(c->dbg_log); i++) {
			int go = false;
			seq_printf(s, "\n  ");
			list_for_each_entry(c, &dbg_comps, dbg_q) {
				if (!c->extra_cb || c->extra_cb_data != g)
					continue;
				if (i < c->dbg_used) {
					u32 dbg_t = c->dbg_log[i].t;
					u32 state = c->dbg_log[i].state;
					seq_printf(s, "| % 6d.%03d %7s ",
						   dbg_t / 1000,
						   dbg_t % 1000,
						   log_state_str(state));
					/* keep going while any column has
					 * more entries after this row */
					go |= c->dbg_used > i + 1;
				} else {
					seq_printf(s, "%-21s", "|");
				}
			}
			if (!go)
				break;
		}
#endif
		seq_printf(s, "\n\n");
	}
	seq_printf(s, "\n");
	mutex_unlock(&dbg_mtx);
#endif
}
/*
 * One-time initialization: save the device pointer, register the early
 * suspend handler, and reserve NUM_TILER1D_SLOTS tiler1d containers for
 * mapping non-TILER buffers.  Safe to call repeatedly (and is called from
 * dsscomp_gralloc_queue); reinitialization is skipped once done.
 */
void dsscomp_gralloc_init(struct dsscomp_dev *cdev_)
{
	int i;

	if (!cdev_)
		return;

	/* save at least cdev pointer */
	if (!cdev) {
		cdev = cdev_;

#ifdef CONFIG_HAS_EARLYSUSPEND
		register_early_suspend(&early_suspend_info);
#endif
	}

	/* free_slots.next is NULL only before the list was ever set up */
	if (!free_slots.next) {
		INIT_LIST_HEAD(&free_slots);
		for (i = 0; i < MAX_NUM_TILER1D_SLOTS; i++)
			slots[i].id = -1;
		for (i = 0; i < NUM_TILER1D_SLOTS; i++) {
			struct tiler_block *block_handle =
				tiler_reserve_1d(tiler1d_slot_size(cdev_));
			if (IS_ERR_OR_NULL(block_handle)) {
				pr_err("could not allocate tiler block\n");
				break;
			}
			slots[i].block_handle = block_handle;
			slots[i].phys = tiler_ssptr(block_handle);
			slots[i].size = tiler1d_slot_size(cdev_) >> PAGE_SHIFT;
			slots[i].page_map = vmalloc(sizeof(*slots[i].page_map) *
						slots[i].size);
			if (!slots[i].page_map) {
				pr_err("could not allocate page_map\n");
				/* NOTE(review): the block was only reserved,
				 * never pinned — tiler_release() looks like
				 * the matching cleanup (as used in
				 * merge_slots/split_slots); tiler_unpin()
				 * here may leak the reservation. Confirm
				 * against the DMM tiler API. */
				tiler_unpin(block_handle);
				break;
			}
			slots[i].id = i;
			list_add(&slots[i].q, &free_slots);
			up(&free_slots_sem);
		}
		/* reset free_slots if no TILER memory could be reserved */
		if (!i)
			ZERO(free_slots);
	}
}
/*
 * Claim a tiler1d slot from the free pool, reshaping it for the current
 * mode: presentation mode wants half-size (split) slots, mirroring wants a
 * full-size (merged) slot.  Caller holds mtx (the free list is not
 * otherwise protected here); the semaphore bounds availability, so a
 * successful down_timeout() implies free_slots is non-empty.
 *
 * Returns the slot still on free_slots (caller list_move()s it), or
 * ERR_PTR(-ETIME) on semaphore timeout / an ERR_PTR from split/merge.
 */
static struct tiler1d_slot *alloc_tiler_slot(void)
{
	struct tiler1d_slot *slot, *ret;
	if (down_timeout(&free_slots_sem,
			msecs_to_jiffies(100))) {
		return ERR_PTR(-ETIME);
	}
	slot = list_first_entry(&free_slots, typeof(*slot), q);
	if (presentation_mode && needs_split(slot)) {
		ret = split_slots(slot);
		if (IS_ERR_OR_NULL(ret))
			goto err;
		dev_dbg(DEV(cdev),
			"slot split, size %u block 0x%x\n",
			slot->size, slot->phys);
	} else if (!presentation_mode && !needs_split(slot)) {
		ret = merge_slots(slot);
		if (IS_ERR_OR_NULL(ret))
			goto err;
		dev_dbg(DEV(cdev),
			"slot merged, size %u ptr 0x%x\n",
			slot->size, slot->phys);
	}

	return slot;
err:
	/* give back the count taken above so the pool stays consistent */
	up(&free_slots_sem);
	return ret;
}
704 static struct tiler1d_slot *merge_slots(struct tiler1d_slot *slot)
705 {
706 struct tiler1d_slot *slot2free;
707 u32 new_size = tiler1d_slot_size(cdev);
709 list_for_each_entry(slot2free, &free_slots, q)
710 if (!needs_split(slot2free) && slot2free != slot)
711 break;
713 if (&slot2free->q == &free_slots || slot2free->id == -1) {
714 dev_err(DEV(cdev), "%s: no free slot to megre\n", __func__);
715 return ERR_PTR(-EINVAL);
716 }
718 down(&free_slots_sem);
719 list_del(&slot2free->q);
721 dev_dbg(DEV(cdev), "%s: merging with %d id\n", __func__,
722 slot2free->id);
723 /*FIXME: potentially unsafe, as tiler1d space
724 * might be overtaken before we claim it again.
725 * Will be fixed later with tiler slot splitting API
726 */
727 tiler_release(slot->block_handle);
729 tiler_release(slot2free->block_handle);
731 slot->size = new_size >> PAGE_SHIFT;
732 slot->block_handle = tiler_reserve_1d(new_size);
734 if (IS_ERR_OR_NULL(slot->block_handle)) {
735 dev_err(DEV(cdev), "%s: failed to allocate slot\n", __func__);
736 return ERR_PTR(-ENOMEM);
737 }
739 slot->phys = tiler_ssptr(slot->block_handle);
741 vfree(slot2free->page_map);
742 slot2free->id = -1;
744 return slot;
745 }
/*
 * Split a full-size @slot into two half-size tiler1d slots for
 * presentation mode: release the full reservation, pick a retired slot
 * entry (id == -1), and reserve two half-size blocks — one back into
 * @slot, one into the new entry which is added to free_slots.
 * Called with mtx held from alloc_tiler_slot().
 *
 * Returns the newly created half-slot, ERR_PTR(-EINVAL) when no slot
 * entry is available, or ERR_PTR(-ENOMEM) on reservation failure.
 *
 * NOTE(review): the vmalloc() of slots[i].page_map is not checked, and on
 * a partial reserve failure the successfully-reserved block is not
 * released — both leak/crash paths to confirm and harden.
 */
static struct tiler1d_slot *split_slots(struct tiler1d_slot *slot)
{
	int i;
	u32 new_size = tiler1d_slot_size(cdev)/2;

	/*FIXME: potentially unsafe, as tiler1d space
	 * might be overtaken before we claim it again.
	 * Will be fixed later with tiler slot splitting API
	 */
	tiler_release(slot->block_handle);
	/* find a retired slots[] entry to hold the second half */
	for (i = 0; i < MAX_NUM_TILER1D_SLOTS; i++)
		if (slots[i].id == -1)
			break;

	if (i == MAX_NUM_TILER1D_SLOTS) {
		dev_err(DEV(cdev), "%s: all slots allocated\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	dev_dbg(DEV(cdev), "%s: splitting to %d id\n", __func__, i);
	slot->size = slots[i].size = new_size >> PAGE_SHIFT;
	slots[i].page_map = vmalloc(sizeof(*slots[i].page_map) *
			slots[i].size*2);
	slot->block_handle = tiler_reserve_1d(new_size);
	slots[i].block_handle = tiler_reserve_1d(new_size);

	if (IS_ERR_OR_NULL(slot->block_handle) ||
	    IS_ERR_OR_NULL(slots[i].block_handle)) {
		dev_err(DEV(cdev), "%s: failed to allocate slot\n", __func__);
		return ERR_PTR(-ENOMEM);
	}

	slot->phys = tiler_ssptr(slot->block_handle);
	slots[i].phys = tiler_ssptr(slots[i].block_handle);
	slots[i].id = i;
	/* publish the second half to the pool */
	list_add(&slots[i].q, &free_slots);
	up(&free_slots_sem);

	return &slots[i];
}
/*
 * Module teardown: unregister the early suspend handler and tear down all
 * slots still on the free list (slots held by in-flight compositions are
 * not visited).  The list head is reinitialized so a later init would
 * start clean.
 */
void dsscomp_gralloc_exit(void)
{
	struct tiler1d_slot *slot;

#ifdef CONFIG_HAS_EARLYSUSPEND
	unregister_early_suspend(&early_suspend_info);
#endif

	list_for_each_entry(slot, &free_slots, q) {
		vfree(slot->page_map);
		/* NOTE(review): free-list blocks are reserved but unpinned;
		 * tiler_release() may also be needed to drop the
		 * reservation — confirm against the DMM tiler API. */
		tiler_unpin(slot->block_handle);
	}
	INIT_LIST_HEAD(&free_slots);
}