/*
 * Retrieved via Gitweb @ Texas Instruments (git.TI.com/gitweb),
 * repository android-sdk/kernel-video.git,
 * path: drivers/video/omap2/dsscomp/gralloc.c
 * Commit subject: "OMAPDSS: DSSCOMP: do not clone from disabled layer"
 */
1 /*
2  * drivers/video/omap2/dsscomp/gralloc.c
3  *
4  * DSS Composition gralloc file
5  *
6  * Copyright (C) 2011 Texas Instruments, Inc
7  * Author: Lajos Molnar <molnar@ti.com>
8  *
9  * This program is free software; you can redistribute it and/or modify it
10  * under the terms of the GNU General Public License version 2 as published by
11  * the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  * more details.
17  *
18  * You should have received a copy of the GNU General Public License along with
19  * this program.  If not, see <http://www.gnu.org/licenses/>.
20  */
22 #include <linux/kernel.h>
23 #include <linux/slab.h>
24 #include <linux/sched.h>
25 #include <linux/vmalloc.h>
26 #include "../../../drivers/gpu/drm/omapdrm/omap_dmm_tiler.h"
27 #include <video/dsscomp.h>
28 #include <plat/dsscomp.h>
29 #include "dsscomp.h"
30 #include "tiler-utils.h"
32 #ifdef CONFIG_HAS_EARLYSUSPEND
33 #include <linux/earlysuspend.h>
34 #endif
/* set while displays are blanked (early suspend); queued frames are skipped */
35 static bool blanked;
/* set when composing only to the secondary (TV) manager; selects split slots */
36 static bool presentation_mode;
/* slots reserved at init time / hard maximum after presentation-mode splits */
38 #define NUM_TILER1D_SLOTS 2
39 #define MAX_NUM_TILER1D_SLOTS 4
/* a reserved TILER 1D region used to map non-TILER buffers for scanout */
41 static struct tiler1d_slot {
42         struct list_head q;             /* link on free_slots or a gsync's slot list */
43         struct tiler_block *block_handle;       /* tiler reservation backing this slot */
44         u32 phys;               /* system-space base address (tiler_ssptr) */
45         u32 size;               /* slot capacity in pages */
46         u32 *page_map;          /* vmalloc'd physical-page array to pin */
47         short id;               /* slot index, or -1 when unused */
48 } slots[MAX_NUM_TILER1D_SLOTS];
/* list of currently unused slots; valid once dsscomp_gralloc_init() ran */
49 static struct list_head free_slots;
/* dsscomp device saved by dsscomp_gralloc_init() */
50 static struct dsscomp_dev *cdev;
/* protects flip_queue, slot lists and the blanked/presentation_mode flags */
51 static DEFINE_MUTEX(mtx);
/* counts free slots; starts at 0 and is upped as slots are added/freed */
52 static struct semaphore free_slots_sem =
53                                 __SEMAPHORE_INITIALIZER(free_slots_sem, 0);
55 /* gralloc composition sync object */
56 struct dsscomp_gralloc_t {
57         void (*cb_fn)(void *, int);     /* completion callback; NULLed once called */
58         void *cb_arg;                   /* opaque argument for cb_fn */
59         struct list_head q;             /* link on flip_queue (or local done list) */
60         struct list_head slots;         /* tiler1d slots owned by this composition */
61         atomic_t refs;                  /* 1 temp ref + 1 per applied dsscomp */
62         bool early_callback;            /* call back on PROGRAMMED, not DISPLAYED */
63         bool programmed;                /* composition reached the hardware */
64 };
66 /* queued gralloc compositions */
67 static LIST_HEAD(flip_queue);
/* per-manager bitmask of overlays used by the previously applied frame */
69 static u32 ovl_use_mask[MAX_MANAGERS];
71 static inline bool needs_split(struct tiler1d_slot *slot)
72 {
73         return slot->size == tiler1d_slot_size(cdev) >> PAGE_SHIFT;
74 }
76 static struct tiler1d_slot *split_slots(struct tiler1d_slot *slot);
77 static struct tiler1d_slot *merge_slots(struct tiler1d_slot *slot);
78 static struct tiler1d_slot *alloc_tiler_slot(void);
80 static void unpin_tiler_blocks(struct list_head *slots)
81 {
82         struct tiler1d_slot *slot;
84         /* unpin any tiler memory */
85         list_for_each_entry(slot, slots, q) {
86                 tiler_unpin(slot->block_handle);
87                 up(&free_slots_sem);
88         }
90         /* free tiler slots */
91         list_splice_init(slots, &free_slots);
92 }
/*
 * Completion callback shared by every dsscomp object of a gralloc
 * composition (and called once directly with DSS_COMPLETION_RELEASED to
 * drop the temporary reference).  Drops refs, unpins tiler slots when the
 * last ref goes, and fires user callbacks for completed flips in queue
 * order.  User callbacks run with mtx released.
 */
94 static void dsscomp_gralloc_cb(void *data, int status)
95 {
96         struct dsscomp_gralloc_t *gsync = data, *gsync_;
97         bool early_cbs = true;
98         LIST_HEAD(done);
100         mutex_lock(&mtx);
/* remember that this composition reached the hardware (early-callback mode) */
101         if (gsync->early_callback && status == DSS_COMPLETION_PROGRAMMED)
102                 gsync->programmed = true;
104         if (status & DSS_COMPLETION_RELEASED) {
/* last reference gone: return this composition's tiler slots to free list */
105                 if (atomic_dec_and_test(&gsync->refs))
106                         unpin_tiler_blocks(&gsync->slots);
108                 log_event(0, 0, gsync, "--refs=%d on %s",
109                                 atomic_read(&gsync->refs),
110                                 (u32) log_status_str(status));
111         }
113         /* get completed list items in order, if any */
/* NOTE: gsync is reused as the iterator here; the argument is no longer used */
114         list_for_each_entry_safe(gsync, gsync_, &flip_queue, q) {
115                 if (gsync->cb_fn) {
/* early callbacks may only fire while every earlier flip also qualified */
116                         early_cbs &= gsync->early_callback && gsync->programmed;
117                         if (early_cbs) {
118                                 gsync->cb_fn(gsync->cb_arg, 1);
119                                 gsync->cb_fn = NULL;
120                         }
121                 }
/* stop at the first flip that is still referenced and not yet called back */
122                 if (gsync->refs.counter && gsync->cb_fn)
123                         break;
124                 if (gsync->refs.counter == 0)
125                         list_move_tail(&gsync->q, &done);
126         }
127         mutex_unlock(&mtx);
129         /* call back for completed composition with mutex unlocked */
130         list_for_each_entry_safe(gsync, gsync_, &done, q) {
131                 if (debug & DEBUG_GRALLOC_PHASES)
132                         dev_info(DEV(cdev), "[%p] completed flip\n", gsync);
134                 log_event(0, 0, gsync, "calling %pf [%p]", (u32)gsync->cb_fn,
135                                 (u32)gsync->cb_arg);
137                 if (gsync->cb_fn)
138                         gsync->cb_fn(gsync->cb_arg, 1);
139                 kfree(gsync);
140         }
143 /* This is just test code for now that does the setup + apply.
144    It still uses userspace virtual addresses, but maps non
145    TILER buffers into 1D */
/*
 * Convert the userspace virtual addresses of each overlay to physical,
 * gather tiler_pa_info for non-TILER buffers, and queue the composition.
 * NOTE(review): d->num_ovls indexes pas[MAX_OVERLAYS] before it is clamped
 * (clamping happens inside dsscomp_gralloc_queue()) — confirm callers
 * bound num_ovls to MAX_OVERLAYS.
 */
146 int dsscomp_gralloc_queue_ioctl(struct dsscomp_setup_dispc_data *d)
148         struct tiler_pa_info *pas[MAX_OVERLAYS];
149         s32 ret;
150         u32 i;
152         /* convert virtual addresses to physical and get tiler pa infos */
153         for (i = 0; i < d->num_ovls; i++) {
154                 struct dss2_ovl_info *oi = d->ovls + i;
155                 u32 addr = (u32) oi->address;
157                 pas[i] = NULL;
159                 /* only supporting DIRECT buffer types */
161                 /* assume virtual NV12 for now */
/* UV plane immediately follows the Y plane for NV12 buffers */
162                 if (oi->cfg.color_mode == OMAP_DSS_COLOR_NV12)
163                         oi->uv = tiler_virt2phys(addr +
164                                         oi->cfg.height * oi->cfg.stride);
165                 else
166                         oi->uv = 0;
167                 oi->ba = tiler_virt2phys(addr);
169                 /* map non-TILER buffers to 1D */
170                 if (oi->ba && !is_tiler_addr(oi->ba))
171                         pas[i] = user_block_to_pa(addr & PAGE_MASK,
172                                 PAGE_ALIGN(oi->cfg.height * oi->cfg.stride +
173                                         (addr & ~PAGE_MASK)) >> PAGE_SHIFT);
174         }
175         ret = dsscomp_gralloc_queue(d, pas, false, NULL, NULL);
/* pa infos are only needed while queueing; free them unconditionally */
176         for (i = 0; i < d->num_ovls; i++)
177                 tiler_pa_free(pas[i]);
178         return ret;
/* Return true if at least one registered display is currently active. */
181 static bool dsscomp_is_any_device_active(void)
183         struct omap_dss_device *dssdev;
184         u32 display_ix;
186         /* We have to also check for device active in case HWC logic has
187          * not yet started for boot logo.
188          */
189         for (display_ix = 0 ; display_ix < cdev->num_displays ; display_ix++) {
190                 dssdev = cdev->displays[display_ix];
191                 if (dssdev && dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
192                         return true;
193         }
194         return false;
/*
 * Queue a gralloc composition: create a sync object, build one dsscomp
 * per target manager, map non-TILER overlay buffers into a tiler1d slot,
 * disable stale overlays from the prior frame and delayed-apply each
 * composition.  @pas == NULL marks a blanking frame; @cb_fn (if any) is
 * called when the composition is displayed (or programmed, when
 * @early_callback is set).
 */
197 int dsscomp_gralloc_queue(struct dsscomp_setup_dispc_data *d,
198                         struct tiler_pa_info **pas,
199                         bool early_callback,
200                         void (*cb_fn)(void *, int), void *cb_arg)
202         u32 i;
203         int r = 0;
204         struct omap_dss_device *dev;
205         struct omap_overlay_manager *mgr;
206         static DEFINE_MUTEX(local_mtx);
207         struct dsscomp *comp[MAX_MANAGERS];
208         u32 ovl_new_use_mask[MAX_MANAGERS];
209         u32 mgr_set_mask = 0;
210         u32 ovl_set_mask = 0;
211         struct tiler1d_slot *slot = NULL;
212         u32 slot_used = 0;
213 #ifdef CONFIG_DEBUG_FS
214         u32 ms = ktime_to_ms(ktime_get());
215 #endif
216         u32 channels[MAX_MANAGERS], ch;
217         int skip;
218         struct dsscomp_gralloc_t *gsync;
219         struct dss2_rect_t win = { .w = 0 };
221         /* reserve tiler areas if not already done so */
222         dsscomp_gralloc_init(cdev);
224         dump_total_comp_info(cdev, d, "queue");
225         for (i = 0; i < d->num_ovls; i++)
226                 dump_ovl_info(cdev, d->ovls + i);
228         mutex_lock(&local_mtx);
230         mutex_lock(&mtx);
232         /* create sync object with 1 temporary ref */
/* NOTE(review): kzalloc result is not checked; gsync is dereferenced
 * immediately — a failed allocation would oops.  TODO: confirm/fix. */
233         gsync = kzalloc(sizeof(*gsync), GFP_KERNEL);
234         gsync->cb_arg = cb_arg;
235         gsync->cb_fn = cb_fn;
236         gsync->refs.counter = 1;
237         gsync->early_callback = early_callback;
238         INIT_LIST_HEAD(&gsync->slots);
239         list_add_tail(&gsync->q, &flip_queue);
240         if (debug & DEBUG_GRALLOC_PHASES)
241                 dev_info(DEV(cdev), "[%p] queuing flip\n", gsync);
243         log_event(0, ms, gsync, "new in %pf (refs=1)",
244                         (u32)dsscomp_gralloc_queue, 0);
246         /* ignore frames while we are blanked */
247         skip = blanked;
248         if (skip && (debug & DEBUG_PHASES))
249                 dev_info(DEV(cdev), "[%p,%08x] ignored\n", gsync, d->sync_id);
251         /* mark blank frame by NULL tiler pa pointer */
252         if (!skip && pas == NULL)
253                 blanked = true;
255         mutex_unlock(&mtx);
/* clamp counts to the fixed-size arrays inside d */
257         d->num_mgrs = min_t(u16, d->num_mgrs, ARRAY_SIZE(d->mgrs));
258         d->num_ovls = min_t(u16, d->num_ovls, ARRAY_SIZE(d->ovls));
260         memset(comp, 0, sizeof(comp));
261         memset(ovl_new_use_mask, 0, sizeof(ovl_new_use_mask));
263         if (skip || !dsscomp_is_any_device_active())
264                 goto skip_comp;
266         d->mode = DSSCOMP_SETUP_DISPLAY;
268         /* mark managers we are using */
269         for (i = 0; i < d->num_mgrs; i++) {
270                 /* verify display is valid & connected, ignore if not */
271                 if (d->mgrs[i].ix >= cdev->num_displays)
272                         continue;
273                 dev = cdev->displays[d->mgrs[i].ix];
274                 if (!dev) {
275                         dev_warn(DEV(cdev), "failed to get display%d\n",
276                                         d->mgrs[i].ix);
277                         continue;
278                 }
279                 mgr = dev->output->manager;
280                 if (!mgr) {
281                         dev_warn(DEV(cdev), "no manager for display%d\n",
282                                         d->mgrs[i].ix);
283                         continue;
284                 }
285                 ch = mgr->id;
286                 channels[i] = ch;
287                 mgr_set_mask |= 1 << ch;
289                 /* swap red & blue if requested */
290                 if (d->mgrs[i].swap_rb)
291                         swap_rb_in_mgr_info(d->mgrs + i);
292         }
294         /* create dsscomp objects for set managers (including active ones) */
295         for (ch = 0; ch < MAX_MANAGERS; ch++) {
296                 if (!(mgr_set_mask & (1 << ch)))
297                         continue;
299                 mgr = cdev->mgrs[ch];
301                 comp[ch] = dsscomp_new(mgr);
302                 if (IS_ERR(comp[ch])) {
303                         comp[ch] = NULL;
304                         dev_warn(DEV(cdev), "failed to get composition on %s\n",
305                                         mgr->name);
306                         continue;
307                 }
309                 comp[ch]->must_apply = true;
310                 r = dsscomp_setup(comp[ch], d->mode, win);
311                 if (r)
312                         dev_err(DEV(cdev), "failed to setup comp (%d)\n", r);
313         }
315         /* configure manager data from gralloc composition */
316         for (i = 0; i < d->num_mgrs; i++) {
317                 ch = channels[i];
318                 r = dsscomp_set_mgr(comp[ch], d->mgrs + i);
319                 if (r)
320                         dev_err(DEV(cdev), "failed to set mgr%d (%d)\n", ch, r);
321         }
323         /* NOTE: none of the dsscomp sets should fail as composition is new */
324         for (i = 0; i < d->num_ovls; i++) {
325                 struct dss2_ovl_info *oi = d->ovls + i;
326                 u32 size;
327                 int j;
328                 ch = oi->cfg.mgr_ix;
330                 /* skip overlays on compositions we could not create */
331                 if (!comp[ch])
332                         continue;
/* find the manager entry for this overlay to inherit its swap_rb setting */
334                 for (j = 0; j < d->num_mgrs; j++)
335                         if (d->mgrs[j].ix == ch) {
336                                 /* swap red & blue if requested */
337                                 if (d->mgrs[j].swap_rb)
338                                         swap_rb_in_ovl_info(d->ovls + i);
339                                 break;
340                         }
342                 if (j == d->num_mgrs) {
343                         dev_err(DEV(cdev), "invalid manager %d for ovl%d\n",
344                                                 ch, oi->cfg.ix);
345                         continue;
346                 }
348                 /* copy prior overlay to avoid mapping layers twice to 1D */
349                 if (oi->addressing == OMAP_DSS_BUFADDR_OVL_IX) {
/* ba holds the index of an earlier overlay to clone; it must be an
 * earlier, enabled layer */
350                         unsigned int j = oi->ba;
351                         if (j >= i || !d->ovls[j].cfg.enabled) {
352                                 WARN(1, "Invalid clone layer (%u)", j);
353                                 goto skip_buffer;
354                         }
356                         oi->ba = d->ovls[j].ba;
357                         oi->uv = d->ovls[j].uv;
358                         goto skip_map1d;
359                 } else if (oi->addressing == OMAP_DSS_BUFADDR_FB) {
360                         /* get fb */
/* top 4 bits of ba/uv select the framebuffer; the rest is an offset */
361                         int fb_ix = (oi->ba >> 28);
362                         int fb_uv_ix = (oi->uv >> 28);
363                         struct fb_info *fbi = NULL, *fbi_uv = NULL;
364                         size_t hs_size = oi->cfg.height * oi->cfg.stride;
365                         if (fb_ix >= num_registered_fb ||
366                             (oi->cfg.color_mode == OMAP_DSS_COLOR_NV12 &&
367                              fb_uv_ix >= num_registered_fb)) {
368                                 WARN(1, "display has no framebuffer");
369                                 goto skip_buffer;
370                         }
372                         fbi_uv = registered_fb[fb_ix];
373                         fbi = fbi_uv;
374                         if (oi->cfg.color_mode == OMAP_DSS_COLOR_NV12)
375                                 fbi_uv = registered_fb[fb_uv_ix];
377                         if (hs_size + oi->ba > fbi->fix.smem_len ||
378                             (oi->cfg.color_mode == OMAP_DSS_COLOR_NV12 &&
379                              (hs_size >> 1) + oi->uv > fbi_uv->fix.smem_len)) {
380                                 WARN(1, "image outside of framebuffer memory");
381                                 goto skip_buffer;
382                         }
384                         oi->ba += fbi->fix.smem_start;
385                         oi->uv += fbi_uv->fix.smem_start;
386                         goto skip_map1d;
387                 } else if (oi->addressing != OMAP_DSS_BUFADDR_DIRECT) {
388                         goto skip_buffer;
389                 }
391                 /* map non-TILER buffers to 1D */
393                 /* skip 2D and disabled layers */
394                 if (!pas[i] || !oi->cfg.enabled)
395                         goto skip_map1d;
/* lazily grab one tiler slot for the whole frame on first 1D mapping */
397                 if (!slot) {
398                         mutex_lock(&mtx);
399                         /* separate comp for tv means presentation mode */
400                         if (d->num_mgrs == 1 && d->mgrs[0].ix == 1)
401                                 presentation_mode = true;
402                         else if (d->num_mgrs == 2 ||
403                                 cdev->mgrs[1]->output->device->state !=
404                                         OMAP_DSS_DISPLAY_ACTIVE)
405                                 presentation_mode = false;
407                         slot = alloc_tiler_slot();
408                         if (IS_ERR_OR_NULL(slot)) {
409                                 dev_warn(DEV(cdev), "could not obtain "
410                                                         "tiler slot");
411                                 slot = NULL;
412                                 mutex_unlock(&mtx);
413                                 goto skip_buffer;
414                         }
/* slot now belongs to this composition until its last ref is dropped */
415                         list_move(&slot->q, &gsync->slots);
416                         mutex_unlock(&mtx);
417                 }
419                 size = oi->cfg.stride * oi->cfg.height;
/* NV12 needs an extra quarter for the subsampled UV plane */
420                 if (oi->cfg.color_mode == OMAP_DSS_COLOR_NV12)
421                         size += size >> 2;
422                 size = DIV_ROUND_UP(size, PAGE_SIZE);
424                 if (slot_used + size > slot->size) {
425                         dev_err(DEV(cdev), "tiler slot not big enough for "
426                                         "frame %d + %d > %d", slot_used, size,
427                                         slot->size);
428                         goto skip_buffer;
429                 }
431                 /* "map" into TILER 1D - will happen after loop */
432                 oi->ba = slot->phys + (slot_used << PAGE_SHIFT) +
433                         (oi->ba & ~PAGE_MASK);
434                 if (oi->cfg.color_mode == OMAP_DSS_COLOR_NV12)
435                         oi->uv = oi->ba + oi->cfg.stride * oi->cfg.height;
436                 memcpy(slot->page_map + slot_used, pas[i]->mem,
437                        sizeof(*slot->page_map) * size);
438                 slot_used += size;
439                 goto skip_map1d;
441 skip_buffer:
442                 oi->cfg.enabled = false;
443 skip_map1d:
445                 if (oi->cfg.enabled)
446                         ovl_new_use_mask[ch] |= 1 << oi->cfg.ix;
448                 r = dsscomp_set_ovl(comp[ch], oi);
449                 if (r)
450                         dev_err(DEV(cdev), "failed to set ovl%d (%d)\n",
451                                         oi->cfg.ix, r);
452                 else
453                         ovl_set_mask |= 1 << oi->cfg.ix;
454         }
/* pin all gathered pages of this frame into the tiler slot at once */
456         if (slot && slot_used) {
457                 r = tiler_pin_phys(slot->block_handle, slot->page_map,
458                                                 slot_used);
459                 if (r)
460                         dev_err(DEV(cdev), "failed to pin %d pages into"
461                         " %d-pg slots (%d)\n", slot_used,
462                         tiler1d_slot_size(cdev) >> PAGE_SHIFT, r);
463         }
465         for (ch = 0; ch < MAX_MANAGERS; ch++) {
466                 /* disable all overlays not specifically set from prior frame */
467                 u32 mask = ovl_use_mask[ch] & ~ovl_set_mask;
469                 if (!comp[ch])
470                         continue;
472                 while (mask) {
473                         struct dss2_ovl_info oi = {
474                                 .cfg.zonly = true,
475                                 .cfg.enabled = false,
476                                 .cfg.ix = fls(mask) - 1,
477                         };
478                         dsscomp_set_ovl(comp[ch], &oi);
479                         mask &= ~(1 << oi.cfg.ix);
480                 }
482                 /* associate dsscomp objects with this gralloc composition */
483                 comp[ch]->extra_cb = dsscomp_gralloc_cb;
484                 comp[ch]->extra_cb_data = gsync;
485                 atomic_inc(&gsync->refs);
486                 log_event(0, ms, gsync, "++refs=%d for [%p]",
487                                 atomic_read(&gsync->refs), (u32) comp[ch]);
489                 r = dsscomp_delayed_apply(comp[ch]);
490                 if (r)
491                         dev_err(DEV(cdev), "failed to apply comp (%d)\n", r);
492                 else
493                         ovl_use_mask[ch] = ovl_new_use_mask[ch];
494         }
495 skip_comp:
496         /* release sync object ref - this completes unapplied compositions */
497         dsscomp_gralloc_cb(gsync, DSS_COMPLETION_RELEASED);
499         mutex_unlock(&local_mtx);
501         return r;
503 EXPORT_SYMBOL(dsscomp_gralloc_queue);
/*
 * Guard fixed from CONFIG_EARLYSUSPEND (never defined) to
 * CONFIG_HAS_EARLYSUSPEND: the rest of this file (the earlysuspend.h
 * include, register_early_suspend() in dsscomp_gralloc_init() and
 * unregister_early_suspend() in dsscomp_gralloc_exit()) is guarded by
 * CONFIG_HAS_EARLYSUSPEND, so early_suspend_info must be built under the
 * same symbol or the file fails to link.
 */
#ifdef CONFIG_HAS_EARLYSUSPEND
/* set by the completion callback once the blank frame reached the display */
static int blank_complete;
static DECLARE_WAIT_QUEUE_HEAD(early_suspend_wq);

/* completion callback for the blanking composition queued below */
static void dsscomp_early_suspend_cb(void *data, int status)
{
        blank_complete = true;
        wake_up(&early_suspend_wq);
}

/*
 * Early-suspend handler: queue one blanking composition covering every
 * active manager, then wait (up to 500 ms) until it has been displayed.
 * dsscomp_gralloc_queue() sets the file-scope 'blanked' flag because the
 * composition is queued with pas == NULL.
 */
static void dsscomp_early_suspend(struct early_suspend *h)
{
        struct dsscomp_setup_dispc_data d = {
                .num_mgrs = 0,
        };
        int err, mgr_ix;

        pr_info("DSSCOMP: %s\n", __func__);

        /* dsscomp_gralloc_queue() expects all blanking mgrs set up in comp */
        for (mgr_ix = 0; mgr_ix < cdev->num_mgrs; mgr_ix++) {
                struct omap_dss_device *dssdev = cdev->mgrs[mgr_ix]->device;
                if (dssdev && dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
                        /* pack active managers contiguously: the queue code
                         * reads entries 0..num_mgrs-1, so record this one at
                         * index num_mgrs (the old code wrote index mgr_ix,
                         * leaving gaps when an earlier display was inactive) */
                        d.mgrs[d.num_mgrs].ix = mgr_ix;
                        d.num_mgrs++;
                }
        }

        /* use gralloc queue as we need to blank all screens */
        blank_complete = false;
        dsscomp_gralloc_queue(&d, NULL, false, dsscomp_early_suspend_cb, NULL);

        /* wait until composition is displayed */
        err = wait_event_timeout(early_suspend_wq, blank_complete,
                                 msecs_to_jiffies(500));
        if (err == 0)
                pr_warn("DSSCOMP: timeout blanking screen\n");
        else
                pr_info("DSSCOMP: blanked screen\n");
}

/* Late-resume handler: allow queued frames to be composed again. */
static void dsscomp_late_resume(struct early_suspend *h)
{
        pr_info("DSSCOMP: %s\n", __func__);
        blanked = false;
}

static struct early_suspend early_suspend_info = {
        .suspend = dsscomp_early_suspend,
        .resume = dsscomp_late_resume,
        .level = EARLY_SUSPEND_LEVEL_DISABLE_FB,
};
#endif
/*
 * Dump all active gralloc flips (and, when the debug log is enabled, the
 * per-composition state history) to a debugfs seq_file.  No-op unless
 * CONFIG_DEBUG_FS is set.
 */
560 void dsscomp_dbg_gralloc(struct seq_file *s)
562 #ifdef CONFIG_DEBUG_FS
563         struct dsscomp_gralloc_t *g;
564         struct tiler1d_slot *t;
565         struct dsscomp *c;
566 #ifdef CONFIG_DSSCOMP_DEBUG_LOG
567                 int i;
568 #endif
570         mutex_lock(&dbg_mtx);
571         seq_printf(s, "ACTIVE GRALLOC FLIPS\n\n");
572         list_for_each_entry(g, &flip_queue, q) {
573                 char *sep = "";
574                 seq_printf(s, "  [%p] (refs=%d)\n"
575                            "    slots=[", g, atomic_read(&g->refs));
/* list the physical base addresses of tiler slots owned by this flip */
576                 list_for_each_entry(t, &g->slots, q) {
577                         seq_printf(s, "%s%08x", sep, t->phys);
578                         sep = ", ";
579                 }
580                 seq_printf(s, "]\n    cmdcb=[%08x] ", (u32) g->cb_arg);
581                 if (g->cb_fn)
582                         seq_printf(s, "%pf\n\n  ", g->cb_fn);
583                 else
584                         seq_printf(s, "(called)\n\n  ");
/* header row: one column per dsscomp tied to this gralloc flip */
586                 list_for_each_entry(c, &dbg_comps, dbg_q) {
587                         if (c->extra_cb && c->extra_cb_data == g)
588                                 seq_printf(s, "|      %8s      ",
589                                                 cdev->mgrs[c->ix]->name);
590                 }
591                 seq_printf(s, "\n  ");
592                 list_for_each_entry(c, &dbg_comps, dbg_q) {
593                         if (c->extra_cb && c->extra_cb_data == g)
594                                 seq_printf(s, "| [%08x] %7s ", (u32) c,
595                                            log_state_str(c->state));
596                 }
597 #ifdef CONFIG_DSSCOMP_DEBUG_LOG
/* print the state log row by row; stop when no composition has more */
598                 for (i = 0; i < ARRAY_SIZE(c->dbg_log); i++) {
599                         int go = false;
600                         seq_printf(s, "\n  ");
601                         list_for_each_entry(c, &dbg_comps, dbg_q) {
602                                 if (!c->extra_cb || c->extra_cb_data != g)
603                                         continue;
604                                 if (i < c->dbg_used) {
605                                         u32 dbg_t = c->dbg_log[i].t;
606                                         u32 state = c->dbg_log[i].state;
607                                         seq_printf(s, "| % 6d.%03d %7s ",
608                                                         dbg_t / 1000,
609                                                         dbg_t % 1000,
610                                                         log_state_str(state));
611                                         go |= c->dbg_used > i + 1;
612                                 } else {
613                                         seq_printf(s, "%-21s", "|");
614                                 }
615                         }
616                         if (!go)
617                                 break;
618                 }
619 #endif
620                 seq_printf(s, "\n\n");
621         }
622         seq_printf(s, "\n");
623         mutex_unlock(&dbg_mtx);
624 #endif
/*
 * One-time initialization: save the device pointer, register the
 * early-suspend handler and reserve NUM_TILER1D_SLOTS tiler1d slots.
 * Safe to call repeatedly (it is invoked from dsscomp_gralloc_queue()).
 */
627 void dsscomp_gralloc_init(struct dsscomp_dev *cdev_)
629         int i;
631         if (!cdev_)
632                 return;
634         /* save at least cdev pointer */
635         if (!cdev) {
636                 cdev = cdev_;
638 #ifdef CONFIG_HAS_EARLYSUSPEND
639                 register_early_suspend(&early_suspend_info);
640 #endif
641         }
/* free_slots.next == NULL means the list was never initialized */
643         if (!free_slots.next) {
644                 INIT_LIST_HEAD(&free_slots);
645                 for (i = 0; i < MAX_NUM_TILER1D_SLOTS; i++)
646                         slots[i].id = -1;
648                 for (i = 0; i < NUM_TILER1D_SLOTS; i++) {
649                         struct tiler_block *block_handle =
650                                 tiler_reserve_1d(tiler1d_slot_size(cdev_));
651                         if (IS_ERR_OR_NULL(block_handle)) {
652                                 pr_err("could not allocate tiler block\n");
653                                 break;
654                         }
655                         slots[i].block_handle = block_handle;
656                         slots[i].phys = tiler_ssptr(block_handle);
657                         slots[i].size =  tiler1d_slot_size(cdev_) >> PAGE_SHIFT;
658                         slots[i].page_map = vmalloc(sizeof(*slots[i].page_map) *
659                                                 slots[i].size);
660                         if (!slots[i].page_map) {
661                                 pr_err("could not allocate page_map\n");
/* NOTE(review): only tiler_unpin() is called here; the reservation from
 * tiler_reserve_1d() appears to leak without tiler_release() — confirm */
662                                 tiler_unpin(block_handle);
663                                 break;
664                         }
665                         slots[i].id = i;
666                         list_add(&slots[i].q, &free_slots);
667                         up(&free_slots_sem);
668                 }
669                 /* reset free_slots if no TILER memory could be reserved */
670                 if (!i)
671                         ZERO(free_slots);
672         }
/*
 * Take one slot off the free list (waiting up to 100 ms for one to become
 * available) and resize it for the current mode: split a full-size slot in
 * presentation mode, merge half-size slots otherwise.  Returns the slot to
 * use, or ERR_PTR on failure.  Caller holds mtx and moves the returned
 * slot onto its composition's list.
 */
675 static struct tiler1d_slot *alloc_tiler_slot(void)
677         struct tiler1d_slot *slot, *ret;
678         if (down_timeout(&free_slots_sem,
679                         msecs_to_jiffies(100))) {
680                 return ERR_PTR(-ETIME);
681         }
682         slot = list_first_entry(&free_slots, typeof(*slot), q);
683         if (presentation_mode && needs_split(slot)) {
684                 ret = split_slots(slot);
685                 if (IS_ERR_OR_NULL(ret))
686                         goto err;
687                 dev_dbg(DEV(cdev),
688                         "slot split, size %u block 0x%x\n",
689                         slot->size, slot->phys);
690         } else if (!presentation_mode && !needs_split(slot)) {
691                 ret = merge_slots(slot);
692                 if (IS_ERR_OR_NULL(ret))
693                         goto err;
694                 dev_dbg(DEV(cdev),
695                         "slot merged, size %u ptr 0x%x\n",
696                         slot->size, slot->phys);
697         }
699         return slot;
700 err:
/* give back the semaphore count we consumed; no slot was handed out */
701         up(&free_slots_sem);
702         return ret;
705 static struct tiler1d_slot *merge_slots(struct tiler1d_slot *slot)
707         struct tiler1d_slot *slot2free;
708         u32 new_size = tiler1d_slot_size(cdev);
710         list_for_each_entry(slot2free, &free_slots, q)
711                 if (!needs_split(slot2free) && slot2free != slot)
712                         break;
714         if (&slot2free->q == &free_slots || slot2free->id == -1) {
715                 dev_err(DEV(cdev), "%s: no free slot to megre\n", __func__);
716                 return ERR_PTR(-EINVAL);
717         }
719         down(&free_slots_sem);
720         list_del(&slot2free->q);
722         dev_dbg(DEV(cdev), "%s: merging with %d id\n", __func__,
723                                                 slot2free->id);
724         /*FIXME: potentially unsafe, as tiler1d space
725          * might be overtaken before we claim it again.
726          * Will be fixed later with tiler slot splitting API
727         */
728         tiler_unpin(slot->block_handle);
729         tiler_release(slot->block_handle);
731         tiler_unpin(slot2free->block_handle);
732         tiler_release(slot2free->block_handle);
734         slot->size = new_size >> PAGE_SHIFT;
735         slot->block_handle = tiler_reserve_1d(new_size);
737         if (IS_ERR_OR_NULL(slot->block_handle)) {
738                 dev_err(DEV(cdev), "%s: failed to allocate slot\n", __func__);
739                 return ERR_PTR(-ENOMEM);
740         }
742         slot->phys = tiler_ssptr(slot->block_handle);
744         vfree(slot2free->page_map);
745         slot2free->id = -1;
747         return slot;
/*
 * Split @slot (a full-size slot) into two half-size tiler1d reservations:
 * @slot keeps one half, a retired slots[] entry (id == -1) is revived for
 * the other half and put on the free list (with a semaphore count).
 * Returns the new slot or ERR_PTR on failure.
 */
750 static struct tiler1d_slot *split_slots(struct tiler1d_slot *slot)
752         int i;
753         u32 new_size = tiler1d_slot_size(cdev)/2;
755         /*FIXME: potentially unsafe, as tiler1d space
756          * might be overtaken before we claim it again.
757          * Will be fixed later with tiler slot splitting API
758         */
759         tiler_unpin(slot->block_handle);
760         tiler_release(slot->block_handle);
/* find an unused slots[] entry to hold the second half */
761         for (i = 0; i < MAX_NUM_TILER1D_SLOTS; i++)
762                 if (slots[i].id == -1)
763                         break;
765         if (i == MAX_NUM_TILER1D_SLOTS) {
766                 dev_err(DEV(cdev), "%s: all slots allocated\n", __func__);
767                 return ERR_PTR(-EINVAL);
768         }
770         dev_dbg(DEV(cdev), "%s: splitting to %d id\n", __func__, i);
771         slot->size = slots[i].size = new_size >> PAGE_SHIFT;
/* sized *2 so the map can serve a full slot again after a later merge;
 * NOTE(review): vmalloc result is not checked here — confirm/fix */
772         slots[i].page_map = vmalloc(sizeof(*slots[i].page_map) *
773                                 slots[i].size*2);
774         slot->block_handle = tiler_reserve_1d(new_size);
775         slots[i].block_handle = tiler_reserve_1d(new_size);
777         if (IS_ERR_OR_NULL(slot->block_handle) ||
778                 IS_ERR_OR_NULL(slots[i].block_handle)) {
779                 dev_err(DEV(cdev), "%s: failed to allocate slot\n", __func__);
780                 return ERR_PTR(-ENOMEM);
781         }
783         slot->phys = tiler_ssptr(slot->block_handle);
784         slots[i].phys = tiler_ssptr(slots[i].block_handle);
785         slots[i].id = i;
/* publish the second half as a free slot */
786         list_add(&slots[i].q, &free_slots);
787         up(&free_slots_sem);
789         return &slots[i];
/*
 * Teardown: unregister the early-suspend handler and free every slot
 * still on the free list (page maps and pinned tiler memory).
 * NOTE(review): slots owned by in-flight compositions are not visited,
 * and tiler_release() is not called — confirm intended.
 */
792 void dsscomp_gralloc_exit(void)
794         struct tiler1d_slot *slot;
796 #ifdef CONFIG_HAS_EARLYSUSPEND
797         unregister_early_suspend(&early_suspend_info);
798 #endif
800         list_for_each_entry(slot, &free_slots, q) {
801                 vfree(slot->page_map);
802                 tiler_unpin(slot->block_handle);
803         }
804         INIT_LIST_HEAD(&free_slots);