18f009372a63f7af81dcc5c1d46bcf7f44eef87d
1 /*
2 * linux/drivers/video/omap2/dsscomp/device.c
3 *
4 * DSS Composition file device and ioctl support
5 *
6 * Copyright (C) 2011 Texas Instruments, Inc
7 * Author: Lajos Molnar <molnar@ti.com>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License version 2 as published by
11 * the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program. If not, see <http://www.gnu.org/licenses/>.
20 */
22 #define DEBUG
24 #include <linux/err.h>
25 #include <linux/platform_device.h>
26 #include <linux/slab.h>
27 #include <linux/device.h>
28 #include <linux/file.h>
29 #include <linux/mm.h>
30 #include <linux/fs.h>
31 #include <linux/anon_inodes.h>
32 #include <linux/list.h>
33 #include <linux/miscdevice.h>
34 #include <linux/uaccess.h>
35 #include <linux/sched.h>
36 #include <linux/syscalls.h>
38 #define MODULE_NAME_DSSCOMP "dsscomp"
40 #include <video/omapdss.h>
41 #include <video/dsscomp.h>
42 #include <plat/dsscomp.h>
43 #include "../../../drivers/gpu/drm/omapdrm/omap_dmm_tiler.h"
44 #include "dsscomp.h"
45 #include "../dss/dss_features.h"
46 #include "../dss/dss.h"
48 #include <linux/debugfs.h>
/* waiters blocked in dsscomp_wait(); woken by dsscomp_queue_cb() */
static DECLARE_WAIT_QUEUE_HEAD(waitq);
/* serializes reads/writes of dsscomp_sync_obj::state */
static DEFINE_MUTEX(wait_mtx);
/* true when overlay 0 has the ZORDER cap; set in fill_cache() */
bool alpha_only = true;

/* cached capabilities served by DSSCIOC_QUERY_PLATFORM */
static struct dsscomp_platform_info platform_info;
/*
 * Translate a user virtual address of the current process to a
 * physical address by walking the page tables directly.
 *
 * Returns 0 when the address is not mapped; 0 doubles as the error
 * value, so a buffer that legitimately sits at physical address 0
 * cannot be represented.
 *
 * NOTE(review): the walk casts the pgd straight to pud_t *, skipping
 * the pud level -- only valid on 2-level (ARM) page tables; confirm
 * for this platform.
 * NOTE(review): no mmap_sem is held and pte_unmap() is never called
 * for the pte_offset_map() below -- verify this is safe from every
 * caller's context.
 */
static u32 hwc_virt_to_phys(u32 arg)
{
	pmd_t *pmd;
	pte_t *ptep;

	pgd_t *pgd = pgd_offset(current->mm, arg);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	pmd = pmd_offset((pud_t *)pgd, arg);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	ptep = pte_offset_map(pmd, arg);
	if (ptep && pte_present(*ptep))
		/* combine the page frame from the pte with the in-page
		 * offset of the original address */
		return (PAGE_MASK & *ptep) | (~PAGE_MASK & arg);

	return 0;
}
77 /*
78 * ===========================================================================
79 * WAIT OPERATIONS
80 * ===========================================================================
81 */
83 static void sync_drop(struct dsscomp_sync_obj *sync)
84 {
85 if (sync && atomic_dec_and_test(&sync->refs)) {
86 if (debug & DEBUG_WAITS)
87 pr_info("free sync [%p]\n", sync);
89 kfree(sync);
90 }
91 }
93 static int sync_setup(const char *name, const struct file_operations *fops,
94 struct dsscomp_sync_obj *sync, int flags)
95 {
96 if (!sync)
97 return -ENOMEM;
99 sync->refs.counter = 1;
100 sync->fd = anon_inode_getfd(name, fops, sync, flags);
101 return sync->fd < 0 ? sync->fd : 0;
102 }
104 static int sync_finalize(struct dsscomp_sync_obj *sync, int r)
105 {
106 if (sync) {
107 if (r < 0)
108 /* delete sync object on failure */
109 sys_close(sync->fd);
110 else
111 /* return file descriptor on success */
112 r = sync->fd;
113 }
114 return r;
115 }
/* wait for programming or release of a composition */
/*
 * Block until @sync reaches at least @phase or @timeout (jiffies)
 * expires.  Returns 0 if the phase was already reached or is reached
 * in time, -ETIME on timeout, or a negative error when interrupted by
 * a signal.
 */
int dsscomp_wait(struct dsscomp_sync_obj *sync, enum dsscomp_wait_phase phase,
				int timeout)
{
	mutex_lock(&wait_mtx);
	if (debug & DEBUG_WAITS) {
		pr_info("wait %s on ", phase == DSSCOMP_WAIT_DISPLAYED ?
			"display" : phase == DSSCOMP_WAIT_PROGRAMMED ?
			"program" : "release");
		pr_info("[%p]\n", sync);
	}

	if (sync->state < phase) {
		/* drop the mutex before sleeping; dsscomp_queue_cb()
		 * takes it to advance sync->state and then wakes waitq */
		mutex_unlock(&wait_mtx);

		timeout = wait_event_interruptible_timeout(waitq,
			sync->state >= phase, timeout);
		if (debug & DEBUG_WAITS) {
			pr_info("wait over [%p]: ", sync);
			pr_info("%s", timeout < 0 ? "signal" : timeout > 0 ?
				"ok" : "timeout");
			pr_info("%d\n", timeout);
		}
		/* 0 means timed out; negative means signal-interrupted */
		if (timeout <= 0)
			return timeout ? : -ETIME;

		mutex_lock(&wait_mtx);
	}
	mutex_unlock(&wait_mtx);

	return 0;
}
EXPORT_SYMBOL(dsscomp_wait);
/*
 * Composition completion callback (installed as comp->extra_cb in
 * setup_mgr()).  Maps the DSS completion @status to a wait phase,
 * advances the sync object's state monotonically under wait_mtx, and
 * wakes waiters only when the state actually moved forward.
 *
 * On DSS_COMPLETION_RELEASED the callback's reference is dropped,
 * which may free @data; only the file-global waitq is touched after
 * that point.
 */
static void dsscomp_queue_cb(void *data, int status)
{
	struct dsscomp_sync_obj *sync = data;
	enum dsscomp_wait_phase phase =
		status == DSS_COMPLETION_PROGRAMMED ? DSSCOMP_WAIT_PROGRAMMED :
		status == DSS_COMPLETION_DISPLAYED ? DSSCOMP_WAIT_DISPLAYED :
		DSSCOMP_WAIT_RELEASED, old_phase;

	/* state only ever moves forward */
	mutex_lock(&wait_mtx);
	old_phase = sync->state;
	if (old_phase < phase)
		sync->state = phase;
	mutex_unlock(&wait_mtx);

	if (status & DSS_COMPLETION_RELEASED)
		sync_drop(sync);
	if (old_phase < phase)
		wake_up_interruptible_sync(&waitq);
}
171 static int sync_release(struct inode *inode, struct file *filp)
172 {
173 struct dsscomp_sync_obj *sync = filp->private_data;
174 sync_drop(sync);
175 return 0;
176 }
178 static long sync_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
179 {
180 int r = 0;
181 struct dsscomp_sync_obj *sync = filp->private_data;
182 void __user *ptr = (void __user *)arg;
184 switch (cmd) {
185 case DSSCIOC_WAIT:
186 {
187 struct dsscomp_wait_data wd;
188 r = copy_from_user(&wd, ptr, sizeof(wd)) ? :
189 dsscomp_wait(sync, wd.phase,
190 usecs_to_jiffies(wd.timeout_us));
191 break;
192 }
193 default:
194 r = -EINVAL;
195 }
196 return r;
197 }
/* fops for the anonymous sync-object file descriptors created by
 * sync_setup(); release drops the fd's reference */
static const struct file_operations sync_fops = {
	.owner = THIS_MODULE,
	.release = sync_release,
	.unlocked_ioctl = sync_ioctl,
};
205 static long setup_mgr(struct dsscomp_dev *cdev,
206 struct dsscomp_setup_mgr_data *d)
207 {
208 int i, r;
209 struct omap_dss_device *dev;
210 struct omap_overlay_manager *mgr;
211 struct dsscomp *comp;
212 struct dsscomp_sync_obj *sync = NULL;
214 dump_comp_info(cdev, d, "queue");
215 for (i = 0; i < d->num_ovls; i++)
216 dump_ovl_info(cdev, d->ovls + i);
218 /* verify display is valid and connected */
219 if (d->mgr.ix >= cdev->num_displays)
220 return -EINVAL;
221 dev = cdev->displays[d->mgr.ix];
222 if (!dev)
223 return -EINVAL;
224 mgr = dev->output->manager;
225 if (!mgr)
226 return -ENODEV;
228 comp = dsscomp_new(mgr);
229 if (IS_ERR(comp))
230 return PTR_ERR(comp);
232 /* swap red & blue if requested */
233 if (d->mgr.swap_rb) {
234 swap_rb_in_mgr_info(&d->mgr);
235 for (i = 0; i < d->num_ovls; i++)
236 swap_rb_in_ovl_info(d->ovls + i);
237 }
239 r = dsscomp_set_mgr(comp, &d->mgr);
241 for (i = 0; i < d->num_ovls; i++) {
242 struct dss2_ovl_info *oi = d->ovls + i;
243 u32 addr = (u32) oi->address;
245 if (oi->addressing != OMAP_DSS_BUFADDR_DIRECT)
246 return -EINVAL;
248 /* convert addresses to user space */
249 if (oi->cfg.color_mode == OMAP_DSS_COLOR_NV12) {
250 if (oi->uv_address)
251 oi->uv = hwc_virt_to_phys((u32) oi->uv_address);
252 else
253 oi->uv = hwc_virt_to_phys(addr +
254 oi->cfg.height * oi->cfg.stride);
255 }
256 oi->ba = hwc_virt_to_phys(addr);
258 r = r ? : dsscomp_set_ovl(comp, oi);
259 }
261 r = r ? : dsscomp_setup(comp, d->mode, d->win);
263 /* create sync object */
264 if (d->get_sync_obj) {
265 sync = kzalloc(sizeof(*sync), GFP_KERNEL);
266 r = sync_setup("dsscomp_sync", &sync_fops, sync, O_RDONLY);
267 if (sync && (debug & DEBUG_WAITS))
268 dev_info(DEV(cdev), "new sync [%p] on #%d\n", sync,
269 sync->fd);
270 if (r)
271 sync_drop(sync);
272 }
274 /* drop composition if failed to create */
275 if (r) {
276 dsscomp_drop(comp);
277 return r;
278 }
280 if (sync) {
281 sync->refs.counter++;
282 comp->extra_cb = dsscomp_queue_cb;
283 comp->extra_cb_data = sync;
284 }
285 if (d->mode & DSSCOMP_SETUP_APPLY)
286 r = dsscomp_delayed_apply(comp);
288 /* delete sync object if failed to apply or create file */
289 if (sync) {
290 r = sync_finalize(sync, r);
291 if (r < 0)
292 sync_drop(sync);
293 }
294 return r;
295 }
/*
 * Fill *dis with the state of display dis->ix: enabled state, timings,
 * physical size, which overlays it owns or could use, and its
 * manager's blending/transparency configuration.  Optionally copies up
 * to dis->modedb_len video modes into the trailing modedb array.
 *
 * Returns 0 on success, -EINVAL for a bad or missing display index.
 */
static long query_display(struct dsscomp_dev *cdev,
					struct dsscomp_display_info *dis)
{
	struct omap_dss_device *dev;
	struct omap_overlay_manager *mgr;
	struct omap_overlay_manager_info info;
	int i;

	/* get display */
	if (dis->ix >= cdev->num_displays)
		return -EINVAL;
	dev = cdev->displays[dis->ix];
	if (!dev)
		return -EINVAL;
	mgr = dev->output->manager;

	/* fill out display information */
	dis->channel = dev->channel;
	/* a suspended display counts as enabled if it will be
	 * re-activated on resume */
	dis->enabled = (dev->state == OMAP_DSS_DISPLAY_SUSPENDED) ?
		dev->activate_after_resume :
		(dev->state == OMAP_DSS_DISPLAY_ACTIVE);
	dis->overlays_available = 0;
	dis->overlays_owned = 0;
#if 0
	dis->s3d_info = dev->panel.s3d_info;
#endif
	dis->state = dev->state;
	dis->timings = dev->panel.timings;

	/* panel dimensions are in micrometers; report millimeters */
	dis->width_in_mm = DIV_ROUND_CLOSEST(dev->panel.width_in_um, 1000);
	dis->height_in_mm = DIV_ROUND_CLOSEST(dev->panel.height_in_um, 1000);

	/* find all overlays available for/owned by this display */
	for (i = 0; i < cdev->num_ovls && dis->enabled; i++) {
		if (cdev->ovls[i]->manager == mgr)
			dis->overlays_owned |= 1 << i;
		else if (!cdev->ovls[i]->is_enabled(cdev->ovls[i]))
			dis->overlays_available |= 1 << i;
	}
	/* owned overlays are implicitly available */
	dis->overlays_available |= dis->overlays_owned;

	/* fill out manager information */
	if (mgr) {
		mgr->get_manager_info(mgr, &info);
		dis->mgr.alpha_blending =
			alpha_only || info.partial_alpha_enabled;
		dis->mgr.default_color = info.default_color;
#if 0
		dis->mgr.interlaced = !strcmp(dev->name, "hdmi") &&
					is_hdmi_interlaced()
#else
		dis->mgr.interlaced = 0;
#endif
		dis->mgr.trans_enabled = info.trans_enabled;
		dis->mgr.trans_key = info.trans_key;
		dis->mgr.trans_key_type = info.trans_key_type;
	} else {
		/* display is disabled if it has no manager */
		memset(&dis->mgr, 0, sizeof(dis->mgr));
	}
	dis->mgr.ix = dis->ix;

	/* modedb_len == 0 from the caller skips the mode query */
	if (dev->driver && dis->modedb_len && dev->driver->get_modedb)
		dis->modedb_len = dev->driver->get_modedb(dev,
			(struct fb_videomode *)dis->modedb, dis->modedb_len);
	return 0;
}
365 static long check_ovl(struct dsscomp_dev *cdev,
366 struct dsscomp_check_ovl_data *chk)
367 {
368 u16 x_decim, y_decim;
369 bool five_taps;
370 struct omap_dss_device *dev;
371 struct omap_overlay_manager *mgr;
372 int i;
373 long allowed = 0;
374 bool checked_vid = false, scale_ok = false;
375 struct dss2_ovl_cfg *c = &chk->ovl.cfg;
376 enum tiler_fmt fmt;
378 /* get display */
379 if (chk->mgr.ix >= cdev->num_displays)
380 return -EINVAL;
382 dev = cdev->displays[chk->mgr.ix];
383 if (!dev)
384 return -EINVAL;
385 mgr = dev->output->manager;
387 /* we support alpha-enabled only if we have free zorder */
388 /* :FIXME: for now DSS has this as an ovl cap */
389 if (alpha_only && !chk->mgr.alpha_blending)
390 return -EINVAL;
392 /* normalize decimation */
393 if (!c->decim.min_x)
394 c->decim.min_x = 1;
395 if (!c->decim.min_y)
396 c->decim.min_y = 1;
397 if (!c->decim.max_x)
398 c->decim.max_x = 255;
399 if (!c->decim.max_y)
400 c->decim.max_y = 255;
402 /* check scaling support */
403 for (i = 0; i < cdev->num_ovls; i++) {
404 /* verify color format support */
405 if (c->color_mode & ~cdev->ovls[i]->supported_modes)
406 continue;
408 /* verify scaling on GFX and VID pipes */
409 if (!i || !checked_vid) {
410 struct omap_overlay_info info = {
411 .out_width = c->win.w,
412 .out_height = c->win.h,
413 .width = c->crop.w,
414 .height = c->crop.h,
415 .color_mode = c->color_mode,
416 .rotation = c->rotation,
417 .min_x_decim = c->decim.min_x,
418 .max_x_decim = c->decim.max_x,
419 .min_y_decim = c->decim.min_y,
420 .max_y_decim = c->decim.max_y,
421 };
422 u32 ba = (unsigned int) &chk->ovl.address;
424 ba = hwc_virt_to_phys(ba);
425 /* check for valid tiler container */
426 if (tiler_get_fmt(ba, &fmt) && fmt >= TILFMT_8BIT &&
427 fmt <= TILFMT_32BIT)
428 info.rotation_type = OMAP_DSS_ROT_TILER;
429 else
430 info.rotation_type = OMAP_DSS_ROT_DMA;
432 /* scale_ok = !dispc_scaling_decision(i, &info, mgr->id,
433 &x_decim, &y_decim, &five_taps);
434 */
435 /* update minimum decimation needs to support ovl */
436 if (scale_ok) {
437 if (x_decim > c->decim.min_x)
438 c->decim.min_x = x_decim;
439 if (y_decim > c->decim.min_y)
440 c->decim.min_y = y_decim;
441 }
442 }
443 checked_vid = i;
444 if (scale_ok)
445 allowed |= 1 << i;
446 }
448 return allowed;
449 }
451 static long setup_display(struct dsscomp_dev *cdev,
452 struct dsscomp_setup_display_data *dis)
453 {
454 struct omap_dss_device *dev;
456 /* get display */
457 if (dis->ix >= cdev->num_displays)
458 return -EINVAL;
459 dev = cdev->displays[dis->ix];
460 if (!dev)
461 return -EINVAL;
463 if (dev->driver->set_mode)
464 return dev->driver->set_mode(dev,
465 (struct fb_videomode *)&dis->mode);
466 else
467 return 0;
468 }
/*
 * Discover and cache the DSS overlays, managers and displays on @cdev,
 * registering a state notifier for each display found.
 *
 * Displays are matched by device name "display<N>"; <N> is parsed into
 * the index used for cdev->displays[].
 */
static void fill_cache(struct dsscomp_dev *cdev)
{
	unsigned long i;
	struct omap_dss_device *dssdev = NULL;

	cdev->num_ovls = min(omap_dss_get_num_overlays(), MAX_OVERLAYS);
	for (i = 0; i < cdev->num_ovls; i++)
		cdev->ovls[i] = omap_dss_get_overlay(i);

	cdev->num_mgrs = min(omap_dss_get_num_overlay_managers(), MAX_MANAGERS);
	for (i = 0; i < cdev->num_mgrs; i++)
		cdev->mgrs[i] = omap_dss_get_overlay_manager(i);

	for_each_dss_dev(dssdev) {
		const char *name = dev_name(&dssdev->dev);
		/* accept only devices named "display<N>", N < MAX_DISPLAYS */
		if (strncmp(name, "display", 7) ||
		    strict_strtoul(name + 7, 10, &i) ||
		    i >= MAX_DISPLAYS)
			continue;

		/* num_displays tracks the highest index seen + 1, so the
		 * array may contain NULL holes */
		if (cdev->num_displays <= i)
			cdev->num_displays = i + 1;

		cdev->displays[i] = dssdev;
		dev_dbg(DEV(cdev), "display%lu=%s\n", i, dssdev->driver_name);

		cdev->state_notifiers[i].notifier_call = dsscomp_state_notifier;
		blocking_notifier_chain_register(&dssdev->state_notifiers,
						cdev->state_notifiers + i);
	}
	dev_info(DEV(cdev), "found %d displays and %d overlays\n",
				cdev->num_displays, cdev->num_ovls);

	/*
	 * :FIXME: for now DSS has this as an ovl cap, even though it relates
	 * to the manager. For now we store this globally so we can access
	 * this.
	 */
	alpha_only = cdev->num_ovls &&
		(cdev->ovls[0]->caps & OMAP_DSS_OVL_CAP_ZORDER);
}
512 static void fill_platform_info(struct dsscomp_dev *cdev)
513 {
514 struct dsscomp_platform_info *p = &platform_info;
516 p->max_xdecim_1d = 16;
517 p->max_xdecim_2d = 16;
518 p->max_ydecim_1d = 16;
519 p->max_ydecim_2d = 2;
521 p->fclk = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
522 /*
523 * :TODO: for now overwrite with actual fclock as dss will not scale
524 * fclock based on composition
525 */
526 p->fclk = dispc_fclk_rate();
528 p->min_width = 2;
529 p->max_width = 2048;
530 p->max_height = 2048;
532 p->max_downscale = 4;
533 p->integer_scale_ratio_limit = 2048;
535 p->tiler1d_slot_size = tiler1d_slot_size(cdev);
537 p->fbmem_type = DSSCOMP_FBMEM_TILER2D;
538 }
540 static long comp_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
541 {
542 int r = 0;
543 struct miscdevice *dev = filp->private_data;
544 struct dsscomp_dev *cdev = container_of(dev, struct dsscomp_dev, dev);
545 void __user *ptr = (void __user *)arg;
547 union {
548 struct {
549 struct dsscomp_setup_mgr_data set;
550 struct dss2_ovl_info ovl[MAX_OVERLAYS];
551 } m;
552 struct dsscomp_setup_dispc_data dispc;
553 struct dsscomp_display_info dis;
554 struct dsscomp_check_ovl_data chk;
555 struct dsscomp_setup_display_data sdis;
556 } u;
558 dsscomp_gralloc_init(cdev);
560 switch (cmd) {
561 case DSSCIOC_SETUP_MGR:
562 {
563 r = copy_from_user(&u.m.set, ptr, sizeof(u.m.set)) ? :
564 u.m.set.num_ovls > ARRAY_SIZE(u.m.ovl) ? -EINVAL :
565 copy_from_user(&u.m.ovl,
566 (void __user *)arg + sizeof(u.m.set),
567 sizeof(*u.m.ovl) * u.m.set.num_ovls) ? :
568 setup_mgr(cdev, &u.m.set);
569 break;
570 }
571 case DSSCIOC_SETUP_DISPC:
572 {
573 r = copy_from_user(&u.dispc, ptr, sizeof(u.dispc)) ? :
574 dsscomp_gralloc_queue_ioctl(&u.dispc);
575 break;
576 }
577 case DSSCIOC_QUERY_DISPLAY:
578 {
579 struct dsscomp_display_info *dis = NULL;
580 r = copy_from_user(&u.dis, ptr, sizeof(u.dis));
581 if (!r)
582 dis = kzalloc(sizeof(*dis->modedb) * u.dis.modedb_len +
583 sizeof(*dis), GFP_KERNEL);
584 if (dis) {
585 *dis = u.dis;
586 r = query_display(cdev, dis) ? :
587 copy_to_user(ptr, dis, sizeof(*dis) +
588 sizeof(*dis->modedb) * dis->modedb_len);
589 kfree(dis);
590 } else {
591 r = r ? : -ENOMEM;
592 }
593 break;
594 }
595 case DSSCIOC_CHECK_OVL:
596 {
597 r = copy_from_user(&u.chk, ptr, sizeof(u.chk)) ? :
598 check_ovl(cdev, &u.chk);
599 break;
600 }
601 case DSSCIOC_SETUP_DISPLAY:
602 {
603 r = copy_from_user(&u.sdis, ptr, sizeof(u.sdis)) ? :
604 setup_display(cdev, &u.sdis);
605 break;
606 }
607 case DSSCIOC_QUERY_PLATFORM:
608 {
609 /* :TODO: for now refill platform info as it is dynamic */
610 r = copy_to_user(ptr, &platform_info, sizeof(platform_info));
611 break;
612 }
613 default:
614 r = -EINVAL;
615 }
616 return r;
617 }
/* must implement open for filp->private_data to be filled */
static int comp_open(struct inode *inode, struct file *filp)
{
	/* the misc core sets filp->private_data to the miscdevice;
	 * nothing else to do here */
	return 0;
}
/* fops for the /dev/dsscomp misc device */
static const struct file_operations comp_fops = {
	.owner = THIS_MODULE,
	.open = comp_open,
	.unlocked_ioctl = comp_ioctl,
};
631 static int dsscomp_debug_show(struct seq_file *s, void *unused)
632 {
633 void (*fn)(struct seq_file *s) = s->private;
634 fn(s);
635 return 0;
636 }
/* open: bind the single-show seq_file to the dump fn in i_private */
static int dsscomp_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, dsscomp_debug_show, inode->i_private);
}
/* fops shared by all dsscomp debugfs dump files */
static const struct file_operations dsscomp_debug_fops = {
	.open = dsscomp_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/*
 * Platform-device probe: allocate the dsscomp device, register the
 * /dev/dsscomp misc device, create debugfs dump files, cache the DSS
 * hardware objects and initialize the queue/gralloc subsystems.
 */
static int dsscomp_probe(struct platform_device *pdev)
{
	int ret;
	struct dsscomp_dev *cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev) {
		pr_err("dsscomp: failed to allocate device.\n");
		return -ENOMEM;
	}
	cdev->dev.minor = MISC_DYNAMIC_MINOR;
	cdev->dev.name = "dsscomp";
	/* world read/write: the compositor runs unprivileged */
	cdev->dev.mode = 0666;
	cdev->dev.fops = &comp_fops;

	ret = misc_register(&cdev->dev);
	if (ret) {
		pr_err("dsscomp: failed to register misc device.\n");
		kfree(cdev);
		return ret;
	}
	/* debugfs is optional; only warn on failure */
	cdev->dbgfs = debugfs_create_dir("dsscomp", NULL);
	if (IS_ERR_OR_NULL(cdev->dbgfs)) {
		dev_warn(DEV(cdev), "failed to create debug files.\n");
	} else {
		/* each file's data is the dump callback that
		 * dsscomp_debug_show() invokes */
		debugfs_create_file("comps", S_IRUGO,
			cdev->dbgfs, dsscomp_dbg_comps, &dsscomp_debug_fops);
		debugfs_create_file("gralloc", S_IRUGO,
			cdev->dbgfs, dsscomp_dbg_gralloc, &dsscomp_debug_fops);
#ifdef CONFIG_DSSCOMP_DEBUG_LOG
		debugfs_create_file("log", S_IRUGO,
			cdev->dbgfs, dsscomp_dbg_events, &dsscomp_debug_fops);
#endif
	}

	cdev->pdev = &pdev->dev;
	platform_set_drvdata(pdev, cdev);

	pr_info("dsscomp: initializing.\n");

	fill_cache(cdev);
	fill_platform_info(cdev);

	/* initialize queues */
	dsscomp_queue_init(cdev);
	dsscomp_gralloc_init(cdev);

	return 0;
}
/*
 * Platform-device remove: tear down in roughly reverse order of
 * probe -- unregister the device first so no new ioctls arrive, then
 * remove debugfs and shut down the queue/gralloc subsystems.
 */
static int dsscomp_remove(struct platform_device *pdev)
{
	struct dsscomp_dev *cdev = platform_get_drvdata(pdev);
	misc_deregister(&cdev->dev);
	debugfs_remove_recursive(cdev->dbgfs);
	dsscomp_queue_exit();
	dsscomp_gralloc_exit();
	kfree(cdev);

	return 0;
}
/* platform driver bound to the "dsscomp" platform device */
static struct platform_driver dsscomp_pdriver = {
	.probe = dsscomp_probe,
	.remove = dsscomp_remove,
	.driver = { .name = MODULE_NAME_DSSCOMP, .owner = THIS_MODULE }
};
/* module init: register the platform driver; probe runs when the
 * matching platform device exists */
static int __init dsscomp_init(void)
{
	return platform_driver_register(&dsscomp_pdriver);
}
/* module exit: unregister the driver (invokes remove on bound devices) */
static void __exit dsscomp_exit(void)
{
	platform_driver_unregister(&dsscomp_pdriver);
}
#define DUMP_CHUNK 256
static char dump_buf[64 * 1024];

/*
 * Dump all dsscomp debug state to the kernel log (intended for crash
 * diagnostics).  Renders into dump_buf through a stack-local seq_file
 * and emits it in DUMP_CHUNK-sized pieces, temporarily NUL-terminating
 * each chunk so printk sees a bounded string.
 */
static void dsscomp_kdump(void)
{
	struct seq_file s = {
		.buf = dump_buf,
		.size = sizeof(dump_buf) - 1,
	};
	int i;

#ifdef CONFIG_DSSCOMP_DEBUG_LOG
	dsscomp_dbg_events(&s);
#endif
	dsscomp_dbg_comps(&s);
	dsscomp_dbg_gralloc(&s);

	for (i = 0; i < s.count; i += DUMP_CHUNK) {
		if ((s.count - i) > DUMP_CHUNK) {
			/* save and restore the byte we overwrite */
			char c = s.buf[i + DUMP_CHUNK];
			s.buf[i + DUMP_CHUNK] = 0;
			pr_cont("%s", s.buf + i);
			s.buf[i + DUMP_CHUNK] = c;
		} else {
			/* last (short) chunk ends at s.count */
			s.buf[s.count] = 0;
			pr_cont("%s", s.buf + i);
		}
	}
}
EXPORT_SYMBOL(dsscomp_kdump);
/* module metadata and entry points */
MODULE_LICENSE("GPL v2");
module_init(dsscomp_init);
module_exit(dsscomp_exit);