1 /*
2 * gcmain.c
3 *
4 * Copyright (C) 2010-2011 Vivante Corporation.
5 *
6 * This package is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
11 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
12 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
13 */
15 #include <linux/module.h>
16 #include <linux/platform_device.h>
17 #include <linux/uaccess.h>
18 #include <linux/pm_runtime.h>
19 #include <linux/delay.h>
20 #include <plat/omap_gcx.h>
21 #include <linux/opp.h>
22 #include <linux/io.h>
23 #include <plat/omap_hwmod.h>
24 #include <plat/omap-pm.h>
25 #include "gcmain.h"
27 #define GCZONE_NONE 0
28 #define GCZONE_ALL (~0U)
29 #define GCZONE_INIT (1 << 0)
30 #define GCZONE_CONTEXT (1 << 1)
31 #define GCZONE_POWER (1 << 2)
32 #define GCZONE_COMMIT (1 << 3)
33 #define GCZONE_MAPPING (1 << 4)
34 #define GCZONE_PROBE (1 << 5)
35 #define GCZONE_CALLBACK (1 << 6)
36 #define GCZONE_FREQSCALE (1 << 7)
38 GCDBG_FILTERDEF(core, GCZONE_NONE,
39 "init",
40 "context",
41 "power",
42 "commit",
43 "mapping",
44 "probe",
45 "callback",
46 "freqscale")
49 #if !defined(GC_ENABLE_SUSPEND)
50 #define GC_ENABLE_SUSPEND 1
51 #endif
53 #if !defined(CONFIG_HAS_EARLYSUSPEND)
54 #define CONFIG_HAS_EARLYSUSPEND 0
55 #endif
57 /* Driver private data. */
58 static struct gccorecontext g_context;
61 /*******************************************************************************
62 * Context management.
63 */
65 static enum gcerror find_context(struct gccorecontext *gccorecontext,
66 bool fromuser,
67 struct gcmmucontext **gcmmucontext)
68 {
69 enum gcerror gcerror = GCERR_NONE;
70 struct list_head *ctxhead;
71 struct gcmmucontext *temp = NULL;
72 pid_t pid;
74 GCENTER(GCZONE_CONTEXT);
76 /* Get current PID. */
77 pid = fromuser ? current->tgid : 0;
79 /* Search the list. */
80 GCDBG(GCZONE_CONTEXT, "scanning context records for pid %d.\n", pid);
82 /* Try to locate the record. */
83 list_for_each(ctxhead, &gccorecontext->mmuctxlist) {
84 temp = list_entry(ctxhead, struct gcmmucontext, link);
85 if (temp->pid == pid) {
86 /* Success. */
87 GCDBG(GCZONE_CONTEXT, "context is found @ 0x%08X\n",
88 (unsigned int) temp);
90 goto exit;
91 }
92 }
94 /* Get new record. */
95 if (list_empty(&gccorecontext->mmuctxvac)) {
96 GCDBG(GCZONE_CONTEXT, "not found, allocating.\n");
98 temp = kmalloc(sizeof(struct gcmmucontext), GFP_KERNEL);
99 if (temp == NULL) {
100 GCERR("out of memory.\n");
101 gcerror = GCERR_SETGRP(GCERR_OODM,
102 GCERR_IOCTL_CTX_ALLOC);
103 goto fail;
104 }
106 GCDBG(GCZONE_CONTEXT, "allocated @ 0x%08X\n",
107 (unsigned int) temp);
108 } else {
109 ctxhead = gccorecontext->mmuctxvac.next;
110 temp = list_entry(ctxhead, struct gcmmucontext, link);
111 list_del(ctxhead);
113 GCDBG(GCZONE_CONTEXT, "not found, reusing vacant @ 0x%08X\n",
114 (unsigned int) temp);
115 }
117 gcerror = gcmmu_create_context(gccorecontext, temp, pid);
118 if (gcerror != GCERR_NONE)
119 goto fail;
121 /* Add the context to the list. */
122 list_add(&temp->link, &gccorecontext->mmuctxlist);
124 exit:
125 *gcmmucontext = temp;
127 GCEXIT(GCZONE_CONTEXT);
128 return GCERR_NONE;
130 fail:
131 if (temp != NULL) {
132 gcmmu_destroy_context(gccorecontext, temp);
133 list_add(&temp->link, &gccorecontext->mmuctxvac);
134 }
136 GCEXITARG(GCZONE_CONTEXT, "gcerror = 0x%08X\n", gcerror);
137 return gcerror;
138 }
140 static void destroy_mmu_context(struct gccorecontext *gccorecontext)
141 {
142 struct list_head *head;
143 struct gcmmucontext *temp;
145 /* Free vacant entry list. */
146 while (!list_empty(&gccorecontext->mmuctxvac)) {
147 head = gccorecontext->mmuctxvac.next;
148 temp = list_entry(head, struct gcmmucontext, link);
149 list_del(head);
150 kfree(temp);
151 }
153 /* Free active contexts. */
154 while (!list_empty(&gccorecontext->mmuctxlist)) {
155 head = gccorecontext->mmuctxlist.next;
156 temp = list_entry(head, struct gcmmucontext, link);
157 gcmmu_destroy_context(gccorecontext, temp);
158 list_del(head);
159 kfree(temp);
160 }
161 }
/* Return the GPU platform device captured during probe; exported so
 * other kernel modules can reference the gccore device. */
struct device *gc_get_dev(void)
{
	return g_context.device;
}
EXPORT_SYMBOL(gc_get_dev);
170 /*******************************************************************************
171 ** Register access.
172 */
174 unsigned int gc_read_reg(unsigned int address)
175 {
176 return readl((unsigned char *) g_context.regbase + address);
177 }
179 void gc_write_reg(unsigned int address, unsigned int data)
180 {
181 writel(data, (unsigned char *) g_context.regbase + address);
182 }
185 /*******************************************************************************
186 * Power management.
187 */
/* Enable the GPU functional clock (if not already on) and recover from
 * a lost hardware context.  Called with powerlock held (see gcpwr_set). */
static void gcpwr_enable_clock(struct gccorecontext *gccorecontext)
{
	bool ctxlost;

	GCENTER(GCZONE_POWER);

	/* NOTE(review): get_context_loss_count() presumably returns a
	 * running count, not a boolean; assigning it to bool treats any
	 * nonzero count as "context lost" rather than comparing against
	 * the previous count — confirm this is intended. */
	ctxlost = gccorecontext->plat->get_context_loss_count(gccorecontext->device);

	if (!gccorecontext->clockenabled) {
		/* Enable the clock. */
		pm_runtime_get_sync(gccorecontext->device);

		/* Signal software not idle. */
		gc_write_reg(GC_GP_OUT0_Address, 0);

		/* Clock enabled. */
		gccorecontext->clockenabled = true;
	} else if (ctxlost) {
		GCDBG(GCZONE_POWER, "hardware context lost.\n");
		/* Re-assert "not idle" if the lost context left it set. */
		if (gc_read_reg(GC_GP_OUT0_Address)) {
			GCDBG(GCZONE_POWER, "reset idle register.\n");
			gc_write_reg(GC_GP_OUT0_Address, 0);
		}
	}

	GCDBG(GCZONE_POWER, "clock %s.\n",
	      gccorecontext->clockenabled ? "enabled" : "disabled");

	/* Reset the core if its context was lost or power state unknown. */
	if (ctxlost || (gccorecontext->gcpower == GCPWR_UNKNOWN))
		gcpwr_reset(gccorecontext);

	GCEXIT(GCZONE_POWER);
}
/* Gate the GPU clock if it is currently enabled.  Signals software idle
 * to the hardware before dropping the runtime PM reference.  Called
 * with powerlock held (see gcpwr_set). */
static void gcpwr_disable_clock(struct gccorecontext *gccorecontext)
{
	GCENTER(GCZONE_POWER);

	if (gccorecontext->clockenabled) {
		/* Flush cached debug state before power goes away. */
		gc_debug_poweroff_cache();

		/* Signal software idle. */
		gc_write_reg(GC_GP_OUT0_Address, 1);

		/* Disable the clock. */
		pm_runtime_put_sync(gccorecontext->device);

		/* Clock disabled. */
		gccorecontext->clockenabled = false;
	}

	GCDBG(GCZONE_POWER, "clock %s.\n",
	      gccorecontext->clockenabled ? "enabled" : "disabled");

	GCEXIT(GCZONE_POWER);
}
246 static void gcpwr_scale(struct gccorecontext *gccorecontext, int index)
247 {
248 int ret;
250 GCENTERARG(GCZONE_FREQSCALE, "index=%d\n", index);
252 if ((index < 0) || (index >= gccorecontext->opp_count)) {
253 GCERR("invalid index %d.\n", index);
254 goto exit;
255 }
257 if ((gccorecontext->plat == NULL) ||
258 (gccorecontext->plat->scale_dev == NULL)) {
259 GCERR("scale interface is not initialized.\n");
260 goto exit;
261 }
263 if (gccorecontext->cur_freq == gccorecontext->opp_freqs[index])
264 goto exit;
266 ret = gccorecontext->plat->scale_dev(gccorecontext->bb2ddevice,
267 gccorecontext->opp_freqs[index]);
268 if (ret != 0) {
269 GCERR("failed to scale the device.\n");
270 goto exit;
271 }
273 gccorecontext->cur_freq = gccorecontext->opp_freqs[index];
274 GCDBG(GCZONE_FREQSCALE, "frequency set to %dMHz\n",
275 gccorecontext->cur_freq / 1000 / 1000);
277 exit:
278 GCEXIT(GCZONE_FREQSCALE);
279 }
/* Program the clock-control pulse-skip count.  The hardware latches the
 * value on the falling edge of the pulseset bit, so two writes are
 * required: one with pulseset = 1 to load, one with pulseset = 0 to
 * lock.  The write order must not be changed. */
static void gcpwr_set_pulse_skipping(unsigned int pulsecount)
{
	union gcclockcontrol gcclockcontrol;

	GCENTER(GCZONE_POWER);

	/* Set the pulse skip value. */
	gcclockcontrol.raw = 0;
	gcclockcontrol.reg.pulsecount = pulsecount;

	/* Initiate loading. */
	gcclockcontrol.reg.pulseset = 1;
	GCDBG(GCZONE_POWER, "pulse skip = 0x%08X\n", gcclockcontrol.raw);
	gc_write_reg(GCREG_HI_CLOCK_CONTROL_Address, gcclockcontrol.raw);

	/* Lock the value. */
	gcclockcontrol.reg.pulseset = 0;
	GCDBG(GCZONE_POWER, "pulse skip = 0x%08X\n", gcclockcontrol.raw);
	gc_write_reg(GCREG_HI_CLOCK_CONTROL_Address, gcclockcontrol.raw);

	GCEXIT(GCZONE_POWER);
}
304 static void gcpwr_enable_pulse_skipping(struct gccorecontext *gccorecontext)
305 {
306 GCENTER(GCZONE_POWER);
308 if (!gccorecontext->clockenabled)
309 goto exit;
311 if (gccorecontext->pulseskipping != 1) {
312 /* Set the lowest frequency. */
313 gcpwr_scale(gccorecontext, 0);
315 /* Set 1 clock pulse for every 64 clocks. */
316 gcpwr_set_pulse_skipping(1);
318 /* Pulse skipping enabled. */
319 gccorecontext->pulseskipping = 1;
320 }
322 GCDBG(GCZONE_POWER, "pulse skipping %s.\n",
323 gccorecontext->pulseskipping ? "enabled" : "disabled");
325 exit:
326 GCEXIT(GCZONE_POWER);
327 }
329 static void gcpwr_disable_pulse_skipping(struct gccorecontext *gccorecontext)
330 {
331 GCENTER(GCZONE_POWER);
333 if (!gccorecontext->clockenabled)
334 goto exit;
336 if (gccorecontext->pulseskipping != 0) {
337 /* Set the maximum frequency. */
338 gcpwr_scale(gccorecontext, gccorecontext->opp_count - 1);
340 /* Set full speed. */
341 gcpwr_set_pulse_skipping(64);
343 /* Pulse skipping disabled. */
344 gccorecontext->pulseskipping = 0;
345 }
347 GCDBG(GCZONE_POWER, "pulse skipping %s.\n",
348 gccorecontext->pulseskipping ? "enabled" : "disabled");
350 exit:
351 GCEXIT(GCZONE_POWER);
352 }
/* Transition the GPU to the requested power state, under powerlock.
 *
 *   GCPWR_ON:  clock on, full speed (pulse skipping off).
 *   GCPWR_LOW: clock on, lowest OPP with pulse skipping.
 *   GCPWR_OFF: enter pulse skipping first, then gate the clock.
 *
 * Unsupported states are rejected without changing gcpower. */
void gcpwr_set(struct gccorecontext *gccorecontext, enum gcpower gcpower)
{
	GCENTER(GCZONE_POWER);

	GCLOCK(&gccorecontext->powerlock);

	/* No-op when already in the requested state. */
	if (gcpower != gccorecontext->gcpower) {
		switch (gcpower) {
		case GCPWR_ON:
			gcpwr_enable_clock(gccorecontext);
			gcpwr_disable_pulse_skipping(gccorecontext);
			break;

		case GCPWR_LOW:
			gcpwr_enable_clock(gccorecontext);
			gcpwr_enable_pulse_skipping(gccorecontext);
			break;

		case GCPWR_OFF:
			gcpwr_enable_pulse_skipping(gccorecontext);
			gcpwr_disable_clock(gccorecontext);
			break;

		default:
			GCERR("unsupported power mode %d.\n", gcpower);
			goto exit;
		}

		GCDBG(GCZONE_POWER, "power state %d --> %d\n",
		      gccorecontext->gcpower, gcpower);

		/* Set new power state. */
		gccorecontext->gcpower = gcpower;
	}

exit:
	GCUNLOCK(&gccorecontext->powerlock);

	GCEXIT(GCZONE_POWER);
}
/* Return the last power state recorded by gcpwr_set.  Reads the global
 * context without taking powerlock — a snapshot only. */
enum gcpower gcpwr_get(void)
{
	return g_context.gcpower;
}
/* Soft-reset the graphics core and wait until both the front end and
 * the 2D engine report idle.  Invoked from gcpwr_enable_clock when the
 * hardware context was lost or the power state is unknown.
 *
 * NOTE(review): the retry loop has no iteration limit — if the core
 * never reaches idle this spins (with 1 ms sleeps) forever. */
void gcpwr_reset(struct gccorecontext *gccorecontext)
{
	union gcclockcontrol gcclockcontrol;
	union gcidle gcidle;

	GCENTER(GCZONE_POWER);

	GCLOCK(&gccorecontext->resetlock);

	/* Read current clock control value. */
	gcclockcontrol.raw
		= gc_read_reg(GCREG_HI_CLOCK_CONTROL_Address);

	while (true) {
		/* Isolate the GPU. */
		gcclockcontrol.reg.isolate = 1;
		gc_write_reg(GCREG_HI_CLOCK_CONTROL_Address,
			     gcclockcontrol.raw);

		/* Set soft reset. */
		gcclockcontrol.reg.reset = 1;
		gc_write_reg(GCREG_HI_CLOCK_CONTROL_Address,
			     gcclockcontrol.raw);

		/* Wait for reset. */
		msleep(1);

		/* Reset soft reset bit. */
		gcclockcontrol.reg.reset = 0;
		gc_write_reg(GCREG_HI_CLOCK_CONTROL_Address,
			     gcclockcontrol.raw);

		/* Reset GPU isolation. */
		gcclockcontrol.reg.isolate = 0;
		gc_write_reg(GCREG_HI_CLOCK_CONTROL_Address,
			     gcclockcontrol.raw);

		/* Read idle register. */
		gcidle.raw = gc_read_reg(GCREG_HI_IDLE_Address);

		/* Try resetting again if FE not idle. */
		if (!gcidle.reg.fe) {
			GCERR("FE NOT IDLE\n");
			continue;
		}

		/* Read reset register. */
		gcclockcontrol.raw
			= gc_read_reg(GCREG_HI_CLOCK_CONTROL_Address);

		/* Try resetting again if 2D is not idle. */
		if (!gcclockcontrol.reg.idle2d) {
			GCERR("2D NOT IDLE\n");
			continue;
		}

		/* GPU is idle. */
		break;
	}

	/* Pulse skipping disabled (presumably the reset restores the
	 * default clock control state — TODO confirm against the TRM). */
	gccorecontext->pulseskipping = false;

	GCUNLOCK(&gccorecontext->resetlock);

	GCEXIT(GCZONE_POWER);
}
/* Measure the effective GPU clock speed in MHz by counting GPU cycles
 * over a 2-second window.  Returns 0 if the GPU is not fully powered.
 * NOTE: blocks the caller for ~2 seconds while holding powerlock. */
unsigned int gcpwr_get_speed(void)
{
	struct gccorecontext *gccorecontext = &g_context;
	static const int seccount = 2;
	unsigned int cyclecount;
	unsigned int speedmhz = 0;

	GCLOCK(&gccorecontext->powerlock);

	if (gccorecontext->gcpower == GCPWR_ON) {
		/* Reset cycle counter and sleep. */
		gc_write_reg(GC_TOTAL_CYCLES_Address, 0);
		msleep(seccount * 1000);

		/* Read the cycle counter and compute the speed. */
		cyclecount = gc_read_reg(GC_TOTAL_CYCLES_Address);
		speedmhz = cyclecount / 1000 / 1000 / seccount;
	}

	GCUNLOCK(&gccorecontext->powerlock);

	return speedmhz;
}
492 /*******************************************************************************
493 * Public API.
494 */
496 void gc_caps(struct gcicaps *gcicaps)
497 {
498 struct gccorecontext *gccorecontext = &g_context;
500 /* Copy capabilities. */
501 gcicaps->gcmodel = gccorecontext->gcmodel;
502 gcicaps->gcrevision = gccorecontext->gcrevision;
503 gcicaps->gcdate = gccorecontext->gcdate;
504 gcicaps->gctime = gccorecontext->gctime;
505 gcicaps->gcfeatures = gccorecontext->gcfeatures;
506 gcicaps->gcfeatures0 = gccorecontext->gcfeatures0;
507 gcicaps->gcfeatures1 = gccorecontext->gcfeatures1;
508 gcicaps->gcfeatures2 = gccorecontext->gcfeatures2;
509 gcicaps->gcfeatures3 = gccorecontext->gcfeatures3;
511 /* Success. */
512 gcicaps->gcerror = GCERR_NONE;
513 }
515 void gc_commit(struct gcicommit *gcicommit, bool fromuser)
516 {
517 struct gccorecontext *gccorecontext = &g_context;
518 struct gcmmucontext *gcmmucontext;
519 struct gcbuffer *gcbuffer;
520 unsigned int buffersize;
521 unsigned int *logical;
522 unsigned int address;
523 struct gcmopipesel *gcmopipesel;
524 struct gcschedunmap *gcschedunmap;
525 struct list_head *head;
527 GCENTER(GCZONE_COMMIT);
529 GCLOCK(&gccorecontext->mmucontextlock);
531 /* Validate pipe values. */
532 if ((gcicommit->entrypipe != GCPIPE_2D) &&
533 (gcicommit->entrypipe != GCPIPE_3D)) {
534 gcicommit->gcerror = GCERR_CMD_ENTRY_PIPE;
535 goto exit;
536 }
538 if ((gcicommit->exitpipe != GCPIPE_2D) &&
539 (gcicommit->exitpipe != GCPIPE_3D)) {
540 gcicommit->gcerror = GCERR_CMD_EXIT_PIPE;
541 goto exit;
542 }
544 /* Locate the client entry. */
545 gcicommit->gcerror = find_context(gccorecontext, fromuser,
546 &gcmmucontext);
547 if (gcicommit->gcerror != GCERR_NONE)
548 goto exit;
550 /* Set the master table. */
551 gcicommit->gcerror = gcmmu_set_master(gccorecontext, gcmmucontext);
552 if (gcicommit->gcerror != GCERR_NONE)
553 goto exit;
555 /* Set the correct graphics pipe. */
556 if (gccorecontext->gcpipe != gcicommit->entrypipe) {
557 static struct gcregpipeselect gcregpipeselect[] = {
558 /* GCPIPE_UNKNOWN */
559 { 0, 0 },
561 /* GCPIPE_2D */
562 { GCREG_PIPE_SELECT_PIPE_PIPE2D, 0 },
564 /* GCPIPE_2D */
565 { GCREG_PIPE_SELECT_PIPE_PIPE2D, 0 }
566 };
568 GCDBG(GCZONE_COMMIT, "allocating space for pipe switch.\n");
569 gcicommit->gcerror = gcqueue_alloc(gccorecontext, gcmmucontext,
570 sizeof(struct gcmopipesel),
571 (void **) &gcmopipesel, NULL);
572 if (gcicommit->gcerror != GCERR_NONE)
573 goto exit;
575 gcmopipesel->pipesel_ldst = gcmopipesel_pipesel_ldst;
576 gcmopipesel->pipesel.reg
577 = gcregpipeselect[gcicommit->entrypipe];
578 }
580 /* Update the current pipe. */
581 gccorecontext->gcpipe = gcicommit->exitpipe;
583 /* Go through all buffers one at a time. */
584 list_for_each(head, &gcicommit->buffer) {
585 gcbuffer = list_entry(head, struct gcbuffer, link);
586 GCDBG(GCZONE_COMMIT, "gcbuffer = 0x%08X\n",
587 (unsigned int) gcbuffer);
589 /* Flush MMU. */
590 gcmmu_flush(gccorecontext, gcmmucontext);
592 /* Compute the size of the command buffer. */
593 buffersize
594 = (unsigned char *) gcbuffer->tail
595 - (unsigned char *) gcbuffer->head;
597 GCDBG(GCZONE_COMMIT, "buffersize = %d\n", buffersize);
599 /* Reserve command buffer space. */
600 GCDBG(GCZONE_COMMIT, "allocating command buffer space.\n");
601 gcicommit->gcerror = gcqueue_alloc(gccorecontext, gcmmucontext,
602 buffersize,
603 (void **) &logical,
604 &address);
605 if (gcicommit->gcerror != GCERR_NONE)
606 goto exit;
608 if (fromuser) {
609 /* Copy command buffer. */
610 if (copy_from_user(logical, gcbuffer->head,
611 buffersize)) {
612 GCERR("failed to read data.\n");
613 gcicommit->gcerror = GCERR_USER_READ;
614 goto exit;
615 }
616 } else {
617 memcpy(logical, gcbuffer->head, buffersize);
618 }
620 /* Process fixups. */
621 gcicommit->gcerror = gcmmu_fixup(&gcbuffer->fixup, logical);
622 if (gcicommit->gcerror != GCERR_NONE)
623 goto exit;
624 }
626 /* Add the callback. */
627 if (gcicommit->callback != NULL) {
628 gcicommit->gcerror = gcqueue_callback(gccorecontext,
629 gcmmucontext,
630 gcicommit->callback,
631 gcicommit->callbackparam);
632 if (gcicommit->gcerror != GCERR_NONE)
633 goto exit;
634 }
636 /* Process unmappings. */
637 list_for_each(head, &gcicommit->unmap) {
638 gcschedunmap = list_entry(head, struct gcschedunmap, link);
639 gcicommit->gcerror = gcqueue_schedunmap(gccorecontext,
640 gcmmucontext,
641 gcschedunmap->handle);
642 if (gcicommit->gcerror != GCERR_NONE)
643 goto exit;
644 }
646 /* Execute the buffer. */
647 gcicommit->gcerror = gcqueue_execute(gccorecontext, false,
648 gcicommit->asynchronous);
650 exit:
651 GCUNLOCK(&gccorecontext->mmucontextlock);
653 GCEXITARG(GCZONE_COMMIT, "gc%s = 0x%08X\n",
654 (gcicommit->gcerror == GCERR_NONE) ? "result" : "error",
655 gcicommit->gcerror);
656 }
657 EXPORT_SYMBOL(gc_commit);
659 void gc_map(struct gcimap *gcimap, bool fromuser)
660 {
661 struct gccorecontext *gccorecontext = &g_context;
662 struct gcmmucontext *gcmmucontext;
663 struct gcmmuphysmem mem;
664 struct gcmmuarena *mapped = NULL;
666 GCENTER(GCZONE_MAPPING);
668 GCLOCK(&gccorecontext->mmucontextlock);
670 /* Locate the client entry. */
671 gcimap->gcerror = find_context(gccorecontext,
672 fromuser,
673 &gcmmucontext);
674 if (gcimap->gcerror != GCERR_NONE)
675 goto exit;
677 GCDBG(GCZONE_MAPPING, "map client buffer\n");
679 /* Initialize the mapping parameters. */
680 if (gcimap->pagearray == NULL) {
681 mem.base = ((u32) gcimap->buf.logical) & ~(PAGE_SIZE - 1);
682 mem.offset = ((u32) gcimap->buf.logical) & (PAGE_SIZE - 1);
683 mem.pages = NULL;
685 GCDBG(GCZONE_MAPPING, " logical = 0x%08X\n",
686 (unsigned int) gcimap->buf.logical);
687 } else {
688 mem.base = 0;
689 mem.offset = gcimap->buf.offset;
690 mem.pages = gcimap->pagearray;
692 GCDBG(GCZONE_MAPPING, " pagearray = 0x%08X\n",
693 (unsigned int) gcimap->pagearray);
694 }
696 GCDBG(GCZONE_MAPPING, " size = %d\n", gcimap->size);
698 mem.count = DIV_ROUND_UP(gcimap->size + mem.offset, PAGE_SIZE);
699 mem.pagesize = gcimap->pagesize ? gcimap->pagesize : PAGE_SIZE;
701 /* Map the buffer. */
702 gcimap->gcerror = gcmmu_map(gccorecontext, gcmmucontext, &mem, &mapped);
703 if (gcimap->gcerror != GCERR_NONE)
704 goto exit;
706 gcimap->handle = (unsigned int) mapped;
708 GCDBG(GCZONE_MAPPING, " mapped address = 0x%08X\n", mapped->address);
709 GCDBG(GCZONE_MAPPING, " handle = 0x%08X\n", (unsigned int) mapped);
711 exit:
712 GCUNLOCK(&gccorecontext->mmucontextlock);
714 GCEXITARG(GCZONE_MAPPING, "gc%s = 0x%08X\n",
715 (gcimap->gcerror == GCERR_NONE) ? "result" : "error",
716 gcimap->gcerror);
717 }
718 EXPORT_SYMBOL(gc_map);
720 void gc_unmap(struct gcimap *gcimap, bool fromuser)
721 {
722 struct gccorecontext *gccorecontext = &g_context;
723 struct gcmmucontext *gcmmucontext;
725 GCENTER(GCZONE_MAPPING);
727 GCLOCK(&gccorecontext->mmucontextlock);
729 /* Locate the client entry. */
730 gcimap->gcerror = find_context(gccorecontext,
731 fromuser,
732 &gcmmucontext);
733 if (gcimap->gcerror != GCERR_NONE)
734 goto exit;
736 GCDBG(GCZONE_MAPPING, "unmap client buffer\n");
737 GCDBG(GCZONE_MAPPING, " handle = 0x%08X\n", gcimap->handle);
739 /* Schedule unmapping. */
740 gcimap->gcerror = gcqueue_schedunmap(gccorecontext, gcmmucontext,
741 gcimap->handle);
742 if (gcimap->gcerror != GCERR_NONE)
743 goto exit;
745 /* Execute the buffer. */
746 gcimap->gcerror = gcqueue_execute(gccorecontext, false, false);
747 if (gcimap->gcerror != GCERR_NONE)
748 goto exit;
750 /* Invalidate the handle. */
751 gcimap->handle = ~0U;
753 exit:
754 GCUNLOCK(&gccorecontext->mmucontextlock);
756 GCEXITARG(GCZONE_MAPPING, "gc%s = 0x%08X\n",
757 (gcimap->gcerror == GCERR_NONE) ? "result" : "error",
758 gcimap->gcerror);
759 }
760 EXPORT_SYMBOL(gc_unmap);
762 void gc_callback(struct gcicallbackarm *gcicallbackarm, bool fromuser)
763 {
764 struct gccorecontext *gccorecontext = &g_context;
765 struct gcmmucontext *gcmmucontext;
767 GCENTER(GCZONE_CALLBACK);
769 GCLOCK(&gccorecontext->mmucontextlock);
771 /* Locate the client entry. */
772 gcicallbackarm->gcerror = find_context(gccorecontext, fromuser,
773 &gcmmucontext);
774 if (gcicallbackarm->gcerror != GCERR_NONE)
775 goto exit;
777 /* Schedule callback. */
778 gcicallbackarm->gcerror
779 = gcqueue_callback(gccorecontext,
780 gcmmucontext,
781 gcicallbackarm->callback,
782 gcicallbackarm->callbackparam);
783 if (gcicallbackarm->gcerror != GCERR_NONE)
784 goto exit;
786 exit:
787 GCUNLOCK(&gccorecontext->mmucontextlock);
789 GCEXITARG(GCZONE_CALLBACK, "gc%s = 0x%08X\n",
790 (gcicallbackarm->gcerror == GCERR_NONE) ? "result" : "error",
791 gcicallbackarm->gcerror);
792 }
793 EXPORT_SYMBOL(gc_callback);
795 void gc_release(void)
796 {
797 struct gccorecontext *gccorecontext = &g_context;
798 struct list_head *ctxhead;
799 struct gcmmucontext *temp = NULL;
800 pid_t pid;
802 GCENTER(GCZONE_CONTEXT);
804 GCLOCK(&gccorecontext->mmucontextlock);
806 pid = current->tgid;
807 GCDBG(GCZONE_CONTEXT, "scanning context records for pid %d.\n", pid);
809 list_for_each(ctxhead, &gccorecontext->mmuctxlist) {
810 temp = list_entry(ctxhead, struct gcmmucontext, link);
811 if (temp->pid == pid) {
812 GCDBG(GCZONE_CONTEXT, "context is found @ 0x%08X\n",
813 (unsigned int) temp);
815 gcmmu_destroy_context(gccorecontext, temp);
816 list_move(ctxhead, &gccorecontext->mmuctxvac);
817 break;
818 }
819 }
821 GCUNLOCK(&gccorecontext->mmucontextlock);
823 GCEXIT(GCZONE_CONTEXT);
824 }
825 EXPORT_SYMBOL(gc_release);
/* Build the table of supported OPP frequencies for the GPU device.  On
 * any failure the table is left truncated or empty — the driver still
 * loads, frequency scaling just becomes unavailable.  Always returns 0.
 * Finishes by dropping the core into the low-power state. */
static int gc_probe_opp(struct platform_device *pdev)
{
	int i;
	unsigned int size;
	unsigned long freq = 0;
	struct gccorecontext *gccorecontext = &g_context;

	/* Query supported OPPs.  The OPP table is walked under
	 * rcu_read_lock, hence the GFP_ATOMIC allocation below. */
	rcu_read_lock();

	gccorecontext->opp_count = opp_get_opp_count(&pdev->dev);
	if (gccorecontext->opp_count <= 0) {
		gccorecontext->opp_count = 0;
		goto done;
	}

	size = gccorecontext->opp_count * sizeof(unsigned long);
	gccorecontext->opp_freqs = kzalloc(size, GFP_ATOMIC);
	if (!gccorecontext->opp_freqs) {
		gccorecontext->opp_count = 0;
		goto done;
	}

	GCDBG(GCZONE_FREQSCALE, "frequency scaling table:\n");

	for (i = 0; i < gccorecontext->opp_count; i++) {
		struct opp *opp = opp_find_freq_ceil(&pdev->dev, &freq);
		if (IS_ERR_OR_NULL(opp)) {
			/* Keep only the entries found so far. */
			gccorecontext->opp_count = i;
			goto done;
		}

		/* Set freq, prepare to next: incrementing makes the next
		 * ceil search skip past the frequency just found. */
		gccorecontext->opp_freqs[i] = freq++;
		GCDBG(GCZONE_FREQSCALE, " [%d] 0x%08X\n",
		      i, gccorecontext->opp_freqs[i]);
	}

done:
	rcu_read_unlock();
	gcpwr_set(gccorecontext, GCPWR_LOW);
	return 0;
}
/* Platform device probe: capture device handles from the platform data,
 * enable runtime PM, build the OPP table, then power the core up just
 * long enough to read the GPU identity and feature registers. */
static int gc_probe(struct platform_device *pdev)
{
	struct gccorecontext *gccorecontext = &g_context;

	GCENTER(GCZONE_PROBE);

	gccorecontext->bb2ddevice = &pdev->dev;
	gccorecontext->plat = (struct omap_gcx_platform_data *)
			      pdev->dev.platform_data;
	gccorecontext->regbase = gccorecontext->plat->regbase;
	/* NOTE(review): pdev->id as the IRQ resource index is unusual
	 * (0 is the typical first resource) — confirm against the board
	 * file registering this device. */
	gccorecontext->irqline = platform_get_irq(pdev, pdev->id);
	gccorecontext->device = &pdev->dev;

	pm_runtime_enable(gccorecontext->device);
	/* Prime the context-loss counter; the value is discarded here. */
	gccorecontext->plat->get_context_loss_count(gccorecontext->device);

	gc_probe_opp(pdev);

	/* Hold a runtime PM reference while reading the ID registers. */
	pm_runtime_get_sync(gccorecontext->device);

	gccorecontext->gcmodel = gc_read_reg(GC_CHIP_ID_Address);
	gccorecontext->gcrevision = gc_read_reg(GC_CHIP_REV_Address);
	gccorecontext->gcdate = gc_read_reg(GC_CHIP_DATE_Address);
	gccorecontext->gctime = gc_read_reg(GC_CHIP_TIME_Address);
	gccorecontext->gcfeatures.raw = gc_read_reg(GC_FEATURES_Address);
	gccorecontext->gcfeatures0.raw = gc_read_reg(GC_FEATURES0_Address);
	gccorecontext->gcfeatures1.raw = gc_read_reg(GC_FEATURES1_Address);
	gccorecontext->gcfeatures2.raw = gc_read_reg(GC_FEATURES2_Address);
	gccorecontext->gcfeatures3.raw = gc_read_reg(GC_FEATURES3_Address);

	GCDBG(GCZONE_PROBE, "GPU IDENTITY:\n");
	GCDBG(GCZONE_PROBE, " model=%X\n", gccorecontext->gcmodel);
	GCDBG(GCZONE_PROBE, " revision=%X\n", gccorecontext->gcrevision);
	GCDBG(GCZONE_PROBE, " date=%X\n", gccorecontext->gcdate);
	GCDBG(GCZONE_PROBE, " time=%X\n", gccorecontext->gctime);
	GCDBG(GCZONE_PROBE, " features=0x%08X\n", gccorecontext->gcfeatures);
	GCDBG(GCZONE_PROBE, " features0=0x%08X\n", gccorecontext->gcfeatures0);
	GCDBG(GCZONE_PROBE, " features1=0x%08X\n", gccorecontext->gcfeatures1);
	GCDBG(GCZONE_PROBE, " features2=0x%08X\n", gccorecontext->gcfeatures2);
	GCDBG(GCZONE_PROBE, " features3=0x%08X\n", gccorecontext->gcfeatures3);

	pm_runtime_put_sync(gccorecontext->device);

	GCEXIT(GCZONE_PROBE);
	return 0;
}
918 static int gc_remove(struct platform_device *pdev)
919 {
920 kfree(g_context.opp_freqs);
921 return 0;
922 }
924 #if GC_ENABLE_SUSPEND
/* Legacy suspend hook: drain the command queue so the GPU is idle
 * before the system suspends.  Always reports success. */
static int gc_suspend(struct platform_device *pdev, pm_message_t s)
{
	GCENTER(GCZONE_POWER);
	gcqueue_wait_idle(&g_context);
	GCEXIT(GCZONE_POWER);
	return 0;
}
/* Legacy resume hook: nothing to restore here — the GPU is powered
 * back up on demand by the power management paths above. */
static int gc_resume(struct platform_device *pdev)
{
	GCENTER(GCZONE_POWER);
	GCEXIT(GCZONE_POWER);
	return 0;
}
939 #endif
/* Platform driver glue for the "gccore" device; the suspend/resume
 * hooks are compiled in only when GC_ENABLE_SUSPEND is set. */
static struct platform_driver plat_drv = {
	.probe = gc_probe,
	.remove = gc_remove,
#if GC_ENABLE_SUSPEND
	.suspend = gc_suspend,
	.resume = gc_resume,
#endif
	.driver = {
		.owner = THIS_MODULE,
		.name = "gccore",
	},
};
954 #if CONFIG_HAS_EARLYSUSPEND
955 #include <linux/earlysuspend.h>
/* Android early-suspend hook: wait for the command queue to drain so
 * the GPU is idle before the display is turned off. */
static void gc_early_suspend(struct early_suspend *h)
{
	GCENTER(GCZONE_POWER);
	gcqueue_wait_idle(&g_context);
	GCEXIT(GCZONE_POWER);
}
/* Android late-resume hook: nothing to do — power is restored on
 * demand when the next request arrives. */
static void gc_late_resume(struct early_suspend *h)
{
	GCENTER(GCZONE_POWER);
	GCEXIT(GCZONE_POWER);
}
/* Suspend at the same level the framebuffer is disabled. */
static struct early_suspend early_suspend_info = {
	.suspend = gc_early_suspend,
	.resume = gc_late_resume,
	.level = EARLY_SUSPEND_LEVEL_DISABLE_FB,
};
974 #endif
977 /*******************************************************************************
978 * Driver init/shutdown.
979 */
981 static int gc_init(struct gccorecontext *gccorecontext);
982 static void gc_exit(struct gccorecontext *gccorecontext);
/* One-time driver initialization: locks and context lists, MMU,
 * platform driver registration and the command queue.  Returns 0 on
 * success and also when the GC hardware is absent (the module still
 * loads as a no-op); a negative errno on failure. */
static int gc_init(struct gccorecontext *gccorecontext)
{
	int result;

	GCENTER(GCZONE_INIT);

	/* check if hardware is available */
	if (!cpu_is_omap447x()) {
		GCDBG(GCZONE_INIT, "gcx hardware is not present\n");
		goto exit;
	}

	/* Initialize data structures. */
	GCLOCK_INIT(&gccorecontext->powerlock);
	GCLOCK_INIT(&gccorecontext->resetlock);
	GCLOCK_INIT(&gccorecontext->mmucontextlock);
	INIT_LIST_HEAD(&gccorecontext->mmuctxlist);
	INIT_LIST_HEAD(&gccorecontext->mmuctxvac);

	/* Pulse skipping isn't known. */
	gccorecontext->pulseskipping = -1;

	/* Initialize MMU. */
	if (gcmmu_init(gccorecontext) != GCERR_NONE) {
		GCERR("failed to initialize MMU.\n");
		result = -EINVAL;
		goto fail;
	}

	result = platform_driver_register(&plat_drv);
	if (result < 0) {
		GCERR("failed to register platform driver.\n");
		goto fail;
	}
	/* Remember registration so gc_exit can unregister safely. */
	gccorecontext->platdriver = true;

#if CONFIG_HAS_EARLYSUSPEND
	register_early_suspend(&early_suspend_info);
#endif

	/* Initialize the command buffer. */
	if (gcqueue_start(gccorecontext) != GCERR_NONE) {
		GCERR("failed to initialize command buffer.\n");
		result = -EINVAL;
		goto fail;
	}

	/* Create debugfs entry. */
	gc_debug_init();

exit:
	GCEXIT(GCZONE_INIT);
	return 0;

fail:
	/* Partial teardown; gc_exit guards each step individually. */
	gc_exit(gccorecontext);

	GCEXITARG(GCZONE_INIT, "result = %d\n", result);
	return result;
}
1045 static void gc_exit(struct gccorecontext *gccorecontext)
1046 {
1047 GCENTER(GCZONE_INIT);
1049 if (cpu_is_omap447x()) {
1050 /* Stop command queue thread. */
1051 gcqueue_stop(gccorecontext);
1053 /* Destroy MMU. */
1054 destroy_mmu_context(gccorecontext);
1055 gcmmu_exit(gccorecontext);
1057 /* Disable power. */
1058 pm_runtime_disable(gccorecontext->device);
1060 if (gccorecontext->platdriver) {
1061 platform_driver_unregister(&plat_drv);
1062 gccorecontext->platdriver = false;
1063 }
1065 #if CONFIG_HAS_EARLYSUSPEND
1066 unregister_early_suspend(&early_suspend_info);
1067 #endif
1069 gc_debug_shutdown();
1071 GCLOCK_DESTROY(&gccorecontext->mmucontextlock);
1072 GCLOCK_DESTROY(&gccorecontext->resetlock);
1073 GCLOCK_DESTROY(&gccorecontext->powerlock);
1074 }
1076 GCEXIT(GCZONE_PROBE);
1077 }
/* Module entry point: bring up the debug infrastructure, register the
 * debug zone filters for each subsystem, then initialize the driver. */
static int __init gc_init_wrapper(void)
{
	GCDBG_INIT();

	GCDBG_REGISTER(core, GCZONE_NONE);
	GCDBG_REGISTER(mem, GCZONE_NONE);
	GCDBG_REGISTER(mmu, GCZONE_NONE);
	GCDBG_REGISTER(queue, GCZONE_NONE);

	return gc_init(&g_context);
}
/* Module exit point: tear down the driver, then the debug layer. */
static void __exit gc_exit_wrapper(void)
{
	gc_exit(&g_context);
	GCDBG_EXIT();
}
1097 MODULE_LICENSE("GPL v2");
1098 MODULE_AUTHOR("www.vivantecorp.com");
1099 MODULE_AUTHOR("www.ti.com");
1100 module_init(gc_init_wrapper);
1101 module_exit(gc_exit_wrapper);