1 /*
2 * Copyright (c) 2011-2014, Texas Instruments Incorporated
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 *
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 *
11 * * Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * * Neither the name of Texas Instruments Incorporated nor the names of
16 * its contributors may be used to endorse or promote products derived
17 * from this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
26 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
28 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
29 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */
32 /* Standard headers */
33 #include <ti/syslink/Std.h>
35 /*QNX specific header include */
36 #include <errno.h>
37 #include <unistd.h>
38 #include <sys/iofunc.h>
39 #include <sys/dispatch.h>
40 #include <sys/netmgr.h>
41 #include <sys/rsrcdbmgr.h>
42 #include <sys/rsrcdbmsg.h>
43 #undef QNX_PM_ENABLE
44 #ifdef QNX_PM_ENABLE
45 #include <sys/powman.h>
46 #include <cpu_dll_msg.h>
47 #endif
49 /* Module headers */
50 #include <ipu_pm.h>
51 #include <_ipu_pm.h>
52 #include <sys/mman.h>
53 #include <hw/inout.h>
54 #include <time.h>
55 #include <sys/siginfo.h>
56 #include <stdbool.h>
57 #include <ti/ipc/MultiProc.h>
58 #include <ti/syslink/ProcMgr.h>
59 #include <OMAP5430BenelliProc.h>
60 #include <ArchIpcInt.h>
61 #include <_Omap5430IpcInt.h>
63 #include <OMAP5430BenelliHalReset.h>
65 //#include <camera/camdrv.h>
66 #include <Bitops.h>
67 #include <_rpmsg.h>
69 #ifndef SYSLINK_SYSBIOS_SMP
70 #define CORE0 "CORE0"
71 #else
72 #define CORE0 "IPU"
73 #endif
75 /* Defines the ipu_pm state object, which contains all the module
76 * specific information. */
struct ipu_pm_module_object {
    atomic_t ref_count;
    /* Module reference count */
    ipu_pm_config cfg;
    /* Module configuration, captured at setup time */
    pthread_mutex_t mtx;
    /* Gate used for local thread safety; guards the iva* use counts below */
    int ivahd_use_cnt;
    /* Count of ivahd users */
    int ivaseq0_use_cnt;
    /* Count of ivaseq0 users */
    int ivaseq1_use_cnt;
    /* Count of ivaseq1 users */
    ProcMgr_Handle proc_handles[MultiProc_MAXPROCESSORS];
    /* Array of processor handles, indexed by MultiProc id */
    uint32_t loaded_procs;
    /* Info on which procs are loaded (presumably CORE0_LOADED |
     * CORE1_LOADED | DSP_LOADED bits — confirm against users) */
    uint32_t proc_state;
    /* Current state of the remote procs (see CORE0/1_PROC_DOWN flags) */
    timer_t hibernation_timer;
    /* Timer used for hibernation */
    int hib_timer_state;
    /* State of the hibernation timer */
    OsalIsr_Handle gpt9IsrObject;
    /* ISR handle for gpt9 WDT (watchdog of CORE0, per the gpt ISR below) */
    OsalIsr_Handle gpt11IsrObject;
    /* ISR handle for gpt11 WDT (watchdog of CORE1) */
    OsalIsr_Handle gpt6IsrObject;
    /* ISR handle for gpt6 WDT (watchdog of the DSP) */
    bool attached[MultiProc_MAXPROCESSORS];
    /* Indicates whether the ipu_pm module is attached, per processor. */
    bool is_setup;
    /* Indicates whether the ipu_pm module is setup. */
};
/* Single module-wide state instance.  Fields not listed here are
 * zero-initialized by static storage duration. */
static struct ipu_pm_module_object ipu_pm_state = {
    .ivahd_use_cnt = 0,
    .loaded_procs = 0,
    .proc_state = 0,
} ;
118 extern Bool syslink_hib_enable;
119 extern uint32_t syslink_hib_timeout;
120 extern Bool syslink_hib_hibernating;
121 extern pthread_mutex_t syslink_hib_mutex;
122 extern pthread_cond_t syslink_hib_cond;
124 #undef BENELLI_SELF_HIBERNATION
125 #define BENELLI_WATCHDOG_TIMER
127 /* A9 state flag 0000 | 0000 Benelli internal use*/
128 #define CORE0_PROC_DOWN 0x00010000
129 #define CORE1_PROC_DOWN 0x00020000
131 #define CORE0_LOADED 0x1
132 #define CORE1_LOADED 0x2
133 #define DSP_LOADED 0x4
135 #ifdef BENELLI_SELF_HIBERNATION
136 /* A9-M3 mbox status */
137 #define A9_M3_MBOX 0x4A0F4000
138 #define MBOX_MESSAGE_STATUS 0x000000CC
140 /* Flag provided by BIOS */
141 #define IDLE_FLAG_BENELLI_ADDR_MAP_BASE 0x9F0F0000
142 #define IDLE_FLAG_PHY_ADDR_OFFSET 0x2D8
144 /* BIOS flags states for each core in IPU */
145 static void *core0Idle = NULL;
146 static void *core1Idle = NULL;
147 static void *a9_m3_mbox = NULL;
148 static void *m3_clkstctrl = NULL;
150 #define NUM_IDLE_CORES ((in32((uintptr_t)core1Idle) << 1) + \
151 (in32((uintptr_t)core0Idle)))
152 #define PENDING_MBOX_MSG in32((uintptr_t)a9_m3_mbox + MBOX_MESSAGE_STATUS)
154 extern Bool rpmsg_resmgr_allow_hib (UInt16 proc_id);
155 #endif
157 #ifdef QNX_PM_ENABLE
158 static struct powauth *syslink_auth_active = NULL;
159 static struct powauth *syslink_auth_oswr = NULL;
161 enum {core_active, core_inactive, core_off};
162 static int ipu_pm_powman_init(void);
163 static void tell_powman_auth_oswr(int need);
164 static BOOL oswr_prevent = FALSE;
166 #endif
/* OMAP GPTIMER register bank layout; one 32-bit register per word.
 * The comments give the byte offset of each field from the timer base
 * (implied by the struct layout itself). */
typedef struct GPTIMER_REGS {
    uint32_t tidr;            /* 0x00  module revision id */
    uint32_t space[3];        /* 0x04-0x0C  gap */
    uint32_t tiocp_cfg;       /* 0x10  OCP interface config (idle modes) */
    uint32_t space1[3];       /* 0x14-0x1C  gap */
    uint32_t reserved;        /* 0x20 */
    uint32_t irqstatus_raw;   /* 0x24  raw event status */
    uint32_t irqstatus;       /* 0x28  event status (written back to clear) */
    uint32_t irqenable_set;   /* 0x2C  enable interrupt sources */
    uint32_t irqenable_clr;   /* 0x30  disable interrupt sources */
    uint32_t irqwakeen;       /* 0x34  wakeup-enable per event */
    uint32_t tclr;            /* 0x38  control; bit 0 starts/stops timer */
    uint32_t tcrr;            /* 0x3C  counter value */
    uint32_t tldr;            /* 0x40  load (reload) value */
    uint32_t ttgr;            /* 0x44  trigger */
    uint32_t twps;            /* 0x48  write-posted status */
    uint32_t tmar;            /* 0x4C  match value */
    uint32_t tcar1;           /* 0x50  capture 1 */
    uint32_t tsicr;           /* 0x54  synchronous interface control */
    uint32_t tcar2;           /* 0x58  capture 2 */
} GPTIMER_REGS;
190 #define OMAP54XX_IRQ_GPT6 74
191 #define OMAP54XX_IRQ_GPT9 77
192 #define OMAP54XX_IRQ_GPT11 79
194 #define GPTIMER3_BASE 0x48034000
195 #define GPTIMER4_BASE 0x48036000
196 #define GPTIMER9_BASE 0x4803E000
197 #define GPTIMER11_BASE 0x48088000
198 #define GPTIMER5_BASE 0x40138000
199 #define GPTIMER6_BASE 0x4013A000
201 static void *GPT3Base = 0;
202 static void *GPT3ClkCtrl =0;
203 static bool GPT3Saved = FALSE;
204 static bool GPT3InUse = FALSE;
205 static void *GPT4Base = 0;
206 static void *GPT4ClkCtrl =0;
207 static bool GPT4Saved = FALSE;
208 static bool GPT4InUse = FALSE;
209 static void *GPT5Base = 0;
210 static void *GPT5ClkCtrl =0;
211 static bool GPT5Saved = FALSE;
212 static bool GPT5InUse = FALSE;
213 static void *GPT6Base = 0;
214 static void *GPT6ClkCtrl =0;
215 static bool GPT6Saved = FALSE;
216 static bool GPT6InUse = FALSE;
217 static void *GPT9Base = 0;
218 static void *GPT9ClkCtrl =0;
219 static bool GPT9Saved = FALSE;
220 static bool GPT9InUse = FALSE;
221 static void *GPT11Base = 0;
222 static void *GPT11ClkCtrl =0;
223 static bool GPT11Saved = FALSE;
224 static bool GPT11InUse = FALSE;
226 static GPTIMER_REGS GPT3Reg_saved;
227 static GPTIMER_REGS GPT4Reg_saved;
228 static GPTIMER_REGS GPT5Reg_saved;
229 static GPTIMER_REGS GPT6Reg_saved;
230 static GPTIMER_REGS GPT9Reg_saved;
231 static GPTIMER_REGS GPT11Reg_saved;
233 static void *prm_base_va = NULL;
234 static void *cm2_base_va = NULL;
235 static void *cm_core_aon_base_va = NULL;
237 #define MAX_DUCATI_CHANNELS 4
238 #define DUCATI_CHANNEL_START 25
239 #define DUCATI_CHANNEL_END 28
240 static bool DMAAllocation = false;
241 #ifdef QNX_PM_ENABLE
242 static rsrc_request_t sdma_req;
243 #endif
245 // Note, the number of camera modes is tied to enum campower_mode_t, which can
246 // be found in camera/camdrv.h
247 #define NUM_CAM_MODES 3
248 static unsigned last_camera_req[NUM_CAM_MODES];
249 static unsigned last_led_req = 0;
/* Silicon revisions recognized by get_omap_version().  Error values
 * are negative so callers can simply test `omap_rev < 0`. */
enum processor_version {
    OMAP_5430_es10 = 0,        /* OMAP5430/OMAP5432 ES1.0 */
    OMAP_5430_es20,            /* OMAP5430/OMAP5432 ES2.0 */
    ERROR_CONTROL_ID = -1,     /* could not map the ID_CODE register */
    INVALID_SI_VERSION = -2    /* unrecognized silicon id */
};
258 #define PRM_SIZE 0x2000
259 #define PRM_BASE 0x4AE06000
260 #define PRM_CM_SYS_CLKSEL_OFFSET 0x110
261 //IVA_PRM registers for OMAP5 ES1.0
262 #define PM_IVA_PWRSTCTRL_OFFSET 0xF00
263 #define PM_IVA_PWRSTST_OFFSET 0xF04
264 #define RM_IVA_RSTCTRL_OFFSET 0xF10
265 #define RM_IVA_IVA_CONTEXT_OFFSET 0xF24
266 //IVA_PRM registers for OMAP5 ES2.0
267 #define PM_IVA_PWRSTCTRL_ES20_OFFSET 0x1200
268 #define PM_IVA_PWRSTST_ES20_OFFSET 0x1204
269 #define RM_IVA_RSTCTRL_ES20_OFFSET 0x1210
270 #define RM_IVA_IVA_CONTEXT_ES20_OFFSET 0x1224
272 #define CM2_SIZE 0x2000
273 #define CM2_BASE 0x4A008000
274 #define CM_L3_2_L3_2_CLKCTRL_OFFSET 0x820
275 #define CM_MPU_M3_CLKCTRL_OFFSET 0x900
277 //IVA_CM_CORE registers for OMAP5 ES1.0
278 #define CM_IVA_CLKSTCTRL_OFFSET 0xF00
279 #define CM_IVA_IVA_CLKCTRL_OFFSET 0xF20
280 #define CM_IVA_SL2_CLKCTRL_OFFSET 0xF28
281 //IVA_CM_CORE registers for OMAP5 ES2.0
282 #define CM_IVA_CLKSTCTRL_ES20_OFFSET 0x1200
283 #define CM_IVA_IVA_CLKCTRL_ES20_OFFSET 0x1220
284 #define CM_IVA_SL2_CLKCTRL_ES20_OFFSET 0x1228
286 // CM_L4PER GPTIMER offsets for OMAP5 ES1.0
287 #define CM_L4PER_GPTIMER3_CLKCTRL_ES1_0_OFFSET 0x1440
288 #define CM_L4PER_GPTIMER4_CLKCTRL_ES1_0_OFFSET 0x1448
289 #define CM_L4PER_GPTIMER9_CLKCTRL_ES1_0_OFFSET 0x1450
290 #define CM_L4PER_GPTIMER11_CLKCTRL_ES1_0_OFFSET 0x1430
292 // CM_L4PER GPTIMER offsets for OMAP5 ES2.0
293 #define CM_L4PER_TIMER3_CLKCTRL_ES2_0_OFFSET 0x1040
294 #define CM_L4PER_TIMER4_CLKCTRL_ES2_0_OFFSET 0x1048
295 #define CM_L4PER_TIMER9_CLKCTRL_ES2_0_OFFSET 0x1050
296 #define CM_L4PER_TIMER11_CLKCTRL_ES2_0_OFFSET 0x1030
298 #define CM_CORE_AON_SIZE 0x1000
299 #define CM_CORE_AON_BASE 0x4A004000
300 #define CM_ABE_CLKSTCTRL_OFFSET 0x500
301 #define CM_ABE_TIMER5_CLKCTRL_OFFSET 0x568
302 #define CM_ABE_TIMER6_CLKCTRL_OFFSET 0x570
304 #define IVAHD_FREQ_MAX_IN_HZ 532000000
306 #define ID_CODE_BASE 0x4A002000
307 #define ID_CODE_OFFSET 0x204
309 #define OMAP5430_ES10 0x0B942
310 #define OMAP5432_ES10 0x0B998
311 #define OMAP5430_ES20 0x1B942
312 #define OMAP5432_ES20 0x1B998
314 #ifdef QNX_PM_ENABLE
315 static dvfsMsg_t dvfsMessage;
316 static int cpudll_coid = -1;
317 static reply_getListOfDomainOPPs_t cpudll_iva_opp = { {0} }; /* for result of getDomainOPP (IVA)*/
318 static reply_getListOfDomainOPPs_t cpudll_core_opp = { {0} }; /* for result of getDomainOPP (CORE)*/
319 #endif
/* Resource type ids used in remote-processor resource-manager (rprm)
 * requests.  NOTE(review): these values appear to be a protocol shared
 * with the remote firmware — confirm before renumbering. */
enum {
    RPRM_GPTIMER = 0,
    RPRM_IVAHD = 1,
    RPRM_IVASEQ0 = 2,
    RPRM_IVASEQ1 = 3,
    RPRM_L3BUS = 4,
    RPRM_ISS = 5,
    RPRM_FDIF = 6,
    RPRM_SL2IF = 7,
    RPRM_AUXCLK = 8,
    RPRM_REGULATOR = 9,
    RPRM_GPIO = 10,
    RPRM_SDMA = 11,
    RPRM_IPU = 12,
    RPRM_DSP = 13,
    RPRM_I2C = 14,
    RPRM_CAMERA = 15,
    RPRM_LED = 16,
    RPRM_MAX
};
342 enum processor_version get_omap_version (void)
343 {
344 uintptr_t id_code_base = NULL;
345 enum processor_version omap_rev;
346 uint32_t reg;
348 id_code_base = mmap_device_io(0x1000, ID_CODE_BASE);
349 if (id_code_base == MAP_DEVICE_FAILED){
350 GT_setFailureReason (curTrace, GT_4CLASS, "get_omap_version",
351 ERROR_CONTROL_ID,
352 "Unable to map ID_CODE register");
353 return ERROR_CONTROL_ID;
354 }
356 reg = in32(id_code_base + ID_CODE_OFFSET);
357 reg &= 0xFFFFF000;
358 reg = reg >> 12;
360 switch (reg) {
361 case OMAP5430_ES10:
362 case OMAP5432_ES10:
363 omap_rev = OMAP_5430_es10;
364 break;
366 case OMAP5430_ES20:
367 case OMAP5432_ES20:
368 omap_rev = OMAP_5430_es20;
369 break;
371 default:
372 omap_rev = INVALID_SI_VERSION;
373 break;
374 }
376 if (id_code_base)
377 munmap_device_io(id_code_base, 0x1000);
379 return omap_rev;
380 }
/* Map the six GPT register banks (GPT3/4/9/11 in L4PER via CM2,
 * GPT5/6 in the ABE domain via CM_CORE_AON) and cache pointers to
 * their CM clock-control registers.  Assumes cm2_base_va and
 * cm_core_aon_base_va were mapped elsewhere before this call — TODO
 * confirm against module init.  On any failure, all mappings made so
 * far are released and an error is returned.
 */
int map_gpt_regs(void)
{
    int retval = 0;
    enum processor_version omap_rev;
    uint32_t cm_l4per_gpt3_offset;
    uint32_t cm_l4per_gpt4_offset;
    uint32_t cm_l4per_gpt9_offset;
    uint32_t cm_l4per_gpt11_offset;

    omap_rev = get_omap_version();
    if (omap_rev < 0) {
        GT_setFailureReason (curTrace, GT_4CLASS, "map_gpt_regs",
                             omap_rev, "Error while reading the OMAP REVISION");
        return -EIO;
    }

    /* The CM_L4PER timer CLKCTRL registers moved between ES1.0 and ES2.0. */
    if (omap_rev == OMAP_5430_es20) {
        cm_l4per_gpt3_offset = CM_L4PER_TIMER3_CLKCTRL_ES2_0_OFFSET;
        cm_l4per_gpt4_offset = CM_L4PER_TIMER4_CLKCTRL_ES2_0_OFFSET;
        cm_l4per_gpt9_offset = CM_L4PER_TIMER9_CLKCTRL_ES2_0_OFFSET;
        cm_l4per_gpt11_offset = CM_L4PER_TIMER11_CLKCTRL_ES2_0_OFFSET;
    }
    else {
        cm_l4per_gpt3_offset = CM_L4PER_GPTIMER3_CLKCTRL_ES1_0_OFFSET;
        cm_l4per_gpt4_offset = CM_L4PER_GPTIMER4_CLKCTRL_ES1_0_OFFSET;
        cm_l4per_gpt9_offset = CM_L4PER_GPTIMER9_CLKCTRL_ES1_0_OFFSET;
        cm_l4per_gpt11_offset = CM_L4PER_GPTIMER11_CLKCTRL_ES1_0_OFFSET;
    }

    GPT3ClkCtrl = cm2_base_va + cm_l4per_gpt3_offset;

    GPT3Base = (void *)mmap_device_io(0x1000, GPTIMER3_BASE);
    if ((uintptr_t)GPT3Base == MAP_DEVICE_FAILED) {
        retval = -ENOMEM;
        GPT3Base = NULL;
        goto exit;
    }

    GPT4ClkCtrl = cm2_base_va + cm_l4per_gpt4_offset;

    GPT4Base = (void *)mmap_device_io(0x1000, GPTIMER4_BASE);
    if ((uintptr_t)GPT4Base == MAP_DEVICE_FAILED) {
        retval = -ENOMEM;
        GPT4Base = NULL;
        goto exit;
    }

    GPT9ClkCtrl = cm2_base_va + cm_l4per_gpt9_offset;

    GPT9Base = (void *)mmap_device_io(0x1000, GPTIMER9_BASE);
    if ((uintptr_t)GPT9Base == MAP_DEVICE_FAILED) {
        retval = -ENOMEM;
        GPT9Base = NULL;
        goto exit;
    }

    GPT11ClkCtrl = cm2_base_va + cm_l4per_gpt11_offset;

    GPT11Base = (void *)mmap_device_io(0x1000, GPTIMER11_BASE);
    if ((uintptr_t)GPT11Base == MAP_DEVICE_FAILED) {
        retval = -ENOMEM;
        GPT11Base = NULL;
        goto exit;
    }

    GPT5ClkCtrl = cm_core_aon_base_va + CM_ABE_TIMER5_CLKCTRL_OFFSET;

    GPT5Base = (void *)mmap_device_io(0x1000, GPTIMER5_BASE);
    if ((uintptr_t)GPT5Base == MAP_DEVICE_FAILED) {
        retval = -ENOMEM;
        GPT5Base = NULL;
        goto exit;
    }

    GPT6ClkCtrl = cm_core_aon_base_va + CM_ABE_TIMER6_CLKCTRL_OFFSET;

    GPT6Base = (void *)mmap_device_io(0x1000, GPTIMER6_BASE);
    if ((uintptr_t)GPT6Base == MAP_DEVICE_FAILED) {
        retval = -ENOMEM;
        GPT6Base = NULL;
        goto exit;
    }

    return EOK;

    /* Unwind in reverse order of mapping.
     * NOTE(review): mappings are created with mmap_device_io() but torn
     * down with munmap(); munmap_device_io() would be the symmetric QNX
     * call — confirm the intended API pairing. */
exit:
    GPT6ClkCtrl = NULL;
    if (GPT5Base) {
        munmap(GPT5Base, 0x1000);
        GPT5Base = NULL;
    }
    GPT5ClkCtrl = NULL;
    if (GPT11Base) {
        munmap(GPT11Base, 0x1000);
        GPT11Base = NULL;
    }
    GPT11ClkCtrl = NULL;
    if (GPT9Base) {
        munmap(GPT9Base, 0x1000);
        GPT9Base = NULL;
    }
    GPT9ClkCtrl = NULL;
    if (GPT4Base) {
        munmap(GPT4Base, 0x1000);
        GPT4Base = NULL;
    }
    GPT4ClkCtrl = NULL;
    if (GPT3Base) {
        munmap(GPT3Base, 0x1000);
        GPT3Base = NULL;
    }
    GPT3ClkCtrl = NULL;
    return retval;
}
500 void unmap_gpt_regs(void)
501 {
502 if(GPT11Base != NULL)
503 munmap(GPT11Base, 0x1000);
505 GPT11Base = NULL;
507 GPT11ClkCtrl = NULL;
509 if(GPT9Base != NULL)
510 munmap(GPT9Base, 0x1000);
512 GPT9Base = NULL;
514 GPT9ClkCtrl = NULL;
516 if(GPT4Base != NULL)
517 munmap(GPT4Base, 0x1000);
519 GPT4Base = NULL;
521 GPT4ClkCtrl = NULL;
523 if(GPT3Base != NULL)
524 munmap(GPT3Base, 0x1000);
526 GPT3Base = NULL;
528 GPT3ClkCtrl = NULL;
530 if(GPT5Base != NULL)
531 munmap(GPT5Base, 0x1000);
533 GPT5Base = NULL;
535 GPT5ClkCtrl = NULL;
537 if(GPT6Base != NULL)
538 munmap(GPT6Base, 0x1000);
540 GPT6Base = NULL;
542 GPT6ClkCtrl = NULL;
543 }
545 #ifdef BENELLI_WATCHDOG_TIMER
547 /* Interrupt clear function*/
548 static Bool ipu_pm_clr_gptimer_interrupt(Ptr fxnArgs)
549 {
550 UINT32 reg;
551 uint32_t num = (uint32_t)fxnArgs;
552 GPTIMER_REGS *GPTRegs = NULL;
554 if (num == GPTIMER_3) {
555 GPTRegs = GPT3Base;
556 }
557 else if (num == GPTIMER_4) {
558 GPTRegs = GPT4Base;
559 }
560 else if (num == GPTIMER_9) {
561 GPTRegs = GPT9Base;
562 }
563 else if (num == GPTIMER_11) {
564 GPTRegs = GPT11Base;
565 }
566 else if (num == GPTIMER_5) {
567 GPTRegs = GPT5Base;
568 }
569 else if (num == GPTIMER_6) {
570 GPTRegs = GPT6Base;
571 }
572 else {
573 return TRUE;
574 }
576 reg = in32((uintptr_t)&GPTRegs->irqstatus);
577 reg |= 0x2;
579 /*Clear Overflow event */
580 out32((uintptr_t)&GPTRegs->irqstatus, reg);
581 reg = in32((uintptr_t)&GPTRegs->irqstatus);
583 /*Always return TRUE for ISR*/
584 return TRUE;
585 }
/* ISR for the GP-timer based watchdogs.  fxnArgs carries the timer id;
 * the remote processor owning that watchdog is moved to the Watchdog
 * state so the recovery path can act on it (GPT9 -> CORE0,
 * GPT11 -> CORE1, GPT6 -> DSP). */
static Bool ipu_pm_gptimer_interrupt(Ptr fxnArgs)
{
    int num;
    uint16_t core0_id = MultiProc_getId(CORE0);
    uint16_t core1_id = MultiProc_getId("CORE1");
    uint16_t dsp_id = MultiProc_getId("DSP");

    switch ((uint32_t)fxnArgs) {
        case GPTIMER_9:
            num = 9;
            ProcMgr_setState(ipu_pm_state.proc_handles[core0_id],
                             ProcMgr_State_Watchdog);
            break;
        case GPTIMER_11:
            num = 11;
            ProcMgr_setState(ipu_pm_state.proc_handles[core1_id],
                             ProcMgr_State_Watchdog);
            break;
        case GPTIMER_6:
            num = 6;
            ProcMgr_setState(ipu_pm_state.proc_handles[dsp_id],
                             ProcMgr_State_Watchdog);
            break;
        default:
            num = 0;
            break;
    }
    // what to do here?
    GT_1trace(curTrace, GT_4CLASS,
              "ipu_pm_gptimer_interrupt: GPTimer %d expired!", num);

    /* NOTE(review): returns 0 (FALSE) while the companion clear-ISR
     * always returns TRUE — confirm the intended OsalIsr semantics. */
    return 0;
}
621 #endif
/*
 * Enable the clock of GP timer `num` and configure its OCP interface
 * for smart-idle / wakeup-capable operation.  For GPT5/GPT6 the ABE
 * clock domain is forced on first, since it sources those timers.
 *
 * Returns EOK on success, -EINVAL for an unknown timer id, or -EIO if
 * the module never reports functional (IDLEST stuck).
 */
int ipu_pm_gpt_enable(int num)
{
    GPTIMER_REGS * GPTRegs = NULL;
    uintptr_t GPTClkCtrl = NULL;
    int max_tries = 100;

    /* Select the clock-control register and register bank for the
     * requested timer, and mark it in use. */
    if (num == GPTIMER_3) {
        GPTClkCtrl = (uintptr_t)GPT3ClkCtrl;
        GPTRegs = GPT3Base;
        GPT3InUse = TRUE;
    }
    else if (num == GPTIMER_4) {
        GPTClkCtrl = (uintptr_t)GPT4ClkCtrl;
        GPTRegs = GPT4Base;
        GPT4InUse = TRUE;
    }
    else if (num == GPTIMER_9) {
        GPTClkCtrl = (uintptr_t)GPT9ClkCtrl;
        GPTRegs = GPT9Base;
        GPT9InUse = TRUE;
    }
    else if (num == GPTIMER_11) {
        GPTClkCtrl = (uintptr_t)GPT11ClkCtrl;
        GPTRegs = GPT11Base;
        GPT11InUse = TRUE;
    }
    else if (num == GPTIMER_5) {
        GPTClkCtrl = (uintptr_t)GPT5ClkCtrl;
        GPTRegs = GPT5Base;
        GPT5InUse = TRUE;
        // make sure abe clock is enabled as it is source for gpt5
        out32((uintptr_t)(cm_core_aon_base_va + CM_ABE_CLKSTCTRL_OFFSET), 0x2);
    }
    else if (num == GPTIMER_6) {
        GPTClkCtrl = (uintptr_t)GPT6ClkCtrl;
        GPTRegs = GPT6Base;
        GPT6InUse = TRUE;
        // make sure abe clock is enabled as it is source for gpt6
        out32((uintptr_t)(cm_core_aon_base_va + CM_ABE_CLKSTCTRL_OFFSET), 0x2);
    }
    else {
        return -EINVAL;
    }

    /* Enable GPT MODULEMODE and set CLKSEL to SYS_CLK*/
    out32(GPTClkCtrl, 0x2);
    /* Poll until IDLEST (bits 16-17) clears, i.e. module functional. */
    do {
        if (!(in32(GPTClkCtrl) & 0x30000))
            break;
    } while (--max_tries);
    if (max_tries == 0) {
        /* Timed out: roll the clock back off and report the failure. */
        ipu_pm_gpt_disable(num);
        return -EIO;
    }

    /* Set Smart-idle wake-up-capable */
    out32((uintptr_t)&GPTRegs->tiocp_cfg, 0xC);

    return EOK;
}
/*
 * Disable the clock of GP timer `num`.  Pending interrupt events are
 * acknowledged first so the module can idle cleanly.
 *
 * Returns EOK on success, -EINVAL for an unknown timer id or when the
 * timer's clock was not enabled to begin with.
 */
int ipu_pm_gpt_disable(int num)
{
    uintptr_t GPTClkCtrl = NULL;
    GPTIMER_REGS *GPTRegs = NULL;
    UINT32 reg = 0;

    /* Select the clock-control register and register bank for the
     * requested timer, and mark it no longer in use. */
    if (num == GPTIMER_3) {
        GPTClkCtrl = (uintptr_t)GPT3ClkCtrl;
        GPTRegs = GPT3Base;
        GPT3InUse = FALSE;
    }
    else if (num == GPTIMER_4) {
        GPTClkCtrl = (uintptr_t)GPT4ClkCtrl;
        GPTRegs = GPT4Base;
        GPT4InUse = FALSE;
    }
    else if (num == GPTIMER_9) {
        GPTClkCtrl = (uintptr_t)GPT9ClkCtrl;
        GPTRegs = GPT9Base;
        GPT9InUse = FALSE;
    }
    else if (num == GPTIMER_11) {
        GPTClkCtrl = (uintptr_t)GPT11ClkCtrl;
        GPTRegs = GPT11Base;
        GPT11InUse = FALSE;
    }
    else if (num == GPTIMER_5) {
        GPTClkCtrl = (uintptr_t)GPT5ClkCtrl;
        GPTRegs = GPT5Base;
        GPT5InUse = FALSE;
    }
    else if (num == GPTIMER_6) {
        GPTClkCtrl = (uintptr_t)GPT6ClkCtrl;
        GPTRegs = GPT6Base;
        GPT6InUse = FALSE;
    }
    else {
        return -EINVAL;
    }

    /*Check if Clock is Enabled (MODULEMODE bits == 0x2)*/
    reg = in32(GPTClkCtrl);
    if ((reg & 0x3) == 0x2) {
        /* Clear any pending interrupt to allow idle */
        reg = in32((uintptr_t)&GPTRegs->irqstatus);
        if (reg) {
            out32((uintptr_t)&GPTRegs->irqstatus, reg);
        }

        /*Disable the Timer (clear MODULEMODE)*/
        reg = in32(GPTClkCtrl);
        reg &= 0xFFFFFFFC;
        out32(GPTClkCtrl, reg);
    }
    else {
        GT_setFailureReason(curTrace, GT_4CLASS, "ipu_pm_gpt_disable", -EINVAL,
                            "gpt clock is not enabled!");
        return -EINVAL;
    }
    return EOK;
}
746 int ipu_pm_gpt_start (int num)
747 {
748 GPTIMER_REGS * GPTRegs = NULL;
749 uint32_t reg = 0;
751 if (num == GPTIMER_3) {
752 GPTRegs = GPT3Base;
753 }
754 else if (num == GPTIMER_4) {
755 GPTRegs = GPT4Base;
756 }
757 else if (num == GPTIMER_9) {
758 GPTRegs = GPT9Base;
759 }
760 else if (num == GPTIMER_11) {
761 GPTRegs = GPT11Base;
762 }
763 else if (num == GPTIMER_5) {
764 GPTRegs = GPT5Base;
765 }
766 else if (num == GPTIMER_6) {
767 GPTRegs = GPT6Base;
768 }
769 else {
770 return -EINVAL;
771 }
773 /*Start the Timer*/
774 reg = in32((uintptr_t)&GPTRegs->tclr);
775 reg |=0x1;
776 out32((uintptr_t)&GPTRegs->tclr, reg);
778 return EOK;
779 }
781 int ipu_pm_gpt_stop(int num)
782 {
783 uintptr_t GPTClkCtrl = NULL;
784 GPTIMER_REGS * GPTRegs = NULL;
785 uint32_t reg = 0;
787 if (num == GPTIMER_3) {
788 GPTClkCtrl = (uintptr_t)GPT3ClkCtrl;
789 GPTRegs = GPT3Base;
790 }
791 else if (num == GPTIMER_4) {
792 GPTClkCtrl = (uintptr_t)GPT4ClkCtrl;
793 GPTRegs = GPT4Base;
794 }
795 else if (num == GPTIMER_9) {
796 GPTClkCtrl = (uintptr_t)GPT9ClkCtrl;
797 GPTRegs = GPT9Base;
798 }
799 else if (num == GPTIMER_11) {
800 GPTClkCtrl = (uintptr_t)GPT11ClkCtrl;
801 GPTRegs = GPT11Base;
802 }
803 else if (num == GPTIMER_5) {
804 GPTClkCtrl = (uintptr_t)GPT5ClkCtrl;
805 GPTRegs = GPT5Base;
806 }
807 else if (num == GPTIMER_6) {
808 GPTClkCtrl = (uintptr_t)GPT6ClkCtrl;
809 GPTRegs = GPT6Base;
810 }
811 else {
812 return -EINVAL;
813 }
815 /*Check if Clock is Enabled*/
816 reg = in32(GPTClkCtrl);
817 if ((reg & 0x3) == 0x2) {
819 /*Stop the Timer*/
820 reg = in32((uintptr_t)&GPTRegs->tclr);
821 reg &=0xFFFFFFFE;
822 out32((uintptr_t)&GPTRegs->tclr, reg);
823 }
824 else {
825 GT_setFailureReason(curTrace, GT_4CLASS, "ipu_pm_gpt_stop", -EINVAL,
826 "gpt clock is not enabled!");
827 return -EINVAL;
828 };
830 return EOK;
831 }
833 void save_gpt_context(int num)
834 {
835 GPTIMER_REGS *GPTRegs = NULL;
836 GPTIMER_REGS *GPTSaved = NULL;
837 bool *GPTRestore = NULL;
839 if (num == GPTIMER_3) {
840 GPTRegs = GPT3Base;
841 GPTSaved = &GPT3Reg_saved;
842 GPTRestore = &GPT3Saved;
843 }
844 else if (num == GPTIMER_4) {
845 GPTRegs = GPT4Base;
846 GPTSaved = &GPT4Reg_saved;
847 GPTRestore = &GPT4Saved;
848 }
849 else if (num == GPTIMER_9) {
850 GPTRegs = GPT9Base;
851 GPTSaved = &GPT9Reg_saved;
852 GPTRestore = &GPT9Saved;
853 }
854 else if (num == GPTIMER_11) {
855 GPTRegs = GPT11Base;
856 GPTSaved = &GPT11Reg_saved;
857 GPTRestore = &GPT11Saved;
858 }
859 else if (num == GPTIMER_5) {
860 GPTRegs = GPT5Base;
861 GPTSaved = &GPT5Reg_saved;
862 GPTRestore = &GPT5Saved;
863 }
864 else if (num == GPTIMER_6) {
865 GPTRegs = GPT6Base;
866 GPTSaved = &GPT6Reg_saved;
867 GPTRestore = &GPT6Saved;
868 }
869 else {
870 return;
871 }
873 GPTSaved->tiocp_cfg = in32((uintptr_t)&GPTRegs->tiocp_cfg);
874 GPTSaved->tcrr = in32((uintptr_t)&GPTRegs->tcrr);
875 GPTSaved->irqenable_set = in32((uintptr_t)&GPTRegs->irqenable_set);
876 GPTSaved->irqstatus_raw = in32((uintptr_t)&GPTRegs->irqstatus_raw);
877 GPTSaved->irqstatus = in32((uintptr_t)&GPTRegs->irqstatus);
878 GPTSaved->irqwakeen = in32((uintptr_t)&GPTRegs->irqwakeen);
879 GPTSaved->tclr = in32((uintptr_t)&GPTRegs->tclr);
880 GPTSaved->tldr = in32((uintptr_t)&GPTRegs->tldr);
881 GPTSaved->ttgr = in32((uintptr_t)&GPTRegs->ttgr);
882 GPTSaved->tmar = in32((uintptr_t)&GPTRegs->tmar);
883 GPTSaved->tsicr = in32((uintptr_t)&GPTRegs->tsicr);
884 *GPTRestore = TRUE;
885 }
/*
 * Restore the register context of GP timer `num` previously captured
 * by save_gpt_context().  Does nothing if the timer id is unknown or
 * no valid snapshot exists; the snapshot is consumed (flag cleared)
 * before the writes.
 */
void restore_gpt_context(int num)
{
    GPTIMER_REGS *GPTRegs = NULL;
    GPTIMER_REGS *GPTSaved = NULL;
    bool *GPTRestore = NULL;

    if (num == GPTIMER_3) {
        GPTRegs = GPT3Base;
        GPTSaved = &GPT3Reg_saved;
        GPTRestore = &GPT3Saved;
    }
    else if (num == GPTIMER_4) {
        GPTRegs = GPT4Base;
        GPTSaved = &GPT4Reg_saved;
        GPTRestore = &GPT4Saved;
    }
    else if (num == GPTIMER_9) {
        GPTRegs = GPT9Base;
        GPTSaved = &GPT9Reg_saved;
        GPTRestore = &GPT9Saved;
    }
    else if (num == GPTIMER_11) {
        GPTRegs = GPT11Base;
        GPTSaved = &GPT11Reg_saved;
        GPTRestore = &GPT11Saved;
    }
    else if (num == GPTIMER_5) {
        GPTRegs = GPT5Base;
        GPTSaved = &GPT5Reg_saved;
        GPTRestore = &GPT5Saved;
    }
    else if (num == GPTIMER_6) {
        GPTRegs = GPT6Base;
        GPTSaved = &GPT6Reg_saved;
        GPTRestore = &GPT6Saved;
    }
    else {
        return;
    }

    if (*GPTRestore) {
        *GPTRestore = FALSE;
        /* Note: write order differs from the save order (tiocp_cfg and
         * irqenable_set first) — preserved as-is. */
        out32((uintptr_t)&GPTRegs->tiocp_cfg, GPTSaved->tiocp_cfg);
        out32((uintptr_t)&GPTRegs->irqenable_set, GPTSaved->irqenable_set);
        out32((uintptr_t)&GPTRegs->tcrr, GPTSaved->tcrr);
        out32((uintptr_t)&GPTRegs->irqstatus_raw, GPTSaved->irqstatus_raw);
        out32((uintptr_t)&GPTRegs->irqstatus, GPTSaved->irqstatus);
        out32((uintptr_t)&GPTRegs->irqwakeen, GPTSaved->irqwakeen);
        out32((uintptr_t)&GPTRegs->tclr, GPTSaved->tclr);
        out32((uintptr_t)&GPTRegs->tldr, GPTSaved->tldr);
        out32((uintptr_t)&GPTRegs->ttgr, GPTSaved->ttgr);
        out32((uintptr_t)&GPTRegs->tmar, GPTSaved->tmar);
        out32((uintptr_t)&GPTRegs->tsicr, GPTSaved->tsicr);
    }
}
943 int ipu_pm_ivaseq0_disable()
944 {
945 uintptr_t pm_base = 0;
946 uint32_t reg = 0;
947 enum processor_version omap_rev;
948 uint32_t rm_iva_rstctrl_offset;
950 pthread_mutex_lock(&ipu_pm_state.mtx);
952 if (ipu_pm_state.ivaseq0_use_cnt-- == 1) {
953 pm_base = (uintptr_t)prm_base_va;
955 omap_rev = get_omap_version();
956 if (omap_rev < 0) {
957 GT_setFailureReason (curTrace, GT_4CLASS, "ipu_pm_ivaseq0_disable",
958 omap_rev, "Error while reading the OMAP REVISION");
959 return -EIO;
960 }
962 if (omap_rev == OMAP_5430_es20) {
963 rm_iva_rstctrl_offset = RM_IVA_RSTCTRL_ES20_OFFSET;
964 }
965 else {
966 rm_iva_rstctrl_offset = RM_IVA_RSTCTRL_OFFSET;
967 }
969 reg = in32(pm_base + rm_iva_rstctrl_offset);
970 reg |= 0x1;
971 out32(pm_base + rm_iva_rstctrl_offset, reg);
972 }
973 else {
974 GT_0trace(curTrace, GT_3CLASS, "ivaseq0 still in use");
975 }
977 pthread_mutex_unlock(&ipu_pm_state.mtx);
978 return EOK;
979 }
981 int ipu_pm_ivaseq0_enable()
982 {
983 uintptr_t pm_base = 0;
984 uint32_t reg = 0;
985 enum processor_version omap_rev;
986 uint32_t rm_iva_rstctrl_offset;
988 pthread_mutex_lock(&ipu_pm_state.mtx);
989 if (++ipu_pm_state.ivaseq0_use_cnt == 1) {
990 pm_base = (uintptr_t)prm_base_va;
992 omap_rev = get_omap_version();
993 if (omap_rev < 0) {
994 GT_setFailureReason (curTrace, GT_4CLASS, "ipu_pm_ivaseq0_disable",
995 omap_rev, "Error while reading the OMAP REVISION");
996 return -EIO;
997 }
999 if (omap_rev == OMAP_5430_es20) {
1000 rm_iva_rstctrl_offset = RM_IVA_RSTCTRL_ES20_OFFSET;
1001 }
1002 else {
1003 rm_iva_rstctrl_offset = RM_IVA_RSTCTRL_OFFSET;
1004 }
1006 reg = in32(pm_base + rm_iva_rstctrl_offset);
1007 reg &= 0xFFFFFFFE;
1008 out32(pm_base + rm_iva_rstctrl_offset, reg);
1009 }
1010 else {
1011 GT_0trace(curTrace, GT_3CLASS, "ivaseq0 still in use");
1012 }
1014 pthread_mutex_unlock(&ipu_pm_state.mtx);
1015 return EOK;
1016 }
1018 int ipu_pm_ivaseq1_disable()
1019 {
1020 uintptr_t pm_base = 0;
1021 uint32_t reg = 0;
1022 enum processor_version omap_rev;
1023 uint32_t rm_iva_rstctrl_offset;
1025 pthread_mutex_lock(&ipu_pm_state.mtx);
1027 if (ipu_pm_state.ivaseq1_use_cnt-- == 1) {
1028 pm_base = (uintptr_t)prm_base_va;
1030 omap_rev = get_omap_version();
1031 if (omap_rev < 0) {
1032 GT_setFailureReason (curTrace, GT_4CLASS, "ipu_pm_ivaseq0_disable",
1033 omap_rev, "Error while reading the OMAP REVISION");
1034 return -EIO;
1035 }
1037 if (omap_rev == OMAP_5430_es20) {
1038 rm_iva_rstctrl_offset = RM_IVA_RSTCTRL_ES20_OFFSET;
1039 }
1040 else {
1041 rm_iva_rstctrl_offset = RM_IVA_RSTCTRL_OFFSET;
1042 }
1044 reg = in32(pm_base + rm_iva_rstctrl_offset);
1045 reg |= 0x2;
1046 out32(pm_base + rm_iva_rstctrl_offset, reg);
1047 }
1048 else {
1049 GT_0trace(curTrace, GT_3CLASS, "ivaseq1 still in use");
1050 }
1052 pthread_mutex_unlock(&ipu_pm_state.mtx);
1053 return EOK;
1054 }
1056 int ipu_pm_ivaseq1_enable()
1057 {
1058 uintptr_t pm_base = 0;
1059 uint32_t reg = 0;
1060 enum processor_version omap_rev;
1061 uint32_t rm_iva_rstctrl_offset;
1063 pthread_mutex_lock(&ipu_pm_state.mtx);
1064 if (++ipu_pm_state.ivaseq1_use_cnt == 1) {
1065 pm_base = (uintptr_t)prm_base_va;
1067 omap_rev = get_omap_version();
1068 if (omap_rev < 0) {
1069 GT_setFailureReason (curTrace, GT_4CLASS, "ipu_pm_ivaseq0_disable",
1070 omap_rev, "Error while reading the OMAP REVISION");
1071 return -EIO;
1072 }
1074 if (omap_rev == OMAP_5430_es20) {
1075 rm_iva_rstctrl_offset = RM_IVA_RSTCTRL_ES20_OFFSET;
1076 }
1077 else {
1078 rm_iva_rstctrl_offset = RM_IVA_RSTCTRL_OFFSET;
1079 }
1081 reg = in32(pm_base + rm_iva_rstctrl_offset);
1082 reg &= 0xFFFFFFFD;
1083 out32(pm_base + rm_iva_rstctrl_offset, reg);
1084 }
1085 else {
1086 GT_0trace(curTrace, GT_3CLASS, "ivaseq1 still in use");
1087 }
1089 pthread_mutex_unlock(&ipu_pm_state.mtx);
1090 return EOK;
1091 }
1093 int ipu_pm_ivahd_disable()
1094 {
1095 uintptr_t pm_base = 0;
1096 uintptr_t cm_base = 0;
1097 uint32_t reg = 0;
1098 int max_tries = 100;
1099 enum processor_version omap_rev;
1100 uint32_t pm_iva_pwrstctrl_offset;
1101 uint32_t cm_iva_clkstctrl_offset;
1102 uint32_t cm_iva_iva_clkctrl_offset;
1103 uint32_t cm_iva_sl2_clkctrl_offset;
1104 uint32_t rm_iva_rstctrl_offset;
1106 pthread_mutex_lock(&ipu_pm_state.mtx);
1108 if (ipu_pm_state.ivahd_use_cnt-- == 1) {
1109 omap_rev = get_omap_version();
1110 if (omap_rev < 0) {
1111 GT_setFailureReason (curTrace, GT_4CLASS, "ipu_pm_ivahd_disable",
1112 omap_rev, "Error while reading the OMAP REVISION");
1113 return -EIO;
1114 }
1115 pm_base = (uintptr_t)prm_base_va;
1116 cm_base = (uintptr_t)cm2_base_va;
1118 if (omap_rev == OMAP_5430_es20) {
1119 pm_iva_pwrstctrl_offset = PM_IVA_PWRSTCTRL_ES20_OFFSET;
1120 cm_iva_clkstctrl_offset = CM_IVA_CLKSTCTRL_ES20_OFFSET;
1121 cm_iva_iva_clkctrl_offset = CM_IVA_IVA_CLKCTRL_ES20_OFFSET;
1122 cm_iva_sl2_clkctrl_offset = CM_IVA_SL2_CLKCTRL_ES20_OFFSET;
1123 rm_iva_rstctrl_offset = RM_IVA_RSTCTRL_ES20_OFFSET;
1124 }
1125 else {
1126 pm_iva_pwrstctrl_offset = PM_IVA_PWRSTCTRL_OFFSET;
1127 cm_iva_clkstctrl_offset = CM_IVA_CLKSTCTRL_OFFSET;
1128 cm_iva_iva_clkctrl_offset = CM_IVA_IVA_CLKCTRL_OFFSET;
1129 cm_iva_sl2_clkctrl_offset = CM_IVA_SL2_CLKCTRL_OFFSET;
1130 rm_iva_rstctrl_offset = RM_IVA_RSTCTRL_OFFSET;
1131 }
1133 reg = in32(pm_base + pm_iva_pwrstctrl_offset);
1134 reg &= 0xFFFFFFFC;
1135 reg |= 0x00000002;
1136 out32(pm_base + pm_iva_pwrstctrl_offset, reg);
1138 /* Ensure that the wake up mode is set to SW_WAKEUP */
1139 out32(cm_base + cm_iva_clkstctrl_offset, 0x00000002);
1141 /* Check the standby status */
1142 do {
1143 if (((in32(cm_base + cm_iva_iva_clkctrl_offset) & 0x00040000) != 0x0))
1144 break;
1145 } while (--max_tries);
1146 if (max_tries == 0) {
1147 GT_0trace(curTrace, GT_4CLASS," ** Error in IVAHD standby status");
1148 }
1150 // IVAHD_CM2:CM_IVAHD_IVAHD_CLKCTRL
1151 out32(cm_base + cm_iva_iva_clkctrl_offset, 0x00000000);
1153 max_tries = 100;
1154 do {
1155 if((in32(cm_base + cm_iva_iva_clkctrl_offset) & 0x00030000) == 0x30000)
1156 break;
1157 } while (--max_tries);
1158 if (max_tries == 0) {
1159 GT_0trace(curTrace, GT_4CLASS," ** Error in IVAHD standby status");
1160 }
1162 // IVAHD_CM2:CM_IVAHD_SL2_CLKCTRL
1163 out32(cm_base + cm_iva_sl2_clkctrl_offset, 0x00000000);
1165 max_tries = 100;
1166 do {
1167 if((in32(cm_base + cm_iva_sl2_clkctrl_offset) & 0x00030000) == 0x30000);
1168 break;
1169 } while (--max_tries);
1170 if (max_tries == 0) {
1171 GT_0trace(curTrace, GT_4CLASS," ** Error in SL2 CLKCTRL");
1172 }
1174 /* put IVA into HW Auto mode */
1175 out32(cm_base + cm_iva_clkstctrl_offset, 0x00000003);
1177 max_tries = 100;
1178 /* Check CLK ACTIVITY bit */
1180 while(((in32(cm_base + cm_iva_clkstctrl_offset) & 0x00000100) != 0x0) && --max_tries);
1181 if (max_tries == 0)
1182 GT_0trace(curTrace, GT_4CLASS, "SYSLINK: ivahd_disable: WARNING - CLK ACTIVITY bit did not go off");
1184 // IVA sub-system resets - Assert reset for IVA logic and SL2
1185 out32(pm_base + rm_iva_rstctrl_offset, 0x00000004);
1186 max_tries = 200;
1187 while(--max_tries);
1189 // IVA sub-system resets - Assert reset for IVA logic, SL2, and sequencer1
1190 out32(pm_base + rm_iva_rstctrl_offset, 0x00000005);
1191 max_tries = 200;
1192 while(--max_tries);
1194 // IVA sub-system resets - Assert reset for IVA logic, SL2, sequencer1 and sequencer2
1195 out32(pm_base + rm_iva_rstctrl_offset, 0x00000007);
1196 }
1197 else {
1198 GT_0trace(curTrace, GT_3CLASS, "ivahd still in use");
1199 }
1201 pthread_mutex_unlock(&ipu_pm_state.mtx);
1202 return EOK;
1203 }
1205 int ipu_pm_ivahd_enable()
1206 {
1207 uintptr_t pm_base = 0;
1208 uintptr_t cm_base = 0;
1209 uint32_t reg = 0;
1210 unsigned int pwrst = 0;
1211 int max_tries = 100;
1212 enum processor_version omap_rev;
1213 uint32_t pm_iva_pwrstctrl_offset;
1214 uint32_t cm_iva_clkstctrl_offset;
1215 uint32_t cm_iva_iva_clkctrl_offset;
1216 uint32_t cm_iva_sl2_clkctrl_offset;
1217 uint32_t rm_iva_rstctrl_offset;
1218 uint32_t rm_iva_iva_context_offset;
1219 uint32_t pm_iva_pwrstst_offset;
1221 pthread_mutex_lock(&ipu_pm_state.mtx);
1223 if (++ipu_pm_state.ivahd_use_cnt == 1) {
1224 pm_base = (uintptr_t)prm_base_va;
1225 cm_base = (uintptr_t)cm2_base_va;
1227 omap_rev = get_omap_version();
1228 if (omap_rev < 0) {
1229 GT_setFailureReason (curTrace, GT_4CLASS, "ipu_pm_ivahd_enable",
1230 omap_rev, "Error while reading the OMAP REVISION");
1231 return -EIO;
1232 }
1234 if (omap_rev == OMAP_5430_es20) {
1235 pm_iva_pwrstctrl_offset = PM_IVA_PWRSTCTRL_ES20_OFFSET;
1236 cm_iva_clkstctrl_offset = CM_IVA_CLKSTCTRL_ES20_OFFSET;
1237 cm_iva_iva_clkctrl_offset = CM_IVA_IVA_CLKCTRL_ES20_OFFSET;
1238 cm_iva_sl2_clkctrl_offset = CM_IVA_SL2_CLKCTRL_ES20_OFFSET;
1239 rm_iva_rstctrl_offset = RM_IVA_RSTCTRL_ES20_OFFSET;
1240 rm_iva_iva_context_offset = RM_IVA_IVA_CONTEXT_ES20_OFFSET;
1241 pm_iva_pwrstst_offset = PM_IVA_PWRSTST_ES20_OFFSET;
1242 }
1243 else {
1244 pm_iva_pwrstctrl_offset = PM_IVA_PWRSTCTRL_OFFSET;
1245 cm_iva_clkstctrl_offset = CM_IVA_CLKSTCTRL_OFFSET;
1246 cm_iva_iva_clkctrl_offset = CM_IVA_IVA_CLKCTRL_OFFSET;
1247 cm_iva_sl2_clkctrl_offset = CM_IVA_SL2_CLKCTRL_OFFSET;
1248 rm_iva_rstctrl_offset = RM_IVA_RSTCTRL_OFFSET;
1249 rm_iva_iva_context_offset = RM_IVA_IVA_CONTEXT_OFFSET;
1250 pm_iva_pwrstst_offset = PM_IVA_PWRSTST_OFFSET;
1251 }
1252 /* Read the IVAHD Context register to check if the memory content has been lost */
1253 reg = in32(pm_base + rm_iva_iva_context_offset);
1254 /* Clear the context register by writing 1 to bit 8,9 and 10 */
1255 out32(pm_base + rm_iva_iva_context_offset, 0x700);
1257 /*Display power state*/
1258 pwrst = in32(pm_base + pm_iva_pwrstst_offset);
1259 GT_1trace(curTrace, GT_4CLASS, "###: off state reg bit = 0x%x\n", (pwrst & 0x03000003));
1260 /*Clear the power status reg by writting 1'a into the requred bits*/
1261 out32(pm_base + pm_iva_pwrstst_offset, 0x03000000);
1263 /* Ensure power state is set to ON */
1264 reg = in32(pm_base + pm_iva_pwrstctrl_offset);
1265 reg &= 0xFFFFFFFC;
1266 reg |= 0x00000003;
1267 out32(pm_base + pm_iva_pwrstctrl_offset, reg);
1269 /* Ensure that the wake up mode is set to SW_WAKEUP */
1270 out32(cm_base + cm_iva_clkstctrl_offset, 0x00000002);
1272 max_tries = 100;
1273 while(((in32(pm_base + pm_iva_pwrstst_offset) & 0x00100000) != 0) && --max_tries);
1274 if (max_tries == 0)
1275 GT_0trace(curTrace, GT_4CLASS, "SYSLINK: ivahd_enable: WARNING - PwrSt did not transition");
1277 // IVAHD_CM2:CM_IVAHD_IVAHD_CLKCTRL
1278 out32(cm_base + cm_iva_iva_clkctrl_offset, 0x00000001);
1280 // IVAHD_CM2:CM_IVAHD_SL2_CLKCTRL
1281 out32(cm_base + cm_iva_sl2_clkctrl_offset, 0x00000001);
1283 /* Wait until the CLK_ACTIVITY bit is set */
1284 max_tries = 100;
1285 while (((in32(cm_base + cm_iva_clkstctrl_offset) & 0x00000100) == 0x0) && --max_tries);
1286 if (max_tries == 0)
1287 GT_0trace(curTrace, GT_4CLASS, "SYSLINK: ivahd_enable: WARNING - Clk_ACTIVITY bit is not set");
1289 /* Release ICONT1 and SL2/IVAHD first, wait for few usec then release ICONT2 */
1290 reg = in32(pm_base + rm_iva_rstctrl_offset);
1291 reg &= 0xFFFFFFFB;
1292 out32(pm_base + rm_iva_rstctrl_offset, reg);
1294 max_tries = 100;
1295 usleep(100);
1296 do {
1297 if((in32(cm_base + cm_iva_iva_clkctrl_offset) & 0x00030000) == 0x0)
1298 break;
1299 } while(--max_tries);
1300 if (max_tries == 0) {
1301 GT_0trace(curTrace, GT_4CLASS," ** Error in IVAHD clk control");
1302 return -EIO;
1303 }
1305 max_tries = 100;
1306 do {
1307 if((in32(cm_base + cm_iva_sl2_clkctrl_offset) & 0x00030000) == 0x00000)
1308 break;
1309 } while(--max_tries);
1310 if (max_tries == 0) {
1311 GT_0trace(curTrace, GT_4CLASS," ** Error in SL2 clk control");
1312 return -EIO;
1313 }
1315 max_tries = 100;
1316 do {
1317 if((in32(cm_base + CM_L3_2_L3_2_CLKCTRL_OFFSET) & 0x30001) == 0x00001)
1318 break;
1319 } while(--max_tries);
1320 if (max_tries == 0) {
1321 GT_0trace(curTrace, GT_4CLASS," ** Error in L3 clk control");
1322 return -EIO;
1323 }
1325 /* Ensure IVAHD and SL2 is functional */
1326 max_tries = 100;
1327 do {
1328 if((in32(cm_base + cm_iva_iva_clkctrl_offset) & 0x00030001) == 0x00001)
1329 break;
1330 } while(--max_tries);
1331 if (max_tries == 0) {
1332 GT_0trace(curTrace, GT_4CLASS," ** IVAHD is not functional");
1333 return -EIO;
1334 }
1336 max_tries = 100;
1337 do {
1338 if((in32(cm_base + cm_iva_sl2_clkctrl_offset) & 0x00030001) == 0x00001)
1339 break;
1340 } while(--max_tries);
1341 if (max_tries == 0) {
1342 GT_0trace(curTrace, GT_4CLASS," ** SL2 is not functional");
1343 return -EIO;
1344 }
1345 } else {
1346 GT_0trace(curTrace, GT_3CLASS, "ivahd already acquired");
1347 }
1349 pthread_mutex_unlock(&ipu_pm_state.mtx);
1350 return EOK;
1351 }
1353 int ipu_pm_ivahd_off()
1354 {
1355 uintptr_t pm_base = 0;
1356 uintptr_t cm_base = 0;
1357 uint32_t reg = 0;
1358 int32_t max_tries = 0;
1359 bool ivahd_enabled = false;
1360 bool sl2_enabled = false;
1361 enum processor_version omap_rev;
1362 uint32_t pm_iva_pwrstctrl_offset;
1363 uint32_t cm_iva_clkstctrl_offset;
1364 uint32_t cm_iva_iva_clkctrl_offset;
1365 uint32_t cm_iva_sl2_clkctrl_offset;
1366 uint32_t rm_iva_rstctrl_offset;
1367 uint32_t pm_iva_pwrstst_offset;
1369 pm_base = (uintptr_t)prm_base_va;
1370 cm_base = (uintptr_t)cm2_base_va;
1372 omap_rev = get_omap_version();
1373 if (omap_rev < 0) {
1374 GT_setFailureReason (curTrace, GT_4CLASS, "ipu_pm_ivahd_enable",
1375 omap_rev, "Error while reading the OMAP REVISION");
1376 return -EIO;
1377 }
1379 if (omap_rev == OMAP_5430_es20) {
1380 pm_iva_pwrstst_offset = PM_IVA_PWRSTST_ES20_OFFSET;
1381 cm_iva_clkstctrl_offset = CM_IVA_CLKSTCTRL_ES20_OFFSET;
1382 cm_iva_iva_clkctrl_offset = CM_IVA_IVA_CLKCTRL_ES20_OFFSET;
1383 cm_iva_sl2_clkctrl_offset = CM_IVA_SL2_CLKCTRL_ES20_OFFSET;
1384 pm_iva_pwrstctrl_offset = PM_IVA_PWRSTCTRL_ES20_OFFSET;
1385 rm_iva_rstctrl_offset = RM_IVA_RSTCTRL_ES20_OFFSET;
1386 }
1387 else {
1388 pm_iva_pwrstst_offset = PM_IVA_PWRSTST_OFFSET;
1389 cm_iva_clkstctrl_offset = CM_IVA_CLKSTCTRL_OFFSET;
1390 cm_iva_iva_clkctrl_offset = CM_IVA_IVA_CLKCTRL_OFFSET;
1391 cm_iva_sl2_clkctrl_offset = CM_IVA_SL2_CLKCTRL_OFFSET;
1392 pm_iva_pwrstctrl_offset = PM_IVA_PWRSTCTRL_OFFSET;
1393 rm_iva_rstctrl_offset = RM_IVA_RSTCTRL_OFFSET;
1394 }
1396 reg = in32(pm_base + pm_iva_pwrstst_offset);
1397 reg = reg & 0x00000007;
1399 if (reg != 0x00000000) {
1400 /* set IVAHD to SW_WKUP */
1401 out32(cm_base + cm_iva_clkstctrl_offset, 0x2);
1402 max_tries = 100;
1403 /* Check for ivahd module and disable if it is enabled */
1404 if ((in32(cm_base + cm_iva_iva_clkctrl_offset) & 0x1) != 0) {
1405 out32(cm_base + cm_iva_iva_clkctrl_offset, 0x0);
1406 ivahd_enabled = 1;
1407 }
1408 /* Check for sl2 module and disable if it is enabled */
1409 if ((in32(cm_base + cm_iva_sl2_clkctrl_offset) & 0x1) != 0) {
1410 out32(cm_base + cm_iva_sl2_clkctrl_offset, 0x0);
1411 sl2_enabled = 1;
1412 }
1413 if (ivahd_enabled || sl2_enabled) {
1414 while (((in32(cm_base + cm_iva_clkstctrl_offset) & 0x00000100) == 0x0) && --max_tries);
1415 if (max_tries == 0) {
1416 GT_0trace(curTrace, GT_4CLASS,"IPU_PM:IVAHD DOMAIN is Not Enabled after retries");
1417 }
1418 }
1420 /* Set IVAHD PD to OFF */
1421 reg = in32(pm_base + pm_iva_pwrstctrl_offset);
1422 reg = (reg & 0xFFFFFFFC) | 0x0;
1423 out32(pm_base + pm_iva_pwrstctrl_offset, reg);
1425 max_tries = 100;
1426 while (((in32(pm_base + pm_iva_pwrstst_offset) & 0x00100000) != 0) && --max_tries);
1427 if (max_tries == 0) {
1428 GT_0trace(curTrace, GT_4CLASS,"IPU_PM: IVAHD Power Domain is in transition after retries");
1429 }
1431 if (ivahd_enabled) {
1432 max_tries = 100;
1433 while (((in32(cm_base + cm_iva_iva_clkctrl_offset) & 0x00030000) != 0x30000) && --max_tries);
1434 if (max_tries == 0) {
1435 GT_0trace(curTrace, GT_4CLASS,"IPU_PM: Stuck up in the IVAHD Module after retries");
1436 }
1437 }
1438 if (sl2_enabled) {
1439 max_tries = 100;
1440 while (((in32(cm_base + cm_iva_sl2_clkctrl_offset) & 0x00030000) != 0x30000) && --max_tries);
1441 if (max_tries == 0) {
1442 GT_0trace(curTrace, GT_4CLASS,"IPU_PM: Stuck up in the SL2 Module after retries");
1443 }
1444 }
1445 /* Set IVAHD to HW_AUTO */
1446 out32(cm_base + cm_iva_clkstctrl_offset, 0x3);
1447 /* Check the reset states and assert resets */
1448 if (in32(pm_base + rm_iva_rstctrl_offset) != 0x7) {
1449 out32(pm_base + rm_iva_rstctrl_offset, 0x7);
1450 }
1451 }
1453 return EOK;
1454 }
1456 int ipu_pm_ivahd_on()
1457 {
1458 uintptr_t pm_base = 0;
1459 uintptr_t cm_base = 0;
1460 uint32_t reg = 0;
1461 enum processor_version omap_rev;
1462 uint32_t pm_iva_pwrstctrl_offset;
1463 uint32_t cm_iva_clkstctrl_offset;
1465 pm_base = (uintptr_t)prm_base_va;
1466 cm_base = (uintptr_t)cm2_base_va;
1468 omap_rev = get_omap_version();
1469 if (omap_rev < 0) {
1470 GT_setFailureReason (curTrace, GT_4CLASS, "ipu_pm_ivahd_enable",
1471 omap_rev, "Error while reading the OMAP REVISION");
1472 return -EIO;
1473 }
1475 if (omap_rev == OMAP_5430_es20) {
1476 pm_iva_pwrstctrl_offset = PM_IVA_PWRSTCTRL_ES20_OFFSET;
1477 cm_iva_clkstctrl_offset = CM_IVA_CLKSTCTRL_ES20_OFFSET;
1478 }
1479 else {
1480 pm_iva_pwrstctrl_offset = PM_IVA_PWRSTCTRL_OFFSET;
1481 cm_iva_clkstctrl_offset = CM_IVA_CLKSTCTRL_OFFSET;
1482 }
1484 /* Set the power state to ON */
1485 reg = in32(pm_base + pm_iva_pwrstctrl_offset);
1486 reg &= 0xFFFFFFFC;
1487 reg |= 0x00000002;
1488 out32(pm_base + pm_iva_pwrstctrl_offset, reg);
1490 /* Ensure that the wake up mode is set to SW_WAKEUP */
1491 out32(cm_base + cm_iva_clkstctrl_offset, 0x00000002);
1493 /* put IVA into HW Auto mode */
1494 reg = in32(cm_base + cm_iva_clkstctrl_offset);
1495 reg |= 0x00000003;
1496 out32(cm_base + cm_iva_clkstctrl_offset, reg);
1498 return EOK;
1499 }
1501 int ipu_pm_led_enable(unsigned int mode, unsigned int intensity)
1502 {
1503 int ret = 0;
1505 //ret = camflash_config(mode, intensity);
1507 if (ret != -1)
1508 last_led_req = mode;
1510 return ret;
1511 }
1513 int ipu_pm_alloc_sdma(int num_chs, int* channels)
1514 {
1515 GT_0trace(curTrace, GT_3CLASS, "ipu_pm_alloc_sdma++");
1517 if(DMAAllocation == false) {
1518 GT_0trace(curTrace, GT_4CLASS, "Channel pool empty");
1519 return -1;
1520 }
1521 GT_0trace(curTrace, GT_3CLASS, "ipu_pm_alloc_sdma--");
1522 return 0;
1523 }
1525 int ipu_pm_free_sdma(int num_chs, int* channels)
1526 {
1527 GT_0trace(curTrace, GT_3CLASS, "ipu_pm_free_sdma++");
1529 if(DMAAllocation == false) {
1530 GT_0trace(curTrace, GT_4CLASS, "Channel pool empty");
1531 return -1;
1532 }
1533 GT_0trace(curTrace, GT_3CLASS, "ipu_pm_free_sdma--");
1534 return 0;
1535 }
1537 int ipu_pm_camera_enable(unsigned int mode, unsigned int on)
1538 {
1539 int ret = 0;
1541 //ret = campower_config(mode, on);
1543 if (mode < NUM_CAM_MODES && ret == 0)
1544 last_camera_req[mode] = on;
1546 return ret;
1547 }
1549 int ipu_pm_get_max_freq(unsigned int proc, unsigned int * freq)
1550 {
1551 int status = EOK;
1553 switch (proc) {
1554 case RPRM_IVAHD:
1555 /* Would like to replace the below with a call to powerman */
1556 *freq = IVAHD_FREQ_MAX_IN_HZ;
1557 break;
1558 default:
1559 status = -ENOENT;
1560 break;
1561 }
1563 return status;
1564 }
1566 #ifdef QNX_PM_ENABLE
1567 static int ipu_pm_power_init(void)
1568 {
1569 /*Allocate SDMA channels*/
1570 memset(&sdma_req, 0, sizeof(sdma_req));
1571 sdma_req.length = MAX_DUCATI_CHANNELS;
1572 sdma_req.flags = RSRCDBMGR_DMA_CHANNEL | RSRCDBMGR_FLAG_RANGE;
1573 sdma_req.start = DUCATI_CHANNEL_START;
1574 sdma_req.end = DUCATI_CHANNEL_END;
1575 if (rsrcdbmgr_attach(&sdma_req, 1) == -1) {
1576 DMAAllocation = false;
1577 GT_1trace(curTrace, GT_4CLASS,
1578 "ipu_pm_power_init: DMA channel allocation FAILED: %s", strerror(errno));
1579 }
1580 else {
1581 GT_0trace(curTrace, GT_3CLASS,
1582 "ipu_pm_power_init: DMA channels ALLOCATED");
1583 DMAAllocation = true;
1584 }
1586 return EOK;
1587 }
1589 static void ipu_pm_power_deinit(void)
1590 {
1591 if(DMAAllocation){
1592 if (rsrcdbmgr_detach(&sdma_req, 1) == -1) {
1593 GT_1trace(curTrace, GT_4CLASS,
1594 "ipu_pm_power_deinit: DMA channel deallocation FAILED!!%s",
1595 strerror(errno));
1596 }
1597 DMAAllocation = false;
1598 }
1599 return;
1600 }
1602 static int ipu_pm_powman_init(void)
1603 {
1604 int status = EOK;
1606 syslink_auth_active = powman_auth_create("SYSLINK_NEEDS_CORE_ACTIVE");
1607 if(!syslink_auth_active) {
1608 GT_setFailureReason(curTrace, GT_4CLASS, "powman_init", ENOMEM,
1609 "syslink_auth_active create failure");
1610 return -ENOMEM;
1612 }
1614 syslink_auth_oswr = powman_auth_create("SYSLINK_NEEDS_PREVENT_OSWR");
1615 if(!syslink_auth_oswr) {
1616 GT_setFailureReason(curTrace, GT_4CLASS, "powman_init", ENOMEM,
1617 "syslink_auth_oswr create failure");
1618 return -ENOMEM;
1619 }
1621 int retry = 100;
1623 /* look for server */
1624 cpudll_coid = name_open( CPUDLL_RECV_NAME, 0);
1625 while (cpudll_coid == -1 && retry-- > 0) {
1626 sleep(1);
1627 cpudll_coid = name_open (CPUDLL_RECV_NAME, 0);
1628 }
1630 if (cpudll_coid == -1) {
1631 GT_setFailureReason (curTrace, GT_4CLASS, "connect to cpudll", EINVAL,
1632 "Couldn't connect to CPU DLL!");
1633 return EINVAL;
1634 }
1636 /* get IVA OPPs */
1637 dvfsMessage.dvfs.type = getListOfDomainOPPs;
1638 dvfsMessage.dvfs.domain = CPUDLL_OMAP_IVA;
1639 if (MsgSend( cpudll_coid, &dvfsMessage, sizeof( dvfsMessage ), &cpudll_iva_opp, sizeof(cpudll_iva_opp) ) == -1) {
1640 GT_setFailureReason(curTrace, GT_4CLASS, "powman_init", ENOMEM,
1641 "Could not get list of IVA OPPs.");
1642 return -ENOMEM;
1643 }
1645 /* get CORE OPPs */
1646 dvfsMessage.dvfs.type = getListOfDomainOPPs;
1647 dvfsMessage.dvfs.domain = CPUDLL_OMAP_CORE;
1648 if (MsgSend( cpudll_coid, &dvfsMessage, sizeof( dvfsMessage ), &cpudll_core_opp, sizeof(cpudll_core_opp) ) == -1) {
1649 GT_setFailureReason(curTrace, GT_4CLASS, "powman_init", ENOMEM,
1650 "Could not get list of Core OPPs.");
1651 return -ENOMEM;
1652 }
1653 return status;
1654 }
1656 static void ipu_pm_powman_deinit(void)
1657 {
1658 int status = EOK;
1660 if (syslink_auth_active) {
1661 status = powman_auth_destroy(syslink_auth_active);
1662 if (status < 0) {
1663 GT_setFailureReason(curTrace, GT_4CLASS, "ipu_pm_powman_deinit",
1664 status,
1665 "powman_auth_destroy syslink_auth_active failure");
1666 }
1667 syslink_auth_active = NULL;
1668 }
1669 if (syslink_auth_oswr) {
1670 status = powman_auth_destroy(syslink_auth_oswr);
1671 if (status < 0) {
1672 GT_setFailureReason(curTrace, GT_4CLASS, "ipu_pm_powman_deinit",
1673 status,
1674 "powman_auth_destroy syslink_auth_oswr failure");
1675 }
1676 syslink_auth_oswr = NULL;
1677 }
1679 // close the channel
1680 dvfsMessage.dvfs.type = (cpudll_type_e)closeClient;
1681 if (MsgSend( cpudll_coid, &dvfsMessage, sizeof( dvfsMessage ), NULL, 0 ) == -1) {
1682 GT_setFailureReason(curTrace, GT_4CLASS, "powman_deinit", ENOMEM,
1683 "Could not close client connection to server.");
1684 }
1686 name_close(cpudll_coid);
1687 cpudll_coid = -1;
1688 }
1690 //no special callback needed in our case so define the default.
/* Syslink needs no special delayed-callback handling; forward straight to
 * the power manager's default implementation. */
int powman_delayed_callback(unsigned ns, void (*func) (void *), void *data)
{
    return powman_delayed_callback_default(ns, func, data);
}
1696 static void tell_powman_auth_oswr(int need)
1697 {
1698 int r;
1699 r = powman_auth_state(syslink_auth_oswr, need);
1700 if(r != 0) {
1701 GT_setFailureReason(curTrace, GT_4CLASS, "tell_powman_auth_oswr", r,
1702 "powerman authority :cannot set state");
1703 }
1704 }
1706 int ipu_pm_set_bandwidth(unsigned int bandwidth)
1707 {
1708 int err;
1709 int oppIndex = CPUDLL_MAX_OPP_STATES-1;
1711 /* Camera needs OPPOV (highest OPP) which will be moving from index 1 to 2.
1712 * Find the greatest non-zero element to find the highest OPP and select it.
1713 */
1714 while ( (cpudll_core_opp.states[oppIndex] == 0) && (oppIndex > 0) ){
1715 oppIndex--;
1716 }
1718 dvfsMessage.dvfs.type = setDomainOPP;
1719 dvfsMessage.dvfs.domain = CPUDLL_OMAP_CORE;
1721 dvfsMessage.dvfs.req_opp = cpudll_core_opp.states[ bandwidth>=COREOPP100?oppIndex:0 ];
1722 err = MsgSend( cpudll_coid, &dvfsMessage, sizeof( dvfsMessage ), NULL, 0 );
1723 if(err != EOK) {
1724 GT_1trace(curTrace, GT_4CLASS," ** Error setting CORE OPP: %s", strerror(errno));
1725 }
1726 return EOK;
1727 }
1729 int ipu_pm_set_rate(struct ipu_pm_const_req * request)
1730 {
1731 int err = EOK;
1732 cpudll_iva_opp_t req = 0;
1734 if (request->target_rsrc == RPRM_IVAHD) {
1735 if (request->rate > FREQ_266Mhz)
1736 req = CPUDLL_IVA_OPPTURBO;
1737 else if ((request->rate > FREQ_133Mhz) &&
1738 (request->rate <= FREQ_266Mhz))
1739 req = CPUDLL_IVA_OPP100;
1740 else if ((request->rate > NO_FREQ_CONSTRAINT) &&
1741 (request->rate <= FREQ_133Mhz))
1742 req = CPUDLL_IVA_OPP50;
1743 else if (request->rate == NO_FREQ_CONSTRAINT)
1744 req = CPUDLL_IVA_OPPNONE;
1746 dvfsMessage.dvfs.req_opp = cpudll_iva_opp.states[req];
1747 dvfsMessage.dvfs.type = setDomainOPP;
1748 dvfsMessage.dvfs.domain = CPUDLL_OMAP_IVA;
1750 err = MsgSend( cpudll_coid, &dvfsMessage, sizeof( dvfsMessage ), NULL, 0 );
1751 if(err != EOK) {
1752 GT_2trace(curTrace, GT_4CLASS," ** Error setting IVA OPP %d: %s", req, strerror(err));
1753 }
1754 }
1755 return err;
1756 }
1757 #else
/* No-op stub used when QNX_PM_ENABLE is not defined (it is #undef'd at the
 * top of this file): frequency constraints are silently accepted. */
int ipu_pm_set_rate(struct ipu_pm_const_req * request)
{
    return EOK;
}
/* No-op stub used when QNX_PM_ENABLE is not defined: bandwidth requests
 * are silently accepted. */
int ipu_pm_set_bandwidth(unsigned int bandwidth)
{
    return EOK;
}
1767 #endif
1769 #ifdef BENELLI_SELF_HIBERNATION
1771 static int configure_timer (int val, int reload)
1772 {
1773 int status = 0;
1774 struct itimerspec timeout;
1775 timeout.it_value.tv_sec = val;
1776 timeout.it_value.tv_nsec = 0;
1777 timeout.it_interval.tv_sec = reload;
1778 timeout.it_interval.tv_nsec = 0;
1779 status = timer_settime(ipu_pm_state.hibernation_timer, 0, &timeout, NULL);
1780 if (status != 0) {
1781 status = -errno;
1782 }
1783 return status;
1784 }
/* Function implements the hibernation and watchdog timer.
 * The functionality is based on the following states:
 * RESET: Timer is disabled
 * OFF: Timer is OFF
 * ON: Timer running
 * HIBERNATE: Waiting for Benelli cores to hibernate
 * WD_RESET: Waiting for Benelli cores to complete hibernation
 */
/*
 * Drive the hibernation/watchdog timer state machine (see the state table
 * in the comment above). 'event' is the requested transition; the current
 * state (ipu_pm_state.hib_timer_state) gates whether it is honored.
 * Returns 0 on success or the negative timer_delete() status.
 */
int ipu_pm_timer_state(int event)
{
    int retval = 0;

    /* NOTE(review): elsewhere in this file 'attached' is indexed per
     * processor (ipu_pm_state.attached[proc_id]), i.e. it is an array, so
     * "!ipu_pm_state.attached" tests the array's address and is always
     * false - this guard likely never fires. Confirm intended semantics
     * (e.g. "any processor attached") before changing it. */
    if (!ipu_pm_state.attached)
        goto exit;

    switch (event) {
    case PM_HIB_TIMER_RESET:
        /* disable timer and remove irq handler */
        //Stop the timer
        configure_timer(0, 0);
        ipu_pm_state.hib_timer_state = PM_HIB_TIMER_RESET;
        break;

    case PM_HIB_TIMER_DELETE:
        /* Only delete from the OFF state; otherwise ignore the request. */
        if (ipu_pm_state.hib_timer_state == PM_HIB_TIMER_OFF) {
            /*Stop the Timer*/
            configure_timer(0, 0);
            /* Delete the timer */
            retval = timer_delete(ipu_pm_state.hibernation_timer);
        }
        break;

    case PM_HIB_TIMER_OFF:
        /* Only a running timer can be turned off. */
        if (ipu_pm_state.hib_timer_state == PM_HIB_TIMER_ON) {
            /*Stop the Timer*/
            configure_timer(0, 0);
            ipu_pm_state.hib_timer_state = PM_HIB_TIMER_OFF;
        }
        break;

    case PM_HIB_TIMER_ON:
        /* (Re)start is allowed from any of these states, including ON,
         * which restarts the countdown. syslink_hib_timeout is in ms;
         * configure_timer() takes seconds. */
        if (ipu_pm_state.hib_timer_state == PM_HIB_TIMER_RESET||
            ipu_pm_state.hib_timer_state == PM_HIB_TIMER_OFF||
            ipu_pm_state.hib_timer_state == PM_HIB_TIMER_DELETE||
            ipu_pm_state.hib_timer_state == PM_HIB_TIMER_ON) {
            /*Enable the timer*/
            /*Start the Timer*/
            configure_timer(syslink_hib_timeout / 1000, 0);
            ipu_pm_state.hib_timer_state = PM_HIB_TIMER_ON;
        }
        break;
    }

exit:
    if (retval < 0) {
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "ipu_pm_timer_state",
                             retval,
                             "ipu_pm_timer_state failed");
    }

    return retval;
}
/*
 * Save the MMU and Mailbox context before going into hibernation.
 */
1856 int ipu_pm_save_ctx(int proc_id)
1857 {
1858 int retval = 0;
1859 int flag;
1860 int num_loaded_cores = 0;
1861 int core0_loaded;
1862 int core1_loaded;
1863 unsigned long timeout;
1864 unsigned short core0_id = MultiProc_getId(CORE0);
1865 unsigned short core1_id = MultiProc_getId("CORE1");
1866 unsigned short dsp_id = MultiProc_getId("DSP");
1867 struct itimerspec value;
1868 uint64_t pa = 0, da = 0;
1869 u32 len = 0;
1871 /* get M3's load flag */
1872 core0_loaded = (ipu_pm_state.loaded_procs & CORE0_LOADED);
1873 core1_loaded = (ipu_pm_state.loaded_procs & CORE1_LOADED);
1875 /* Because of the current scheme, we need to check
1876 * if CORE1 is enabled and we need to shut it down too
1877 * CORE0 is the only one sending the hibernate message
1878 */
1879 pthread_mutex_lock(&ipu_pm_state.mtx);
1881 if (core0Idle == NULL) {
1882 if (proc_id == core0_id) {
1883 retval = get_res_info(RSC_SUSPENDADDR, "0", &da, &pa, &len);
1884 if (retval == 0) {
1885 /* BIOS flags to know the state of IPU cores */
1886 core0Idle = (void *)mmap_device_io(0x1000, ROUND_DOWN(pa, 0x1000));
1887 if ((uintptr_t)core0Idle == MAP_DEVICE_FAILED) {
1888 core0Idle = NULL;
1889 retval = -ENOMEM;
1890 goto exit;
1891 }
1893 core0Idle = (void *)((uint32_t)core0Idle + ((uint32_t)pa - ROUND_DOWN((uint32_t)pa, 0x1000)));
1894 core1Idle = (void *)core0Idle + sizeof(void *);
1895 }
1896 else {
1897 goto exit;
1898 }
1899 }
1900 }
1902 if (proc_id == core0_id || proc_id == core1_id) {
1903 timer_gettime(ipu_pm_state.hibernation_timer, &value);
1904 if (value.it_value.tv_sec || value.it_value.tv_nsec)
1905 goto exit;
1907 if (!core0_loaded)
1908 goto exit;
1910 /* If already down don't kill it twice */
1911 if (ipu_pm_state.proc_state & CORE0_PROC_DOWN) {
1912 GT_0trace(curTrace, GT_4CLASS, "ipu already hibernated");
1913 goto exit;
1914 }
1916 !TESTBITREG32((uintptr_t)m3_clkstctrl,
1917 CM_MPU_M3_CLKSTCTRL_CLKACTIVITY_BIT)) {
1918 retval = ArchIpcInt_sendInterrupt(core0_id,
1919 ipu_pm_state.cfg.int_id,
1920 RP_MBOX_HIBERNATION);
1922 num_loaded_cores = core1_loaded + core0_loaded;
1923 flag = 1;
1924 timeout = WAIT_FOR_IDLE_TIMEOUT;
1925 /* Wait fot Benelli to hibernate */
1926 do {
1927 /* Checking if IPU is really in idle */
1928 if (NUM_IDLE_CORES == num_loaded_cores) {
1929 flag = 0;
1930 break;
1931 } else {
1932 usleep(1000);
1933 }
1934 } while ( --timeout != 0);
1936 if (flag) {
1937 GT_0trace(curTrace, GT_4CLASS, "Benelli Cores are NOT really Idle");
1938 goto error;
1939 }
1941 ipu_pm_timer_state(PM_HIB_TIMER_OFF);
1942 retval = Omap5430IpcInt_mboxSaveCtxt(core0_id);
1943 if(retval != OMAP5430IPCINT_SUCCESS){
1944 GT_setFailureReason(curTrace, GT_4CLASS, "ipu_pm_save_ctx",
1945 retval,
1946 "Error while saving the MailBox context");
1947 goto error;
1948 }
1950 if (core1_loaded) {
1951 #ifdef BENELLI_WATCHDOG_TIMER
1952 save_gpt_context(GPTIMER_11);
1953 ipu_pm_gpt_stop(GPTIMER_11);
1954 ipu_pm_gpt_disable(GPTIMER_11);
1955 #endif
1956 if (GPT4InUse == TRUE)
1957 save_gpt_context(GPTIMER_4);
1959 retval = ProcMgr_control(ipu_pm_state.proc_handles[core1_id],
1960 Omap5430BenelliProc_CtrlCmd_Suspend, NULL);
1961 if (retval < 0) {
1962 GT_setFailureReason(curTrace, GT_4CLASS, "ipu_pm_save_ctx",
1963 retval, "Error while suspending CORE1");
1964 goto error;
1965 }
1966 GT_0trace(curTrace, GT_4CLASS, "Sleep CORE1");
1967 }
1969 #ifdef BENELLI_WATCHDOG_TIMER
1970 save_gpt_context(GPTIMER_9);
1971 ipu_pm_gpt_stop(GPTIMER_9);
1972 ipu_pm_gpt_disable(GPTIMER_9);
1973 #endif
1974 if (GPT3InUse == TRUE)
1975 save_gpt_context(GPTIMER_3);
1977 ipu_pm_state.proc_state |= CORE1_PROC_DOWN;
1978 retval = ProcMgr_control(ipu_pm_state.proc_handles[core0_id],
1979 Omap5430BenelliProc_CtrlCmd_Suspend, NULL);
1980 if (retval < 0) {
1981 GT_setFailureReason(curTrace, GT_4CLASS, "ipu_pm_save_ctx", retval,
1982 "Error while suspending CORE0");
1983 goto error;
1984 }
1985 GT_0trace(curTrace, GT_4CLASS, "Sleep CORE0");
1986 ipu_pm_state.proc_state |= CORE0_PROC_DOWN;
1988 ipu_pm_ivahd_off();
1990 // Advise that Ducati is hibernating
1991 pthread_mutex_lock(&syslink_hib_mutex);
1992 syslink_hib_hibernating = TRUE;
1993 pthread_mutex_unlock(&syslink_hib_mutex);
1994 }
1995 else if (proc_id == dsp_id) {
1996 //TODO: Add support for DSP.
1997 }
1998 else
1999 goto error;
2001 #ifdef QNX_PM_ENABLE
2002 if (oswr_prevent == 1) {
2003 tell_powman_auth_oswr(0); // Passing 1 prevents OSWR and 0 allows OSWR
2004 oswr_prevent = 0;
2005 }
2006 #endif
2007 /* If there is a message in the mbox restore
2008 * immediately after save.
2009 */
2010 if (PENDING_MBOX_MSG)
2011 goto restore;
2013 exit:
2014 pthread_mutex_unlock(&ipu_pm_state.mtx);
2015 return 0;
2016 error:
2017 GT_setFailureReason(curTrace, GT_4CLASS, "ipu_pm_save_ctx", retval,
2018 "Aborting hibernation process");
2019 ipu_pm_timer_state(PM_HIB_TIMER_ON);
2020 pthread_mutex_unlock(&ipu_pm_state.mtx);
2021 return retval;
2022 restore:
2023 GT_0trace(curTrace, GT_4CLASS,
2024 "Starting restore_ctx since messages pending in mbox");
2025 pthread_mutex_unlock(&ipu_pm_state.mtx);
2026 ipu_pm_restore_ctx(proc_id);
2028 return retval;
2029 }
2032 /* Function to check if a processor is shutdown
2033 * if shutdown then restore context else return.
2034 */
/*
 * Restore the IPU (Benelli) context after hibernation if the cores are
 * down: restore mailbox and GPT contexts, resume CORE0 (and CORE1 when it
 * was loaded), then wake any thread waiting for the end of hibernation.
 * Restarts the hibernation timer on the way in and on exit. DSP requests
 * are a no-op. Returns 0/positive on success, -EINVAL on failure.
 */
int ipu_pm_restore_ctx(int proc_id)
{
    int retval = 0;
    int core0_loaded;
    int core1_loaded;
    unsigned short core0_id = MultiProc_getId(CORE0);
    unsigned short core1_id = MultiProc_getId("CORE1");
    unsigned short dsp_id = MultiProc_getId("DSP");

    /*If feature not supported by proc, return*/
    if (proc_id == dsp_id)
        return 0;

    /* Check if the M3 was loaded */
    core0_loaded = (ipu_pm_state.loaded_procs & CORE0_LOADED);
    core1_loaded = (ipu_pm_state.loaded_procs & CORE1_LOADED);

    /* Because of the current scheme, we need to check
     * if CORE1 is enable and we need to enable it too
     * In both cases we should check if for both cores
     * and enable them if they were loaded.
     */
    pthread_mutex_lock(&ipu_pm_state.mtx);

    /* Restart the hib timer */
    if (syslink_hib_enable) {
        ipu_pm_timer_state(PM_HIB_TIMER_ON);
    }
#ifdef QNX_PM_ENABLE
    if(oswr_prevent == 0) {
        tell_powman_auth_oswr(1); // Passing 1 prevents OSWR and 0 allows OSWR
        oswr_prevent = 1;
    }
#endif
    if (proc_id == core0_id || proc_id == core1_id) {
        /* Nothing to restore unless CORE0 actually went down. */
        if (!(ipu_pm_state.proc_state & CORE0_PROC_DOWN) || !core0_loaded) {
            goto exit;
        }

        /* Only resume from a confirmed suspended state. */
        if (ProcMgr_getState(ipu_pm_state.proc_handles[core0_id]) != ProcMgr_State_Suspended) {
            goto exit;
        }

        /* Mailbox context must be back before the cores start running. */
        retval = Omap5430IpcInt_mboxRestoreCtxt(core0_id);
        if(retval != OMAP5430IPCINT_SUCCESS){
            GT_setFailureReason(curTrace, GT_4CLASS, "ipu_pm_restore_ctx",
                                retval,
                                "Not able to restore Mail Box context");
            goto error;
        }

#ifdef BENELLI_WATCHDOG_TIMER
        /* Re-arm CORE0's watchdog timer before waking it. */
        ipu_pm_gpt_enable(GPTIMER_9);
        restore_gpt_context(GPTIMER_9);
        ipu_pm_gpt_start(GPTIMER_9);
#endif
        if (GPT3InUse == TRUE) {
            ipu_pm_gpt_enable(GPTIMER_3);
            restore_gpt_context(GPTIMER_3);
        }

        GT_0trace(curTrace, GT_4CLASS, "Wakeup CORE0");
        ipu_pm_state.proc_state &= ~CORE0_PROC_DOWN;
        retval = ProcMgr_control(ipu_pm_state.proc_handles[core0_id],
                                 Omap5430BenelliProc_CtrlCmd_Resume, NULL);
        if (retval < 0){
            GT_setFailureReason(curTrace, GT_4CLASS, "ipu_pm_restore_ctx",
                                retval, "Not able to resume CORE0");
            goto error;
        }

        if (core1_loaded) {
#ifdef BENELLI_WATCHDOG_TIMER
            /* Re-arm CORE1's watchdog timer before waking it. */
            ipu_pm_gpt_enable(GPTIMER_11);
            restore_gpt_context(GPTIMER_11);
            ipu_pm_gpt_start(GPTIMER_11);
#endif
            if (GPT4InUse == TRUE) {
                ipu_pm_gpt_enable(GPTIMER_4);
                restore_gpt_context(GPTIMER_4);
            }

            GT_0trace(curTrace, GT_4CLASS, "Wakeup CORE1");
            ipu_pm_state.proc_state &= ~CORE1_PROC_DOWN;
            retval = ProcMgr_control(ipu_pm_state.proc_handles[core1_id],
                                     Omap5430BenelliProc_CtrlCmd_Resume, NULL);
            if (retval < 0){
                GT_setFailureReason(curTrace, GT_4CLASS, "ipu_pm_restore_ctx",
                                    retval, "Not able to resume CORE1");
                goto error;
            }
        }
        pthread_mutex_lock(&syslink_hib_mutex);
        // Once we are active, signal any thread waiting for end of hibernation
        syslink_hib_hibernating = FALSE;
        pthread_cond_broadcast(&syslink_hib_cond);
        pthread_mutex_unlock(&syslink_hib_mutex);
    }
    else
        goto error;
exit:
    /* turn on benelli hibernation timer */
    if (ipu_pm_state.hib_timer_state == PM_HIB_TIMER_OFF ||
        ipu_pm_state.hib_timer_state == PM_HIB_TIMER_RESET) {
        ipu_pm_timer_state(PM_HIB_TIMER_ON);
    }
    pthread_mutex_unlock(&ipu_pm_state.mtx);
    return retval;
error:
    pthread_mutex_unlock(&ipu_pm_state.mtx);
    return -EINVAL;
}
2148 /* ISR for Timer*/
2149 static void ipu_pm_timer_interrupt (union sigval val)
2150 {
2151 ipu_pm_save_ctx(MultiProc_getId(CORE0));
2152 return;
2153 }
2154 #else // BENELLI_SELF_HIBERNATION
/* No-op stub used when BENELLI_SELF_HIBERNATION is not defined: there is
 * never any hibernated context to restore. */
int ipu_pm_restore_ctx(int proc_id)
{
    return 0;
}
2160 #endif // BENELLI_SELF_HIBERNATION
2162 int ipu_pm_attach(int proc_id)
2163 {
2164 int retval = EOK;
2165 #ifdef BENELLI_WATCHDOG_TIMER
2166 OsalIsr_Params isrParams;
2167 #endif
2169 if (proc_id > MultiProc_MAXPROCESSORS) {
2170 return -EINVAL;
2171 }
2173 if (proc_id == MultiProc_getId(CORE0)) {
2174 ipu_pm_state.loaded_procs |= CORE0_LOADED;
2175 #ifdef BENELLI_WATCHDOG_TIMER
2176 ipu_pm_gpt_enable(GPTIMER_9);
2177 isrParams.checkAndClearFxn = ipu_pm_clr_gptimer_interrupt;
2178 isrParams.fxnArgs = (Ptr)GPTIMER_9;
2179 isrParams.intId = OMAP54XX_IRQ_GPT9;
2180 isrParams.sharedInt = FALSE;
2181 ipu_pm_state.gpt9IsrObject =
2182 OsalIsr_create(&ipu_pm_gptimer_interrupt,
2183 isrParams.fxnArgs, &isrParams);
2184 if(ipu_pm_state.gpt9IsrObject != NULL) {
2185 if (OsalIsr_install(ipu_pm_state.gpt9IsrObject) < 0) {
2186 retval = -ENOMEM;
2187 }
2188 }
2189 else {
2190 retval = -ENOMEM;
2191 }
2192 #endif
2193 #ifndef SYSLINK_SYSBIOS_SMP
2194 }
2195 else if (proc_id == MultiProc_getId("CORE1")) {
2196 #endif
2197 ipu_pm_state.loaded_procs |= CORE1_LOADED;
2198 #ifdef BENELLI_WATCHDOG_TIMER
2199 ipu_pm_gpt_enable(GPTIMER_11);
2200 isrParams.checkAndClearFxn = ipu_pm_clr_gptimer_interrupt;
2201 isrParams.fxnArgs = (Ptr)GPTIMER_11;
2202 isrParams.intId = OMAP54XX_IRQ_GPT11;
2203 isrParams.sharedInt = FALSE;
2204 ipu_pm_state.gpt11IsrObject =
2205 OsalIsr_create(&ipu_pm_gptimer_interrupt,
2206 isrParams.fxnArgs, &isrParams);
2207 if(ipu_pm_state.gpt11IsrObject != NULL) {
2208 if (OsalIsr_install(ipu_pm_state.gpt11IsrObject) < 0) {
2209 retval = -ENOMEM;
2210 }
2211 }
2212 else {
2213 retval = -ENOMEM;
2214 }
2215 #endif
2216 }
2217 else if (proc_id == MultiProc_getId("DSP")) {
2218 ipu_pm_state.loaded_procs |= DSP_LOADED;
2219 #ifdef BENELLI_WATCHDOG_TIMER
2220 ipu_pm_gpt_enable(GPTIMER_6);
2221 isrParams.checkAndClearFxn = ipu_pm_clr_gptimer_interrupt;
2222 isrParams.fxnArgs = (Ptr)GPTIMER_6;
2223 isrParams.intId = OMAP54XX_IRQ_GPT6;
2224 isrParams.sharedInt = FALSE;
2225 ipu_pm_state.gpt6IsrObject =
2226 OsalIsr_create(&ipu_pm_gptimer_interrupt,
2227 isrParams.fxnArgs, &isrParams);
2228 if(ipu_pm_state.gpt6IsrObject != NULL) {
2229 if (OsalIsr_install(ipu_pm_state.gpt6IsrObject) < 0) {
2230 retval = -ENOMEM;
2231 }
2232 }
2233 else {
2234 retval = -ENOMEM;
2235 }
2236 #endif
2237 }
2239 if (retval >= 0)
2240 retval = ProcMgr_open(&ipu_pm_state.proc_handles[proc_id], proc_id);
2242 if (retval < 0) {
2243 #ifdef BENELLI_WATCHDOG_TIMER
2244 if (proc_id == MultiProc_getId(CORE0)) {
2245 if (ipu_pm_state.gpt9IsrObject) {
2246 OsalIsr_uninstall(ipu_pm_state.gpt9IsrObject);
2247 OsalIsr_delete(&ipu_pm_state.gpt9IsrObject);
2248 ipu_pm_state.gpt9IsrObject = NULL;
2249 }
2250 ipu_pm_gpt_stop(GPTIMER_9);
2251 ipu_pm_gpt_disable(GPTIMER_9);
2252 #ifndef SYSLINK_SYSBIOS_SMP
2253 }
2254 else if (proc_id == MultiProc_getId("CORE1")) {
2255 #endif
2256 if (ipu_pm_state.gpt11IsrObject) {
2257 OsalIsr_delete(&ipu_pm_state.gpt11IsrObject);
2258 ipu_pm_state.gpt11IsrObject = NULL;
2259 }
2260 ipu_pm_gpt_stop(GPTIMER_11);
2261 ipu_pm_gpt_disable(GPTIMER_11);
2262 }
2263 else if (proc_id == MultiProc_getId("DSP")) {
2264 if (ipu_pm_state.gpt6IsrObject) {
2265 OsalIsr_uninstall(ipu_pm_state.gpt6IsrObject);
2266 OsalIsr_delete(&ipu_pm_state.gpt6IsrObject);
2267 ipu_pm_state.gpt6IsrObject = NULL;
2268 }
2269 ipu_pm_gpt_stop(GPTIMER_6);
2270 ipu_pm_gpt_disable(GPTIMER_6);
2271 }
2272 #endif
2274 }
2275 else {
2276 ipu_pm_state.attached[proc_id] = TRUE;
2277 }
2279 return retval;
2280 }
2282 int ipu_pm_detach(int proc_id)
2283 {
2284 int retval = EOK;
2286 if (proc_id > MultiProc_MAXPROCESSORS) {
2287 return -EINVAL;
2288 }
2290 ipu_pm_state.attached[proc_id] = FALSE;
2292 #ifdef BENELLI_SELF_HIBERNATION
2293 if (core0Idle != NULL) {
2294 munmap_device_io(ROUND_DOWN((uint32_t)core0Idle, 0x1000),
2295 0x1000);
2296 core0Idle = NULL;
2297 core1Idle = NULL;
2298 }
2299 #endif
2301 if (proc_id == MultiProc_getId(CORE0)) {
2302 #ifdef BENELLI_WATCHDOG_TIMER
2303 OsalIsr_uninstall(ipu_pm_state.gpt9IsrObject);
2304 OsalIsr_delete(&ipu_pm_state.gpt9IsrObject);
2305 ipu_pm_state.gpt9IsrObject = NULL;
2306 ipu_pm_gpt_stop(GPTIMER_9);
2307 ipu_pm_gpt_disable(GPTIMER_9);
2308 #endif
2309 ipu_pm_state.loaded_procs &= ~CORE0_LOADED;
2310 #ifndef SYSLINK_SYSBIOS_SMP
2311 }
2312 else if (proc_id == MultiProc_getId("CORE1")) {
2313 #endif
2314 #ifdef BENELLI_WATCHDOG_TIMER
2315 OsalIsr_uninstall(ipu_pm_state.gpt11IsrObject);
2316 OsalIsr_delete(&ipu_pm_state.gpt11IsrObject);
2317 ipu_pm_state.gpt11IsrObject = NULL;
2318 ipu_pm_gpt_stop(GPTIMER_11);
2319 ipu_pm_gpt_disable(GPTIMER_11);
2320 #endif
2321 ipu_pm_state.loaded_procs &= ~CORE1_LOADED;
2322 }
2323 else if (proc_id == MultiProc_getId("DSP")) {
2324 #ifdef BENELLI_WATCHDOG_TIMER
2325 OsalIsr_uninstall(ipu_pm_state.gpt6IsrObject);
2326 OsalIsr_delete(&ipu_pm_state.gpt6IsrObject);
2327 ipu_pm_state.gpt6IsrObject = NULL;
2328 ipu_pm_gpt_stop(GPTIMER_6);
2329 ipu_pm_gpt_disable(GPTIMER_6);
2330 #endif
2331 ipu_pm_state.loaded_procs &= ~DSP_LOADED;
2332 }
2334 if (ipu_pm_state.proc_handles[proc_id]) {
2335 ProcMgr_close(&ipu_pm_state.proc_handles[proc_id]);
2336 ipu_pm_state.proc_handles[proc_id] = NULL;
2337 }
2339 return retval;
2340 }
2342 int ipu_pm_setup(ipu_pm_config *cfg)
2343 {
2344 int retval = EOK;
2345 int i = 0;
2346 #ifdef BENELLI_SELF_HIBERNATION
2347 struct sigevent signal_event;
2348 #endif
2350 if (ipu_pm_state.is_setup == false) {
2351 pthread_mutex_init(&ipu_pm_state.mtx, NULL);
2353 if (cfg == NULL) {
2354 retval = -EINVAL;
2355 goto exit;
2356 }
2357 if (cfg->num_procs > MultiProc_MAXPROCESSORS) {
2358 retval = -EINVAL;
2359 goto exit;
2360 }
2362 memcpy(&ipu_pm_state.cfg, cfg, sizeof(ipu_pm_config));
2364 #ifdef BENELLI_SELF_HIBERNATION
2365 /* MBOX flag to check if there are pending messages */
2366 a9_m3_mbox = (void *)mmap_device_io(0x1000, A9_M3_MBOX);
2367 if ((uintptr_t)a9_m3_mbox == MAP_DEVICE_FAILED) {
2368 a9_m3_mbox = NULL;
2369 retval = -ENOMEM;
2370 goto exit;
2371 }
2373 if (syslink_hib_enable) {
2374 SIGEV_THREAD_INIT (&signal_event, ipu_pm_timer_interrupt, NULL,
2375 NULL);
2376 retval = timer_create(CLOCK_REALTIME, &signal_event,
2377 &ipu_pm_state.hibernation_timer);
2378 if (retval < 0) {
2379 retval = -errno;
2380 goto exit;
2381 }
2382 }
2383 #endif
2385 cm2_base_va = (void *)mmap_device_io(CM2_SIZE, CM2_BASE);
2386 if ((uintptr_t)cm2_base_va == MAP_DEVICE_FAILED) {
2387 cm2_base_va = NULL;
2388 retval = -errno;
2389 goto exit;
2390 }
2391 #ifdef BENELLI_SELF_HIBERNATION
2392 m3_clkstctrl = cm2_base_va + CM_MPU_M3_CLKCTRL_OFFSET;
2393 #endif
2395 prm_base_va = (void *)mmap_device_io(PRM_SIZE, PRM_BASE);
2396 if ((uintptr_t)prm_base_va == MAP_DEVICE_FAILED) {
2397 prm_base_va = NULL;
2398 retval = -errno;
2399 goto exit;
2400 }
2402 cm_core_aon_base_va = (void*)mmap_device_io(CM_CORE_AON_SIZE, CM_CORE_AON_BASE);
2403 if((uintptr_t)cm_core_aon_base_va == MAP_DEVICE_FAILED) {
2404 cm_core_aon_base_va = NULL;
2405 retval = -errno;
2406 goto exit;
2407 }
2409 map_gpt_regs();
2410 #ifdef QNX_PM_ENABLE
2411 ipu_pm_powman_init();
2412 ipu_pm_power_init();
2413 #endif
2414 for (i = 0; i < NUM_CAM_MODES; i++)
2415 last_camera_req[i] = 0;
2416 last_led_req = 0;
2418 ipu_pm_state.is_setup = true;
2419 }
2421 exit:
2422 if (retval != EOK) {
2423 unmap_gpt_regs();
2424 if (prm_base_va) {
2425 munmap(prm_base_va, PRM_SIZE);
2426 prm_base_va = NULL;
2427 }
2428 if (cm2_base_va) {
2429 munmap(cm2_base_va, CM2_SIZE);
2430 cm2_base_va = NULL;
2431 }
2432 #ifdef BENELLI_SELF_HIBERNATION
2433 m3_clkstctrl = NULL;
2435 if (a9_m3_mbox) {
2436 munmap(a9_m3_mbox, 0x1000);
2437 a9_m3_mbox = NULL;
2438 }
2439 #endif
2440 ipu_pm_state.loaded_procs = 0;
2441 pthread_mutex_destroy(&ipu_pm_state.mtx);
2442 }
2443 return retval;
2444 }
2446 int ipu_pm_destroy()
2447 {
2448 int i = 0;
2450 if (ipu_pm_state.is_setup) {
2451 for (i = 0; i < NUM_CAM_MODES; i++) {
2452 if (last_camera_req[i])
2453 ipu_pm_camera_enable(i, 0);
2454 }
2455 if (last_led_req)
2456 ipu_pm_led_enable(0, 0);
2458 #ifdef QNX_PM_ENABLE
2459 ipu_pm_power_deinit();
2460 ipu_pm_powman_deinit();
2461 #endif
2463 unmap_gpt_regs();
2464 #ifdef BENELLI_SELF_HIBERNATION
2465 if (syslink_hib_enable) {
2466 /*Stop the Timer*/
2467 configure_timer(0, 0);
2468 /* Delete the timer */
2469 timer_delete(ipu_pm_state.hibernation_timer);
2470 }
2471 if (a9_m3_mbox) {
2472 munmap(a9_m3_mbox, 0x1000);
2473 a9_m3_mbox = NULL;
2474 }
2475 m3_clkstctrl = NULL;
2476 #endif
2477 if (cm2_base_va) {
2478 munmap(cm2_base_va, CM2_SIZE);
2479 cm2_base_va = NULL;
2480 }
2481 if (prm_base_va) {
2482 munmap(prm_base_va, PRM_SIZE);
2483 prm_base_va = NULL;
2484 }
2485 pthread_mutex_destroy(&ipu_pm_state.mtx);
2486 ipu_pm_state.proc_state = 0;
2487 ipu_pm_state.loaded_procs = 0;
2488 ipu_pm_state.ivahd_use_cnt = 0;
2489 ipu_pm_state.is_setup = false;
2490 }
2491 return EOK;
2492 }