1 /*
2 * OMAP3 Power Management Routines
3 *
4 * Copyright (C) 2006-2008 Nokia Corporation
5 * Tony Lindgren <tony@atomide.com>
6 * Jouni Hogander
7 *
8 * Copyright (C) 2007 Texas Instruments, Inc.
9 * Rajendra Nayak <rnayak@ti.com>
10 *
11 * Copyright (C) 2005 Texas Instruments, Inc.
12 * Richard Woodruff <r-woodruff2@ti.com>
13 *
14 * Based on pm.c for omap1
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License version 2 as
18 * published by the Free Software Foundation.
19 */
21 #include <linux/pm.h>
22 #include <linux/suspend.h>
23 #include <linux/interrupt.h>
24 #include <linux/module.h>
25 #include <linux/list.h>
26 #include <linux/err.h>
27 #include <linux/gpio.h>
28 #include <linux/clk.h>
29 #include <linux/delay.h>
30 #include <linux/slab.h>
31 #include <trace/events/power.h>
33 #include <asm/suspend.h>
35 #include <plat/sram.h>
36 #include "clockdomain.h"
37 #include "powerdomain.h"
38 #include <plat/sdrc.h>
39 #include <plat/prcm.h>
40 #include <plat/gpmc.h>
41 #include <plat/dma.h>
43 #include "common.h"
44 #include "cm2xxx_3xxx.h"
45 #include "cm-regbits-34xx.h"
46 #include "prm-regbits-34xx.h"
48 #include "prm2xxx_3xxx.h"
49 #include "pm.h"
50 #include "sdrc.h"
51 #include "control.h"
#ifdef CONFIG_SUSPEND
/* Target system suspend state; set in .begin(), reset in .end() */
static suspend_state_t suspend_state = PM_SUSPEND_ON;
#endif

/* pm34xx errata defined in pm.h */
u16 pm34xx_errata;

/*
 * Per-powerdomain bookkeeping entry: the low-power state to program
 * during suspend, plus a slot to stash the pre-suspend setting so it
 * can be restored afterwards.
 */
struct power_state {
	struct powerdomain *pwrdm;
	u32 next_state;		/* power state to request on suspend */
#ifdef CONFIG_SUSPEND
	u32 saved_state;	/* next-state saved across a suspend cycle */
#endif
	struct list_head node;	/* link in pwrst_list */
};

/* One power_state entry per powerdomain, populated by pwrdms_setup() */
static LIST_HEAD(pwrst_list);

/* SRAM-resident routines, installed by omap_push_sram_idle() */
static int (*_omap_save_secure_sram)(u32 *addr);
void (*omap3_do_wfi_sram)(void);

/* Frequently-used powerdomains, looked up once at init time */
static struct powerdomain *mpu_pwrdm, *neon_pwrdm;
static struct powerdomain *core_pwrdm, *per_pwrdm;
static struct powerdomain *cam_pwrdm;
/* Save PER-domain context (GPIO banks) before PER enters OFF */
static inline void omap3_per_save_context(void)
{
	omap_gpio_save_context();
}
/* Restore PER-domain context (GPIO banks) after PER exits OFF */
static inline void omap3_per_restore_context(void)
{
	omap_gpio_restore_context();
}
88 static void omap3_enable_io_chain(void)
89 {
90 int timeout = 0;
92 omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD,
93 PM_WKEN);
94 /* Do a readback to assure write has been done */
95 omap2_prm_read_mod_reg(WKUP_MOD, PM_WKEN);
97 while (!(omap2_prm_read_mod_reg(WKUP_MOD, PM_WKEN) &
98 OMAP3430_ST_IO_CHAIN_MASK)) {
99 timeout++;
100 if (timeout > 1000) {
101 pr_err("Wake up daisy chain activation failed.\n");
102 return;
103 }
104 omap2_prm_set_mod_reg_bits(OMAP3430_ST_IO_CHAIN_MASK,
105 WKUP_MOD, PM_WKEN);
106 }
107 }
/*
 * Stop the I/O wake-up daisy chain by clearing EN_IO_CHAIN; the pads
 * then stop latching wake-up events.
 */
static void omap3_disable_io_chain(void)
{
	omap2_prm_clear_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD,
				     PM_WKEN);
}
/*
 * Save all CORE-domain context that is lost in OFF mode: padconf,
 * interrupt controller, GPMC, system control module and DMA globals.
 * The order here mirrors omap3_core_restore_context() in reverse.
 */
static void omap3_core_save_context(void)
{
	omap3_ctrl_save_padconf();

	/*
	 * Force write last pad into memory, as this can fail in some
	 * cases according to errata 1.157, 1.185
	 */
	omap_ctrl_writel(omap_ctrl_readl(OMAP343X_PADCONF_ETK_D14),
			 OMAP343X_CONTROL_MEM_WKUP + 0x2a0);

	/* Save the Interrupt controller context */
	omap_intc_save_context();
	/* Save the GPMC context */
	omap3_gpmc_save_context();
	/* Save the system control module context, padconf already saved above */
	omap3_control_save_context();
	/* Save DMA global registers */
	omap_dma_global_context_save();
}
/*
 * Restore the CORE-domain context saved by omap3_core_save_context()
 * after a wake-up from OFF mode. Padconf is restored by hardware from
 * scratchpad, so only the remaining modules are handled here.
 */
static void omap3_core_restore_context(void)
{
	/* Restore the control module context, padconf restored by h/w */
	omap3_control_restore_context();
	/* Restore the GPMC context */
	omap3_gpmc_restore_context();
	/* Restore the interrupt controller context */
	omap_intc_restore_context();
	omap_dma_global_context_restore();
}
/*
 * FIXME: This function should be called before entering off-mode after
 * OMAP3 secure services have been accessed. Currently it is only called
 * once during boot sequence, but this works as we are not using secure
 * services.
 */
static void omap3_save_secure_ram_context(void)
{
	u32 ret;
	int mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);

	/* Only HS/EMU devices have secure RAM to save; GP devices skip */
	if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
		/*
		 * MPU next state must be set to POWER_ON temporarily,
		 * otherwise the WFI executed inside the ROM code
		 * will hang the system.
		 */
		pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
		/* SRAM-resident stub calls the secure ROM service; the
		 * destination buffer is passed as a physical address */
		ret = _omap_save_secure_sram((u32 *)
				__pa(omap3_secure_ram_storage));
		pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state);
		/* Following is for error tracking, it should not happen */
		if (ret) {
			printk(KERN_ERR "save_secure_sram() returns %08x\n",
			       ret);
			/* Deliberate hang: continuing with a corrupt secure
			 * context would be worse than stopping here */
			while (1)
				;
		}
	}
}
/*
 * PRCM Interrupt Handler Helper Function
 *
 * The purpose of this function is to clear any wake-up events latched
 * in the PRCM PM_WKST_x registers. It is possible that a wake-up event
 * may occur whilst attempting to clear a PM_WKST_x register and thus
 * set another bit in this register. A while loop is used to ensure
 * that any peripheral wake-up events occurring while attempting to
 * clear the PM_WKST_x are detected and cleared.
 *
 * @module: PRCM module offset (WKUP_MOD, CORE_MOD, ...)
 * @regs: which register set to operate on: 1 for the first
 *        WKST/FCLKEN/ICLKEN set, 3 for the ES2+ third set
 * @ignore_bits: wake-up status bits to leave latched (not cleared here)
 *
 * Returns the number of clear iterations performed (0 if no relevant
 * wake-up events were pending).
 */
static int prcm_clear_mod_irqs(s16 module, u8 regs, u32 ignore_bits)
{
	u32 wkst, fclk, iclk, clken;
	u16 wkst_off = (regs == 3) ? OMAP3430ES2_PM_WKST3 : PM_WKST1;
	u16 fclk_off = (regs == 3) ? OMAP3430ES2_CM_FCLKEN3 : CM_FCLKEN1;
	u16 iclk_off = (regs == 3) ? CM_ICLKEN3 : CM_ICLKEN1;
	u16 grpsel_off = (regs == 3) ?
		OMAP3430ES2_PM_MPUGRPSEL3 : OMAP3430_PM_MPUGRPSEL;
	int c = 0;

	/* Only events routed to the MPU (per GRPSEL) are our business */
	wkst = omap2_prm_read_mod_reg(module, wkst_off);
	wkst &= omap2_prm_read_mod_reg(module, grpsel_off);
	wkst &= ~ignore_bits;
	if (wkst) {
		/* Snapshot clock-enable state so it can be restored below */
		iclk = omap2_cm_read_mod_reg(module, iclk_off);
		fclk = omap2_cm_read_mod_reg(module, fclk_off);
		while (wkst) {
			/* The module's clocks must be running for the WKST
			 * write-to-clear to take effect */
			clken = wkst;
			omap2_cm_set_mod_reg_bits(clken, module, iclk_off);
			/*
			 * For USBHOST, we don't know whether HOST1 or
			 * HOST2 woke us up, so enable both f-clocks
			 */
			if (module == OMAP3430ES2_USBHOST_MOD)
				clken |= 1 << OMAP3430ES2_EN_USBHOST2_SHIFT;
			omap2_cm_set_mod_reg_bits(clken, module, fclk_off);
			omap2_prm_write_mod_reg(wkst, module, wkst_off);
			/* Re-read: new events may have latched meanwhile */
			wkst = omap2_prm_read_mod_reg(module, wkst_off);
			wkst &= ~ignore_bits;
			c++;
		}
		/* Restore the original clock-enable settings */
		omap2_cm_write_mod_reg(iclk, module, iclk_off);
		omap2_cm_write_mod_reg(fclk, module, fclk_off);
	}

	return c;
}
225 static irqreturn_t _prcm_int_handle_io(int irq, void *unused)
226 {
227 int c;
229 c = prcm_clear_mod_irqs(WKUP_MOD, 1,
230 ~(OMAP3430_ST_IO_MASK | OMAP3430_ST_IO_CHAIN_MASK));
232 return c ? IRQ_HANDLED : IRQ_NONE;
233 }
235 static irqreturn_t _prcm_int_handle_wakeup(int irq, void *unused)
236 {
237 int c;
239 /*
240 * Clear all except ST_IO and ST_IO_CHAIN for wkup module,
241 * these are handled in a separate handler to avoid acking
242 * IO events before parsing in mux code
243 */
244 c = prcm_clear_mod_irqs(WKUP_MOD, 1,
245 OMAP3430_ST_IO_MASK | OMAP3430_ST_IO_CHAIN_MASK);
246 c += prcm_clear_mod_irqs(CORE_MOD, 1, 0);
247 c += prcm_clear_mod_irqs(OMAP3430_PER_MOD, 1, 0);
248 if (omap_rev() > OMAP3430_REV_ES1_0) {
249 c += prcm_clear_mod_irqs(CORE_MOD, 3, 0);
250 c += prcm_clear_mod_irqs(OMAP3430ES2_USBHOST_MOD, 1, 0);
251 }
253 return c ? IRQ_HANDLED : IRQ_NONE;
254 }
/*
 * Save CP15 auxiliary control registers into the buffer consumed by the
 * resume path. Each entry is written as a (valid-flag, value) pair; the
 * layout must match what the restore code expects — do not reorder.
 */
static void omap34xx_save_context(u32 *save)
{
	u32 val;

	/* Read Auxiliary Control Register */
	asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (val));
	*save++ = 1;
	*save++ = val;

	/* Read L2 AUX ctrl register */
	asm("mrc p15, 1, %0, c9, c0, 2" : "=r" (val));
	*save++ = 1;
	*save++ = val;
}
/*
 * Trampoline matching the cpu_suspend() callback signature; simply
 * enters the SRAM/WFI suspend code with the chosen save_state level.
 * Always returns 0 (the suspend finisher convention for success).
 */
static int omap34xx_do_sram_idle(unsigned long save_state)
{
	omap34xx_cpu_suspend(save_state);
	return 0;
}
/*
 * omap_sram_idle - enter the programmed chip low-power state
 *
 * Decides how much MPU context must be saved from the MPU powerdomain's
 * programmed next-state, prepares the PER and CORE domains (context
 * save, GPIO idle, IO pad/chain wake-ups), executes WFI — via
 * cpu_suspend() when MPU context will be lost — and then unwinds the
 * preparation once the CPU wakes up.
 */
void omap_sram_idle(void)
{
	/* Variable to tell what needs to be saved and restored
	 * in omap_sram_idle*/
	/* save_state = 0 => Nothing to save and restored */
	/* save_state = 1 => Only L1 and logic lost */
	/* save_state = 2 => Only L2 lost */
	/* save_state = 3 => L1, L2 and logic lost */
	int save_state = 0;
	int mpu_next_state = PWRDM_POWER_ON;
	int per_next_state = PWRDM_POWER_ON;
	int core_next_state = PWRDM_POWER_ON;
	int per_going_off;
	int core_prev_state, per_prev_state;
	u32 sdrc_pwr = 0;

	/* Clear stale "previous state" latches so post-idle reads are valid */
	pwrdm_clear_all_prev_pwrst(mpu_pwrdm);
	pwrdm_clear_all_prev_pwrst(neon_pwrdm);
	pwrdm_clear_all_prev_pwrst(core_pwrdm);
	pwrdm_clear_all_prev_pwrst(per_pwrdm);

	mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
	switch (mpu_next_state) {
	case PWRDM_POWER_ON:
	case PWRDM_POWER_RET:
		/* No need to save context */
		save_state = 0;
		break;
	case PWRDM_POWER_OFF:
		save_state = 3;
		break;
	default:
		/* Invalid state */
		printk(KERN_ERR "Invalid mpu state in sram_idle\n");
		return;
	}

	/* NEON control: NEON follows the MPU into low power */
	if (pwrdm_read_pwrst(neon_pwrdm) == PWRDM_POWER_ON)
		pwrdm_set_next_pwrst(neon_pwrdm, mpu_next_state);

	/* Enable IO-PAD and IO-CHAIN wakeups */
	per_next_state = pwrdm_read_next_pwrst(per_pwrdm);
	core_next_state = pwrdm_read_next_pwrst(core_pwrdm);
	if (omap3_has_io_wakeup() &&
	    (per_next_state < PWRDM_POWER_ON ||
	     core_next_state < PWRDM_POWER_ON)) {
		omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, PM_WKEN);
		if (omap3_has_io_chain_ctrl())
			omap3_enable_io_chain();
	}

	pwrdm_pre_transition();

	/* PER: idle the GPIOs; save context only if PER will hit OFF */
	if (per_next_state < PWRDM_POWER_ON) {
		per_going_off = (per_next_state == PWRDM_POWER_OFF) ? 1 : 0;
		omap2_gpio_prepare_for_idle(per_going_off);
		if (per_next_state == PWRDM_POWER_OFF)
			omap3_per_save_context();
	}

	/* CORE: save context only if CORE will hit OFF */
	if (core_next_state < PWRDM_POWER_ON) {
		if (core_next_state == PWRDM_POWER_OFF) {
			omap3_core_save_context();
			omap3_cm_save_context();
		}
	}

	omap3_intc_prepare_idle();

	/*
	 * On EMU/HS devices ROM code restores a SRDC value
	 * from scratchpad which has automatic self refresh on timeout
	 * of AUTO_CNT = 1 enabled. This takes care of erratum ID i443.
	 * Hence store/restore the SDRC_POWER register here.
	 */
	if (cpu_is_omap3430() && omap_rev() >= OMAP3430_REV_ES3_0 &&
	    (omap_type() == OMAP2_DEVICE_TYPE_EMU ||
	     omap_type() == OMAP2_DEVICE_TYPE_SEC) &&
	    core_next_state == PWRDM_POWER_OFF)
		sdrc_pwr = sdrc_read_reg(SDRC_POWER);

	/*
	 * omap3_arm_context is the location where some ARM context
	 * get saved. The rest is placed on the stack, and restored
	 * from there before resuming.
	 */
	if (save_state)
		omap34xx_save_context(omap3_arm_context);
	/* cpu_suspend() is only needed when MPU context will be lost */
	if (save_state == 1 || save_state == 3)
		cpu_suspend(save_state, omap34xx_do_sram_idle);
	else
		omap34xx_do_sram_idle(save_state);

	/* Restore normal SDRC POWER settings (see i443 note above) */
	if (cpu_is_omap3430() && omap_rev() >= OMAP3430_REV_ES3_0 &&
	    (omap_type() == OMAP2_DEVICE_TYPE_EMU ||
	     omap_type() == OMAP2_DEVICE_TYPE_SEC) &&
	    core_next_state == PWRDM_POWER_OFF)
		sdrc_write_reg(sdrc_pwr, SDRC_POWER);

	/* CORE: restore context only if the domain actually reached OFF */
	if (core_next_state < PWRDM_POWER_ON) {
		core_prev_state = pwrdm_read_prev_pwrst(core_pwrdm);
		if (core_prev_state == PWRDM_POWER_OFF) {
			omap3_core_restore_context();
			omap3_cm_restore_context();
			omap3_sram_restore_context();
			omap2_sms_restore_context();
		}
		if (core_next_state == PWRDM_POWER_OFF)
			omap2_prm_clear_mod_reg_bits(OMAP3430_AUTO_OFF_MASK,
					       OMAP3430_GR_MOD,
					       OMAP3_PRM_VOLTCTRL_OFFSET);
	}
	omap3_intc_resume_idle();

	pwrdm_post_transition();

	/* PER: resume GPIOs; restore context only if PER actually hit OFF */
	if (per_next_state < PWRDM_POWER_ON) {
		per_prev_state = pwrdm_read_prev_pwrst(per_pwrdm);
		omap2_gpio_resume_after_idle();
		if (per_prev_state == PWRDM_POWER_OFF)
			omap3_per_restore_context();
	}

	/* Disable IO-PAD and IO-CHAIN wakeup */
	if (omap3_has_io_wakeup() &&
	    (per_next_state < PWRDM_POWER_ON ||
	     core_next_state < PWRDM_POWER_ON)) {
		omap2_prm_clear_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD,
					     PM_WKEN);
		if (omap3_has_io_chain_ctrl())
			omap3_disable_io_chain();
	}

	clkdm_allow_idle(mpu_pwrdm->pwrdm_clkdms[0]);
}
/*
 * CPU idle entry point (installed as pm_idle). Runs omap_sram_idle()
 * with interrupts and FIQs masked, bracketed by the power trace events.
 * Bails out early if an interrupt is already pending or a reschedule
 * is needed, to avoid a pointless low-power entry.
 */
static void omap3_pm_idle(void)
{
	local_irq_disable();
	local_fiq_disable();

	if (omap_irq_pending() || need_resched())
		goto out;

	trace_power_start(POWER_CSTATE, 1, smp_processor_id());
	trace_cpu_idle(1, smp_processor_id());

	omap_sram_idle();

	trace_power_end(smp_processor_id());
	trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());

out:
	local_fiq_enable();
	local_irq_enable();
}
440 #ifdef CONFIG_SUSPEND
441 static int omap3_pm_suspend(void)
442 {
443 struct power_state *pwrst;
444 int state, ret = 0;
446 /* Read current next_pwrsts */
447 list_for_each_entry(pwrst, &pwrst_list, node)
448 pwrst->saved_state = pwrdm_read_next_pwrst(pwrst->pwrdm);
449 /* Set ones wanted by suspend */
450 list_for_each_entry(pwrst, &pwrst_list, node) {
451 if (omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state))
452 goto restore;
453 if (pwrdm_clear_all_prev_pwrst(pwrst->pwrdm))
454 goto restore;
455 }
457 omap3_intc_suspend();
459 omap_sram_idle();
461 restore:
462 /* Restore next_pwrsts */
463 list_for_each_entry(pwrst, &pwrst_list, node) {
464 state = pwrdm_read_prev_pwrst(pwrst->pwrdm);
465 if (state > pwrst->next_state) {
466 printk(KERN_INFO "Powerdomain (%s) didn't enter "
467 "target state %d\n",
468 pwrst->pwrdm->name, pwrst->next_state);
469 ret = -1;
470 }
471 omap_set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state);
472 }
473 if (ret)
474 printk(KERN_ERR "Could not enter target state in pm_suspend\n");
475 else
476 printk(KERN_INFO "Successfully put all powerdomains "
477 "to target state\n");
479 return ret;
480 }
482 static int omap3_pm_enter(suspend_state_t unused)
483 {
484 int ret = 0;
486 switch (suspend_state) {
487 case PM_SUSPEND_STANDBY:
488 case PM_SUSPEND_MEM:
489 ret = omap3_pm_suspend();
490 break;
491 default:
492 ret = -EINVAL;
493 }
495 return ret;
496 }
/*
 * platform_suspend_ops .begin hook: record the target state, keep the
 * CPU out of low-power idle for the duration, and let the PRCM irq
 * code prepare for suspend.
 * (NOTE(review): the old comment here mentioned UART interrupts, but
 * nothing in these hooks touches the UARTs — looks stale.)
 */
static int omap3_pm_begin(suspend_state_t state)
{
	disable_hlt();
	suspend_state = state;
	omap_prcm_irq_prepare();
	return 0;
}
507 static void omap3_pm_end(void)
508 {
509 suspend_state = PM_SUSPEND_ON;
510 enable_hlt();
511 return;
512 }
/* platform_suspend_ops .finish hook: balance omap_prcm_irq_prepare() */
static void omap3_pm_finish(void)
{
	omap_prcm_irq_complete();
}
/* Suspend callbacks registered with the PM core via suspend_set_ops() */
static const struct platform_suspend_ops omap_pm_ops = {
	.begin		= omap3_pm_begin,
	.end		= omap3_pm_end,
	.enter		= omap3_pm_enter,
	.finish		= omap3_pm_finish,
	.valid		= suspend_valid_only_mem,
};
526 #endif /* CONFIG_SUSPEND */
/**
 * omap3_iva_idle(): ensure IVA is in idle so it can be put into
 * retention
 *
 * In cases where IVA2 is activated by bootcode, it may prevent
 * full-chip retention or off-mode because it is not idle. This
 * function forces the IVA2 into idle state so it can go
 * into retention/off and thus allow full-chip retention/off.
 *
 **/
static void __init omap3_iva_idle(void)
{
	/* ensure IVA2 clock is disabled */
	omap2_cm_write_mod_reg(0, OMAP3430_IVA2_MOD, CM_FCLKEN);

	/* if no clock activity, nothing else to do */
	if (!(omap2_cm_read_mod_reg(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSTST) &
	      OMAP3430_CLKACTIVITY_IVA2_MASK))
		return;

	/* Reset IVA2 (all three reset lines asserted) */
	omap2_prm_write_mod_reg(OMAP3430_RST1_IVA2_MASK |
				OMAP3430_RST2_IVA2_MASK |
				OMAP3430_RST3_IVA2_MASK,
				OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);

	/* Enable IVA2 clock */
	omap2_cm_write_mod_reg(OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_MASK,
			       OMAP3430_IVA2_MOD, CM_FCLKEN);

	/* Set IVA2 boot mode to 'idle' so it parks itself after reset */
	omap_ctrl_writel(OMAP3_IVA2_BOOTMOD_IDLE,
			 OMAP343X_CONTROL_IVA2_BOOTMOD);

	/* Un-reset IVA2 */
	omap2_prm_write_mod_reg(0, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);

	/* Disable IVA2 clock */
	omap2_cm_write_mod_reg(0, OMAP3430_IVA2_MOD, CM_FCLKEN);

	/* Reset IVA2: leave it held in reset with clocks gated */
	omap2_prm_write_mod_reg(OMAP3430_RST1_IVA2_MASK |
				OMAP3430_RST2_IVA2_MASK |
				OMAP3430_RST3_IVA2_MASK,
				OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
}
576 static void __init omap3_d2d_idle(void)
577 {
578 u16 mask, padconf;
580 /* In a stand alone OMAP3430 where there is not a stacked
581 * modem for the D2D Idle Ack and D2D MStandby must be pulled
582 * high. S CONTROL_PADCONF_SAD2D_IDLEACK and
583 * CONTROL_PADCONF_SAD2D_MSTDBY to have a pull up. */
584 mask = (1 << 4) | (1 << 3); /* pull-up, enabled */
585 padconf = omap_ctrl_readw(OMAP3_PADCONF_SAD2D_MSTANDBY);
586 padconf |= mask;
587 omap_ctrl_writew(padconf, OMAP3_PADCONF_SAD2D_MSTANDBY);
589 padconf = omap_ctrl_readw(OMAP3_PADCONF_SAD2D_IDLEACK);
590 padconf |= mask;
591 omap_ctrl_writew(padconf, OMAP3_PADCONF_SAD2D_IDLEACK);
593 /* reset modem */
594 omap2_prm_write_mod_reg(OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RSTPWRON_MASK |
595 OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RST_MASK,
596 CORE_MOD, OMAP2_RM_RSTCTRL);
597 omap2_prm_write_mod_reg(0, CORE_MOD, OMAP2_RM_RSTCTRL);
598 }
/*
 * One-time PRCM register setup at init: wake-up sources, MPU group
 * selection, stale reset/interrupt flag clearing, and parking of the
 * IVA2 and D2D blocks. Must run before hw-supervised powerdomain mode
 * is enabled (see comment at the call site).
 */
static void __init prcm_setup_regs(void)
{
	/* UART4 exists only on 3630; keep the masks zero elsewhere */
	u32 omap3630_en_uart4_mask = cpu_is_omap3630() ?
					OMAP3630_EN_UART4_MASK : 0;
	u32 omap3630_grpsel_uart4_mask = cpu_is_omap3630() ?
					OMAP3630_GRPSEL_UART4_MASK : 0;

	/* XXX This should be handled by hwmod code or SCM init code */
	/* This causes MUSB failure on AM3517 so disable it. */
	if (!cpu_is_omap3517() && !cpu_is_omap3505())
		omap_ctrl_writel(OMAP3430_AUTOIDLE_MASK, OMAP2_CONTROL_SYSCONFIG);

	/*
	 * Enable control of external oscillator through
	 * sys_clkreq. In the long run clock framework should
	 * take care of this.
	 */
	omap2_prm_rmw_mod_reg_bits(OMAP_AUTOEXTCLKMODE_MASK,
				   1 << OMAP_AUTOEXTCLKMODE_SHIFT,
				   OMAP3430_GR_MOD,
				   OMAP3_PRM_CLKSRC_CTRL_OFFSET);

	/* setup wakeup source */
	omap2_prm_write_mod_reg(OMAP3430_EN_IO_MASK | OMAP3430_EN_GPIO1_MASK |
				OMAP3430_EN_GPT1_MASK | OMAP3430_EN_GPT12_MASK,
				WKUP_MOD, PM_WKEN);
	/* No need to write EN_IO, that is always enabled */
	omap2_prm_write_mod_reg(OMAP3430_GRPSEL_GPIO1_MASK |
				OMAP3430_GRPSEL_GPT1_MASK |
				OMAP3430_GRPSEL_GPT12_MASK,
				WKUP_MOD, OMAP3430_PM_MPUGRPSEL);

	/* Enable PM_WKEN to support DSS LPR */
	omap2_prm_write_mod_reg(OMAP3430_PM_WKEN_DSS_EN_DSS_MASK,
				OMAP3430_DSS_MOD, PM_WKEN);

	/* Enable wakeups in PER */
	omap2_prm_write_mod_reg(omap3630_en_uart4_mask |
				OMAP3430_EN_GPIO2_MASK | OMAP3430_EN_GPIO3_MASK |
				OMAP3430_EN_GPIO4_MASK | OMAP3430_EN_GPIO5_MASK |
				OMAP3430_EN_GPIO6_MASK | OMAP3430_EN_UART3_MASK |
				OMAP3430_EN_MCBSP2_MASK | OMAP3430_EN_MCBSP3_MASK |
				OMAP3430_EN_MCBSP4_MASK,
				OMAP3430_PER_MOD, PM_WKEN);
	/* and allow them to wake up MPU */
	omap2_prm_write_mod_reg(omap3630_grpsel_uart4_mask |
				OMAP3430_GRPSEL_GPIO2_MASK |
				OMAP3430_GRPSEL_GPIO3_MASK |
				OMAP3430_GRPSEL_GPIO4_MASK |
				OMAP3430_GRPSEL_GPIO5_MASK |
				OMAP3430_GRPSEL_GPIO6_MASK |
				OMAP3430_GRPSEL_UART3_MASK |
				OMAP3430_GRPSEL_MCBSP2_MASK |
				OMAP3430_GRPSEL_MCBSP3_MASK |
				OMAP3430_GRPSEL_MCBSP4_MASK,
				OMAP3430_PER_MOD, OMAP3430_PM_MPUGRPSEL);

	/* Don't attach IVA interrupts */
	omap2_prm_write_mod_reg(0, WKUP_MOD, OMAP3430_PM_IVAGRPSEL);
	omap2_prm_write_mod_reg(0, CORE_MOD, OMAP3430_PM_IVAGRPSEL1);
	omap2_prm_write_mod_reg(0, CORE_MOD, OMAP3430ES2_PM_IVAGRPSEL3);
	omap2_prm_write_mod_reg(0, OMAP3430_PER_MOD, OMAP3430_PM_IVAGRPSEL);

	/* Clear any pending 'reset' flags (RSTST bits are write-1-to-clear) */
	omap2_prm_write_mod_reg(0xffffffff, MPU_MOD, OMAP2_RM_RSTST);
	omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP2_RM_RSTST);
	omap2_prm_write_mod_reg(0xffffffff, OMAP3430_PER_MOD, OMAP2_RM_RSTST);
	omap2_prm_write_mod_reg(0xffffffff, OMAP3430_EMU_MOD, OMAP2_RM_RSTST);
	omap2_prm_write_mod_reg(0xffffffff, OMAP3430_NEON_MOD, OMAP2_RM_RSTST);
	omap2_prm_write_mod_reg(0xffffffff, OMAP3430_DSS_MOD, OMAP2_RM_RSTST);
	omap2_prm_write_mod_reg(0xffffffff, OMAP3430ES2_USBHOST_MOD, OMAP2_RM_RSTST);

	/* Clear any pending PRCM interrupts */
	omap2_prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);

	omap3_iva_idle();
	omap3_d2d_idle();
}
679 void omap3_pm_off_mode_enable(int enable)
680 {
681 struct power_state *pwrst;
682 u32 state;
684 if (enable)
685 state = PWRDM_POWER_OFF;
686 else
687 state = PWRDM_POWER_RET;
689 list_for_each_entry(pwrst, &pwrst_list, node) {
690 if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583) &&
691 pwrst->pwrdm == core_pwrdm &&
692 state == PWRDM_POWER_OFF) {
693 pwrst->next_state = PWRDM_POWER_RET;
694 pr_warn("%s: Core OFF disabled due to errata i583\n",
695 __func__);
696 } else {
697 pwrst->next_state = state;
698 }
699 omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
700 }
701 }
703 int omap3_pm_get_suspend_state(struct powerdomain *pwrdm)
704 {
705 struct power_state *pwrst;
707 list_for_each_entry(pwrst, &pwrst_list, node) {
708 if (pwrst->pwrdm == pwrdm)
709 return pwrst->next_state;
710 }
711 return -EINVAL;
712 }
714 int omap3_pm_set_suspend_state(struct powerdomain *pwrdm, int state)
715 {
716 struct power_state *pwrst;
718 list_for_each_entry(pwrst, &pwrst_list, node) {
719 if (pwrst->pwrdm == pwrdm) {
720 pwrst->next_state = state;
721 return 0;
722 }
723 }
724 return -EINVAL;
725 }
727 static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
728 {
729 struct power_state *pwrst;
731 if (!pwrdm->pwrsts)
732 return 0;
734 pwrst = kmalloc(sizeof(struct power_state), GFP_ATOMIC);
735 if (!pwrst)
736 return -ENOMEM;
737 pwrst->pwrdm = pwrdm;
738 pwrst->next_state = PWRDM_POWER_RET;
739 list_add(&pwrst->node, &pwrst_list);
741 if (pwrdm_has_hdwr_sar(pwrdm))
742 pwrdm_enable_hdwr_sar(pwrdm);
744 return omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
745 }
747 /*
748 * Enable hw supervised mode for all clockdomains if it's
749 * supported. Initiate sleep transition for other clockdomains, if
750 * they are not used
751 */
752 static int __init clkdms_setup(struct clockdomain *clkdm, void *unused)
753 {
754 if (clkdm->flags & CLKDM_CAN_ENABLE_AUTO)
755 clkdm_allow_idle(clkdm);
756 else if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP &&
757 atomic_read(&clkdm->usecount) == 0)
758 clkdm_sleep(clkdm);
759 return 0;
760 }
/*
 * Push functions to SRAM
 *
 * The minimum set of functions is pushed to SRAM for execution:
 * - omap3_do_wfi for erratum i581 WA,
 * - save_secure_ram_context for security extensions.
 */
void omap_push_sram_idle(void)
{
	omap3_do_wfi_sram = omap_sram_push(omap3_do_wfi, omap3_do_wfi_sz);

	/* Secure-RAM save stub is only needed on non-GP (HS/EMU) devices */
	if (omap_type() != OMAP2_DEVICE_TYPE_GP)
		_omap_save_secure_sram = omap_sram_push(save_secure_ram_context,
				save_secure_ram_context_sz);
}
778 static void __init pm_errata_configure(void)
779 {
780 if (cpu_is_omap3630()) {
781 pm34xx_errata |= PM_RTA_ERRATUM_i608;
782 /* Enable the l2 cache toggling in sleep logic */
783 enable_omap3630_toggle_l2_on_restore();
784 if (omap_rev() < OMAP3630_REV_ES1_2)
785 pm34xx_errata |= PM_SDRC_WAKEUP_ERRATUM_i583;
786 }
787 }
789 static int __init omap3_pm_init(void)
790 {
791 struct power_state *pwrst, *tmp;
792 struct clockdomain *neon_clkdm, *per_clkdm, *mpu_clkdm, *core_clkdm;
793 int ret;
795 if (!cpu_is_omap34xx() || cpu_is_am33xx())
796 return -ENODEV;
798 if (!omap3_has_io_chain_ctrl())
799 pr_warning("PM: no software I/O chain control; some wakeups may be lost\n");
801 pm_errata_configure();
803 /* XXX prcm_setup_regs needs to be before enabling hw
804 * supervised mode for powerdomains */
805 prcm_setup_regs();
807 ret = request_irq(omap_prcm_event_to_irq("wkup"),
808 _prcm_int_handle_wakeup, IRQF_NO_SUSPEND, "pm_wkup", NULL);
810 if (ret) {
811 pr_err("pm: Failed to request pm_wkup irq\n");
812 goto err1;
813 }
815 /* IO interrupt is shared with mux code */
816 ret = request_irq(omap_prcm_event_to_irq("io"),
817 _prcm_int_handle_io, IRQF_SHARED | IRQF_NO_SUSPEND, "pm_io",
818 omap3_pm_init);
820 if (ret) {
821 pr_err("pm: Failed to request pm_io irq\n");
822 goto err1;
823 }
825 ret = pwrdm_for_each(pwrdms_setup, NULL);
826 if (ret) {
827 printk(KERN_ERR "Failed to setup powerdomains\n");
828 goto err2;
829 }
831 (void) clkdm_for_each(clkdms_setup, NULL);
833 mpu_pwrdm = pwrdm_lookup("mpu_pwrdm");
834 if (mpu_pwrdm == NULL) {
835 printk(KERN_ERR "Failed to get mpu_pwrdm\n");
836 goto err2;
837 }
839 neon_pwrdm = pwrdm_lookup("neon_pwrdm");
840 per_pwrdm = pwrdm_lookup("per_pwrdm");
841 core_pwrdm = pwrdm_lookup("core_pwrdm");
842 cam_pwrdm = pwrdm_lookup("cam_pwrdm");
844 neon_clkdm = clkdm_lookup("neon_clkdm");
845 mpu_clkdm = clkdm_lookup("mpu_clkdm");
846 per_clkdm = clkdm_lookup("per_clkdm");
847 core_clkdm = clkdm_lookup("core_clkdm");
849 #ifdef CONFIG_SUSPEND
850 suspend_set_ops(&omap_pm_ops);
851 #endif /* CONFIG_SUSPEND */
853 pm_idle = omap3_pm_idle;
854 omap3_idle_init();
856 /*
857 * RTA is disabled during initialization as per erratum i608
858 * it is safer to disable RTA by the bootloader, but we would like
859 * to be doubly sure here and prevent any mishaps.
860 */
861 if (IS_PM34XX_ERRATUM(PM_RTA_ERRATUM_i608))
862 omap3630_ctrl_disable_rta();
864 clkdm_add_wkdep(neon_clkdm, mpu_clkdm);
865 if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
866 omap3_secure_ram_storage =
867 kmalloc(0x803F, GFP_KERNEL);
868 if (!omap3_secure_ram_storage)
869 printk(KERN_ERR "Memory allocation failed when"
870 "allocating for secure sram context\n");
872 local_irq_disable();
873 local_fiq_disable();
875 omap_dma_global_context_save();
876 omap3_save_secure_ram_context();
877 omap_dma_global_context_restore();
879 local_irq_enable();
880 local_fiq_enable();
881 }
883 omap3_save_scratchpad_contents();
884 err1:
885 return ret;
886 err2:
887 free_irq(INT_34XX_PRCM_MPU_IRQ, NULL);
888 list_for_each_entry_safe(pwrst, tmp, &pwrst_list, node) {
889 list_del(&pwrst->node);
890 kfree(pwrst);
891 }
892 return ret;
893 }
895 late_initcall(omap3_pm_init);