1 /*
2 * OMAP MPUSS low power code
3 *
4 * Copyright (C) 2011 Texas Instruments, Inc.
5 * Santosh Shilimkar <santosh.shilimkar@ti.com>
6 *
7 * OMAP4430 MPUSS mainly consists of dual Cortex-A9 with per-CPU
8 * Local timer and Watchdog, GIC, SCU, PL310 L2 cache controller,
9 * CPU0 and CPU1 LPRM modules.
10 * CPU0, CPU1 and MPUSS each have their own power domain and
11 * hence multiple low power combinations of MPUSS are possible.
12 *
13 * The CPU0 and CPU1 can't support Closed switch Retention (CSWR)
14 * because the mode is not supported by hw constraints of dormant
15 * mode. While waking up from the dormant mode, a reset signal
16 * to the Cortex-A9 processor must be asserted by the external
17 * power controller.
18 *
19 * With architectural inputs and hardware recommendations, only
20 * below modes are supported from power gain vs latency point of view.
21 *
22 * CPU0 CPU1 MPUSS
23 * ----------------------------------------------
24 * ON ON ON
25 * ON(Inactive) OFF ON(Inactive)
26 * OFF OFF CSWR
27 * OFF OFF OSWR
28 * OFF OFF OFF(Device OFF *TBD)
29 * ----------------------------------------------
30 *
31 * Note: CPU0 is the master core and it is the last CPU to go down
32 * and first to wake-up when MPUSS low power states are exercised
33 *
34 *
35 * This program is free software; you can redistribute it and/or modify
36 * it under the terms of the GNU General Public License version 2 as
37 * published by the Free Software Foundation.
38 */
40 #include <linux/kernel.h>
41 #include <linux/io.h>
42 #include <linux/errno.h>
43 #include <linux/linkage.h>
44 #include <linux/smp.h>
46 #include <asm/cacheflush.h>
47 #include <asm/tlbflush.h>
48 #include <asm/smp_scu.h>
49 #include <asm/pgalloc.h>
50 #include <asm/suspend.h>
51 #include <asm/hardware/cache-l2x0.h>
53 #include "soc.h"
54 #include "common.h"
55 #include "omap44xx.h"
56 #include "omap4-sar-layout.h"
57 #include "pm.h"
58 #include "prcm_mpu44xx.h"
59 #include "prcm_mpu54xx.h"
60 #include "prminst44xx.h"
61 #include "prcm44xx.h"
62 #include "prm44xx.h"
63 #include "prm-regbits-44xx.h"
65 #ifdef CONFIG_SMP
/*
 * Per-CPU PM bookkeeping: the CPU's power domain handle and the SAR RAM
 * slots the low power entry/exit code reads on wakeup.
 */
struct omap4_cpu_pm_info {
	struct powerdomain *pwrdm;	/* this CPU's power domain */
	void __iomem *scu_sar_addr;	/* SAR slot holding the SCU power status */
	void __iomem *wkup_sar_addr;	/* SAR slot holding the CPU wakeup address */
	void __iomem *l2x0_sar_addr;	/* SAR slot holding the L2X0 save state */
	void (*secondary_startup)(void);	/* NOTE(review): not referenced in this file — confirm use elsewhere */
};
/*
 * SoC-specific low power hooks; populated for OMAP4/OMAP5/DRA7 in
 * omap4_mpuss_init(), with safe defaults otherwise.
 */
struct cpu_pm_ops {
	int (*finish_suspend)(unsigned long cpu_state);	/* low-level suspend entry */
	void (*resume)(void);				/* wakeup entry after OFF/DORMANT */
	void (*scu_prepare)(unsigned int cpu_id, u8 cpu_state);	/* stash SCU power status */
	void (*hotplug_restart)(void);			/* wakeup entry for CPU hotplug */
};
/* Low-level suspend/resume entry points — presumably in SoC sleep code; defined elsewhere */
extern int omap4_finish_suspend(unsigned long cpu_state);
extern void omap4_cpu_resume(void);
extern int omap5_finish_suspend(unsigned long cpu_state);
extern void omap5_cpu_resume(void);

static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
static struct powerdomain *mpuss_pd;	/* MPUSS power domain handle */
static void __iomem *sar_base;		/* SAR RAM base, NULL if unavailable */
/* SoC-dependent CPUx context register offset and CPU CSWR capability flag */
static u32 cpu_context_offset, cpu_cswr_supported;
/*
 * Fallback ->finish_suspend hook: just execute WFI when no SoC-specific
 * low power entry code has been installed.
 */
static int default_finish_suspend(unsigned long cpu_state)
{
	omap_do_wfi();
	return 0;
}
/* Fallback ->resume/->hotplug_restart hook: nothing to do */
static void dummy_cpu_resume(void)
{}
/* Fallback ->scu_prepare hook for SoCs without an SCU SAR slot */
static void dummy_scu_prepare(unsigned int cpu_id, u8 cpu_state)
{}
/* Default PM ops; overridden with SoC-specific hooks in omap4_mpuss_init() */
static struct cpu_pm_ops omap_pm_ops = {
	.finish_suspend = default_finish_suspend,
	.resume = dummy_cpu_resume,
	.scu_prepare = dummy_scu_prepare,
	.hotplug_restart = dummy_cpu_resume,
};
111 /*
112 * Program the wakeup routine address for the CPU0 and CPU1
113 * used for OFF or DORMANT wakeup.
114 */
115 static inline void set_cpu_wakeup_addr(unsigned int cpu_id, u32 addr)
116 {
117 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
119 /*
120 * XXX should not be writing directly into another IP block's
121 * address space!
122 */
123 if (pm_info->wkup_sar_addr)
124 __raw_writel(addr, pm_info->wkup_sar_addr);
125 }
127 /*
128 * Store the SCU power status value to scratchpad memory
129 */
130 static void scu_pwrst_prepare(unsigned int cpu_id, u8 fpwrst)
131 {
132 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
133 u32 scu_pwr_st;
135 if (!pm_info->scu_sar_addr)
136 return;
138 switch (fpwrst) {
139 case PWRDM_FUNC_PWRST_CSWR:
140 case PWRDM_FUNC_PWRST_OSWR: /* XXX is this accurate? */
141 scu_pwr_st = SCU_PM_DORMANT;
142 break;
143 case PWRDM_FUNC_PWRST_OFF:
144 scu_pwr_st = SCU_PM_POWEROFF;
145 break;
146 case PWRDM_FUNC_PWRST_ON:
147 case PWRDM_FUNC_PWRST_INACTIVE:
148 default:
149 scu_pwr_st = SCU_PM_NORMAL;
150 break;
151 }
153 /*
154 * XXX should not be writing directly into another IP block's
155 * address space!
156 */
157 __raw_writel(scu_pwr_st, pm_info->scu_sar_addr);
158 }
160 /* Helper functions for MPUSS OSWR */
161 static inline void mpuss_clear_prev_logic_pwrst(void)
162 {
163 u32 reg;
165 reg = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
166 OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET);
167 omap4_prminst_write_inst_reg(reg, OMAP4430_PRM_PARTITION,
168 OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET);
169 }
171 static inline void cpu_clear_prev_logic_pwrst(unsigned int cpu_id)
172 {
173 u32 reg;
175 if (cpu_id) {
176 reg = omap4_prcm_mpu_read_inst_reg(OMAP4430_PRCM_MPU_CPU1_INST,
177 cpu_context_offset);
178 omap4_prcm_mpu_write_inst_reg(reg, OMAP4430_PRCM_MPU_CPU1_INST,
179 cpu_context_offset);
180 } else {
181 reg = omap4_prcm_mpu_read_inst_reg(OMAP4430_PRCM_MPU_CPU0_INST,
182 cpu_context_offset);
183 omap4_prcm_mpu_write_inst_reg(reg, OMAP4430_PRCM_MPU_CPU0_INST,
184 cpu_context_offset);
185 }
186 }
188 /*
189 * Store the CPU cluster state for L2X0 low power operations.
190 */
191 static void l2x0_pwrst_prepare(unsigned int cpu_id, unsigned int save_state)
192 {
193 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
195 /*
196 * XXX should not be writing directly into another IP block's
197 * address space!
198 */
199 if (pm_info->l2x0_sar_addr)
200 __raw_writel(save_state, pm_info->l2x0_sar_addr);
201 }
203 /*
204 * Save the L2X0 AUXCTRL and POR value to SAR memory. Its used to
205 * in every restore MPUSS OFF path.
206 */
#ifdef CONFIG_CACHE_L2X0
static void save_l2x0_context(void)
{
	void __iomem *l2x0_base = omap4_get_l2cache_base();

	if (!l2x0_base)
		return;

	/* Mirror AUXCTRL and prefetch control into SAR RAM for restore */
	__raw_writel(__raw_readl(l2x0_base + L2X0_AUX_CTRL),
		     sar_base + L2X0_AUXCTRL_OFFSET);
	__raw_writel(__raw_readl(l2x0_base + L2X0_PREFETCH_CTRL),
		     sar_base + L2X0_PREFETCH_CTRL_OFFSET);
}
#else
static void save_l2x0_context(void)
{}
#endif
224 /**
225 * omap4_mpuss_enter_lowpower: OMAP4 MPUSS Low Power Entry Function
226 * The purpose of this function is to manage low power programming
227 * of OMAP4 MPUSS subsystem
228 * @cpu : CPU ID
229 * @fpwrst: functional powerstate for the MPUSS to enter
230 *
231 * MPUSS states for the context save:
232 * save_state =
233 * 0 - Nothing lost and no need to save: MPUSS INACTIVE
234 * 1 - CPUx L1 and logic lost: MPUSS CSWR
235 * 2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
236 * 3 - CPUx L1 and logic lost + GIC + L2 lost: DEVICE OFF
237 */
238 int omap4_mpuss_enter_lowpower(unsigned int cpu, u8 fpwrst)
239 {
240 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu);
241 unsigned int save_state = 0;
242 unsigned int wakeup_cpu;
244 if (omap_rev() == OMAP4430_REV_ES1_0)
245 return -ENXIO;
247 switch (fpwrst) {
248 case PWRDM_FUNC_PWRST_ON:
249 case PWRDM_FUNC_PWRST_INACTIVE:
250 save_state = 0;
251 break;
252 case PWRDM_FUNC_PWRST_OFF:
253 save_state = 1;
254 break;
255 case PWRDM_FUNC_PWRST_CSWR:
256 case PWRDM_FUNC_PWRST_OSWR:
257 if (cpu_cswr_supported) {
258 save_state = 0;
259 break;
260 }
261 default:
262 /*
263 * CPUx CSWR is invalid hardware state. Also CPUx OSWR
264 * doesn't make much scense, since logic is lost and $L1
265 * needs to be cleaned because of coherency. This makes
266 * CPUx OSWR equivalent to CPUX OFF and hence not supported
267 */
268 WARN_ON(1);
269 return -ENXIO;
270 }
272 pwrdm_pre_transition(NULL);
274 /*
275 * Check MPUSS next state and save interrupt controller if needed.
276 * In MPUSS OSWR or device OFF, interrupt controller context is lost.
277 */
278 mpuss_clear_prev_logic_pwrst();
279 if (pwrdm_read_next_fpwrst(mpuss_pd) == PWRDM_FUNC_PWRST_OSWR)
280 save_state = 2;
282 cpu_clear_prev_logic_pwrst(cpu);
283 WARN_ON(pwrdm_set_next_fpwrst(pm_info->pwrdm, fpwrst));
284 set_cpu_wakeup_addr(cpu, virt_to_phys(omap_pm_ops.resume));
285 omap_pm_ops.scu_prepare(cpu, fpwrst);
286 l2x0_pwrst_prepare(cpu, save_state);
288 /*
289 * Call low level function with targeted low power state.
290 */
291 if (save_state)
292 cpu_suspend(save_state, omap_pm_ops.finish_suspend);
293 else
294 omap_pm_ops.finish_suspend(save_state);
296 /*
297 * Restore the CPUx power state to ON otherwise CPUx
298 * power domain can transitions to programmed low power
299 * state while doing WFI outside the low powe code. On
300 * secure devices, CPUx does WFI which can result in
301 * domain transition
302 */
303 wakeup_cpu = smp_processor_id();
305 pwrdm_post_transition(NULL);
307 WARN_ON(pwrdm_set_next_fpwrst(pm_info->pwrdm, PWRDM_FUNC_PWRST_ON));
309 return 0;
310 }
312 /**
313 * omap4_hotplug_cpu: OMAP4 CPU hotplug entry
314 * @cpu : CPU ID
315 * @fpwrst: functional power state to program the CPU powerdomain to enter
316 */
int __cpuinit omap4_mpuss_hotplug_cpu(unsigned int cpu, u8 fpwrst)
{
	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu);
	unsigned int cpu_state = 0;

	/* OMAP4430 ES1.0 cannot support CPU low power states */
	if (omap_rev() == OMAP4430_REV_ES1_0)
		return -ENXIO;

	/* OFF/OSWR lose CPU context; tell the low-level code to handle it */
	if (fpwrst == PWRDM_FUNC_PWRST_OFF || fpwrst == PWRDM_FUNC_PWRST_OSWR)
		cpu_state = 1;

	pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
	WARN_ON(pwrdm_set_next_fpwrst(pm_info->pwrdm, fpwrst));
	set_cpu_wakeup_addr(cpu, virt_to_phys(omap_pm_ops.hotplug_restart));
	omap_pm_ops.scu_prepare(cpu, fpwrst);

	/*
	 * CPU never returns back if targeted power state is OFF mode.
	 * CPU ONLINE follows normal CPU ONLINE path via
	 * omap_secondary_startup().
	 */
	omap_pm_ops.finish_suspend(cpu_state);

	/* Reached only if the CPU did not actually power down */
	WARN_ON(pwrdm_set_next_fpwrst(pm_info->pwrdm, PWRDM_FUNC_PWRST_ON));
	return 0;
}
345 /*
346 * Enable Mercury Fast HG retention mode by default.
347 */
348 static void enable_mercury_retention_mode(void)
349 {
350 u32 reg;
352 reg = omap4_prcm_mpu_read_inst_reg(OMAP54XX_PRCM_MPU_DEVICE_INST,
353 OMAP54XX_PRCM_MPU_PRM_PSCON_COUNT_OFFSET);
354 reg |= BIT(24) | BIT(25);
355 omap4_prcm_mpu_write_inst_reg(reg, OMAP54XX_PRCM_MPU_DEVICE_INST,
356 OMAP54XX_PRCM_MPU_PRM_PSCON_COUNT_OFFSET);
357 }
359 /*
360 * Initialise OMAP4 MPUSS
361 */
362 int __init omap4_mpuss_init(void)
363 {
364 struct omap4_cpu_pm_info *pm_info;
365 u32 cpu_wakeup_addr = 0;
367 if (omap_rev() == OMAP4430_REV_ES1_0) {
368 WARN(1, "Power Management not supported on OMAP4430 ES1.0\n");
369 return -ENODEV;
370 }
372 sar_base = omap4_get_sar_ram_base();
374 /* Initilaise per CPU PM information */
375 if (cpu_is_omap44xx())
376 cpu_wakeup_addr = CPU0_WAKEUP_NS_PA_ADDR_OFFSET;
377 else if (soc_is_omap54xx())
378 cpu_wakeup_addr = OMAP5_CPU0_WAKEUP_NS_PA_ADDR_OFFSET;
379 pm_info = &per_cpu(omap4_pm_info, 0x0);
380 if (sar_base) {
381 pm_info->scu_sar_addr = sar_base + SCU_OFFSET0;
382 pm_info->wkup_sar_addr = sar_base + cpu_wakeup_addr;
383 pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET0;
384 }
385 pm_info->pwrdm = pwrdm_lookup("cpu0_pwrdm");
386 if (!pm_info->pwrdm) {
387 pr_err("Lookup failed for CPU0 pwrdm\n");
388 return -ENODEV;
389 }
391 /* Clear CPU previous power domain state */
392 pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
393 cpu_clear_prev_logic_pwrst(0);
395 /* Initialise CPU0 power domain state to ON */
396 WARN_ON(pwrdm_set_next_fpwrst(pm_info->pwrdm, PWRDM_FUNC_PWRST_ON));
398 if (cpu_is_omap44xx())
399 cpu_wakeup_addr = CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
400 else if (soc_is_omap54xx())
401 cpu_wakeup_addr = OMAP5_CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
402 pm_info = &per_cpu(omap4_pm_info, 0x1);
403 if (sar_base) {
404 pm_info->scu_sar_addr = sar_base + SCU_OFFSET1;
405 pm_info->wkup_sar_addr = sar_base + cpu_wakeup_addr;
406 pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET1;
407 }
409 pm_info->pwrdm = pwrdm_lookup("cpu1_pwrdm");
410 if (!pm_info->pwrdm) {
411 pr_err("Lookup failed for CPU1 pwrdm\n");
412 return -ENODEV;
413 }
415 /* Clear CPU previous power domain state */
416 pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
417 cpu_clear_prev_logic_pwrst(1);
419 /* Initialise CPU1 power domain state to ON */
420 WARN_ON(pwrdm_set_next_fpwrst(pm_info->pwrdm, PWRDM_FUNC_PWRST_ON));
422 mpuss_pd = pwrdm_lookup("mpu_pwrdm");
423 if (!mpuss_pd) {
424 pr_err("Failed to lookup MPUSS power domain\n");
425 return -ENODEV;
426 }
427 pwrdm_clear_all_prev_pwrst(mpuss_pd);
428 mpuss_clear_prev_logic_pwrst();
430 /* Save device type on scratchpad for low level code to use */
431 if (sar_base) {
432 if (omap_type() != OMAP2_DEVICE_TYPE_GP)
433 __raw_writel(1, sar_base + OMAP_TYPE_OFFSET);
434 else
435 __raw_writel(0, sar_base + OMAP_TYPE_OFFSET);
437 save_l2x0_context();
438 }
440 if (cpu_is_omap44xx()) {
441 omap_pm_ops.finish_suspend = omap4_finish_suspend;
442 omap_pm_ops.hotplug_restart = omap_secondary_startup;
443 omap_pm_ops.resume = omap4_cpu_resume;
444 omap_pm_ops.scu_prepare = scu_pwrst_prepare;
445 cpu_context_offset = OMAP4_RM_CPU0_CPU0_CONTEXT_OFFSET;
446 } else if (soc_is_omap54xx() || soc_is_dra7xx()) {
447 omap_pm_ops.finish_suspend = omap5_finish_suspend;
448 omap_pm_ops.hotplug_restart = omap5_secondary_startup;
449 omap_pm_ops.resume = omap5_cpu_resume;
450 cpu_context_offset = OMAP54XX_RM_CPU0_CPU0_CONTEXT_OFFSET;
451 enable_mercury_retention_mode();
452 cpu_cswr_supported = 1;
453 }
455 if (cpu_is_omap446x())
456 omap_pm_ops.hotplug_restart = omap_secondary_startup_4460;
458 return 0;
459 }
461 #endif