author    M V Pratap Reddy <x0257344@ti.com>
          Fri, 4 Dec 2020 12:52:36 +0000 (18:22 +0530)
committer Sivaraj R <sivaraj@ti.com>
          Mon, 7 Dec 2020 07:56:38 +0000 (01:56 -0600)
- MMR config fails during the pinmux configuration in release mode. Under
compiler optimization the writes to kick0 and kick1 are executed in reverse
order, which causes the unlock/lock sequence to fail.
Added the volatile qualifier to the kick address variables to prevent this
reordering.
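The fix hinges on the pointer qualification: without volatile, the optimizer is free to reorder or merge the accesses through kick0 and kick1, so the two kick writes can reach the MMR in the wrong order and the partition never unlocks. The sketch below illustrates the intended sequence with volatile pointers; it is a minimal illustration only, and the unlock values (0x68EF3490 and 0xD172BC5A) are assumed from common TI K3 control-MMR usage rather than taken from this patch.

    #include <stdint.h>

    /* Assumed kick unlock values (common TI K3 convention); the values
     * actually used by board_mmr.c are not visible in this diff. */
    #define KICK0_UNLOCK_VAL  (0x68EF3490u)
    #define KICK1_UNLOCK_VAL  (0xD172BC5Au)

    /* Unlock one MMR lock partition. The volatile qualifier keeps the
     * compiler from reordering or eliminating the two kick writes, so
     * kick0 is always written before kick1. */
    static uint32_t mmr_partition_unlock(volatile uint32_t *kick0)
    {
        volatile uint32_t *kick1 = kick0 + 1;

        *kick0 = KICK0_UNLOCK_VAL;          /* first kick write  */
        *kick1 = KICK1_UNLOCK_VAL;          /* second kick write */

        /* Bit 0 of kick0 reads back 1 once the partition is unlocked. */
        return (*kick0 & 0x1u);
    }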
packages/ti/board/src/am64x_evm/board_mmr.c
diff --git a/packages/ti/board/src/am64x_evm/board_mmr.c b/packages/ti/board/src/am64x_evm/board_mmr.c
index 7e623dafc0aa4a4797c98f59020b88d679a4e1cc..26639a041fc0c096b5077dff4e90735ce9972f27 100644 (file)
#define MCU_PADCONFIG_MMR_BASE_ADDRESS CSL_MCU_PADCFG_CTRL0_CFG0_BASE\r
#endif\r
\r
-uint32_t MMR_change_lock(mmr_lock_actions_t target_state, uint32_t * kick0);\r
+uint32_t MMR_change_lock(mmr_lock_actions_t target_state, volatile uint32_t * kick0);\r
uint32_t generic_mmr_change_all_locks(mmr_lock_actions_t target_state, uint32_t base_addr, const uint32_t * offset_array, uint32_t array_size);\r
\r
uint32_t MAIN_PADCONFIG_MMR_unlock_all();\r
uint32_t MCU_PLL_MMR_change_all_locks(mmr_lock_actions_t target_state);\r
\r
\r
- uint32_t MMR_change_lock(mmr_lock_actions_t target_state, uint32_t * kick0) {\r
- uint32_t * kick1 = kick0 + 1;\r
+ uint32_t MMR_change_lock(mmr_lock_actions_t target_state, volatile uint32_t * kick0) {\r
+ volatile uint32_t * kick1 = kick0 + 1;\r
uint32_t lock_state = (*kick0 & 0x1); //status is 1 if unlocked, 0 if locked\r
\r
//If lock state is not what we want, change it\r
uint32_t generic_mmr_change_all_locks(mmr_lock_actions_t target_state, uint32_t base_addr, const uint32_t * offset_array, uint32_t array_size) {\r
uint32_t errors=0;\r
uint32_t i=0;\r
- uint32_t * kick0_ptr;\r
+ volatile uint32_t * kick0_ptr;\r
for(i=0;i<array_size;i++) {\r
- kick0_ptr = (uint32_t *) (base_addr + offset_array[i]);\r
+ kick0_ptr = (volatile uint32_t *) (base_addr + offset_array[i]);\r
if(MMR_change_lock(target_state, kick0_ptr) == AVV_FAIL){\r
errors++;\r
}\r
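For completeness, a hedged usage sketch of the generic helper changed above: the offsets below are placeholder values, and MMR_UNLOCK stands in for whichever mmr_lock_actions_t value requests an unlock; neither is confirmed by this patch.

    /* Hypothetical example: unlock the padconfig lock partitions reached
     * from MCU_PADCONFIG_MMR_BASE_ADDRESS. Offsets are placeholders. */
    static const uint32_t padcfg_kick0_offsets[] = { 0x1008u, 0x5008u };

    uint32_t errors = generic_mmr_change_all_locks(
            MMR_UNLOCK,                          /* assumed enum value  */
            MCU_PADCONFIG_MMR_BASE_ADDRESS,      /* macro defined above */
            padcfg_kick0_offsets,
            sizeof(padcfg_kick0_offsets) / sizeof(padcfg_kick0_offsets[0]));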