Diffstat (limited to 'arch/arm/mach-omap2/sleep34xx.S')
-rw-r--r--  arch/arm/mach-omap2/sleep34xx.S  836
1 file changed, 466 insertions(+), 370 deletions(-)
diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S
index 2fb205a7f285..98d8232808b8 100644
--- a/arch/arm/mach-omap2/sleep34xx.S
+++ b/arch/arm/mach-omap2/sleep34xx.S
@@ -1,6 +1,4 @@
/*
- * linux/arch/arm/mach-omap2/sleep.S
- *
* (C) Copyright 2007
* Texas Instruments
* Karthik Dasu <karthik-dp@ti.com>
@@ -26,28 +24,35 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <plat/sram.h>
#include <mach/io.h>
-#include "cm.h"
-#include "prm.h"
+#include "cm2xxx_3xxx.h"
+#include "prm2xxx_3xxx.h"
#include "sdrc.h"
#include "control.h"
-#define SDRC_SCRATCHPAD_SEM_V 0xfa00291c
-
-#define PM_PREPWSTST_CORE_V OMAP34XX_PRM_REGADDR(CORE_MOD, \
- OMAP3430_PM_PREPWSTST)
-#define PM_PREPWSTST_CORE_P 0x48306AE8
-#define PM_PREPWSTST_MPU_V OMAP34XX_PRM_REGADDR(MPU_MOD, \
- OMAP3430_PM_PREPWSTST)
+/*
+ * Register access definitions
+ */
+#define SDRC_SCRATCHPAD_SEM_OFFS 0xc
+#define SDRC_SCRATCHPAD_SEM_V OMAP343X_SCRATCHPAD_REGADDR\
+ (SDRC_SCRATCHPAD_SEM_OFFS)
+#define PM_PREPWSTST_CORE_P OMAP3430_PRM_BASE + CORE_MOD +\
+ OMAP3430_PM_PREPWSTST
#define PM_PWSTCTRL_MPU_P OMAP3430_PRM_BASE + MPU_MOD + OMAP2_PM_PWSTCTRL
#define CM_IDLEST1_CORE_V OMAP34XX_CM_REGADDR(CORE_MOD, CM_IDLEST1)
-#define SRAM_BASE_P 0x40200000
-#define CONTROL_STAT 0x480022F0
-#define SCRATCHPAD_MEM_OFFS 0x310 /* Move this as correct place is
- * available */
-#define SCRATCHPAD_BASE_P (OMAP343X_CTRL_BASE + OMAP343X_CONTROL_MEM_WKUP\
- + SCRATCHPAD_MEM_OFFS)
+#define CM_IDLEST_CKGEN_V OMAP34XX_CM_REGADDR(PLL_MOD, CM_IDLEST)
+#define SRAM_BASE_P OMAP3_SRAM_PA
+#define CONTROL_STAT OMAP343X_CTRL_BASE + OMAP343X_CONTROL_STATUS
+#define CONTROL_MEM_RTA_CTRL (OMAP343X_CTRL_BASE +\
+ OMAP36XX_CONTROL_MEM_RTA_CTRL)
+
+/* Move this once a more appropriate place is available */
+#define SCRATCHPAD_MEM_OFFS 0x310
+#define SCRATCHPAD_BASE_P (OMAP343X_CTRL_BASE +\
+ OMAP343X_CONTROL_MEM_WKUP +\
+ SCRATCHPAD_MEM_OFFS)
#define SDRC_POWER_V OMAP34XX_SDRC_REGADDR(SDRC_POWER)
#define SDRC_SYSCONFIG_P (OMAP343X_SDRC_BASE + SDRC_SYSCONFIG)
#define SDRC_MR_0_P (OMAP343X_SDRC_BASE + SDRC_MR_0)
@@ -59,48 +64,38 @@
#define SDRC_DLLA_STATUS_V OMAP34XX_SDRC_REGADDR(SDRC_DLLA_STATUS)
#define SDRC_DLLA_CTRL_V OMAP34XX_SDRC_REGADDR(SDRC_DLLA_CTRL)
- .text
-/* Function to acquire the semaphore in scratchpad */
-ENTRY(lock_scratchpad_sem)
- stmfd sp!, {lr} @ save registers on stack
-wait_sem:
- mov r0,#1
- ldr r1, sdrc_scratchpad_sem
-wait_loop:
- ldr r2, [r1] @ load the lock value
- cmp r2, r0 @ is the lock free ?
- beq wait_loop @ not free...
- swp r2, r0, [r1] @ semaphore free so lock it and proceed
- cmp r2, r0 @ did we succeed ?
- beq wait_sem @ no - try again
- ldmfd sp!, {pc} @ restore regs and return
-sdrc_scratchpad_sem:
- .word SDRC_SCRATCHPAD_SEM_V
-ENTRY(lock_scratchpad_sem_sz)
- .word . - lock_scratchpad_sem
-
- .text
-/* Function to release the scratchpad semaphore */
-ENTRY(unlock_scratchpad_sem)
- stmfd sp!, {lr} @ save registers on stack
- ldr r3, sdrc_scratchpad_sem
- mov r2,#0
- str r2,[r3]
- ldmfd sp!, {pc} @ restore regs and return
-ENTRY(unlock_scratchpad_sem_sz)
- .word . - unlock_scratchpad_sem
+
+/*
+ * API functions
+ */
+
+/*
+ * The "get_*restore_pointer" functions are used to provide a
+ * physical restore address, to which the ROM code jumps when waking
+ * up from the MPU OFF/OSWR state.
+ * The restore pointer is stored in the scratchpad.
+ */
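+/*
+ * For context, a rough sketch of the expected C-side usage: the virtual
+ * address returned by these functions is converted to a physical address
+ * and written into the scratchpad area that the ROM code parses on
+ * wake-up. The structure and field names below are illustrative
+ * assumptions, not necessarily the exact pm/control code:
+ *
+ *	scratchpad.public_restore_ptr =
+ *			virt_to_phys(get_restore_pointer());
+ */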
.text
/* Function call to get the restore pointer for resume from OFF */
ENTRY(get_restore_pointer)
- stmfd sp!, {lr} @ save registers on stack
+ stmfd sp!, {lr} @ save registers on stack
adr r0, restore
- ldmfd sp!, {pc} @ restore regs and return
+ ldmfd sp!, {pc} @ restore regs and return
ENTRY(get_restore_pointer_sz)
- .word . - get_restore_pointer
+ .word . - get_restore_pointer
.text
-/* Function call to get the restore pointer for for ES3 to resume from OFF */
+/* Function call to get the restore pointer for 3630 resume from OFF */
+ENTRY(get_omap3630_restore_pointer)
+ stmfd sp!, {lr} @ save registers on stack
+ adr r0, restore_3630
+ ldmfd sp!, {pc} @ restore regs and return
+ENTRY(get_omap3630_restore_pointer_sz)
+ .word . - get_omap3630_restore_pointer
+
+ .text
+/* Function call to get the restore pointer for ES3 to resume from OFF */
ENTRY(get_es3_restore_pointer)
stmfd sp!, {lr} @ save registers on stack
adr r0, restore_es3
@@ -108,54 +103,23 @@ ENTRY(get_es3_restore_pointer)
ENTRY(get_es3_restore_pointer_sz)
.word . - get_es3_restore_pointer
-ENTRY(es3_sdrc_fix)
- ldr r4, sdrc_syscfg @ get config addr
- ldr r5, [r4] @ get value
- tst r5, #0x100 @ is part access blocked
- it eq
- biceq r5, r5, #0x100 @ clear bit if set
- str r5, [r4] @ write back change
- ldr r4, sdrc_mr_0 @ get config addr
- ldr r5, [r4] @ get value
- str r5, [r4] @ write back change
- ldr r4, sdrc_emr2_0 @ get config addr
- ldr r5, [r4] @ get value
- str r5, [r4] @ write back change
- ldr r4, sdrc_manual_0 @ get config addr
- mov r5, #0x2 @ autorefresh command
- str r5, [r4] @ kick off refreshes
- ldr r4, sdrc_mr_1 @ get config addr
- ldr r5, [r4] @ get value
- str r5, [r4] @ write back change
- ldr r4, sdrc_emr2_1 @ get config addr
- ldr r5, [r4] @ get value
- str r5, [r4] @ write back change
- ldr r4, sdrc_manual_1 @ get config addr
- mov r5, #0x2 @ autorefresh command
- str r5, [r4] @ kick off refreshes
- bx lr
-sdrc_syscfg:
- .word SDRC_SYSCONFIG_P
-sdrc_mr_0:
- .word SDRC_MR_0_P
-sdrc_emr2_0:
- .word SDRC_EMR2_0_P
-sdrc_manual_0:
- .word SDRC_MANUAL_0_P
-sdrc_mr_1:
- .word SDRC_MR_1_P
-sdrc_emr2_1:
- .word SDRC_EMR2_1_P
-sdrc_manual_1:
- .word SDRC_MANUAL_1_P
-ENTRY(es3_sdrc_fix_sz)
- .word . - es3_sdrc_fix
+ .text
+/*
+ * The L2 cache needs to be toggled for stable OFF mode functionality on 3630.
+ * This function sets up a flag that allows this toggling to take place
+ * on 3630. Hopefully a future revision will not need this.
+ */
+ENTRY(enable_omap3630_toggle_l2_on_restore)
+ stmfd sp!, {lr} @ save registers on stack
+	/* Set up so that we will disable and re-enable L2 */
+ mov r1, #0x1
+ str r1, l2dis_3630
+ ldmfd sp!, {pc} @ restore regs and return
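+/*
+ * As a hedged sketch (the exact guard and call site live in the C PM init
+ * code and are assumptions here), this flag would be set once at init
+ * time on 3630 silicon, e.g.:
+ *
+ *	if (cpu_is_omap3630())
+ *		enable_omap3630_toggle_l2_on_restore();
+ */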
+ .text
/* Function to call rom code to save secure ram context */
ENTRY(save_secure_ram_context)
stmfd sp!, {r1-r12, lr} @ save registers on stack
-save_secure_ram_debug:
- /* b save_secure_ram_debug */ @ enable to debug save code
adr r3, api_params @ r3 points to parameters
str r0, [r3,#0x4] @ r0 has sdram address
ldr r12, high_mask
@@ -185,35 +149,162 @@ ENTRY(save_secure_ram_context_sz)
.word . - save_secure_ram_context
/*
+ * ======================
+ * == Idle entry point ==
+ * ======================
+ */
+
+/*
* Forces OMAP into idle state
*
- * omap34xx_suspend() - This bit of code just executes the WFI
- * for normal idles.
+ * omap34xx_cpu_suspend() - This bit of code saves the CPU context if needed
+ * and executes the WFI instruction. Calling WFI effectively changes the
+ * power domain states to the desired target power states.
+ *
*
- * Note: This code get's copied to internal SRAM at boot. When the OMAP
- * wakes up it continues execution at the point it went to sleep.
+ * Notes:
+ * - this code gets copied to internal SRAM at boot and after wake-up
+ * from OFF mode. The execution pointer in SRAM is _omap_sram_idle.
+ * - when the OMAP wakes up it continues at different execution points
+ * depending on the low power mode (non-OFF vs OFF modes),
+ * cf. 'Resume path for xxx mode' comments.
*/
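+/*
+ * Rough sketch of how this routine reaches SRAM and gets called from the
+ * C power-management code; the variable and argument names below are
+ * illustrative assumptions, not definitions made by this file:
+ *
+ *	_omap_sram_idle = omap_sram_push(omap34xx_cpu_suspend,
+ *					 omap34xx_cpu_suspend_sz);
+ *	...
+ *	_omap_sram_idle(restore_pointer_physical_address, save_state);
+ */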
ENTRY(omap34xx_cpu_suspend)
- stmfd sp!, {r0-r12, lr} @ save registers on stack
-loop:
- /*b loop*/ @Enable to debug by stepping through code
- /* r0 contains restore pointer in sdram */
- /* r1 contains information about saving context */
- ldr r4, sdrc_power @ read the SDRC_POWER register
- ldr r5, [r4] @ read the contents of SDRC_POWER
- orr r5, r5, #0x40 @ enable self refresh on idle req
- str r5, [r4] @ write back to SDRC_POWER register
+ stmfd sp!, {r0-r12, lr} @ save registers on stack
+ /*
+ * r0 contains restore pointer in sdram
+ * r1 contains information about saving context:
+ * 0 - No context lost
+ * 1 - Only L1 and logic lost
+ * 2 - Only L2 lost
+ * 3 - Both L1 and L2 lost
+ */
+
+	/* Jump directly to WFI if the context save is not required */
cmp r1, #0x0
- /* If context save is required, do that and execute wfi */
- bne save_context_wfi
+ beq omap3_do_wfi
+
+ /* Otherwise fall through to the save context code */
+save_context_wfi:
+ mov r8, r0 @ Store SDRAM address in r8
+ mrc p15, 0, r5, c1, c0, 1 @ Read Auxiliary Control Register
+ mov r4, #0x1 @ Number of parameters for restore call
+ stmia r8!, {r4-r5} @ Push parameters for restore call
+ mrc p15, 1, r5, c9, c0, 2 @ Read L2 AUX ctrl register
+ stmia r8!, {r4-r5} @ Push parameters for restore call
+
+	/* Check what the target sleep state is from r1 */
+ cmp r1, #0x2 @ Only L2 lost, no need to save context
+ beq clean_caches
+
+l1_logic_lost:
+ /* Store sp and spsr to SDRAM */
+ mov r4, sp
+ mrs r5, spsr
+ mov r6, lr
+ stmia r8!, {r4-r6}
+ /* Save all ARM registers */
+ /* Coprocessor access control register */
+ mrc p15, 0, r6, c1, c0, 2
+ stmia r8!, {r6}
+ /* TTBR0, TTBR1 and Translation table base control */
+ mrc p15, 0, r4, c2, c0, 0
+ mrc p15, 0, r5, c2, c0, 1
+ mrc p15, 0, r6, c2, c0, 2
+ stmia r8!, {r4-r6}
+ /*
+ * Domain access control register, data fault status register,
+ * and instruction fault status register
+ */
+ mrc p15, 0, r4, c3, c0, 0
+ mrc p15, 0, r5, c5, c0, 0
+ mrc p15, 0, r6, c5, c0, 1
+ stmia r8!, {r4-r6}
+ /*
+ * Data aux fault status register, instruction aux fault status,
+ * data fault address register and instruction fault address register
+ */
+ mrc p15, 0, r4, c5, c1, 0
+ mrc p15, 0, r5, c5, c1, 1
+ mrc p15, 0, r6, c6, c0, 0
+ mrc p15, 0, r7, c6, c0, 2
+ stmia r8!, {r4-r7}
+ /*
+ * user r/w thread and process ID, user r/o thread and process ID,
+ * priv only thread and process ID, cache size selection
+ */
+ mrc p15, 0, r4, c13, c0, 2
+ mrc p15, 0, r5, c13, c0, 3
+ mrc p15, 0, r6, c13, c0, 4
+ mrc p15, 2, r7, c0, c0, 0
+ stmia r8!, {r4-r7}
+ /* Data TLB lockdown, instruction TLB lockdown registers */
+ mrc p15, 0, r5, c10, c0, 0
+ mrc p15, 0, r6, c10, c0, 1
+ stmia r8!, {r5-r6}
+	/* Secure or non-secure vector base address, FCSE PID, Context PID */
+ mrc p15, 0, r4, c12, c0, 0
+ mrc p15, 0, r5, c13, c0, 0
+ mrc p15, 0, r6, c13, c0, 1
+ stmia r8!, {r4-r6}
+ /* Primary remap, normal remap registers */
+ mrc p15, 0, r4, c10, c2, 0
+ mrc p15, 0, r5, c10, c2, 1
+ stmia r8!,{r4-r5}
+
+	/* Store current cpsr */
+ mrs r2, cpsr
+ stmia r8!, {r2}
+
+ mrc p15, 0, r4, c1, c0, 0
+ /* save control register */
+ stmia r8!, {r4}
+
+clean_caches:
+ /*
+ * Clean Data or unified cache to POU
+ * How to invalidate only L1 cache???? - #FIX_ME#
+ * mcr p15, 0, r11, c7, c11, 1
+ */
+ cmp r1, #0x1 @ Check whether L2 inval is required
+ beq omap3_do_wfi
+
+clean_l2:
+	/*
+	 * Jump out to the kernel flush routine
+	 *  - reusing that code is better
+	 *  - it executes in a cached space so is faster than a per-block refetch
+	 *  - it should be faster and will track changes in the kernel
+	 *  - we 'might' have to copy the address, load it and jump to it
+	 */
+ ldr r1, kernel_flush
+ mov lr, pc
+ bx r1
+
+omap3_do_wfi:
+ ldr r4, sdrc_power @ read the SDRC_POWER register
+ ldr r5, [r4] @ read the contents of SDRC_POWER
+ orr r5, r5, #0x40 @ enable self refresh on idle req
+ str r5, [r4] @ write back to SDRC_POWER register
+
/* Data memory barrier and Data sync barrier */
mov r1, #0
mcr p15, 0, r1, c7, c10, 4
mcr p15, 0, r1, c7, c10, 5
+/*
+ * ===================================
+ * == WFI instruction => Enter idle ==
+ * ===================================
+ */
wfi @ wait for interrupt
+/*
+ * ===================================
+ * == Resume path for non-OFF modes ==
+ * ===================================
+ */
nop
nop
nop
@@ -226,9 +317,30 @@ loop:
nop
bl wait_sdrc_ok
- ldmfd sp!, {r0-r12, pc} @ restore regs and return
+/*
+ * ===================================
+ * == Exit point from non-OFF modes ==
+ * ===================================
+ */
+ ldmfd sp!, {r0-r12, pc} @ restore regs and return
+
+
+/*
+ * ==============================
+ * == Resume path for OFF mode ==
+ * ==============================
+ */
+
+/*
+ * The restore_* functions are called by the ROM code
+ * when back from WFI in OFF mode.
+ * Cf. the get_*restore_pointer functions.
+ *
+ * restore_es3: applies to 34xx >= ES3.0
+ * restore_3630: applies to 36xx
+ * restore: common code for 3xxx
+ */
restore_es3:
- /*b restore_es3*/ @ Enable to debug restore code
ldr r5, pm_prepwstst_core_p
ldr r4, [r5]
and r4, r4, #0x3
@@ -245,82 +357,117 @@ copy_to_sram:
bne copy_to_sram
ldr r1, sram_base
blx r1
+ b restore
+
+restore_3630:
+ ldr r1, pm_prepwstst_core_p
+ ldr r2, [r1]
+ and r2, r2, #0x3
+ cmp r2, #0x0 @ Check if previous power state of CORE is OFF
+ bne restore
+ /* Disable RTA before giving control */
+ ldr r1, control_mem_rta
+ mov r2, #OMAP36XX_RTA_DISABLE
+ str r2, [r1]
+
+ /* Fall through to common code for the remaining logic */
+
restore:
- /* b restore*/ @ Enable to debug restore code
- /* Check what was the reason for mpu reset and store the reason in r9*/
- /* 1 - Only L1 and logic lost */
- /* 2 - Only L2 lost - In this case, we wont be here */
- /* 3 - Both L1 and L2 lost */
- ldr r1, pm_pwstctrl_mpu
+	/*
+	 * Check what the reason for the MPU reset was and store it in r9:
+	 * 0 - No context lost
+	 * 1 - Only L1 and logic lost
+	 * 2 - Only L2 lost - In this case, we won't be here
+	 * 3 - Both L1 and L2 lost
+	 */
+ ldr r1, pm_pwstctrl_mpu
ldr r2, [r1]
- and r2, r2, #0x3
- cmp r2, #0x0 @ Check if target power state was OFF or RET
- moveq r9, #0x3 @ MPU OFF => L1 and L2 lost
+ and r2, r2, #0x3
+ cmp r2, #0x0 @ Check if target power state was OFF or RET
+ moveq r9, #0x3 @ MPU OFF => L1 and L2 lost
 	movne	r9, #0x1	@ Only L1 and logic lost => avoid L2 invalidation
bne logic_l1_restore
+
+ ldr r0, l2dis_3630
+ cmp r0, #0x1 @ should we disable L2 on 3630?
+ bne skipl2dis
+ mrc p15, 0, r0, c1, c0, 1
+ bic r0, r0, #2 @ disable L2 cache
+ mcr p15, 0, r0, c1, c0, 1
+skipl2dis:
ldr r0, control_stat
ldr r1, [r0]
and r1, #0x700
cmp r1, #0x300
beq l2_inv_gp
- mov r0, #40 @ set service ID for PPA
- mov r12, r0 @ copy secure Service ID in r12
- mov r1, #0 @ set task id for ROM code in r1
- mov r2, #4 @ set some flags in r2, r6
+ mov r0, #40 @ set service ID for PPA
+ mov r12, r0 @ copy secure Service ID in r12
+ mov r1, #0 @ set task id for ROM code in r1
+ mov r2, #4 @ set some flags in r2, r6
mov r6, #0xff
adr r3, l2_inv_api_params @ r3 points to dummy parameters
mcr p15, 0, r0, c7, c10, 4 @ data write barrier
mcr p15, 0, r0, c7, c10, 5 @ data memory barrier
.word 0xE1600071 @ call SMI monitor (smi #1)
/* Write to Aux control register to set some bits */
- mov r0, #42 @ set service ID for PPA
- mov r12, r0 @ copy secure Service ID in r12
- mov r1, #0 @ set task id for ROM code in r1
- mov r2, #4 @ set some flags in r2, r6
+ mov r0, #42 @ set service ID for PPA
+ mov r12, r0 @ copy secure Service ID in r12
+ mov r1, #0 @ set task id for ROM code in r1
+ mov r2, #4 @ set some flags in r2, r6
mov r6, #0xff
ldr r4, scratchpad_base
- ldr r3, [r4, #0xBC] @ r3 points to parameters
+ ldr r3, [r4, #0xBC] @ r3 points to parameters
mcr p15, 0, r0, c7, c10, 4 @ data write barrier
mcr p15, 0, r0, c7, c10, 5 @ data memory barrier
.word 0xE1600071 @ call SMI monitor (smi #1)
#ifdef CONFIG_OMAP3_L2_AUX_SECURE_SAVE_RESTORE
/* Restore L2 aux control register */
- @ set service ID for PPA
+ @ set service ID for PPA
mov r0, #CONFIG_OMAP3_L2_AUX_SECURE_SERVICE_SET_ID
- mov r12, r0 @ copy service ID in r12
- mov r1, #0 @ set task ID for ROM code in r1
- mov r2, #4 @ set some flags in r2, r6
+ mov r12, r0 @ copy service ID in r12
+ mov r1, #0 @ set task ID for ROM code in r1
+ mov r2, #4 @ set some flags in r2, r6
mov r6, #0xff
ldr r4, scratchpad_base
ldr r3, [r4, #0xBC]
- adds r3, r3, #8 @ r3 points to parameters
+ adds r3, r3, #8 @ r3 points to parameters
mcr p15, 0, r0, c7, c10, 4 @ data write barrier
mcr p15, 0, r0, c7, c10, 5 @ data memory barrier
.word 0xE1600071 @ call SMI monitor (smi #1)
#endif
b logic_l1_restore
+
l2_inv_api_params:
- .word 0x1, 0x00
+ .word 0x1, 0x00
l2_inv_gp:
/* Execute smi to invalidate L2 cache */
- mov r12, #0x1 @ set up to invalide L2
-smi: .word 0xE1600070 @ Call SMI monitor (smieq)
+ mov r12, #0x1 @ set up to invalidate L2
+ .word 0xE1600070 @ Call SMI monitor (smieq)
/* Write to Aux control register to set some bits */
ldr r4, scratchpad_base
ldr r3, [r4,#0xBC]
ldr r0, [r3,#4]
mov r12, #0x3
- .word 0xE1600070 @ Call SMI monitor (smieq)
+ .word 0xE1600070 @ Call SMI monitor (smieq)
ldr r4, scratchpad_base
ldr r3, [r4,#0xBC]
ldr r0, [r3,#12]
mov r12, #0x2
- .word 0xE1600070 @ Call SMI monitor (smieq)
+ .word 0xE1600070 @ Call SMI monitor (smieq)
logic_l1_restore:
+ ldr r1, l2dis_3630
+ cmp r1, #0x1 @ Test if L2 re-enable needed on 3630
+ bne skipl2reen
+ mrc p15, 0, r1, c1, c0, 1
+ orr r1, r1, #2 @ re-enable L2 cache
+ mcr p15, 0, r1, c1, c0, 1
+skipl2reen:
mov r1, #0
- /* Invalidate all instruction caches to PoU
- * and flush branch target cache */
+ /*
+ * Invalidate all instruction caches to PoU
+ * and flush branch target cache
+ */
mcr p15, 0, r1, c7, c5, 0
ldr r4, scratchpad_base
@@ -341,33 +488,33 @@ logic_l1_restore:
MCR p15, 0, r6, c2, c0, 1
/* Translation table base control register */
MCR p15, 0, r7, c2, c0, 2
- /*domain access Control Register */
+ /* Domain access Control Register */
MCR p15, 0, r8, c3, c0, 0
- /* data fault status Register */
+ /* Data fault status Register */
MCR p15, 0, r9, c5, c0, 0
- ldmia r3!,{r4-r8}
- /* instruction fault status Register */
+ ldmia r3!,{r4-r8}
+ /* Instruction fault status Register */
MCR p15, 0, r4, c5, c0, 1
- /*Data Auxiliary Fault Status Register */
+ /* Data Auxiliary Fault Status Register */
MCR p15, 0, r5, c5, c1, 0
- /*Instruction Auxiliary Fault Status Register*/
+	/* Instruction Auxiliary Fault Status Register */
MCR p15, 0, r6, c5, c1, 1
- /*Data Fault Address Register */
+ /* Data Fault Address Register */
MCR p15, 0, r7, c6, c0, 0
- /*Instruction Fault Address Register*/
+	/* Instruction Fault Address Register */
MCR p15, 0, r8, c6, c0, 2
- ldmia r3!,{r4-r7}
+ ldmia r3!,{r4-r7}
- /* user r/w thread and process ID */
+ /* User r/w thread and process ID */
MCR p15, 0, r4, c13, c0, 2
- /* user ro thread and process ID */
+ /* User ro thread and process ID */
MCR p15, 0, r5, c13, c0, 3
- /*Privileged only thread and process ID */
+ /* Privileged only thread and process ID */
MCR p15, 0, r6, c13, c0, 4
- /* cache size selection */
+ /* Cache size selection */
MCR p15, 2, r7, c0, c0, 0
- ldmia r3!,{r4-r8}
+ ldmia r3!,{r4-r8}
/* Data TLB lockdown registers */
MCR p15, 0, r4, c10, c0, 0
/* Instruction TLB lockdown registers */
@@ -379,26 +526,27 @@ logic_l1_restore:
/* Context PID */
MCR p15, 0, r8, c13, c0, 1
- ldmia r3!,{r4-r5}
- /* primary memory remap register */
+ ldmia r3!,{r4-r5}
+ /* Primary memory remap register */
MCR p15, 0, r4, c10, c2, 0
- /*normal memory remap register */
+ /* Normal memory remap register */
MCR p15, 0, r5, c10, c2, 1
/* Restore cpsr */
- ldmia r3!,{r4} /*load CPSR from SDRAM*/
- msr cpsr, r4 /*store cpsr */
+ ldmia r3!,{r4} @ load CPSR from SDRAM
+ msr cpsr, r4 @ store cpsr
/* Enabling MMU here */
- mrc p15, 0, r7, c2, c0, 2 /* Read TTBRControl */
- /* Extract N (0:2) bits and decide whether to use TTBR0 or TTBR1*/
+ mrc p15, 0, r7, c2, c0, 2 @ Read TTBRControl
+ /* Extract N (0:2) bits and decide whether to use TTBR0 or TTBR1 */
and r7, #0x7
cmp r7, #0x0
beq usettbr0
ttbr_error:
- /* More work needs to be done to support N[0:2] value other than 0
- * So looping here so that the error can be detected
- */
+ /*
+	 * More work needs to be done to support N[0:2] values other than 0,
+	 * so loop here so that the error can be detected.
+ */
b ttbr_error
usettbr0:
mrc p15, 0, r2, c2, c0, 0
@@ -406,21 +554,25 @@ usettbr0:
and r2, r5
mov r4, pc
ldr r5, table_index_mask
- and r4, r5 /* r4 = 31 to 20 bits of pc */
+ and r4, r5 @ r4 = 31 to 20 bits of pc
/* Extract the value to be written to table entry */
ldr r1, table_entry
- add r1, r1, r4 /* r1 has value to be written to table entry*/
+	/* r1 has the value to be written to the table entry */
+ add r1, r1, r4
/* Getting the address of table entry to modify */
lsr r4, #18
- add r2, r4 /* r2 has the location which needs to be modified */
+ /* r2 has the location which needs to be modified */
+ add r2, r4
/* Storing previous entry of location being modified */
ldr r5, scratchpad_base
ldr r4, [r2]
str r4, [r5, #0xC0]
/* Modify the table entry */
str r1, [r2]
- /* Storing address of entry being modified
- * - will be restored after enabling MMU */
+ /*
+ * Storing address of entry being modified
+ * - will be restored after enabling MMU
+ */
ldr r5, scratchpad_base
str r2, [r5, #0xC4]
@@ -429,8 +581,11 @@ usettbr0:
mcr p15, 0, r0, c7, c5, 6 @ Invalidate branch predictor array
mcr p15, 0, r0, c8, c5, 0 @ Invalidate instruction TLB
mcr p15, 0, r0, c8, c6, 0 @ Invalidate data TLB
- /* Restore control register but dont enable caches here*/
- /* Caches will be enabled after restoring MMU table entry */
+ /*
+ * Restore control register. This enables the MMU.
+ * The caches and prediction are not enabled here, they
+ * will be enabled after restoring the MMU table entry.
+ */
ldmia r3!, {r4}
/* Store previous value of control register in scratchpad */
str r4, [r5, #0xC8]
@@ -438,212 +593,144 @@ usettbr0:
and r4, r2
mcr p15, 0, r4, c1, c0, 0
- ldmfd sp!, {r0-r12, pc} @ restore regs and return
-save_context_wfi:
- /*b save_context_wfi*/ @ enable to debug save code
- mov r8, r0 /* Store SDRAM address in r8 */
- mrc p15, 0, r5, c1, c0, 1 @ Read Auxiliary Control Register
- mov r4, #0x1 @ Number of parameters for restore call
- stmia r8!, {r4-r5} @ Push parameters for restore call
- mrc p15, 1, r5, c9, c0, 2 @ Read L2 AUX ctrl register
- stmia r8!, {r4-r5} @ Push parameters for restore call
- /* Check what that target sleep state is:stored in r1*/
- /* 1 - Only L1 and logic lost */
- /* 2 - Only L2 lost */
- /* 3 - Both L1 and L2 lost */
- cmp r1, #0x2 /* Only L2 lost */
- beq clean_l2
- cmp r1, #0x1 /* L2 retained */
- /* r9 stores whether to clean L2 or not*/
- moveq r9, #0x0 /* Dont Clean L2 */
- movne r9, #0x1 /* Clean L2 */
-l1_logic_lost:
- /* Store sp and spsr to SDRAM */
- mov r4, sp
- mrs r5, spsr
- mov r6, lr
- stmia r8!, {r4-r6}
- /* Save all ARM registers */
- /* Coprocessor access control register */
- mrc p15, 0, r6, c1, c0, 2
- stmia r8!, {r6}
- /* TTBR0, TTBR1 and Translation table base control */
- mrc p15, 0, r4, c2, c0, 0
- mrc p15, 0, r5, c2, c0, 1
- mrc p15, 0, r6, c2, c0, 2
- stmia r8!, {r4-r6}
- /* Domain access control register, data fault status register,
- and instruction fault status register */
- mrc p15, 0, r4, c3, c0, 0
- mrc p15, 0, r5, c5, c0, 0
- mrc p15, 0, r6, c5, c0, 1
- stmia r8!, {r4-r6}
- /* Data aux fault status register, instruction aux fault status,
- datat fault address register and instruction fault address register*/
- mrc p15, 0, r4, c5, c1, 0
- mrc p15, 0, r5, c5, c1, 1
- mrc p15, 0, r6, c6, c0, 0
- mrc p15, 0, r7, c6, c0, 2
- stmia r8!, {r4-r7}
- /* user r/w thread and process ID, user r/o thread and process ID,
- priv only thread and process ID, cache size selection */
- mrc p15, 0, r4, c13, c0, 2
- mrc p15, 0, r5, c13, c0, 3
- mrc p15, 0, r6, c13, c0, 4
- mrc p15, 2, r7, c0, c0, 0
- stmia r8!, {r4-r7}
- /* Data TLB lockdown, instruction TLB lockdown registers */
- mrc p15, 0, r5, c10, c0, 0
- mrc p15, 0, r6, c10, c0, 1
- stmia r8!, {r5-r6}
- /* Secure or non secure vector base address, FCSE PID, Context PID*/
- mrc p15, 0, r4, c12, c0, 0
- mrc p15, 0, r5, c13, c0, 0
- mrc p15, 0, r6, c13, c0, 1
- stmia r8!, {r4-r6}
- /* Primary remap, normal remap registers */
- mrc p15, 0, r4, c10, c2, 0
- mrc p15, 0, r5, c10, c2, 1
- stmia r8!,{r4-r5}
+/*
+ * ==============================
+ * == Exit point from OFF mode ==
+ * ==============================
+ */
+ ldmfd sp!, {r0-r12, pc} @ restore regs and return
- /* Store current cpsr*/
- mrs r2, cpsr
- stmia r8!, {r2}
- mrc p15, 0, r4, c1, c0, 0
- /* save control register */
- stmia r8!, {r4}
-clean_caches:
- /* Clean Data or unified cache to POU*/
- /* How to invalidate only L1 cache???? - #FIX_ME# */
- /* mcr p15, 0, r11, c7, c11, 1 */
- cmp r9, #1 /* Check whether L2 inval is required or not*/
- bne skip_l2_inval
-clean_l2:
- /* read clidr */
- mrc p15, 1, r0, c0, c0, 1
- /* extract loc from clidr */
- ands r3, r0, #0x7000000
- /* left align loc bit field */
- mov r3, r3, lsr #23
- /* if loc is 0, then no need to clean */
- beq finished
- /* start clean at cache level 0 */
- mov r10, #0
-loop1:
- /* work out 3x current cache level */
- add r2, r10, r10, lsr #1
- /* extract cache type bits from clidr*/
- mov r1, r0, lsr r2
- /* mask of the bits for current cache only */
- and r1, r1, #7
- /* see what cache we have at this level */
- cmp r1, #2
- /* skip if no cache, or just i-cache */
- blt skip
- /* select current cache level in cssr */
- mcr p15, 2, r10, c0, c0, 0
- /* isb to sych the new cssr&csidr */
- isb
- /* read the new csidr */
- mrc p15, 1, r1, c0, c0, 0
- /* extract the length of the cache lines */
- and r2, r1, #7
- /* add 4 (line length offset) */
- add r2, r2, #4
- ldr r4, assoc_mask
- /* find maximum number on the way size */
- ands r4, r4, r1, lsr #3
- /* find bit position of way size increment */
- clz r5, r4
- ldr r7, numset_mask
- /* extract max number of the index size*/
- ands r7, r7, r1, lsr #13
-loop2:
- mov r9, r4
- /* create working copy of max way size*/
-loop3:
- /* factor way and cache number into r11 */
- orr r11, r10, r9, lsl r5
- /* factor index number into r11 */
- orr r11, r11, r7, lsl r2
- /*clean & invalidate by set/way */
- mcr p15, 0, r11, c7, c10, 2
- /* decrement the way*/
- subs r9, r9, #1
- bge loop3
- /*decrement the index */
- subs r7, r7, #1
- bge loop2
-skip:
- add r10, r10, #2
- /* increment cache number */
- cmp r3, r10
- bgt loop1
-finished:
- /*swith back to cache level 0 */
- mov r10, #0
- /* select current cache level in cssr */
- mcr p15, 2, r10, c0, c0, 0
- isb
-skip_l2_inval:
- /* Data memory barrier and Data sync barrier */
- mov r1, #0
- mcr p15, 0, r1, c7, c10, 4
- mcr p15, 0, r1, c7, c10, 5
+/*
+ * Internal functions
+ */
- wfi @ wait for interrupt
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- bl wait_sdrc_ok
- /* restore regs and return */
- ldmfd sp!, {r0-r12, pc}
+/* This function implements the erratum ID i443 WA, applies to 34xx >= ES3.0 */
+ .text
+ENTRY(es3_sdrc_fix)
+ ldr r4, sdrc_syscfg @ get config addr
+ ldr r5, [r4] @ get value
+ tst r5, #0x100 @ is part access blocked
+ it eq
+ biceq r5, r5, #0x100 @ clear bit if set
+ str r5, [r4] @ write back change
+ ldr r4, sdrc_mr_0 @ get config addr
+ ldr r5, [r4] @ get value
+ str r5, [r4] @ write back change
+ ldr r4, sdrc_emr2_0 @ get config addr
+ ldr r5, [r4] @ get value
+ str r5, [r4] @ write back change
+ ldr r4, sdrc_manual_0 @ get config addr
+ mov r5, #0x2 @ autorefresh command
+ str r5, [r4] @ kick off refreshes
+ ldr r4, sdrc_mr_1 @ get config addr
+ ldr r5, [r4] @ get value
+ str r5, [r4] @ write back change
+ ldr r4, sdrc_emr2_1 @ get config addr
+ ldr r5, [r4] @ get value
+ str r5, [r4] @ write back change
+ ldr r4, sdrc_manual_1 @ get config addr
+ mov r5, #0x2 @ autorefresh command
+ str r5, [r4] @ kick off refreshes
+ bx lr
+
+sdrc_syscfg:
+ .word SDRC_SYSCONFIG_P
+sdrc_mr_0:
+ .word SDRC_MR_0_P
+sdrc_emr2_0:
+ .word SDRC_EMR2_0_P
+sdrc_manual_0:
+ .word SDRC_MANUAL_0_P
+sdrc_mr_1:
+ .word SDRC_MR_1_P
+sdrc_emr2_1:
+ .word SDRC_EMR2_1_P
+sdrc_manual_1:
+ .word SDRC_MANUAL_1_P
+ENTRY(es3_sdrc_fix_sz)
+ .word . - es3_sdrc_fix
+
+/*
+ * This function implements the erratum ID i581 WA:
+ * SDRC state restore before accessing the SDRAM
+ *
+ * Only used at return from non-OFF mode. For OFF
+ * mode the ROM code configures the SDRC and
+ * the DPLL before calling the restore code directly
+ * from DDR.
+ */
/* Make sure SDRC accesses are ok */
wait_sdrc_ok:
- ldr r4, cm_idlest1_core
- ldr r5, [r4]
- and r5, r5, #0x2
- cmp r5, #0
- bne wait_sdrc_ok
- ldr r4, sdrc_power
- ldr r5, [r4]
- bic r5, r5, #0x40
- str r5, [r4]
+
+/* DPLL3 must be locked before accessing the SDRC. Maybe the HW ensures this */
+ ldr r4, cm_idlest_ckgen
+wait_dpll3_lock:
+ ldr r5, [r4]
+ tst r5, #1
+ beq wait_dpll3_lock
+
+ ldr r4, cm_idlest1_core
+wait_sdrc_ready:
+ ldr r5, [r4]
+ tst r5, #0x2
+ bne wait_sdrc_ready
+ /* allow DLL powerdown upon hw idle req */
+ ldr r4, sdrc_power
+ ldr r5, [r4]
+ bic r5, r5, #0x40
+ str r5, [r4]
+
+is_dll_in_lock_mode:
+ /* Is dll in lock mode? */
+ ldr r4, sdrc_dlla_ctrl
+ ldr r5, [r4]
+ tst r5, #0x4
+ bxne lr @ Return if locked
+ /* wait till dll locks */
+wait_dll_lock_timed:
+ ldr r4, wait_dll_lock_counter
+ add r4, r4, #1
+ str r4, wait_dll_lock_counter
+ ldr r4, sdrc_dlla_status
+	/* Wait 20 us for lock */
+ mov r6, #8
wait_dll_lock:
- /* Is dll in lock mode? */
- ldr r4, sdrc_dlla_ctrl
- ldr r5, [r4]
- tst r5, #0x4
- bxne lr
- /* wait till dll locks */
- ldr r4, sdrc_dlla_status
- ldr r5, [r4]
- and r5, r5, #0x4
- cmp r5, #0x4
- bne wait_dll_lock
- bx lr
+ subs r6, r6, #0x1
+ beq kick_dll
+ ldr r5, [r4]
+ and r5, r5, #0x4
+ cmp r5, #0x4
+ bne wait_dll_lock
+ bx lr @ Return when locked
+
+ /* disable/reenable DLL if not locked */
+kick_dll:
+ ldr r4, sdrc_dlla_ctrl
+ ldr r5, [r4]
+ mov r6, r5
+ bic r6, #(1<<3) @ disable dll
+ str r6, [r4]
+ dsb
+ orr r6, r6, #(1<<3) @ enable dll
+ str r6, [r4]
+ dsb
+ ldr r4, kick_counter
+ add r4, r4, #1
+ str r4, kick_counter
+ b wait_dll_lock_timed
cm_idlest1_core:
.word CM_IDLEST1_CORE_V
+cm_idlest_ckgen:
+ .word CM_IDLEST_CKGEN_V
sdrc_dlla_status:
.word SDRC_DLLA_STATUS_V
sdrc_dlla_ctrl:
.word SDRC_DLLA_CTRL_V
-pm_prepwstst_core:
- .word PM_PREPWSTST_CORE_V
pm_prepwstst_core_p:
.word PM_PREPWSTST_CORE_P
-pm_prepwstst_mpu:
- .word PM_PREPWSTST_MPU_V
pm_pwstctrl_mpu:
.word PM_PWSTCTRL_MPU_P
scratchpad_base:
@@ -651,13 +738,7 @@ scratchpad_base:
sram_base:
.word SRAM_BASE_P + 0x8000
sdrc_power:
- .word SDRC_POWER_V
-clk_stabilize_delay:
- .word 0x000001FF
-assoc_mask:
- .word 0x3ff
-numset_mask:
- .word 0x7fff
+ .word SDRC_POWER_V
ttbrbit_mask:
.word 0xFFFFC000
table_index_mask:
@@ -668,5 +749,20 @@ cache_pred_disable_mask:
.word 0xFFFFE7FB
control_stat:
.word CONTROL_STAT
+control_mem_rta:
+ .word CONTROL_MEM_RTA_CTRL
+kernel_flush:
+ .word v7_flush_dcache_all
+l2dis_3630:
+ .word 0
+ /*
+ * When exporting to userspace while the counters are in SRAM,
+	 * these 2 words need to be at the end to facilitate retrieval!
+ */
+kick_counter:
+ .word 0
+wait_dll_lock_counter:
+ .word 0
+
ENTRY(omap34xx_cpu_suspend_sz)
.word . - omap34xx_cpu_suspend