Diffstat (limited to 'lib/cpus/aarch32')
-rw-r--r--   lib/cpus/aarch32/aem_generic.S |  4
-rw-r--r--   lib/cpus/aarch32/cortex_a32.S  |  5
-rw-r--r--   lib/cpus/aarch32/cpu_helpers.S | 59
3 files changed, 31 insertions, 37 deletions
diff --git a/lib/cpus/aarch32/aem_generic.S b/lib/cpus/aarch32/aem_generic.S
index 10ea4e47..3d6064c9 100644
--- a/lib/cpus/aarch32/aem_generic.S
+++ b/lib/cpus/aarch32/aem_generic.S
@@ -65,4 +65,6 @@ func aem_generic_cluster_pwr_dwn
 endfunc aem_generic_cluster_pwr_dwn
 
 /* cpu_ops for Base AEM FVP */
-declare_cpu_ops aem_generic, BASE_AEM_MIDR, 1
+declare_cpu_ops aem_generic, BASE_AEM_MIDR, CPU_NO_RESET_FUNC, \
+	aem_generic_core_pwr_dwn, \
+	aem_generic_cluster_pwr_dwn
diff --git a/lib/cpus/aarch32/cortex_a32.S b/lib/cpus/aarch32/cortex_a32.S
index f2b85a31..f631c4cf 100644
--- a/lib/cpus/aarch32/cortex_a32.S
+++ b/lib/cpus/aarch32/cortex_a32.S
@@ -141,4 +141,7 @@ func cortex_a32_cluster_pwr_dwn
 	b	cortex_a32_disable_smp
 endfunc cortex_a32_cluster_pwr_dwn
 
-declare_cpu_ops cortex_a32, CORTEX_A32_MIDR
+declare_cpu_ops cortex_a32, CORTEX_A32_MIDR, \
+	cortex_a32_reset_func, \
+	cortex_a32_core_pwr_dwn, \
+	cortex_a32_cluster_pwr_dwn
diff --git a/lib/cpus/aarch32/cpu_helpers.S b/lib/cpus/aarch32/cpu_helpers.S
index a4dfe5f2..900d158c 100644
--- a/lib/cpus/aarch32/cpu_helpers.S
+++ b/lib/cpus/aarch32/cpu_helpers.S
@@ -70,50 +70,39 @@ endfunc reset_handler
 
 #if IMAGE_BL32 /* The power down core and cluster is needed only in BL32 */
 	/*
-	 * The prepare core power down function for all platforms. After
-	 * the cpu_ops pointer is retrieved from cpu_data, the corresponding
-	 * pwr_dwn_core in the cpu_ops is invoked. Follows AAPCS.
+	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
+	 *
+	 * Prepare CPU power down function for all platforms. The function takes
+	 * a domain level to be powered down as its parameter. After the cpu_ops
+	 * pointer is retrieved from cpu_data, the handler for requested power
+	 * level is called.
 	 */
-	.globl	prepare_core_pwr_dwn
-func prepare_core_pwr_dwn
-	/* r12 is pushed to meet the 8 byte stack alignment requirement */
-	push	{r12, lr}
-	bl	_cpu_data
-	pop	{r12, lr}
-
-	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
-#if ASM_ASSERTION
-	cmp	r1, #0
-	ASM_ASSERT(ne)
-#endif
-
-	/* Get the cpu_ops core_pwr_dwn handler */
-	ldr	r0, [r1, #CPU_PWR_DWN_CORE]
-	bx	r0
-endfunc prepare_core_pwr_dwn
-
+	.globl	prepare_cpu_pwr_dwn
+func prepare_cpu_pwr_dwn
 	/*
-	 * The prepare cluster power down function for all platforms. After
-	 * the cpu_ops pointer is retrieved from cpu_data, the corresponding
-	 * pwr_dwn_cluster in the cpu_ops is invoked. Follows AAPCS.
+	 * If the given power level exceeds CPU_MAX_PWR_DWN_OPS, we call the
+	 * power down handler for the last power level
 	 */
-	.globl	prepare_cluster_pwr_dwn
-func prepare_cluster_pwr_dwn
-	/* r12 is pushed to meet the 8 byte stack alignment requirement */
-	push	{r12, lr}
+	mov	r2, #(CPU_MAX_PWR_DWN_OPS - 1)
+	cmp	r0, r2
+	movhi	r0, r2
+
+	push	{r0, lr}
 	bl	_cpu_data
-	pop	{r12, lr}
+	pop	{r2, lr}
 
-	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
+	ldr	r0, [r0, #CPU_DATA_CPU_OPS_PTR]
 #if ASM_ASSERTION
-	cmp	r1, #0
+	cmp	r0, #0
 	ASM_ASSERT(ne)
 #endif
 
-	/* Get the cpu_ops cluster_pwr_dwn handler */
-	ldr	r0, [r1, #CPU_PWR_DWN_CLUSTER]
-	bx	r0
-endfunc prepare_cluster_pwr_dwn
+	/* Get the appropriate power down handler */
+	mov	r1, #CPU_PWR_DWN_OPS
+	add	r1, r1, r2, lsl #2
+	ldr	r1, [r0, r1]
+	bx	r1
+endfunc prepare_cpu_pwr_dwn
 
 /*
  * Initializes the cpu_ops_ptr if not already initialized
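For readers unfamiliar with the cpu_ops dispatch, the sketch below restates the new prepare_cpu_pwr_dwn logic from the hunk above in C: clamp the requested power level to the last implemented handler, then index into the per-CPU array of power-down handlers at offset CPU_PWR_DWN_OPS (the add r1, r1, r2, lsl #2 step is the 4-byte pointer indexing on AArch32). This is only an illustration of the assembly, not TF-A code; the struct, field, and function names (cpu_ops_sketch, pwr_dwn_ops, prepare_cpu_pwr_dwn_sketch, MAX_PWR_DWN_OPS) are invented for the sketch, and the handler count of 2 simply mirrors the core/cluster pair declared by the CPUs in this patch.

/* Hypothetical C equivalent of the new prepare_cpu_pwr_dwn dispatch. */
typedef void (*pwr_dwn_handler_t)(void);

#define MAX_PWR_DWN_OPS 2	/* stand-in for CPU_MAX_PWR_DWN_OPS */

struct cpu_ops_sketch {
	/* MIDR, reset handler, etc. omitted for brevity */
	pwr_dwn_handler_t pwr_dwn_ops[MAX_PWR_DWN_OPS];	/* indexed by power level */
};

static void prepare_cpu_pwr_dwn_sketch(unsigned int power_level,
				       const struct cpu_ops_sketch *ops)
{
	/* Clamp to the last implemented level (the cmp/movhi pair above) */
	if (power_level > MAX_PWR_DWN_OPS - 1)
		power_level = MAX_PWR_DWN_OPS - 1;

	/* Equivalent of: ldr r1, [r0, #CPU_PWR_DWN_OPS + (level << 2)]; bx r1 */
	ops->pwr_dwn_ops[power_level]();
}

In the same spirit, the declare_cpu_ops changes in the first two hunks pass the handlers explicitly: a reset function (or CPU_NO_RESET_FUNC when there is none, as for the Base AEM model), followed by the core and cluster power-down handlers that populate this per-level array.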