/*
* Copyright (C) 2020 Intel Corporation. All rights reserved
*
* SPDX-License-Identifier: GPL-2.0
*/
#include <asm-offsets.h>
#include <config.h>
#include <linux/linkage.h>
#include <asm/macro.h>
/*
 * lowlevel_init - early per-core initialization for SoCFPGA 64-bit.
 *
 * Responsibilities (all conditional on build configuration):
 *  - SPL only: service the L2 warm-reset handshake (park slaves in WFI,
 *    master clears state and requests a warm reset via RMR_EL3).
 *  - GIC + ATF flow: decide whether to clear the spin-table release
 *    address based on how this core was reset, then wait for ATF to
 *    publish an entry address and branch to it.
 *  - Initialize the GIC (distributor on master, per-CPU interface on all).
 *  - Multientry: slaves wait for an interrupt, then drop to EL2 (and
 *    optionally EL1) before returning.
 *
 * In:       lr = return address (saved in x29; the bl calls below
 *           clobber x30)
 * Clobbers: x0-x6, flags, plus whatever the called gic_*/armv8_switch_*
 *           helpers clobber. NOTE(review): x29 is used as plain LR
 *           scratch here, so callers must not rely on it as a frame ptr.
 */
ENTRY(lowlevel_init)
mov x29, lr /* Save LR */
#ifdef CONFIG_XPL_BUILD
/*
 * Check for L2 reset magic word: a previous stage stores
 * L2_RESET_DONE_STATUS at L2_RESET_DONE_REG to request that the
 * master core perform a warm reset.
 */
ldr x4, =L2_RESET_DONE_REG
ldr x5, [x4]
ldr x1, =L2_RESET_DONE_STATUS
cmp x1, x5
/* No L2 reset, skip warm reset */
b.ne skipwarmreset
/* Put all slaves CPUs into WFI mode; only the master issues the reset */
branch_if_slave x0, put_cpu_in_wfi
/* L2 reset completed: consume the magic word so it fires only once */
str xzr, [x4]
/* Clear previous CPU release address */
ldr x4, =CPU_RELEASE_ADDR
str wzr, [x4]
/*
 * Master CPU (CPU0) request for warm reset: set RMR_EL3 bit 1 (RR,
 * reset request) while preserving the AArch64 state bit.
 */
mrs x1, rmr_el3
orr x1, x1, #0x02
msr rmr_el3, x1
isb
dsb sy
/* Park here (master and slaves alike) until the warm reset takes effect */
put_cpu_in_wfi:
wfi
b put_cpu_in_wfi
skipwarmreset:
#endif
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
#if defined(CONFIG_XPL_BUILD) && defined(CONFIG_SPL_ATF)
/*
 * In ATF flow, need to clear the old CPU address when cold reset
 * being triggered, but shouldn't clear CPU address if it is reset
 * by CPU-ON, so that the core can correctly jump to ATF code after
 * reset by CPU-ON. CPU-ON trigger the reset via mpumodrst.
 *
 * Hardware will set 1 to core*_irq in mpurststat register in
 * reset manager if the core is reset by mpumodrst.
 *
 * The following code will check the mpurststat to identify if the
 * core is reset by mpumodrst, and it will skip CPU address clearing
 * if the core is reset by mpumodrst. At last, the code need to clear
 * the core*_irq by set it to 1. So that it can reflect the correct
 * and latest status in next reset.
 */
/* Check if it is a master core off/on from kernel using boot scratch
 * cold register 8 bit 19. This bit is set by ATF.
 */
ldr x4, =BOOT_SCRATCH_COLD8
ldr x5, [x4]
and x6, x5, #0x80000
cbnz x6, wait_for_atf_master
/* Retrieve mpurststat register (offset 0x04) in reset manager */
ldr x4, =SOCFPGA_RSTMGR_ADDRESS
ldr w5, [x4, #0x04]
/*
 * Set mask based on current core id: x1 = MPIDR.Aff0 (core number),
 * x2 = core*_irq bit for this core, i.e. BIT(8 + core_id).
 */
mrs x0, mpidr_el1
and x1, x0, #0xF
ldr x2, =0x00000100
lsl x2, x2, x1
/* Skip if core*_irq register is set (core was reset via mpumodrst/CPU-ON) */
and x6, x5, x2
cbnz x6, skip_clear_cpu_address
/*
 * Reach here means core*_irq is 0, means the core is
 * reset by cold, warm or watchdog reset.
 * Clear previous CPU release address
 *
 * NOTE(review): the wait loops below load 64 bits from
 * CPU_RELEASE_ADDR but only the low 32 bits are cleared here
 * (wzr) — presumably the slot's upper word is always zero; confirm.
 */
ldr x4, =CPU_RELEASE_ADDR
str wzr, [x4]
b skip_clear_core_irq
skip_clear_cpu_address:
/* Clear core*_irq status by writing 1 to its bit (write-1-to-clear) */
ldr x4, =SOCFPGA_RSTMGR_ADDRESS
str w2, [x4, #0x04]
skip_clear_core_irq:
/* Master CPU (CPU0) does not need to wait for atf */
branch_if_master x0, master_cpu
/*
 * Slave path: spin until ATF publishes a non-zero entry address at
 * CPU_RELEASE_ADDR, then jump to it (never returns here).
 */
wait_for_atf:
ldr x4, =CPU_RELEASE_ADDR
ldr x5, [x4]
cbz x5, slave_wait_atf
br x5
slave_wait_atf:
branch_if_slave x0, wait_for_atf
/*
 * Master core resumed by ATF (boot scratch bit 19 set): same spin
 * on CPU_RELEASE_ADDR, but keyed on the master core.
 */
wait_for_atf_master:
ldr x4, =CPU_RELEASE_ADDR
ldr x5, [x4]
cbz x5, master_wait_atf
br x5
master_wait_atf:
branch_if_master x0, wait_for_atf_master
master_cpu:
#else
/* Non-ATF flow: only the master initializes the GIC distributor */
branch_if_slave x0, 1f
#endif
ldr x0, =GICD_BASE
bl gic_init_secure
1:
/* Per-CPU GIC interface init (all cores reach here) */
#if defined(CONFIG_GICV3)
ldr x0, =GICR_BASE
bl gic_init_secure_percpu
#elif defined(CONFIG_GICV2)
ldr x0, =GICD_BASE
ldr x1, =GICC_BASE
bl gic_init_secure_percpu
#endif
#endif
#ifdef CONFIG_ARMV8_MULTIENTRY
branch_if_master x0, 2f
/*
 * Slave should wait for master clearing spin table.
 * This sync prevent slaves observing incorrect
 * value of spin table and jumping to wrong place.
 */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
#ifdef CONFIG_GICV2
ldr x0, =GICC_BASE
#endif
bl gic_wait_for_interrupt
#endif
/*
 * All slaves will enter EL2 and optionally EL1.
 * x4 = resume address after the EL switch, x5 = execution state.
 */
adr x4, lowlevel_in_el2
ldr x5, =ES_TO_AARCH64
bl armv8_switch_to_el2
lowlevel_in_el2:
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
adr x4, lowlevel_in_el1
ldr x5, =ES_TO_AARCH64
bl armv8_switch_to_el1
lowlevel_in_el1:
#endif
#endif /* CONFIG_ARMV8_MULTIENTRY */
2:
mov lr, x29 /* Restore LR */
ret
ENDPROC(lowlevel_init)