2 * cortex arm arch v7 cache flushing and invalidation
3 * shared by l.s and rebootcode.s
/*
 * cacheiinv: invalidate the entire instruction cache.
 * CpCACHEinvi with CpCACHEall selects the "invalidate all I-cache"
 * c7 operation; the value in R0 should be irrelevant for an
 * all-cache op — confirm against the arch ref.
 * NOTE(review): the barrier/RET lines of this routine are not
 * visible in this view of the file.
 */
6 TEXT cacheiinv(SB), $-4 /* I invalidate */
8 MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEinvi), CpCACHEall /* ok on cortex */
13 * set/way operators, passed a suitable set/way value in R0.
/*
 * cachedwb_sw: write back (clean) a single data-cache line,
 * selected by the set/way/level value passed in R0.
 * Called repeatedly from wholecache's inner loop.
 */
15 TEXT cachedwb_sw(SB), $-4
16 MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEwb), CpCACHEsi /* clean line by set/way */
/*
 * cachedwbinv_sw: write back, then invalidate, a single data-cache
 * line selected by the set/way/level value passed in R0.
 * Called repeatedly from wholecache's inner loop.
 */
19 TEXT cachedwbinv_sw(SB), $-4
20 MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEwbi), CpCACHEsi /* clean+invalidate line by set/way */
/*
 * cachedinv_sw: invalidate (without writing back) a single
 * data-cache line selected by the set/way/level value in R0.
 * Called repeatedly from wholecache's inner loop.
 */
23 TEXT cachedinv_sw(SB), $-4
24 MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEinvd), CpCACHEsi /* invalidate line by set/way */
27 /* set cache size select */
/*
 * setcachelvl: select which cache level/type subsequent cache-size
 * reads (getwayssets) describe, by writing R0 into the cache size
 * selection register (CpIDcssel).
 * NOTE(review): any ISB/RET that follows is on lines not visible here.
 */
28 TEXT setcachelvl(SB), $-4
29 MCR CpSC, CpIDcssel, R0, C(CpID), C(CpIDid), 0
33 /* return cache sizes */
/*
 * getwayssets: return (in R0) the cache size id register
 * (CpIDcsize) for the cache level previously chosen with
 * setcachelvl; wholecache below decodes the ways/sets fields
 * from this value.
 */
34 TEXT getwayssets(SB), $-4
35 MRC CpSC, CpIDcsize, R0, C(CpID), C(CpIDid), 0
39 * l1 cache operations.
40 * l1 and l2 ops are intended to be called from C, thus need save no
41 * caller's regs, only those we need to preserve across calls.
/*
 * cachedwb: write back the entire L1 data cache.
 * Loads the per-line clean primitive into R0 as the argument for
 * wholecache; the level setup and the transfer to wholecache are
 * on lines not visible in this view.
 */
44 TEXT cachedwb(SB), $-4
46 MOVW $cachedwb_sw(SB), R0 /* op to apply to every line */
/*
 * cachedwbinv: write back and invalidate the entire L1 data cache.
 * Loads the per-line clean+invalidate primitive into R0 for
 * wholecache; the level setup and the transfer to wholecache are
 * on lines not visible in this view.
 */
51 TEXT cachedwbinv(SB), $-4
53 MOVW $cachedwbinv_sw(SB), R0 /* op to apply to every line */
/*
 * cachedinv: invalidate the entire L1 data cache without writing
 * it back.  Loads the per-line invalidate primitive into R0 for
 * wholecache; the level setup and the transfer to wholecache are
 * on lines not visible in this view.
 */
58 TEXT cachedinv(SB), $-4
60 MOVW $cachedinv_sw(SB), R0 /* op to apply to every line */
/*
 * cacheuwbinv: write back and invalidate both I and D (unified)
 * caches.  Saves lr because, unlike the leaf routines above, this
 * one makes calls (presumably BLs to the l1/l2 ops — the calling
 * lines are not visible here).  Per the restore comment below, R1
 * holds the caller's saved CPSR across the operation, implying
 * interrupts are disabled on a line not shown — confirm.
 */
65 TEXT cacheuwbinv(SB), $-4
66 MOVM.DB.W [R14], (R13) /* save lr on stack */
70 MOVM.DB.W [R1], (R13) /* save R1 on stack */
75 MOVM.IA.W (R13), [R1] /* restore R1 (saved CPSR) */
77 MOVM.IA.W (R13), [R14] /* restore lr */
/*
 * l2cacheuwb: write back the entire L2 (unified) cache, reusing
 * the same per-line clean primitive as the L1 op; the level-2
 * setup and transfer to wholecache are on lines not visible here.
 */
84 TEXT l2cacheuwb(SB), $-4
86 MOVW $cachedwb_sw(SB), R0 /* op to apply to every line */
/*
 * l2cacheuwbinv: write back and invalidate the entire L2 (unified)
 * cache.  R1 carries the caller's saved CPSR across the operation
 * (per the restore comment), implying interrupts are disabled on a
 * line not shown — confirm.  The level-2 setup and transfer to
 * wholecache are also on lines not visible in this view.
 */
91 TEXT l2cacheuwbinv(SB), $-4
96 MOVM.DB.W [R1], (R13) /* save R1 on stack */
98 MOVW $cachedwbinv_sw(SB), R0 /* op to apply to every line */
103 MOVM.IA.W (R13), [R1] /* restore R1 (saved CPSR) */
/*
 * l2cacheuinv: invalidate the entire L2 (unified) cache without
 * writing it back, reusing the per-line invalidate primitive; the
 * level-2 setup and transfer to wholecache are on lines not
 * visible here.
 */
107 TEXT l2cacheuinv(SB), $-4
109 MOVW $cachedinv_sw(SB), R0 /* op to apply to every line */
115 * these shift values are for the Cortex-A8 L1 cache (A=2, L=6) and
116 * the Cortex-A8 L2 cache (A=3, L=6).
117 * A = log2(# of ways), L = log2(bytes per cache line).
118 * see armv7 arch ref p. 1403.
126 * callers are assumed to be the above l1 and l2 ops.
127 * R0 is the function to call in the innermost loop.
128 * R8 is the cache level (one-origin: 1 or 2).
130 * initial translation by 5c, then massaged by hand.
/*
 * wholecache: apply the set/way operation whose address is in R0
 * (one of the *_sw routines above) to every line — all ways times
 * all sets — of the cache level named in R8 (1-origin, per the
 * comments above).  Runs with interrupts off for the duration.
 * Visibly uses R0-R8, R14 and SP; R4 (the set shift) is read at
 * the ORR below but its assignment is on a line not shown —
 * presumably a MOVW.EQ/NE of L?SETSH paired with the WAYSH pair.
 * NOTE(review): this view is incomplete — the outer:/inner: labels
 * targeted by the BLT branches, the conditional segment handling
 * after the CMP, and the final barrier/RET fall on unsampled lines.
 */
132 TEXT wholecache+0(SB), $-4
133 MOVW R0, R1 /* save argument for inner loop in R1 */
134 SUB $1, R8 /* convert cache level to zero origin */
136 /* we may not have the MMU on yet, so map R1 to PC's space */
137 BIC $KSEGM, R1 /* strip segment from address */
138 MOVW PC, R2 /* get PC's segment ... */
140 CMP $0, R2 /* PC segment should be non-zero on omap */
142 ORR R2, R1 /* combine them */
144 /* drain write buffers */
146 MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEwb), CpCACHEwait
150 MOVM.DB.W [R2,R14], (SP) /* save regs on stack; R2 presumably loaded from CPSR on a line not shown */
151 CPSID /* splhi to make entire op atomic */
153 /* get cache sizes */
154 SLL $1, R8, R0 /* R0 = (cache - 1) << 1 */
155 MCR CpSC, CpIDcssel, R0, C(CpID), C(CpIDid), 0 /* set cache size select */
157 MRC CpSC, CpIDcsize, R0, C(CpID), C(CpIDid), 0 /* get cache sizes */
159 /* compute # of ways and sets for this cache level */
160 SRA $3, R0, R5 /* R5 (ways) = R0 >> 3 */
161 AND $1023, R5 /* R5 = (R0 >> 3) & MASK(10) */
162 ADD $1, R5 /* R5 (ways) = ((R0 >> 3) & MASK(10)) + 1 */
164 SRA $13, R0, R2 /* R2 = R0 >> 13 */
165 AND $32767, R2 /* R2 = (R0 >> 13) & MASK(15) */
166 ADD $1, R2 /* R2 (sets) = ((R0 >> 13) & MASK(15)) + 1 */
168 /* precompute set/way shifts for inner loop */
169 CMP $0, R8 /* cache == 1? */
170 MOVW.EQ $L1WAYSH, R3 /* yes */
172 MOVW.NE $L2WAYSH, R3 /* no */
175 /* iterate over ways */
176 MOVW $0, R7 /* R7: way */
178 /* iterate over sets */
179 MOVW $0, R6 /* R6: set */
181 /* compute set/way register contents */
182 SLL R3, R7, R0 /* R0 = way << R3 (L?WAYSH) */
183 ORR R8<<1, R0 /* R0 = way << L?WAYSH | (cache - 1) << 1 */
184 ORR R6<<R4, R0 /* R0 = way<<L?WAYSH | (cache-1)<<1 |set<<R4 */
186 BL (R1) /* call set/way operation with R0 */
188 ADD $1, R6 /* set++ */
189 CMP R2, R6 /* set >= sets? */
190 BLT inner /* no, do next set */
192 ADD $1, R7 /* way++ */
193 CMP R5, R7 /* way >= ways? */
194 BLT outer /* no, do next way */
196 MOVM.IA.W (SP), [R2,R14] /* restore regs */
197 MOVW R2, CPSR /* splx; re-enables interrupts if caller had them on */
199 /* drain write buffers */
200 MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEwb), CpCACHEwait