2 * Broadcom bcm2836 SoC, as used in Raspberry Pi 2
3 * 4 x Cortex-A7 processor (armv7)
/* instruction cache line size in bytes, used by cache maintenance code */
9 #define ICACHELINESZ 32
/*
 * armv7 barrier and power-management instructions emitted as raw opcodes,
 * presumably because this assembler lacks the mnemonics.
 * For the barriers, the low nibble 0xf selects the SY (full system) domain.
 */
14 #define DSB WORD $0xf57ff04f /* data synch. barrier; last f = SY */
15 #define DMB WORD $0xf57ff05f /* data mem. barrier; last f = SY */
16 #define ISB WORD $0xf57ff06f /* instr. sync. barrier; last f = SY */
17 #define WFI WORD $0xe320f003 /* wait for interrupt */
18 #define WFI_EQ WORD $0x0320f003 /* wait for interrupt if eq (cond = 0000) */
19 #define ERET WORD $0xe160006e /* exception return from HYP */
20 #define SEV WORD $0xe320f004 /* send event */
22 /* tas/cas strex debugging limits; started at 10000 */
/*
 * armstart: cold-start entry for cpu0, running with MMU and caches off.
 * Visible sequence: enter SVC mode with interrupts disabled, clear the
 * cache/MMU enables in SCTLR, set the SMP bit in ACTLR, invalidate TLBs,
 * zero Mach and the kernel page tables, point TTBR0/TTBR1 at the L1
 * table, re-enable caches+MMU+high vectors, then jump to _startpg at its
 * virtual (KZERO) address.  NOTE(review): many source lines are elided
 * in this extract; gaps between the numbered lines are pre-existing.
 */
25 TEXT armstart(SB), 1, $-4
28 * if not cpu0, go to secondary startup
34 * go to SVC mode, interrupts disabled
39 * disable the mmu and caches
41 MRC CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl
42 BIC $(CpCdcache|CpCicache|CpCmmu), R1
/* CpCsbo = SCTLR should-be-one bits; CpCsw presumably SWP enable — confirm in arm.h */
43 ORR $(CpCsbo|CpCsw), R1
45 MCR CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl
/* NOTE(review): ACTLR.SMP must be set before D-cache use on Cortex-A7 (per ARM TRM) */
52 MRC CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl
53 ORR $CpACsmp, R1 /* turn SMP on */
54 MCR CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl
/* invalidate entire unified TLB; the register argument is ignored by this op */
56 MCR CpSC, 0, R1, C(CpTLB), C(CpTLBinvu), CpTLBinv
60 * clear mach and page tables
62 MOVW $PADDR(MACHADDR), R1
63 MOVW $PADDR(KTZERO), R2
71 * start stack at top of mach (physical addr)
72 * set up page tables for kernel
74 MOVW $PADDR(MACHADDR+MACHSIZE-4), R13
79 * set up domain access control and page table base
82 MCR CpSC, 0, R1, C(CpDAC), C(0)
/* mark page-table walks shareable, inner/outer write-back allocate */
84 ORR $(CpTTBs|CpTTBowba|CpTTBiwba), R1
85 MCR CpSC, 0, R1, C(CpTTB), C(0)
86 MCR CpSC, 0, R1, C(CpTTB), C(0), CpTTB1 /* cortex has two */
89 * invalidate my caches before enabling
97 * enable caches, mmu, and high vectors
99 MRC CpSC, 0, R0, C(CpCONTROL), C(0), CpMainctl
100 ORR $(CpChv|CpCdcache|CpCicache|CpCmmu), R0
101 MCR CpSC, 0, R0, C(CpCONTROL), C(0), CpMainctl
105 * switch SB, SP, and PC into KZERO space
107 MOVW $setR12(SB), R12
108 MOVW $(MACHADDR+MACHSIZE-4), R13
/* absolute jump: continues at _startpg's KZERO (virtual) address */
109 MOVW $_startpg(SB), R15
/*
 * _startpg: first code executed at the virtual (KZERO) address after the
 * MMU comes on for cpu0.  Enables the performance-monitor cycle counter,
 * then calls main().  Lines are elided in this extract.
 */
111 TEXT _startpg(SB), 1, $-4
114 * enable cycle counter
117 MCR CpSC, 0, R1, C(CpCLD), C(CpCLDena), CpCLDenacyc
119 MCR CpSC, 0, R1, C(CpCLD), C(CpCLDena), CpCLDenapmnc
122 * call main and loop forever if it returns
/* referencing _div forces the loader to include the software divide routines */
127 BL _div(SB) /* hack to load _div, etc. */
130 * startup entry for cpu(s) other than 0
/*
 * cpureset: entry for secondary cpus, MMU off.  Mirrors armstart, but
 * takes this cpu's Mach and L1 page table from machaddr[cpuid], which
 * cpu0 must have published first.  Lines are elided in this extract.
 */
132 TEXT cpureset(SB), 1, $-4
135 * load physical base for SB addressing while mmu is off
136 * keep a handy zero in R0 until first function call
138 MOVW $setR12(SB), R12
144 * SVC mode, interrupts disabled
149 * disable the mmu and caches
151 MRC CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl
152 BIC $(CpCdcache|CpCicache|CpCmmu), R1
153 ORR $(CpCsbo|CpCsw), R1
155 MCR CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl
/* turn on SMP coherence before the D-cache is enabled, as on cpu0 */
162 MRC CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl
163 ORR $CpACsmp, R1 /* turn SMP on */
164 MCR CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl
166 MCR CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv
170 * find Mach for this cpu
/* MPIDR: low bits identify this core within the cluster */
172 MRC CpSC, 0, R2, C(CpID), C(CpIDidct), CpIDmpid
173 AND $(MAXMACH-1), R2 /* mask out non-cpu-id bits */
174 SLL $2, R2 /* convert to word index */
175 MOVW $machaddr(SB), R0
176 ADD R2, R0 /* R0 = &machaddr[cpuid] */
177 MOVW (R0), R0 /* R0 = machaddr[cpuid] */
/* branch-to-self hang if our Mach pointer was never published (flags presumably set by an elided compare) */
179 BEQ 0(PC) /* must not be zero */
180 SUB $KZERO, R0, R(MACH) /* m = PADDR(machaddr[cpuid]) */
183 * start stack at top of local Mach
185 ADD $(MACHSIZE-4), R(MACH), R13
188 * set up domain access control and page table base
191 MCR CpSC, 0, R1, C(CpDAC), C(0)
/* NOTE(review): 12 is presumably offsetof(Mach, mmul1) — confirm against dat.h */
192 MOVW 12(R(MACH)), R1 /* m->mmul1 */
193 SUB $KZERO, R1 /* phys addr */
194 ORR $(CpTTBs|CpTTBowba|CpTTBiwba), R1
195 MCR CpSC, 0, R1, C(CpTTB), C(0)
196 MCR CpSC, 0, R1, C(CpTTB), C(0), CpTTB1 /* cortex has two */
199 * invalidate my caches before enabling
206 * enable caches, mmu, and high vectors
208 MRC CpSC, 0, R0, C(CpCONTROL), C(0), CpMainctl
209 ORR $(CpChv|CpCdcache|CpCicache|CpCmmu), R0
210 MCR CpSC, 0, R0, C(CpCONTROL), C(0), CpMainctl
214 * switch MACH, SB, SP, and PC into KZERO space
217 MOVW $setR12(SB), R12
219 MOVW $_startpg2(SB), R15
/*
 * _startpg2: virtual-address continuation for secondary cpus after the
 * MMU comes on.  Enables the cycle counter, then calls cpustart(cpuid).
 * Lines are elided in this extract.
 */
221 TEXT _startpg2(SB), 1, $-4
224 * enable cycle counter
227 MCR CpSC, 0, R1, C(CpCLD), C(CpCLDena), CpCLDenacyc
229 MCR CpSC, 0, R1, C(CpCLD), C(CpCLDena), CpCLDenapmnc
232 * call cpustart and loop forever if it returns
/* re-derive cpuid from MPIDR to pass as the argument in R0 */
234 MRC CpSC, 0, R0, C(CpID), C(CpIDidct), CpIDmpid
235 AND $(MAXMACH-1), R0 /* mask out non-cpu-id bits */
240 * get into SVC mode with interrupts disabled
241 * raspberry pi firmware since 29 Sept 2015 starts in HYP mode
/*
 * svcmode: drop from HYP to SVC mode.  Loads SPSR_hyp with
 * SVC|irq-off|fiq-off and ELR_hyp with the caller's return address so
 * the (elided) ERET resumes in SVC mode at the call site.  The MSR
 * encodings are spelled out numerically; 0xe is presumably the HYP
 * banked-register selector — confirm against the macro definition.
 */
243 TEXT svcmode(SB), 1, $-4
248 MOVW $(PsrDirq|PsrDfiq|PsrMsvc), R1
250 MSR(1, 1, 1, 0xe) /* MOVW R1, SPSR_HYP */
251 MSR(0, 14, 1, 0xe) /* MOVW R14, ELR_HYP */
/*
 * Small CP15 register accessors: each reads one coprocessor register
 * into R0 and returns it.  The RET lines are elided in this extract.
 */
257 TEXT cpidget(SB), 1, $-4 /* main ID */
258 MRC CpSC, 0, R0, C(CpID), C(0), CpIDid
261 TEXT fsrget(SB), 1, $-4 /* data fault status */
262 MRC CpSC, 0, R0, C(CpFSR), C(0), CpFSRdata
265 TEXT ifsrget(SB), 1, $-4 /* instruction fault status */
266 MRC CpSC, 0, R0, C(CpFSR), C(0), CpFSRinst
269 TEXT farget(SB), 1, $-4 /* fault address */
270 MRC CpSC, 0, R0, C(CpFAR), C(0x0)
273 TEXT cpctget(SB), 1, $-4 /* cache type */
274 MRC CpSC, 0, R0, C(CpID), C(CpIDidct), CpIDct
/* lcycles: read the PMU cycle counter enabled at startup */
277 TEXT lcycles(SB), 1, $-4
278 MRC CpSC, 0, R0, C(CpCLD), C(CpCLDcyc), 0
/*
 * Interrupt-priority (spl*) primitives.  Each returns the previous PSR
 * in R0 so the caller can restore it later with splx; the raise-priority
 * variants also record the caller's PC in m->splpc (offset 4 of Mach)
 * for diagnostics.  The MOVW-to-CPSR and RET lines are elided in this
 * extract.
 */
281 TEXT splhi(SB), 1, $-4
282 MOVW R14, 4(R(MACH)) /* save caller pc in m->splpc */
284 MOVW CPSR, R0 /* turn off irqs (but not fiqs) */
285 ORR $(PsrDirq), R0, R1
289 TEXT splfhi(SB), 1, $-4
290 MOVW R14, 4(R(MACH)) /* save caller pc in m->splpc */
292 MOVW CPSR, R0 /* turn off irqs and fiqs */
293 ORR $(PsrDirq|PsrDfiq), R0, R1
297 TEXT splflo(SB), 1, $-4
298 MOVW CPSR, R0 /* turn on fiqs */
299 BIC $(PsrDfiq), R0, R1
303 TEXT spllo(SB), 1, $-4
304 MOVW CPSR, R0 /* turn on irqs and fiqs */
/* conditional store: flags presumably set by an elided compare of m->splpc */
307 MOVW.NE R1, 4(R(MACH)) /* clear m->splpc */
308 BIC $(PsrDirq|PsrDfiq), R0, R1
312 TEXT splx(SB), 1, $-4
313 MOVW R14, 4(R(MACH)) /* save caller pc in m->splpc */
315 MOVW R0, R1 /* reset interrupt level */
/* spldone: empty end marker so devkprof.c can bracket the spl routines */
320 TEXT spldone(SB), 1, $0 /* end marker for devkprof.c */
/* islo: report whether interrupts are enabled (body elided in this extract) */
323 TEXT islo(SB), 1, $-4
/*
 * _tas(ulong *): atomic test-and-set via ldrex/strex.  Stores 1 into
 * the word and returns its previous value.  The LDREX/STREX pair, the
 * tas1 retry label, and the strex-status compare are elided in this
 * extract; R7 holds the value loaded by the (elided) LDREX.
 */
349 TEXT _tas(SB), $-4 /* _tas(ulong *) */
350 /* returns old (R0) after modifying (R0) */
354 MOVW $1,R2 /* new value of (R0) */
358 CMP.S $0, R7 /* old value non-zero (lock taken)? */
359 BNE lockbusy /* we lose */
/* strex reports success with 0 in its status register */
364 BNE tas1 /* strex failed? try again */
372 MOVW R7, R0 /* return old value */
/*
 * setlabel(Label*): save sp and caller pc into the label for a later
 * gotolabel.  The return (presumably 0, per the usual Plan 9 convention)
 * is elided in this extract.
 */
375 TEXT setlabel(SB), 1, $-4
376 MOVW R13, 0(R0) /* sp */
377 MOVW R14, 4(R0) /* pc */
/*
 * gotolabel(Label*): resume the context saved by setlabel; the elided
 * tail presumably returns non-zero so setlabel appears to return again.
 */
381 TEXT gotolabel(SB), 1, $-4
382 MOVW 0(R0), R13 /* sp */
383 MOVW 4(R0), R14 /* pc */
/* getcallerpc: return the caller's saved PC (body elided in this extract) */
387 TEXT getcallerpc(SB), 1, $-4
/*
 * idlehands: idle this cpu until an interrupt arrives.  Raises to
 * splfhi (irqs and fiqs masked) around the wait, then restores the
 * caller's PSR from R3.  The WFI and PSR save are elided in this
 * extract.
 */
391 TEXT idlehands(SB), $-4
393 ORR $(PsrDirq|PsrDfiq), R3, R1 /* splfhi */
402 MOVW R3, CPSR /* splx */
/* coherence: memory barrier for SMP/device ordering (body elided in this extract) */
406 TEXT coherence(SB), $-4
/*
 * mmuinvalidate: invalidate the entire unified TLB (surrounding
 * barriers are elided in this extract).
 */
417 TEXT mmuinvalidate(SB), 1, $-4
420 MCR CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv
425 * mmuinvalidateaddr(va)
426 * invalidate tlb entry for virtual page address va, ASID 0
428 TEXT mmuinvalidateaddr(SB), 1, $-4
430 MCR CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinvse
435 * `single-element' cache operations.
436 * in arm arch v7, if effective to PoC, they operate on all cache levels, so separate
437 * l2 functions are unnecessary.
/*
 * cachedwbse(va, n): write back data-cache lines covering [va, va+n).
 * The loop control and closing barriers are elided in this extract.
 */
440 TEXT cachedwbse(SB), $-4 /* D writeback SE */
446 BARRIERS /* force outstanding stores to cache */
449 ADD R0, R1 /* R1 is end address */
/* round the start address down to a cache-line boundary */
450 BIC $(CACHELINESZ-1), R0 /* cache line start */
452 MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEwb), CpCACHEse
453 /* can't have a BARRIER here since it zeroes R0 */
460 * TLB on armv7 loads from cache, so no need for writeback
/* cachedwbtlb: make page-table updates visible to the table walker (body elided) */
462 TEXT cachedwbtlb(SB), $-4
/*
 * cachedwbinvse(va, n): write back, then invalidate, data-cache lines
 * covering [va, va+n).  Runs with the PSR saved in R3 and restored at
 * the end; the PSR save, loop control, and drain loop body are partly
 * elided in this extract.
 */
467 TEXT cachedwbinvse(SB), $-4 /* D writeback+invalidate SE */
473 BARRIERS /* force outstanding stores to cache */
476 ADD R0, R1 /* R1 is end address */
477 BIC $(CACHELINESZ-1), R0 /* cache line start */
479 MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEwbi), CpCACHEse
480 /* can't have a BARRIER here since it zeroes R0 */
484 _wait: /* drain write buffer */
487 MOVW R3, CPSR /* splx */
/*
 * cachedinvse(va, n): invalidate data-cache lines covering [va, va+n)
 * without writing them back — callers must not have dirty data in the
 * range.  Loop control is elided in this extract.
 */
490 TEXT cachedinvse(SB), $-4 /* D invalidate SE */
496 BARRIERS /* force outstanding stores to cache */
499 ADD R0, R1 /* R1 is end address */
500 BIC $(CACHELINESZ-1), R0 /* cache line start */
502 MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEinvd), CpCACHEse
503 /* can't have a BARRIER here since it zeroes R0 */
509 #include "cache.v7.s"