4 * must fit in 11K to avoid stepping on PTEs; see mem.h.
6 * R11 is used by the loader as a temporary, so avoid it.
11 * Turn off MMU, then copy the new kernel to its correct location
12 * in physical memory. Then jump to the start of the kernel.
15 /* main(PADDR(entry), PADDR(code), size); */
/*
 * Copy the new kernel image to its final physical destination and
 * jump to it.  Runs with interrupts masked, then with MMU and caches
 * disabled, on a tiny trampoline stack built just below the
 * destination's a.out header.  Does not return.
 * NOTE(review): the TEXT directive and several instructions (the BLs
 * to cachesoff and memmove, the final branch) are not visible in this
 * excerpt — confirm against the full file.
 */
19 MOVW R0, p1+0(FP) /* destination, passed in R0 */
/* mask IRQ & FIQ in CPSR before tearing down the MMU */
22 ORR $(PsrDirq|PsrDfiq), R0
23 MOVW R0, CPSR /* splhi */
/*
 * make cp15 operations serial and non-pipelined via the auxiliary
 * control register, so the MMU/cache changes below take effect in
 * program order.
 */
27 MRC CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl
28 BIC $CpACasa, R1 /* no speculative I access forwarding to mem */
30 ORR $(CpACcachenopipe|CpACcp15serial|CpACcp15waitidle|CpACcp15pipeflush), R1
31 MCR CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl
35 /* now back in 29- or 26-bit addressing, mainly for SB */
36 /* double mapping of PHYSDRAM & KZERO now in effect */
43 /* first switch to PHYSDRAM-based addresses */
46 MOVW $KSEGM, R7 /* clear segment bits */
47 MOVW $PHYSDRAM, R0 /* set dram base bits */
/* R12 is SB (static base) in Plan 9 ARM; rebase it into PHYSDRAM */
48 BIC R7, R12 /* adjust SB */
52 /* don't care about saving R14; we're not returning */
55 * now running in PHYSDRAM segment, not KZERO.
/* SP slop around the (not shown) call — guards against callee frame quirks */
59 SUB $12, SP /* paranoia */
61 ADD $12, SP /* paranoia */
63 /* invalidate mmu mappings */
64 MOVW $KZERO, R0 /* some valid virtual address */
65 MCR CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv
/* turn off the MMU and both caches; all addresses are physical below */
69 MRC CpSC, 0, R0, C(CpCONTROL), C(0)
70 BIC $(CpCmmu|CpCdcache|CpCicache), R0
71 MCR CpSC, 0, R0, C(CpCONTROL), C(0) /* mmu off */
75 /* copy in arguments from stack frame before moving stack */
76 MOVW p2+4(FP), R4 /* phys source */
77 MOVW n+8(FP), R5 /* byte count */
78 MOVW p1+0(FP), R6 /* phys destination */
80 /* set up a new stack for local vars and memmove args */
81 MOVW R6, SP /* tiny trampoline stack */
82 SUB $(0x20 + 4), SP /* back up before a.out header */
84 // MOVW R14, -48(SP) /* store return addr */
85 SUB $48, SP /* allocate stack frame */
/* stash count and dest/entry above the outgoing-argument slots (0..12) */
87 MOVW R5, 40(SP) /* save count */
88 MOVW R6, 44(SP) /* save dest/entry */
93 MOVW 40(SP), R5 /* restore count */
94 MOVW 44(SP), R6 /* restore dest/entry */
/* build a memmove(dest, src, size) argument frame by hand */
95 MOVW R6, 0(SP) /* normally saved LR goes here */
96 MOVW R6, 4(SP) /* push dest */
98 MOVW R4, 8(SP) /* push src */
99 MOVW R5, 12(SP) /* push size */
/* short busy-wait delays; labels suggest they follow console CR/NL output
 * (the PUTC lines are not visible here — confirm) */
109 DELAY(printloopret, 1)
111 DELAY(printloopnl, 1)
114 * jump to kernel entry point. Note the true kernel entry point is
115 * the virtual address KZERO|R6, but this must wait until
116 * the MMU is enabled by the kernel in l.s
118 MOVW 44(SP), R6 /* restore R6 (dest/entry) */
119 ORR R6, R6 /* NOP: avoid link bug */
128 TEXT cachesoff(SB), 1, $-4
/*
 * cachesoff: disable the I & D caches, rebuild the double map of
 * PHYSDRAM & KZERO, invalidate TLBs and switch SB and the return
 * address to PHYSDRAM-based addresses.  After it returns it is safe
 * for the caller to turn off the MMU.  $-4: no frame of its own;
 * saves registers manually below.
 */
129 MOVM.DB.W [R14,R1-R10], (R13) /* save regs on stack */
/* mask IRQ & FIQ (splhi); the CPSR write itself is not visible here */
131 ORR $(PsrDirq|PsrDfiq), R0
/* SP slop around the (not shown) call — guards against callee frame quirks */
135 SUB $12, SP /* paranoia */
137 ADD $12, SP /* paranoia */
/* clear the cache-enable bits in the system control register */
139 MRC CpSC, 0, R0, C(CpCONTROL), C(0)
140 BIC $(CpCicache|CpCdcache), R0
141 MCR CpSC, 0, R0, C(CpCONTROL), C(0) /* caches off */
148 /* invalidate stale TLBs before changing them */
149 MOVW $KZERO, R0 /* some valid virtual address */
150 MCR CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv
153 /* redo double map of PHYSDRAM, KZERO */
/* R4 = L1 PTE slot, R2 = section PTE bits, R5 = MiB section count;
 * R3 = physical address, initialised outside this excerpt
 * (presumably 0, per the identity-map comment below — confirm) */
157 MOVW $(L1+L1X(PHYSDRAM)), R4 /* address of PHYSDRAM's PTE */
158 MOVW $PTEDRAM, R2 /* PTE bits */
159 MOVW $DOUBLEMAPMBS, R5
/* per-MiB mapping loop body (PTE store and loop control not shown) */
161 ORR R3, R2, R1 /* first identity-map 0 to 0, etc. */
163 ADD $4, R4 /* bump PTE address */
164 ADD $MiB, R3 /* bump pa */
170 * flush stale TLB entries
174 MOVW $KZERO, R0 /* some valid virtual address */
175 MCR CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv
178 /* switch back to PHYSDRAM addressing, mainly for SB */
179 MOVW $KSEGM, R7 /* clear segment bits */
180 MOVW $PHYSDRAM, R0 /* set dram base bits */
181 BIC R7, R12 /* adjust SB */
186 MOVM.IA.W (R13), [R14,R1-R10] /* restore regs from stack */
/* rewrite the saved return address into the PHYSDRAM segment so the
 * RET (not visible here) lands in the double-mapped copy */
188 MOVW $KSEGM, R0 /* clear segment bits */
189 BIC R0, R14 /* adjust link */
190 MOVW $PHYSDRAM, R0 /* set dram base bits */
195 TEXT _r15warp(SB), 1, $-4
/*
 * _r15warp: strip the segment bits (mask in R7) from the caller's
 * return address so execution continues in the rebased segment.
 * NOTE(review): the rest of the body (setting the new base bits and
 * moving the result to PC) lies outside this excerpt.
 */
196 BIC R7, R14 /* link */
/* panic: stub — presumably satisfies references from shared/included
 * code (e.g. cache.v7.s); body not visible in this excerpt */
203 TEXT panic(SB), 1, $-4 /* stub */
/* pczeroseg: stub — presumably satisfies an external reference; body
 * not visible in this excerpt */
206 TEXT pczeroseg(SB), 1, $-4 /* stub */
209 #include "cache.v7.s"