/*
 * NOTE(review): Plan 9 ARM assembly reboot trampoline.  This chunk is
 * fragmented: the numeric prefix on each line is an original source line
 * number fused in by extraction, and instructions between non-consecutive
 * numbers are missing (including the TEXT main(SB) directive itself).
 * Verify any change against the complete rebootcode source file.
 */
4 * must fit in 11K to avoid stepping on PTEs; see mem.h.
5 * cache parameters are at CACHECONF.
10 * All caches but L1 should be off before calling this.
11 * Turn off MMU, then copy the new kernel to its correct location
12 * in physical memory. Then jump to the start of the kernel.
15 /* main(PADDR(entry), PADDR(code), size); */
/* spill first argument back to the caller's frame so it survives until reloaded below */
18 MOVW R0, p1+0(FP) /* destination, passed in R0 */
23 /* now back in 29- or 26-bit addressing, mainly for SB */
24 /* double mapping of PHYSDRAM & KZERO now in effect */
27 /* before turning MMU off, switch to PHYSDRAM-based addresses */
30 MOVW $KSEGM, R7 /* clear segment bits */
31 MOVW $PHYSDRAM, R0 /* set dram base bits */
/* rewrite the static base (SB, R12) from a KZERO address to its PHYSDRAM alias */
32 BIC R7, R12 /* adjust SB */
36 /* don't care about saving R14; we're not returning */
39 * now running in PHYSDRAM segment, not KZERO.
43 /* invalidate mmu mappings */
44 MOVW $KZERO, R0 /* some valid virtual address */
45 MTCP CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv
/*
 * read-modify-write of the CP15 control register.
 * NOTE(review): the BIC that clears the MMU-enable bit between this read
 * and the write-back is not visible in this chunk — confirm in full source.
 */
52 MFCP CpSC, 0, R0, C(CpCONTROL), C(0)
54 MTCP CpSC, 0, R0, C(CpCONTROL), C(0)
58 /* copy in arguments from stack frame before moving stack */
59 MOVW p2+4(FP), R4 /* phys source */
60 MOVW n+8(FP), R5 /* byte count */
61 MOVW p1+0(FP), R6 /* phys destination */
63 /* set up a new stack for local vars and memmove args */
/* stack grows down from just below the new kernel's load address */
64 MOVW R6, SP /* tiny trampoline stack */
65 SUB $(0x20 + 4), SP /* back up before a.out header */
67 // MOVW R14, -48(SP) /* store return addr */
68 SUB $48, SP /* allocate stack frame */
/* preserve count and dest/entry across the memmove call below */
70 MOVW R5, 40(SP) /* save count */
71 MOVW R6, 44(SP) /* save dest/entry */
73 /* copy the new kernel into place */
76 MOVW 40(SP), R5 /* restore count */
77 MOVW 44(SP), R6 /* restore dest/entry */
/* lay out memmove(dest, src, size) arguments Plan-9 style: args start at 4(SP) */
78 MOVW R6, 0(SP) /* normally saved LR goes here */
79 MOVW R6, 4(SP) /* push dest */
81 MOVW R4, 8(SP) /* push src */
82 MOVW R5, 12(SP) /* push size */
/* DELAY: presumably a spin-wait macro defined elsewhere — TODO confirm */
87 DELAY(printloopret, 1)
92 * jump to kernel entry point. Note the true kernel entry point is
93 * the virtual address KZERO|R6, but this must wait until
94 * the MMU is enabled by the kernel in l.s
96 MOVW 44(SP), R6 /* restore R6 (dest/entry) */
/* NOTE(review): the indirect jump through R6 follows here but is not visible in this chunk */
97 ORR R6, R6 /* NOP: avoid link bug */
104 * turn the caches off, double map PHYSDRAM & KZERO, invalidate TLBs, revert
105 * to tiny addresses. upon return, it will be safe to turn off the mmu.
/*
 * NOTE(review): fragmented extraction — line-number prefixes are fused in
 * and several instructions (PTE store, loop branch, return) are missing
 * between non-consecutive numbers.  Check against the full source.
 */
107 TEXT cachesoff(SB), 1, $-4
108 MOVM.DB.W [R14,R1-R10], (R13) /* save regs on stack */
112 SUB $12, SP /* paranoia */
114 ADD $12, SP /* paranoia */
/* read-modify-write CP15 control: clear I- and D-cache enable bits */
116 MFCP CpSC, 0, R0, C(CpCONTROL), C(0)
117 BIC $(CpCicache|CpCdcache), R0
118 MTCP CpSC, 0, R0, C(CpCONTROL), C(0) /* caches off */
125 /* invalidate stale TLBs before changing them */
126 MOVW $KZERO, R0 /* some valid virtual address */
127 MTCP CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv
130 /* redo double map of PHYSDRAM, KZERO */
134 MOVW $(L1+L1X(PHYSDRAM)), R4 /* address of PHYSDRAM's PTE */
135 MOVW $PTEDRAM, R2 /* PTE bits */
/* R5 presumably counts MiB sections still to map — TODO confirm loop body */
136 MOVW $DOUBLEMAPMBS, R5
/*
 * section-map fill loop: build PTE in R1, advance to next entry/pa.
 * NOTE(review): the store of R1 into (R4) and the decrement/branch that
 * close the loop are not visible in this chunk.
 */
138 ORR R3, R2, R1 /* first identity-map 0 to 0, etc. */
140 ADD $4, R4 /* bump PTE address */
141 ADD $MiB, R3 /* bump pa */
147 * flush stale TLB entries
151 MOVW $KZERO, R0 /* some valid virtual address */
152 MTCP CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv
155 /* switch back to PHYSDRAM addressing, mainly for SB */
156 MOVW $KSEGM, R7 /* clear segment bits */
157 MOVW $PHYSDRAM, R0 /* set dram base bits */
158 BIC R7, R12 /* adjust SB */
163 MOVM.IA.W (R13), [R14,R1-R10] /* restore regs from stack */
/* rewrite the return address into the PHYSDRAM segment before returning */
165 MOVW $KSEGM, R0 /* clear segment bits */
166 BIC R0, R14 /* adjust link */
167 MOVW $PHYSDRAM, R0 /* set dram base bits */
/*
 * _r15warp: strip segment bits from the return address so execution
 * continues at the aliased (PHYSDRAM-based) PC.  Assumes the caller has
 * loaded the segment mask (presumably KSEGM) into R7 — TODO confirm;
 * the remainder of the body is not visible in this chunk.
 */
172 TEXT _r15warp(SB), 1, $-4
173 BIC R7, R14 /* link */
/* panic: empty stub to satisfy references from linked-in code; does nothing here */
180 TEXT panic(SB), 1, $-4 /* stub */
/* pczeroseg: empty stub to satisfy references from linked-in code; does nothing here */
184 TEXT pczeroseg(SB), 1, $-4 /* stub */
187 #include "cache.v7.s"
/*
 * printhex: print a 32-bit word as hex, one nibble at a time starting
 * from the most significant (R5 starts at 28 = 32-4 bits of right shift).
 * NOTE(review): the shift/mask/output instructions and the loop branch
 * are not visible in this fragmented chunk — confirm in full source.
 */
189 /* modifies R0, R3-R6 */
190 TEXT printhex(SB), 1, $-4
192 MOVW $(32-4), R5 /* bits to shift right */
198 BLE nothex /* if R4 <= 9, jump */
/* nibble is 10..15: convert past '9' into 'a'..'f' */
199 ADD $('a'-('9'+1)), R4