2 * sheevaplug reboot code
4 * R11 is used by the loader as a temporary, so avoid it.
9 * Turn off MMU, then copy the new kernel to its correct location
10 * in physical memory. Then jump to the start of the kernel.
13 /* main(PADDR(entry), PADDR(code), size); */
/*
 * Body of main (the TEXT main(SB) line lies in an elided region above).
 * With the MMU off, copies the new kernel image to its final physical
 * address, then jumps to its entry point.
 * Register roles, from the visible loads below:
 *   R8 = PADDR(entry), R9 = PADDR(code) source, R10 = byte count.
 */
17 MOVW R0, p1+0(FP) /* destination, passed in R0 */
19 /* copy in arguments from frame before the stack becomes unusable */
20 MOVW R0, R8 /* entry point */
21 MOVW p2+4(FP), R9 /* source */
22 MOVW n+8(FP), R10 /* byte count */
26 /* now back in 29- or 26-bit addressing, mainly for SB */
28 /* turn the MMU off */
36 /* don't care about R14 */
/* NOTE(review): elided lines presumably copy R8-R10 into R4-R6 so they survive BLs; verify against full file */
44 MOVW R9, R4 /* restore regs across function calls */
48 /* set up a new stack for local vars and memmove args */
49 MOVW R6, SP /* tiny trampoline stack at the copy destination */
50 SUB $(0x20 + 4), SP /* back up before a.out header */
52 MOVW R14, -48(SP) /* store return addr */
53 SUB $48, SP /* allocate stack frame */
55 MOVW R6, 44(SP) /* save dest/entry */
56 MOVW R5, 40(SP) /* save count */
/* build memmove(dest, src, size) argument frame; the BL itself is in an elided line */
61 MOVW R6, 4(SP) /* push dest */
63 MOVW R4, 8(SP) /* push src */
64 MOVW R5, 12(SP) /* push size */
67 MOVW 44(SP), R6 /* restore R6 (dest/entry) */
68 MOVW 40(SP), R5 /* restore R5 (count) */
79 * jump to kernel entry point. Note the true kernel entry point is
80 * the virtual address KZERO|R6, but this must wait until
81 * the MMU is enabled by the kernel in l.s
/* self-ORR is a no-op; works around a linker bug with an instruction here */
83 ORR R6, R6 /* NOP: avoid link bug */
87 * turn the caches off, double map 0 & KZERO, invalidate TLBs, revert to
88 * tiny addresses. upon return, it will be safe to turn off the mmu.
90 TEXT cachesoff(SB), 1, $-4
/* R0 = SVC mode with IRQ+FIQ masked; presumably written to CPSR in an elided line */
91 MOVW $(PsrDirq|PsrDfiq|PsrMsvc), R0
/* no usable stack here: park the link register in a fixed low-memory word */
93 MOVW $KADDR(0x100-4), R7 /* just before this code */
94 MOVW R14, (R7) /* save link */
/* read-modify-write CP15 control: clear write-buffer, I-cache, D-cache, alignment-check enables */
98 MRC CpSC, 0, R0, C(CpCONTROL), C(0)
99 BIC $(CpCwb|CpCicache|CpCdcache|CpCalign), R0
100 MCR CpSC, 0, R0, C(CpCONTROL), C(0)
103 /* redo double map of 0, KZERO */
104 MOVW $(L1+L1X(PHYSDRAM)), R4 /* address of PTE for 0 */
105 MOVW $PTEDRAM, R2 /* PTE bits */
106 // MOVW $PTEIO, R2 /* PTE bits (alternative, kept for reference) */
/* PTE-filling loop body; loop bounds and the store are in elided lines */
110 ORR R3, R2, R1 /* first identity-map 0 to 0, etc. */
112 ADD $4, R4 /* bump PTE address */
113 ADD $MiB, R3 /* bump pa (one section per PTE) */
/* invalidate D-side and unified TLBs so the new map takes effect */
119 MCR CpSC, 0, R0, C(CpTLB), C(CpTLBinvd), CpTLBinv
120 MCR CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv
123 /* back to 29- or 26-bit addressing, mainly for SB */
124 MRC CpSC, 0, R0, C(CpCONTROL), C(0)
125 BIC $(CpCd32|CpCi32), R0
126 MCR CpSC, 0, R0, C(CpCONTROL), C(0)
/* recover the link register saved at entry; the return is in an elided line */
129 MOVW $KADDR(0x100-4), R7 /* just before this code */
130 MOVW (R7), R14 /* restore link */
/* _r15warp: body lies entirely in elided lines; presumably rewrites PC to switch address spaces — confirm against full file */
133 TEXT _r15warp(SB), 1, $-4
/*
 * mmudisable: read-modify-write CP15 control to clear the high-vectors,
 * MMU, D-cache, I-cache and write-buffer enable bits.
 * The RET lies in an elided line.
 */
138 TEXT mmudisable(SB), 1, $-4
139 MRC CpSC, 0, R0, C(CpCONTROL), C(0)
140 BIC $(CpChv|CpCmmu|CpCdcache|CpCicache|CpCwb), R0
141 MCR CpSC, 0, R0, C(CpCONTROL), C(0)
145 TEXT mmuinvalidate(SB), 1, $-4 /* invalidate all */
/* invalidate the entire unified TLB; the RET is in an elided line */
147 MCR CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv
151 TEXT cacheuwbinv(SB), 1, $-4 /* D+I writeback+invalidate */
/* save PSR and compute IRQ-masked copy (splhi); the write to CPSR is in an elided line */
153 MOVW CPSR, R3 /* splhi */
154 ORR $(PsrDirq), R3, R1
157 _uwbinv: /* D writeback+invalidate */
/* CP15 test-clean-and-invalidate of the D-cache; the conditional branch back to _uwbinv is in an elided line */
158 MRC CpSC, 0, PC, C(CpCACHE), C(CpCACHEwbi), CpCACHEtest
161 MOVW $0, R0 /* I invalidate */
162 MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEinvi), CpCACHEall
163 /* drain L1 write buffer, also drains L2 eviction buffer on sheeva */
/* flush, then invalidate, the entire L2 via Marvell CP15 test-configuration ops */
166 MCR CpSC, CpL2, R0, C(CpTESTCFG), C(CpTCl2flush), CpTCl2all
168 MCR CpSC, CpL2, R0, C(CpTESTCFG), C(CpTCl2inv), CpTCl2all
/* restore the saved PSR (splx); the RET is in an elided line */
171 MOVW R3, CPSR /* splx */