2 * Size memory and create the kernel page-tables on the fly while doing so.
3 * Called from main(), this code should only be run by the bootstrap processor.
/*
 * Index extraction for 386 two-level paging: PDX takes bits 31-22
 * of a virtual address (page-directory index), PTX bits 21-12
 * (page-table index).  Each table has 1024 entries, hence the
 * 0x03FF mask.
 */
14 #define PDX(va) ((((ulong)(va))>>22) & 0x03FF)
15 #define PTX(va) ((((ulong)(va))>>12) & 0x03FF)
/*
 * Memory-region type codes and sizing limits used by the resource
 * maps below.  NOTE(review): listing fragment -- original lines
 * 21-24 (further enumerators, if any) are not visible here.
 */
18 MemUPA = 0, /* unbacked physical address */
19 MemRAM = 1, /* physical memory */
20 MemUMB = 2, /* upper memory block (<16MB) */
25 MemMinMB = 4, /* minimum physical memory (<=4MB) */
26 MemMaxMB = 768, /* maximum physical memory to check */
/*
 * Resource maps: each RMap names a class of address space and is
 * backed by a fixed-size Map array (the initializer lines pairing
 * each RMap with its array are partly missing from this listing).
 * rmapupa tracks unallocated unbacked physical addresses, xrmapupa
 * the allocated ones, rmapram physical RAM, rmapumb ROM/unused UMB
 * space and rmapumbrw read-write UMB space.
 */
45 static RMap rmapupa = {
46 "unallocated unbacked physical memory",
51 static Map xmapupa[8];
52 static RMap xrmapupa = {
53 "unbacked physical memory",
59 static RMap rmapram = {
65 static Map mapumb[64];
66 static RMap rmapumb = {
72 static Map mapumbrw[8];
73 static RMap rmapumbrw = {
/*
 * memdebug (fragment): print the CMOS memory-size registers and then
 * dump all four resource maps (addr, size, end for every entry).
 * NOTE(review): the function header and enclosing braces fall outside
 * this listing excerpt.
 */
83 ulong maxpa, maxpa1, maxpa2;
/*
 * CMOS NVRAM registers: 0x17/0x18 and 0x30/0x31 presumably hold the
 * extended-memory size in KB and 0x15/0x16 the base-memory size, per
 * the standard PC CMOS layout -- TODO confirm against nvramread().
 */
88 maxpa = (nvramread(0x18)<<8)|nvramread(0x17);
89 maxpa1 = (nvramread(0x31)<<8)|nvramread(0x30);
90 maxpa2 = (nvramread(0x16)<<8)|nvramread(0x15);
91 print("maxpa = %luX -> %luX, maxpa1 = %luX maxpa2 = %luX\n",
92 maxpa, MB+maxpa*KB, maxpa1, maxpa2);
/* walk each map until the zero-size sentinel entry */
94 for(mp = rmapram.map; mp->size; mp++)
95 print("%8.8luX %8.8luX %8.8luX\n", mp->addr, (ulong)mp->size, mp->addr+mp->size);
96 for(mp = rmapumb.map; mp->size; mp++)
97 print("%8.8luX %8.8luX %8.8luX\n", mp->addr, (ulong)mp->size, mp->addr+mp->size);
98 for(mp = rmapumbrw.map; mp->size; mp++)
99 print("%8.8luX %8.8luX %8.8luX\n", mp->addr, (ulong)mp->size, mp->addr+mp->size);
100 for(mp = rmapupa.map; mp->size; mp++)
101 print("%8.8luX %8.8luX %8.8luX\n", mp->addr, (ulong)mp->size, mp->addr+mp->size);
/*
 * mapfree (fragment): return the range [addr, addr+size) to rmap,
 * keeping the map sorted by address and coalescing with adjacent
 * free entries where possible.  NOTE(review): only part of the body
 * is visible in this listing; the insertion/shift path is missing.
 */
105 mapfree(RMap* rmap, ulong addr, ulong size)
/* find the first entry at or beyond addr (maps are address-sorted) */
114 for(mp = rmap->map; mp->addr <= addr && mp->size; mp++)
/* case: freed range abuts the end of the previous entry -- extend it */
117 if(mp > rmap->map && (mp-1)->addr+(mp-1)->size == addr){
118 (mp-1)->size += size;
/* ...and if it also abuts the next entry, merge all three */
119 if(addr+size == mp->addr){
120 (mp-1)->size += mp->size;
/* compact the map: copy successors down over the absorbed entry */
123 (mp-1)->addr = mp->addr;
124 (mp-1)->size = mp->size;
/* case: freed range abuts only the start of the next entry */
129 if(addr+size == mp->addr && mp->size){
/* map array full: the range cannot be recorded and is lost */
134 if(mp >= rmap->mapend){
135 print("mapfree: %s: losing 0x%luX, %lud\n",
136 rmap->name, addr, size);
/*
 * mapalloc (fragment): allocate size bytes from rmap, either at the
 * specific address addr (addr != 0) or at any suitably aligned free
 * address.  Returns the allocated physical address; unused head
 * fragments are returned to the map via mapfree().  NOTE(review):
 * listing excerpt -- locking and several statements are not visible.
 */
151 mapalloc(RMap* rmap, ulong addr, int size, int align)
157 for(mp = rmap->map; mp->size; mp++){
162 * A specific address range has been given:
163 * if the current map entry is greater then
164 * the address is not in the map;
165 * if the current map entry does not overlap
166 * the beginning of the requested range then
167 * continue on to the next map entry;
168 * if the current map entry does not entirely
169 * contain the requested range then the range
/* both tests are phrased to avoid ulong overflow (see comments) */
174 if(mp->size < addr - maddr) /* maddr+mp->size < addr, but no overflow */
176 if(addr - maddr > mp->size - size) /* addr+size > maddr+mp->size, but no overflow */
/* round the candidate address up to the requested alignment */
182 maddr = ((maddr+align-1)/align)*align;
183 if(mp->addr+mp->size-maddr < size)
/* carve the allocation out of the front of this entry */
187 mp->addr = maddr+size;
188 mp->size -= maddr-oaddr+size;
/* entry exhausted: shift the remaining entries down over it */
192 (mp-1)->addr = mp->addr;
193 }while((mp-1)->size = mp->size); /* assignment intended: stop at sentinel */
/* give back the alignment padding skipped at the front */
198 mapfree(rmap, oaddr, maddr-oaddr);
/*
 * umbscan (fragment): probe the UMB area for free space.
 * NOTE(review): the function header and several statements (pointer
 * advances, the 0xE0000 block handling) are outside this excerpt.
 */
213 * Scan the Upper Memory Blocks (0xA0000->0xF0000) for pieces
214 * which aren't used; they can be used later for devices which
215 * want to allocate some virtual address space.
216 * Check for two things:
217 * 1) device BIOS ROM. This should start with a two-byte header
218 * of 0x55 0xAA, followed by a byte giving the size of the ROM
219 * in 512-byte chunks. These ROM's must start on a 2KB boundary.
220 * 2) device memory. This is read-write.
221 * There are some assumptions: there's VGA memory at 0xA0000 and
222 * the VGA BIOS ROM is at 0xC0000. Also, if there's no ROM signature
223 * at 0xE0000 then the whole 64KB up to 0xF0000 is theoretically up
224 * for grabs; check anyway.
226 p = KADDR(0xD0000); /*RSC: changed from 0xC0000 */
227 while(p < (uchar*)KADDR(0xE0000)){
228 if (p[0] == 0x55 && p[1] == 0xAA) {
229 /* Skip p[2] chunks of 512 bytes. Test for 0x55 AA before
230 poking obtrusively, or else the Thinkpad X20 dies when
231 setting up the cardbus (PB) */
/*
 * Write-probe the 2KB chunk (0xCC presumably written by unseen
 * code just above -- TODO confirm): if the probe did not stick,
 * the space is ROM or empty.
 */
238 if(p[0] != 0xCC || p[2*KB-1] != 0xCC){
242 if(p[0] == 0x55 && p[1] == 0xAA){
/* reads back as all 1s: nothing mapped here, usable as UMB */
246 if(p[0] == 0xFF && p[1] == 0xFF)
247 mapfree(&rmapumb, PADDR(p), 2*KB);
/* probe stuck: chunk is read-write device memory */
250 mapfree(&rmapumbrw, PADDR(p), 2*KB);
/* the 0xE0000 64KB block: only usable if no ROM signature there */
255 if(p[0] != 0x55 || p[1] != 0xAA){
258 if(p[0] != 0xCC && p[64*KB-1] != 0xCC)
259 mapfree(&rmapumb, PADDR(p), 64*KB);
267 /* A hack to initialize unbacked physical memory. It's assumed PCI space is assigned by
268 the BIOS in the 0xF0000000 range and 9load never needs more than 0x2000... to run. These
269 values leave ample space for memory allocations for uninitialized PCI cards (e.g. cardbus
/* seed the UPA map with [0x40000000, 4GB): 0 - maxmem wraps to the size */
271 ulong maxmem = 0x40000000;
274 mapfree(&rmapupa, maxmem, 0x00000000-maxmem);
/*
 * umbmalloc (fragment): allocate size bytes of UMB space at addr
 * (or anywhere if addr == 0) and return its kernel virtual address;
 * the failure path returning 0 is not visible in this excerpt.
 */
280 umbmalloc(ulong addr, int size, int align)
284 if(a = mapalloc(&rmapumb, addr, size, align))
285 return (ulong)KADDR(a);
/* umbfree: return a umbmalloc'd range (kernel virtual addr) to the UMB map */
291 umbfree(ulong addr, int size)
293 mapfree(&rmapumb, PADDR(addr), size);
/*
 * umbrwmalloc (fragment): allocate read-write UMB space.  Tries the
 * r/w map first; otherwise falls back to the ROM/unused map and
 * write-probes the memory (0xCC markers) to check it really is r/w.
 */
297 umbrwmalloc(ulong addr, int size, int align)
302 if(a = mapalloc(&rmapumbrw, addr, size, align))
303 return(ulong)KADDR(a);
306 * Perhaps the memory wasn't visible before
307 * the interface is initialised, so try again.
309 if((a = umbmalloc(addr, size, align)) == 0)
/* probe stuck at both ends: memory is writable, keep the allocation */
314 if(p[0] == 0xCC && p[size-1] == 0xCC)
/* umbrwfree: return a umbrwmalloc'd range to the read-write UMB map */
322 umbrwfree(ulong addr, int size)
324 mapfree(&rmapumbrw, PADDR(addr), size);
/*
 * mmuwalk (fragment): walk the two-level page table rooted at pdb
 * and return a pointer to the entry for va at the given level,
 * optionally creating the level-2 table.  NOTE(review): the level-1
 * return path and switch structure are not visible in this excerpt.
 */
328 mmuwalk(ulong* pdb, ulong va, int level, int create)
333 * Walk the page-table pointed to by pdb and return a pointer
334 * to the entry for virtual address va at the requested level.
335 * If the entry is invalid and create isn't requested then bail
336 * out early. Otherwise, for the 2nd level walk, allocate a new
337 * page-table page and register it in the 1st level.
339 table = &pdb[PDX(va)];
340 if(!(*table & PTEVALID) && create == 0)
/* a valid large-page (PTESIZE) entry here cannot be walked further */
353 panic("mmuwalk2: va 0x%ux entry 0x%ux", va, *table);
354 if(!(*table & PTEVALID)){
/* allocate and install a new page-table page in the directory */
355 pa = PADDR(ialloc(BY2PG, BY2PG));
356 *table = pa|PTEWRITE|PTEVALID;
358 table = KADDR(PPN(*table));
360 return &table[PTX(va)];
/* serialises page-table updates in mmukmap (unlock visible below) */
static Lock mmukmaplock;
/*
 * mmukmap (fragment): map physical range [pa, pa+size) into the
 * kernel page tables at va (KADDR(pa) if va == 0), using 4MB pages
 * where the CPU supports PSE and alignment allows.  Returns the
 * physical address just past the mapped range (0 if nothing new was
 * mapped -- TODO confirm; the return paths are not in this excerpt).
 */
367 mmukmap(ulong pa, ulong va, int size)
369 ulong pae, *table, *pdb, pgsz, *pte, x;
371 extern int cpuidax, cpuiddx;
373 pdb = KADDR(getcr3());
/* PSE available (CPUID EDX bit 3) and enabled (CR4.PSE, bit 4)? */
374 if((cpuiddx & 0x08) && (getcr4() & 0x10))
382 va = (ulong)KADDR(pa);
389 table = &pdb[PDX(va)];
391 * Possibly already mapped.
393 if(*table & PTEVALID){
394 if(*table & PTESIZE){
396 * Big page. Does it fit within?
397 * If it does, adjust pgsz so the correct end can be
398 * returned and get out.
399 * If not, adjust pgsz up to the next 4MB boundary
404 panic("mmukmap1: pa 0x%ux entry 0x%ux",
419 * Little page. Walk to the entry.
420 * If the entry is valid, set pgsz and continue.
421 * If not, make it so, set pgsz, sync and continue.
423 pte = mmuwalk(pdb, va, 2, 0);
424 if(pte && *pte & PTEVALID){
427 panic("mmukmap2: pa 0x%ux entry 0x%ux",
440 * Not mapped. Check if it can be mapped using a big page -
441 * starts on a 4MB boundary, size >= 4MB and processor can do it.
442 * If not a big page, walk the walk, talk the talk.
445 if(pse && (pa % (4*MB)) == 0 && (pae >= pa+4*MB)){
446 *table = pa|PTESIZE|PTEWRITE|PTEUNCACHED|PTEVALID;
450 pte = mmuwalk(pdb, va, 2, 1);
451 *pte = pa|PTEWRITE|PTEUNCACHED|PTEVALID;
458 unlock(&mmukmaplock);
461 * If something was added
462 * then need to sync up.
/*
 * upamalloc (fragment): allocate unbacked physical address space and
 * map it into the kernel via mmukmap.  Returns a PHYSICAL address
 * (see the warning below); the error/return lines are not visible in
 * this excerpt.
 */
471 upamalloc(ulong addr, int size, int align)
477 if((a = mapalloc(&rmapupa, addr, size, align)) == 0){
483 * This is a travesty, but they all are.
485 ae = mmukmap(a, 0, size);
488 * Should check here that it was all delivered
489 * and put it back and barf if not.
494 * Be very careful this returns a PHYSICAL address.
494 * Be very careful this returns a PHYSICAL address.
500 upafree(ulong pa, int size)