2 * Size memory and create the kernel page-tables on the fly while doing so.
3 * Called from main(), this code should only be run by the bootstrap processor.
5 * MemMin is what the bootstrap code in l.s has already mapped;
8 #include "../port/lib.h"
15 u32int MemMin; /* set by l.s */
/*
 * Classes of physical address range tracked by the maps below.
 * NOTE(review): further enumerators and the Map/RMap struct bodies
 * are elided in this listing.
 */
20 MemUPA = 0, /* unbacked physical address */
21 MemRAM = 1, /* physical memory */
22 MemUMB = 2, /* upper memory block (<16MB) */
29 typedef struct Map Map;
35 typedef struct RMap RMap;
45 * Memory allocation tracking.
/*
 * Each RMap is a named table of free address ranges backed by a
 * fixed-size Map array; the second initializer shown points at the
 * last usable slot (the map end sentinel).
 * NOTE(review): some initializer lines are elided in this listing.
 */
47 static Map mapupa[64];
48 static RMap rmapupa = {
49 "unallocated unbacked physical memory",
51 &mapupa[nelem(mapupa)-1],
/* free RAM pages available for allocation */
54 static Map mapram[16];
55 static RMap rmapram = {
58 &mapram[nelem(mapram)-1],
/* upper memory blocks (0xA0000-0xFFFFF region, <16MB) */
61 static Map mapumb[64];
62 static RMap rmapumb = {
65 &mapumb[nelem(mapumb)-1],
/* upper memory blocks that probed as read-write (see umbscan) */
68 static Map mapumbrw[16];
69 static RMap rmapumbrw = {
72 &mapumbrw[nelem(mapumbrw)-1],
/*
 * Dump one resource map: its name, then each free range as
 * start end (size). NOTE(review): the function signature is
 * elided in this listing; this appears to be its whole body.
 */
80 print("%s\n", rmap->name);
81 for(mp = rmap->map; mp->size; mp++)
82 print("\t%#p %#p (%#p)\n", mp->addr, mp->addr+mp->size, mp->size);
/*
 * Print the memory sizes recorded in CMOS nvram.
 * NOTE(review): per conventional CMOS layout, 0x17/0x18 and
 * 0x30/0x31 hold extended-memory size in KB and 0x15/0x16 holds
 * base-memory size in KB - confirm against the nvram driver.
 */
89 ulong maxpa, maxpa1, maxpa2;
91 maxpa = (nvramread(0x18)<<8)|nvramread(0x17);
92 maxpa1 = (nvramread(0x31)<<8)|nvramread(0x30);
93 maxpa2 = (nvramread(0x16)<<8)|nvramread(0x15);
94 print("maxpa = %luX -> %luX, maxpa1 = %luX maxpa2 = %luX\n",
95 maxpa, MB+maxpa*KB, maxpa1, maxpa2);
/*
 * Return the range [addr, addr+size) to the free map, coalescing
 * with adjacent free ranges where possible.
 * NOTE(review): several lines of this function are elided in this
 * listing; comments describe only the visible code.
 */
104 mapfree(RMap* rmap, uintptr addr, uintptr size)
/* find the first entry at or above addr (entries kept sorted) */
113 for(mp = rmap->map; mp->addr <= addr && mp->size; mp++)
/* freed range abuts the end of the previous entry: extend it */
116 if(mp > rmap->map && (mp-1)->addr+(mp-1)->size == addr){
117 (mp-1)->size += size;
/* ...and also abuts the next entry: absorb it too */
118 if(addr+size == mp->addr){
119 (mp-1)->size += mp->size;
/* shift the remaining entries down over the absorbed slot */
122 (mp-1)->addr = mp->addr;
123 (mp-1)->size = mp->size;
/* freed range abuts only the start of the next entry: grow it down */
128 if(addr+size == mp->addr && mp->size){
/* table full: the freed range cannot be recorded and is lost */
133 if(mp >= rmap->mapend){
134 print("mapfree: %s: losing %#p, %#p\n",
135 rmap->name, addr, size);
/*
 * Allocate a range of the given size (and alignment) from the map.
 * A non-zero addr requests that specific range; otherwise any
 * suitably aligned free range is taken.
 * NOTE(review): several lines (locking, return paths) are elided
 * in this listing; comments describe only the visible code.
 */
150 mapalloc(RMap* rmap, uintptr addr, int size, int align)
153 uintptr maddr, oaddr;
156 for(mp = rmap->map; mp->size; mp++){
161 * A specific address range has been given:
162 * if the current map entry is greater then
163 * the address is not in the map;
164 * if the current map entry does not overlap
165 * the beginning of the requested range then
166 * continue on to the next map entry;
167 * if the current map entry does not entirely
168 * contain the requested range then the range
/* overflow-safe forms of the range-containment checks */
173 if(mp->size < addr - maddr) /* maddr+mp->size < addr, but no overflow */
175 if(addr - maddr > mp->size - size) /* addr+size > maddr+mp->size, but no overflow */
/* round the candidate address up to the requested alignment */
181 maddr = ((maddr+align-1)/align)*align;
182 if(mp->addr+mp->size-maddr < size)
/* carve [maddr, maddr+size) out of the front of this entry */
186 mp->addr = maddr+size;
187 mp->size -= maddr-oaddr+size;
/* entry exhausted: compact the map by shifting entries down */
191 (mp-1)->addr = mp->addr;
192 }while((mp-1)->size = mp->size);
/* give back any alignment padding below the allocation */
197 mapfree(rmap, oaddr, maddr-oaddr);
207 * Allocate from the ram map directly to make page tables.
208 * Called by mmuwalk during e820scan.
/* one page, page-aligned, from anywhere in the ram map */
215 m = mapalloc(&rmapram, 0, BY2PG, BY2PG);
/*
 * Withhold ranges named in the "umbexclude" configuration variable
 * from the UMB map. The format visible here is a list of
 * "start-end" ranges (inclusive end), apparently comma-separated.
 * NOTE(review): several parsing lines are elided in this listing.
 */
228 if((p = getconf("umbexclude")) == nil)
231 while(p && *p != '\0' && *p != '\n'){
233 addr = strtoul(p, &rptr, 0)
/* expect "start-end"; anything else is malformed */
234 if(rptr == nil || rptr == p || *rptr != '-'){
235 print("umbexclude: invalid argument <%s>\n", op);
/* inclusive upper bound: size = end - start + 1 */
240 size = strtoul(p, &rptr, 0) - addr + 1;
242 print("umbexclude: bad range <%s>\n", op);
245 if(rptr != nil && *rptr == ',')
/* reserve the range so it is never handed out as UMB */
249 mapalloc(&rmapumb, addr, size, 0);
259 * Scan the Upper Memory Blocks (0xA0000->0xF0000) for pieces
260 * which aren't used; they can be used later for devices which
261 * want to allocate some virtual address space.
262 * Check for two things:
263 * 1) device BIOS ROM. This should start with a two-byte header
264 * of 0x55 0xAA, followed by a byte giving the size of the ROM
265 * in 512-byte chunks. These ROM's must start on a 2KB boundary.
266 * 2) device memory. This is read-write.
267 * There are some assumptions: there's VGA memory at 0xA0000 and
268 * the VGA BIOS ROM is at 0xC0000. Also, if there's no ROM signature
269 * at 0xE0000 then the whole 64KB up to 0xF0000 is theoretically up
270 * for grabs; check anyway.
/* walk the UMB area in 2KB steps up to the 0xE0000 segment */
273 while(p < (uchar*)KADDR(0xE0000)){
275 * Test for 0x55 0xAA before poking obtrusively,
276 * some machines (e.g. Thinkpad X20) seem to map
277 * something dynamic here (cardbus?) causing weird
278 * problems if it is changed.
280 if(p[0] == 0x55 && p[1] == 0xAA){
/* probe: if a written 0xCC pattern did not stick, not plain RAM */
287 if(p[0] != 0xCC || p[2*KB-1] != 0xCC){
291 if(p[0] == 0x55 && p[1] == 0xAA){
/* reads back all-ones (floating bus): nothing here, usable as UMB */
295 if(p[0] == 0xFF && p[1] == 0xFF)
296 mapfree(&rmapumb, PADDR(p), 2*KB);
/* writable memory: track separately in the read-write UMB map */
299 mapfree(&rmapumbrw, PADDR(p), 2*KB);
/* 0xE0000 segment: no ROM signature means the whole 64KB may be free */
304 if(p[0] != 0x55 || p[1] != 0xAA){
307 if(p[0] != 0xCC && p[64*KB-1] != 0xCC)
308 mapfree(&rmapumb, PADDR(p), 64*KB);
/* byte checksum over n bytes at v (body elided in this listing);
 * presumably used to validate BIOS tables found by sigscan - confirm */
315 checksum(void *v, int n)
/*
 * Scan [addr, addr+len) on 16-byte boundaries for the given
 * signature string; returns a pointer to the match (nil return
 * path elided in this listing). BIOS tables such as the RSDP are
 * specified to start on 16-byte boundaries, hence the stride.
 */
327 sigscan(uchar* addr, int len, char* signature)
333 sl = strlen(signature);
334 for(p = addr; p+sl < e; p += 16)
335 if(memcmp(p, signature, sl) == 0)
/*
 * Size of conventional (base) memory, from the BIOS Data Area
 * word at offsets 0x13/0x14 (value in KB), clamped to the sane
 * 64KB..640KB range.
 */
347 top = ((bda[0x14]<<8) | bda[0x13])*KB;
349 if(top < 64*KB || top > 640*KB)
350 top = 640*KB; /* sanity */
352 /* reserved for bios tables (EBDA) */
/*
 * Locate a BIOS table by its signature string, looking in the
 * standard places (EBDA, top of base memory, BIOS ROM area).
 * NOTE(review): several lines are elided in this listing.
 */
359 sigsearch(char* signature)
366 * Search for the data structure:
367 * 1) within the first KiB of the Extended BIOS Data Area (EBDA), or
368 * 2) within the last KiB of system base memory if the EBDA segment
370 * 3) within the BIOS ROM address space between 0xf0000 and 0xfffff
371 * (but will actually check 0xe0000 to 0xfffff).
/* "EISA" at 0xfffd9 marks a machine with the standard BIOS layout */
374 if(memcmp(KADDR(0xfffd9), "EISA", 4) == 0){
/* EBDA segment from the BDA word at 0x0e/0x0f; <<4 gives its phys addr */
375 if((p = (bda[0x0f]<<8)|bda[0x0e]) != 0){
376 if((r = sigscan(KADDR(p<<4), 1024, signature)) != nil)
/* else: first KiB past the end of conventional memory */
380 if((r = sigscan(KADDR(convmemsize()), 1024, signature)) != nil)
383 /* hack for virtualbox: look in KiB below 0xa0000 */
384 if((r = sigscan(KADDR(0xa0000-1024), 1024, signature)) != nil)
/* finally the BIOS ROM window itself */
387 return sigscan(KADDR(0xe0000), 0x20000, signature);
396 * Initialise the memory bank information for conventional memory
397 * (i.e. less than 640KB). The base is the first location after the
398 * bootstrap processor MMU information and the limit is obtained from
399 * the BIOS data area.
/* free the low range and scribble zeros over it to catch stale users */
404 mapfree(&rmapram, x, pa-x);
405 memset(KADDR(x), 0, pa-x); /* keep us honest */
/* also free the pages between the rounded-up kernel end and pa */
408 x = PADDR(PGROUND((uintptr)end));
411 panic("kernel too big");
412 mapfree(&rmapram, x, pa-x);
413 memset(KADDR(x), 0, pa-x); /* keep us honest */
/* one parsed E820 entry; struct body elided in this listing */
416 typedef struct Emap Emap;
423 static Emap emap[128];
/* qsort comparator: order E820 entries by ascending base address */
427 emapcmp(const void *va, const void *vb)
437 if(a->base < b->base)
439 if(a->base > b->base)
/*
 * Record a physical range [base, base+len) of the given Mem* type:
 * split it around the regions already handled (low memory, the
 * kernel image), file the remainder in the right free map, and map
 * it into the kernel's address space.
 * NOTE(review): several lines are elided in this listing.
 */
445 map(uintptr base, uintptr len, int type)
447 uintptr n, flags, maxkpa;
450 * Split any call crossing MemMin to make below simpler.
452 if(base < MemMin && len > MemMin-base){
455 map(MemMin, len-n, type);
459 * Let umbscan hash out the low MemMin.
465 * Any non-memory below 16*MB is used as upper mem blocks.
467 if(type == MemUPA && base < 16*MB && len > 16*MB-base){
468 map(base, 16*MB-base, MemUMB);
469 map(16*MB, len-(16*MB-base), MemUPA);
474 * Memory below CPU0END is reserved for the kernel
475 * and already mapped.
477 if(base < PADDR(CPU0END)){
478 n = PADDR(CPU0END) - base;
481 map(PADDR(CPU0END), len-n, type);
486 * Memory between KTZERO and end is the kernel itself
487 * and is already mapped.
489 if(base < PADDR(KTZERO) && len > PADDR(KTZERO)-base){
490 map(base, PADDR(KTZERO)-base, type);
493 if(PADDR(KTZERO) < base && base < PADDR(PGROUND((uintptr)end))){
494 n = PADDR(PGROUND((uintptr)end));
497 map(PADDR(PGROUND((uintptr)end)), len-n, type);
502 * Now we have a simple case.
/* RAM: normal cached, writable kernel mapping */
506 mapfree(&rmapram, base, len);
507 flags = PTEWRITE|PTEVALID;
/* UMB: device/ROM space, so map uncached */
510 mapfree(&rmapumb, base, len);
511 flags = PTEWRITE|PTEUNCACHED|PTEVALID;
514 mapfree(&rmapupa, base, len);
/* clamp to the highest physical address the kernel window can map */
527 if(len > maxkpa-base)
/* enter the mapping at KZERO+base in the kernel page tables */
529 pmap(m->pml4, base|flags, base+KZERO, len);
/*
 * Parse the E820 memory map handed over by the bootloader in the
 * "*e820"/"e820" configuration string, sort it, and feed each range
 * to map(). NOTE(review): several lines are elided in this listing.
 */
536 uintptr base, len, last;
541 /* passed by bootloader */
542 if((s = getconf("*e820")) == nil)
543 if((s = getconf("e820")) == nil)
546 while(nemap < nelem(emap)){
/* new format entries carry a leading single-digit E820 type */
553 if(s[1] == ' '){ /* new format */
554 e->type = s[0] - '0';
/* base and top are hex, separated by a space */
557 e->base = strtoull(s, &s, 16);
560 e->top = strtoull(s, &s, 16);
561 if(*s != ' ' && *s != 0)
568 qsort(emap, nemap, sizeof emap[0], emapcmp);
570 for(i=0; i<nemap; i++){
573 * pull out the info but only about the low 32 bits...
583 * If the map skips addresses, mark them available.
586 map(last, base-last, MemUPA);
/* E820 type 1 is usable RAM; everything else is reserved */
587 map(base, len, (e->type == 1) ? MemRAM : MemReserved);
/* cover the remainder of the address space above the last entry */
593 map(last, -last, MemUPA);
610 * Set the conf entries describing banks of allocatable memory.
/* copy each free RAM range into a conf.mem bank */
612 for(i=0; i<nelem(mapram) && i<nelem(conf.mem); i++){
613 mp = &rmapram.map[i];
616 cm->npage = mp->size/BY2PG;
/* any ranges beyond nelem(conf.mem) banks cannot be used: tally them */
620 for(; i<nelem(mapram); i++)
621 lost += rmapram.map[i].size;
623 print("meminit - lost %llud bytes\n", lost);
630 * Allocate memory from the upper memory blocks.
/* returns a kernel virtual address (0-return path elided in listing) */
633 umbmalloc(uintptr addr, int size, int align)
637 if(a = mapalloc(&rmapumb, addr, size, align))
638 return (uintptr)KADDR(a);
/* release a UMB allocation; addr is the kernel virtual address */
644 umbfree(uintptr addr, int size)
646 mapfree(&rmapumb, PADDR(addr), size);
/*
 * Allocate read-write upper memory. Try the known read-write map
 * first; failing that, take a plain UMB and probe it with a 0xCC
 * pattern to confirm it is actually writable (the memory may not
 * have been visible when umbscan ran).
 */
650 umbrwmalloc(uintptr addr, int size, int align)
655 if(a = mapalloc(&rmapumbrw, addr, size, align))
656 return (uintptr)KADDR(a);
659 * Perhaps the memory wasn't visible before
660 * the interface is initialised, so try again.
662 if((a = umbmalloc(addr, size, align)) == 0)
/* write-then-read probe: both ends must hold the test pattern */
667 if(p[0] == 0xCC && p[size-1] == 0xCC)
/* release a read-write UMB allocation back to the rw map */
675 umbrwfree(uintptr addr, int size)
677 mapfree(&rmapumbrw, PADDR(addr), size);
681 * Give out otherwise-unused physical address space
682 * for use in configuring devices. Note that upaalloc
683 * does not map the physical address into virtual memory.
684 * Call vmap to do that.
687 upaalloc(int size, int align)
/* any address will do; alignment is the caller's constraint */
691 a = mapalloc(&rmapupa, 0, size, align);
693 print("out of physical address space allocating %d\n", size);
/* return a physical address range obtained from upaalloc/upareserve */
700 upafree(uintptr pa, int size)
702 mapfree(&rmapupa, pa, size);
/*
 * Claim a specific physical address range for a device. Failure is
 * tolerated quietly; a partial/mismatched grant is returned rather
 * than kept (NOTE(review): the comparison line between 710 and 719
 * is elided in this listing).
 */
706 upareserve(uintptr pa, int size)
710 a = mapalloc(&rmapupa, pa, size, 0);
713 * This can happen when we're using the E820
714 * map, which might have already reserved some
715 * of the regions claimed by the pci devices.
717 // print("upareserve: cannot reserve pa=%#p size=%d\n", pa, size);
719 mapfree(&rmapupa, a, size);