 * Size memory and create the kernel page-tables on the fly while doing so.
 * Called from main(), this code should only be run by the bootstrap processor.
 *
 * MemMin is what the bootstrap code in l.s has already mapped;
 * MemMax is the limit of physical memory to scan.
 */
#include "../port/lib.h"
u32int MemMin = 8*MB;	/* set in l.s */

	MemUPA = 0,	/* unbacked physical address */
	MemRAM = 1,	/* physical memory */
	MemUMB = 2,	/* upper memory block (<16MB) */
	MemMax = (3*1024+768)*MB,
typedef struct Map Map;
typedef struct RMap RMap;
/*
 * Memory allocation tracking.
 */
static Map mapupa[16];
static RMap rmapupa = {
	"unallocated unbacked physical memory",
	&mapupa[nelem(mapupa)-1],

static Map mapram[16];
static RMap rmapram = {
	&mapram[nelem(mapram)-1],

static Map mapumb[64];
static RMap rmapumb = {
	&mapumb[nelem(mapumb)-1],

static Map mapumbrw[16];
static RMap rmapumbrw = {
	&mapumbrw[nelem(mapumbrw)-1],
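/*
 * Illustrative note (not part of the original file): each RMap is a small,
 * statically sized free list of address ranges. Judging from the uses below,
 * a Map entry carries an addr and a size, and an RMap carries a name for
 * diagnostics, a pointer to its Map array and a mapend pointer bounding the
 * last usable slot; a zero-size entry terminates the list. The initialisers
 * above all follow that pattern: a backing array plus an RMap that names it
 * and bounds it.
 */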
	print("%s\n", rmap->name);
	for(mp = rmap->map; mp->size; mp++)
		print("\t%8.8luX %8.8luX (%lud)\n", mp->addr, mp->addr+mp->size, mp->size);
	ulong maxpa, maxpa1, maxpa2;

	maxpa = (nvramread(0x18)<<8)|nvramread(0x17);
	maxpa1 = (nvramread(0x31)<<8)|nvramread(0x30);
	maxpa2 = (nvramread(0x16)<<8)|nvramread(0x15);
	print("maxpa = %luX -> %luX, maxpa1 = %luX maxpa2 = %luX\n",
		maxpa, MB+maxpa*KB, maxpa1, maxpa2);

	mapprint(&rmapumbrw);
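/*
 * Illustrative sketch (not part of the original file): the CMOS/NVRAM bytes
 * read above combine into KB counts; registers 0x17/0x18 hold the extended
 * memory size in KB above 1MB, so the usable top of memory is roughly
 * MB + maxpa*KB, which is what memdebug() prints. The helper name below is
 * hypothetical.
 */
static ulong
cmosextmemtop(void)
{
	ulong kb;

	kb = (nvramread(0x18)<<8)|nvramread(0x17);	/* KB above 1MB */
	return MB + kb*KB;				/* approximate top of RAM */
}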
mapfree(RMap* rmap, ulong addr, ulong size)

	for(mp = rmap->map; mp->addr <= addr && mp->size; mp++)

	if(mp > rmap->map && (mp-1)->addr+(mp-1)->size == addr){
		(mp-1)->size += size;
		if(addr+size == mp->addr){
			(mp-1)->size += mp->size;
			(mp-1)->addr = mp->addr;
			(mp-1)->size = mp->size;

	if(addr+size == mp->addr && mp->size){

	if(mp >= rmap->mapend){
		print("mapfree: %s: losing 0x%luX, %ld\n",
			rmap->name, addr, size);
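/*
 * Illustrative sketch (not part of the original file): mapfree() keeps each
 * RMap sorted by address and coalesces a freed range with the entries on
 * either side, so freeing adjacent chunks never consumes extra Map slots.
 * For example, under that behaviour (guarded out, do not call):
 */
#ifdef EXAMPLE
static void
coalesceexample(void)
{
	mapfree(&rmapram, 1*MB, MB);	/* one free entry: [1MB, 2MB) */
	mapfree(&rmapram, 3*MB, MB);	/* a second entry: [3MB, 4MB) */
	mapfree(&rmapram, 2*MB, MB);	/* fills the hole; all three coalesce into [1MB, 4MB) */
}
#endif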
mapalloc(RMap* rmap, ulong addr, int size, int align)

	for(mp = rmap->map; mp->size; mp++){
		/*
		 * A specific address range has been given:
		 *   if the current map entry starts above the address
		 *   then the address is not in the map;
		 *   if the current map entry does not overlap
		 *   the beginning of the requested range then
		 *   continue on to the next map entry;
		 *   if the current map entry does not entirely
		 *   contain the requested range then the range
		 *   is not in the map.
		 */
		if(mp->size < addr - maddr)		/* maddr+mp->size < addr, but no overflow */
		if(addr - maddr > mp->size - size)	/* addr+size > maddr+mp->size, but no overflow */

		maddr = ((maddr+align-1)/align)*align;
		if(mp->addr+mp->size-maddr < size)

		mp->addr = maddr+size;
		mp->size -= maddr-oaddr+size;
			(mp-1)->addr = mp->addr;
		}while((mp-1)->size = mp->size);

		mapfree(rmap, oaddr, maddr-oaddr);
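/*
 * Illustrative sketch (not part of the original file): the alignment step
 * above rounds maddr up to the next multiple of align using integer
 * arithmetic, and the two guarded comparisons are written so that
 * maddr+mp->size and addr+size are never computed directly, avoiding 32-bit
 * overflow near the top of the address space. The helper below is a
 * hypothetical restatement of the rounding idiom only.
 */
static ulong
alignup(ulong a, ulong align)
{
	if(align > 0)
		a = ((a+align-1)/align)*align;	/* same idiom as mapalloc() */
	return a;
}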
/*
 * Allocate from the ram map directly to make page tables.
 * Called by mmuwalk during e820scan.
 */
	m = mapalloc(&rmapram, 0, BY2PG, BY2PG);
	if((p = getconf("umbexclude")) == nil)

	while(p && *p != '\0' && *p != '\n'){
		addr = strtoul(p, &rptr, 0);
		if(rptr == nil || rptr == p || *rptr != '-'){
			print("umbexclude: invalid argument <%s>\n", op);

		size = strtoul(p, &rptr, 0) - addr + 1;
			print("umbexclude: bad range <%s>\n", op);

		if(rptr != nil && *rptr == ',')

		mapalloc(&rmapumb, addr, size, 0);
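/*
 * Illustrative note (not part of the original file): the parser above reads
 * the value of the "umbexclude" configuration entry as one or more inclusive
 * ranges separated by commas, and pre-allocates them from the UMB map so
 * umbscan() and umbmalloc() never hand them out. A plausible example entry,
 * assuming that plan9.ini syntax:
 *
 *	umbexclude=0xd0000-0xd3fff,0xd8000-0xdffff
 */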
/*
 * Scan the Upper Memory Blocks (0xA0000->0xF0000) for pieces
 * which aren't used; they can be used later for devices which
 * want to allocate some virtual address space.
 * Check for two things:
 * 1) device BIOS ROM. This should start with a two-byte header
 *    of 0x55 0xAA, followed by a byte giving the size of the ROM
 *    in 512-byte chunks. These ROMs must start on a 2KB boundary.
 * 2) device memory. This is read-write.
 * There are some assumptions: there's VGA memory at 0xA0000 and
 * the VGA BIOS ROM is at 0xC0000. Also, if there's no ROM signature
 * at 0xE0000 then the whole 64KB up to 0xF0000 is theoretically up
 * for grabs; check anyway.
 */
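/*
 * Illustrative sketch (not part of the original file): given a pointer to a
 * candidate expansion-ROM header as described above, the ROM's length in
 * bytes follows from the third header byte, which counts 512-byte chunks.
 * Since option ROMs start on 2KB boundaries, the scan below can step past a
 * ROM by its size rounded up to 2KB. Hypothetical helper:
 */
static ulong
romsize(uchar *p)
{
	ulong n;

	if(p[0] != 0x55 || p[1] != 0xAA)
		return 0;			/* not a ROM header */
	n = p[2]*512;				/* size byte is in 512-byte chunks */
	return ((n + 2*KB - 1)/(2*KB))*(2*KB);	/* round up to the 2KB scan granularity */
}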
	while(p < (uchar*)KADDR(0xE0000)){
		/*
		 * Test for 0x55 0xAA before poking obtrusively,
		 * some machines (e.g. Thinkpad X20) seem to map
		 * something dynamic here (cardbus?) causing weird
		 * problems if it is changed.
		 */
		if(p[0] == 0x55 && p[1] == 0xAA){

		if(p[0] != 0xCC || p[2*KB-1] != 0xCC){
			if(p[0] == 0x55 && p[1] == 0xAA){

			if(p[0] == 0xFF && p[1] == 0xFF)
				mapfree(&rmapumb, PADDR(p), 2*KB);

			mapfree(&rmapumbrw, PADDR(p), 2*KB);

	if(p[0] != 0x55 || p[1] != 0xAA){
		if(p[0] != 0xCC && p[64*KB-1] != 0xCC)
			mapfree(&rmapumb, PADDR(p), 64*KB);
checksum(void *v, int n)

sigscan(uchar* addr, int len, char* signature)

	sl = strlen(signature);
	for(p = addr; p+sl < e; p += 16)
		if(memcmp(p, signature, sl) == 0)
sigsearch(char* signature)

	/*
	 * Search for the data structure:
	 * 1) within the first KiB of the Extended BIOS Data Area (EBDA), or
	 * 2) within the last KiB of system base memory if the EBDA segment
	 *    is undefined, or
	 * 3) within the BIOS ROM address space between 0xf0000 and 0xfffff
	 *    (but will actually check 0xe0000 to 0xfffff).
	 */
	if(memcmp(KADDR(0xfffd9), "EISA", 4) == 0){
		if((p = (bda[0x0f]<<8)|bda[0x0e]) != 0){
			if((r = sigscan(KADDR(p<<4), 1024, signature)) != nil)

	if((p = ((bda[0x14]<<8)|bda[0x13])*1024) != 0){
		if((r = sigscan(KADDR(p-1024), 1024, signature)) != nil)

	/* hack for virtualbox: look in KiB below 0xa0000 */
	if((r = sigscan(KADDR(0xa0000-1024), 1024, signature)) != nil)

	return sigscan(KADDR(0xe0000), 0x20000, signature);
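/*
 * Illustrative usage (not part of the original file): callers hand
 * sigsearch() the ASCII signature of the BIOS table they want, for example
 * the MP specification's "_MP_" floating pointer, and then validate the hit
 * with checksum() over the structure's stated length before trusting it.
 * The 16-byte length below is the MP floating pointer's size and is an
 * assumption of this sketch, not something stated in this file.
 */
#ifdef EXAMPLE
static void*
mpsearchexample(void)
{
	void *p;

	if((p = sigsearch("_MP_")) != nil && checksum(p, 16) == 0)
		return p;	/* plausible MP floating pointer structure */
	return nil;
}
#endif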
/*
 * Initialise the memory bank information for conventional memory
 * (i.e. less than 640KB). The base is the first location after the
 * bootstrap processor MMU information and the limit is obtained from
 * the BIOS data area.
 */
	bda = (uchar*)KADDR(0x400);
	pa = ((bda[0x14]<<8)|bda[0x13])*KB;

	mapfree(&rmapram, x, pa-x);
	memset(KADDR(x), 0, pa-x);	/* keep us honest */

	x = PADDR(PGROUND((ulong)end));
		panic("kernel too big");
	mapfree(&rmapram, x, pa-x);
	memset(KADDR(x), 0, pa-x);	/* keep us honest */
ramscan(ulong maxmem)

	ulong *k0, kzero, map, maxkpa, maxpa, pa, *pte, *table, *va, vbase, x;
	int nvalid[NMemType];

	/*
	 * The bootstrap code has created a prototype page
	 * table which maps the first MemMin of physical memory to KZERO.
	 * The page directory is at m->pdb and the first page of
	 * free memory is after the per-processor MMU information.
	 */

	/*
	 * Check if the extended memory size can be obtained from the CMOS.
	 * If it's 0 then it's either not known or >= 64MB. Always check
	 * at least 24MB in case there's a memory gap (up to 8MB) below 16MB;
	 * in this case the memory from the gap is remapped to the top of
	 * memory.
	 * The value in CMOS is supposed to be the number of KB above 1MB.
	 */
	x = (nvramread(0x18)<<8)|nvramread(0x17);
	if(x == 0 || x >= (63*KB))

	maxkpa = (u32int)-KZERO;	/* 2^32 - KZERO */
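	/*
	 * Illustrative note (not part of the original file): maxkpa is the
	 * largest physical address reachable through the kernel's fixed KZERO
	 * window, since KADDR(pa) places pa at virtual address KZERO+pa and
	 * that sum must stay below 2^32. The scan below only installs
	 * permanent KZERO mappings for chunks with pa <= maxkpa.
	 */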
	/*
	 * March up memory from MemMin to maxpa 1MB at a time,
	 * mapping the first page and checking that the page can
	 * be written and read correctly. The page tables are created here
	 * on the fly, allocating from low memory as necessary.
	 */
	k0 = (ulong*)KADDR(0);

	memset(nvalid, 0, sizeof(nvalid));

	/*
	 * Can't map memory to KADDR(pa) when we're walking because
	 * KADDR can only be used for relatively low addresses.
	 * Instead, map each 4MB we scan to the virtual address range
	 * MemMin->MemMin+4MB while we are scanning.
	 */

		/*
		 * Map the page. Use mapalloc(&rmapram, ...) to make
		 * the page table if necessary; it will be returned to the
		 * pool later if it isn't needed. Map in a fixed range (the second 4M)
		 * because high physical addresses cannot be passed to KADDR.
		 */
		va = (void*)(vbase + pa%(4*MB));
		table = &m->pdb[PDX(va)];
		if(map == 0 && (map = mapalloc(&rmapram, 0, BY2PG, BY2PG)) == 0)
		memset(KADDR(map), 0, BY2PG);
		*table = map|PTEWRITE|PTEVALID;
		memset(nvalid, 0, sizeof(nvalid));

		table = KADDR(PPN(*table));
		pte = &table[PTX(va)];

		*pte = pa|PTEWRITE|PTEUNCACHED|PTEVALID;
		mmuflushtlb(PADDR(m->pdb));
		/*
		 * Write a pattern to the page and write a different
		 * pattern to a possible mirror at KZERO. If the data
		 * reads back correctly the chunk is some type of RAM (possibly
		 * a linearly-mapped VGA framebuffer, for instance...) and
		 * can be cleared and added to the memory pool. If not, the
		 * chunk is marked uncached and added to the UMB pool if <16MB
		 * or is marked invalid and added to the UPA pool.
		 */
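		/*
		 * Illustrative sketch (not part of the original file): the
		 * aliasing test described above, restated as a hypothetical
		 * helper. It writes one pattern through the scan window and a
		 * different one at physical 0 (via k0); if the window still
		 * reads back its own pattern, the page is real, distinct
		 * memory rather than an alias of low RAM or unbacked address
		 * space. The pattern values are placeholders.
		 */
#ifdef EXAMPLE
static int
pagebacked(ulong *va, ulong *k0)
{
	*va = 0x12345678;	/* candidate page through the scan window */
	*k0 = ~0x12345678;	/* possible mirror at physical address 0 */
	return *va == 0x12345678;
}
#endif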
			nvalid[MemRAM] += MB/BY2PG;
			mapfree(&rmapram, pa, MB);

			*pte++ = pa|PTEWRITE|PTEVALID;

			mmuflushtlb(PADDR(m->pdb));
			/* memset(va, 0, MB); so damn slow to memset all of memory */

			nvalid[MemUMB] += MB/BY2PG;
			mapfree(&rmapumb, pa, MB);

			*pte++ = pa|PTEWRITE|PTEUNCACHED|PTEVALID;

			nvalid[MemUPA] += MB/BY2PG;
			mapfree(&rmapupa, pa, MB);

		/*
		 * Done with this 4MB chunk, review the options:
		 * 1) not physical memory and >=16MB - invalidate the PDB entry;
		 * 2) physical memory - use the 4MB page extension if possible;
		 * 3) not physical memory and <16MB - use the 4MB page extension
		 *    if possible, make the entry uncacheable;
		 * 4) mixed or no 4MB page extension - commit the already
		 *    initialised space for the page table.
		 */
		if(pa%(4*MB) == 0 && pa >= 32*MB && nvalid[MemUPA] == (4*MB)/BY2PG){
			/*
			 * If we encounter a 4MB chunk of missing memory
			 * at a sufficiently high offset, call it the end of
			 * memory. Otherwise we run the risk of thinking
			 * that video memory is real RAM.
			 */

		if(pa <= maxkpa && pa%(4*MB) == 0){
			table = &m->pdb[PDX(KADDR(pa - 4*MB))];
			if(nvalid[MemUPA] == (4*MB)/BY2PG)
			else if(nvalid[MemRAM] == (4*MB)/BY2PG && (m->cpuiddx & Pse))
				*table = (pa - 4*MB)|PTESIZE|PTEWRITE|PTEVALID;
			else if(nvalid[MemUMB] == (4*MB)/BY2PG && (m->cpuiddx & Pse))
				*table = (pa - 4*MB)|PTESIZE|PTEWRITE|PTEUNCACHED|PTEVALID;
				*table = map|PTEWRITE|PTEVALID;

		mmuflushtlb(PADDR(m->pdb));

	/*
	 * If we didn't reach the end of the 4MB chunk, that part won't
	 * be mapped. Commit the already initialised space for the page table.
	 */
	if(pa % (4*MB) && pa <= maxkpa){
		m->pdb[PDX(KADDR(pa))] = map|PTEWRITE|PTEVALID;

		mapfree(&rmapram, map, BY2PG);

	m->pdb[PDX(vbase)] = 0;
	mmuflushtlb(PADDR(m->pdb));

	mapfree(&rmapupa, pa, (u32int)-pa);
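	/*
	 * Illustrative note (not part of the original file): the final
	 * mapfree() above hands everything from the last scanned physical
	 * address up to the 4GB boundary to the unbacked-address pool;
	 * (u32int)-pa is simply 2^32 - pa computed in 32-bit arithmetic,
	 * mirroring the (u32int)-KZERO idiom used for maxkpa.
	 */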
typedef struct Emap Emap;

static Emap emap[128];

emapcmp(const void *va, const void *vb)

	if(a->base < b->base)
	if(a->base > b->base)
map(ulong base, ulong len, int type)

	ulong *table, flags, maxkpa;

	/*
	 * Split any call crossing MemMin to make the cases below simpler.
	 */
	if(base < MemMin && len > MemMin-base){
		map(MemMin, len-n, type);

	/*
	 * Let lowraminit and umbscan hash out the low MemMin.
	 */

	/*
	 * Any non-memory below 16*MB is used as upper mem blocks.
	 */
	if(type == MemUPA && base < 16*MB && len > 16*MB-base){
		map(base, 16*MB-base, MemUMB);
		map(16*MB, len-(16*MB-base), MemUPA);

	/*
	 * Memory below CPU0END is reserved for the kernel
	 * and already mapped.
	 */
	if(base < PADDR(CPU0END)){
		n = PADDR(CPU0END) - base;
		map(PADDR(CPU0END), len-n, type);

	/*
	 * Memory between KTZERO and end is the kernel itself
	 * and is already mapped.
	 */
	if(base < PADDR(KTZERO) && len > PADDR(KTZERO)-base){
		map(base, PADDR(KTZERO)-base, type);
	if(PADDR(KTZERO) < base && base < PADDR(PGROUND((ulong)end))){
		n = PADDR(PGROUND((ulong)end));
		map(PADDR(PGROUND((ulong)end)), len-n, type);

	/*
	 * Now we have a simple case.
	 */
	// print("map %.8lux %.8lux %d\n", base, base+len, type);
		mapfree(&rmapram, base, len);
		flags = PTEWRITE|PTEVALID;

		mapfree(&rmapumb, base, len);
		flags = PTEWRITE|PTEUNCACHED|PTEVALID;

		mapfree(&rmapupa, base, len);

	/*
	 * The bottom MemMin is already mapped - just twiddle flags.
	 * (not currently used - see above)
	 */
		table = KADDR(PPN(m->pdb[PDX(base)]));
		for(; base<e; base+=BY2PG)
			table[PTX(base)] |= flags;

	/*
	 * Only map from KZERO to 2^32.
	 */
		if(len > maxkpa-base)
		pdbmap(m->pdb, base|flags, base+KZERO, len);
	ulong base, len, last;

	/* passed by bootloader */
	if((s = getconf("*e820")) == nil)
		if((s = getconf("e820")) == nil)

	while(nemap < nelem(emap)){
		if(s[1] == ' '){	/* new format */
			e->type = s[0] - '0';

		e->base = strtoull(s, &s, 16);
		e->top = strtoull(s, &s, 16);
		if(*s != ' ' && *s != 0)

	qsort(emap, nemap, sizeof emap[0], emapcmp);

	for(i=0; i<nemap; i++){
		/*
		 * pull out the info but only about the low 32 bits...
		 */
		if(e->base >= (1ULL<<32))
		if(e->top > (1ULL<<32))

		/*
		 * If the map skips addresses, mark them available.
		 */
			map(last, base-last, MemUPA);
		map(base, len, (e->type == 1) ? MemRAM : MemReserved);

	map(last, -last, MemUPA);
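/*
 * Illustrative note (not part of the original file): the "*e820=" (or
 * "e820=") configuration string parsed above is a sequence of map entries,
 * each "type base top" with base and top in hexadecimal and, in the newer
 * format, a leading single-digit BIOS type. A plausible example for a
 * machine with 64MB of RAM and the usual hole below 1MB (the exact string a
 * given bootloader emits may differ):
 *
 *	*e820=1 0x0 0x9f000 2 0x9f000 0x100000 1 0x100000 0x4000000
 *
 * After qsort() the loop above calls map() with MemRAM for type-1 entries,
 * MemReserved for everything else, and MemUPA for any gaps in between.
 */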
	if(p = getconf("*maxmem"))
		maxmem = strtoul(p, 0, 0);

	/*
	 * Set special attributes for memory between 640KB and 1MB:
	 *	VGA memory is writethrough;
	 *	BIOS ROMs/UMBs are uncached;
	 * then scan for useful memory.
	 */
	for(pa = 0xA0000; pa < 0xC0000; pa += BY2PG){
		pte = mmuwalk(m->pdb, (ulong)KADDR(pa), 2, 0);
	for(pa = 0xC0000; pa < 0x100000; pa += BY2PG){
		pte = mmuwalk(m->pdb, (ulong)KADDR(pa), 2, 0);
	mmuflushtlb(PADDR(m->pdb));

	/*
	 * Set the conf entries describing banks of allocatable memory.
	 */
	for(i=0; i<nelem(mapram) && i<nelem(conf.mem); i++){
		mp = &rmapram.map[i];
		cm->npage = mp->size/BY2PG;

	for(; i<nelem(mapram); i++)
		lost += rmapram.map[i].size;
		print("meminit - lost %lud bytes\n", lost);
/*
 * Allocate memory from the upper memory blocks.
 */
umbmalloc(ulong addr, int size, int align)

	if(a = mapalloc(&rmapumb, addr, size, align))
		return (ulong)KADDR(a);

umbfree(ulong addr, int size)

	mapfree(&rmapumb, PADDR(addr), size);

umbrwmalloc(ulong addr, int size, int align)

	if(a = mapalloc(&rmapumbrw, addr, size, align))
		return (ulong)KADDR(a);

	/*
	 * Perhaps the memory wasn't visible before
	 * the interface was initialised, so try again.
	 */
	if((a = umbmalloc(addr, size, align)) == 0)
	if(p[0] == 0xCC && p[size-1] == 0xCC)

umbrwfree(ulong addr, int size)

	mapfree(&rmapumbrw, PADDR(addr), size);

/*
 * Give out otherwise-unused physical address space
 * for use in configuring devices. Note that upaalloc
 * does not map the physical address into virtual memory.
 * Call vmap to do that.
 */
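/*
 * Illustrative usage (not part of the original file): a driver needing a
 * device memory window would claim physical space here and then map it
 * itself, since upaalloc only reserves the address range. This sketch
 * assumes the pc port's vmap(ulong pa, int size) is available.
 */
#ifdef EXAMPLE
static void*
upamapexample(void)
{
	ulong pa;

	/* claim a 64KB window of unused physical address space, 64KB aligned */
	if((pa = upaalloc(64*1024, 64*1024)) == 0)
		return nil;
	/* upaalloc does not map it; vmap gives it a virtual address */
	return vmap(pa, 64*1024);
}
#endif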
upaalloc(int size, int align)

	a = mapalloc(&rmapupa, 0, size, align);
		print("out of physical address space allocating %d\n", size);

upafree(ulong pa, int size)

	mapfree(&rmapupa, pa, size);

upareserve(ulong pa, int size)

	a = mapalloc(&rmapupa, pa, size, 0);
		/*
		 * This can happen when we're using the E820
		 * map, which might have already reserved some
		 * of the regions claimed by the pci devices.
		 */
	//	print("upareserve: cannot reserve pa=%#.8lux size=%d\n", pa, size);
		mapfree(&rmapupa, a, size);