2 * Size memory and create the kernel page-tables on the fly while doing so.
3 * Called from main(), this code should only be run by the bootstrap processor.
5 * MemMin is what the bootstrap code in l.s has already mapped;
6 * MemMax is the limit of physical memory to scan.
9 #include "../port/lib.h"
18 u32int MemMin = 8*MB; /* set in l.s */
21 MemUPA = 0, /* unbacked physical address */
22 MemRAM = 1, /* physical memory */
23 MemUMB = 2, /* upper memory block (<16MB) */
24 MemACPI = 3, /* ACPI tables */
30 MemMax = (3*1024+768)*MB,
33 typedef struct Map Map;
39 typedef struct RMap RMap;
49 * Memory allocation tracking.
51 static Map mapupa[16];
52 static RMap rmapupa = {
53 "unallocated unbacked physical memory",
55 &mapupa[nelem(mapupa)-1],
58 static Map mapram[16];
59 static RMap rmapram = {
62 &mapram[nelem(mapram)-1],
65 static Map mapumb[64];
66 static RMap rmapumb = {
69 &mapumb[nelem(mapumb)-1],
72 static Map mapumbrw[16];
73 static RMap rmapumbrw = {
76 &mapumbrw[nelem(mapumbrw)-1],
79 static Map mapacpi[16];
80 static RMap rmapacpi = {
83 &mapacpi[nelem(mapacpi)-1],
91 print("%s\n", rmap->name);
92 for(mp = rmap->map; mp->size; mp++)
93 print("\t%8.8luX %8.8luX (%lud)\n", mp->addr, mp->addr+mp->size, mp->size);
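/*
 * Illustrative sketch, not part of the original: mapprint above relies on
 * the RMap invariants that entries are sorted by address, do not overlap,
 * and end at a zero-size slot.  A hypothetical consistency check over
 * those invariants:
 */
static int
mapcheck(RMap* rmap)
{
	Map *mp;

	for(mp = rmap->map; mp < rmap->mapend && mp->size; mp++)
		if(mp > rmap->map && (mp-1)->addr+(mp-1)->size > mp->addr)
			return 0;	/* out of order or overlapping */
	return mp->size == 0;	/* the list must end on a zero-size slot */
}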
100 ulong maxpa, maxpa1, maxpa2;
102 maxpa = (nvramread(0x18)<<8)|nvramread(0x17);
103 maxpa1 = (nvramread(0x31)<<8)|nvramread(0x30);
104 maxpa2 = (nvramread(0x16)<<8)|nvramread(0x15);
105 print("maxpa = %luX -> %luX, maxpa1 = %luX maxpa2 = %luX\n",
106 maxpa, MB+maxpa*KB, maxpa1, maxpa2);
110 mapprint(&rmapumbrw);
116 mapfree(RMap* rmap, ulong addr, ulong size)
125 for(mp = rmap->map; mp->addr <= addr && mp->size; mp++)
128 if(mp > rmap->map && (mp-1)->addr+(mp-1)->size == addr){
129 (mp-1)->size += size;
130 if(addr+size == mp->addr){
131 (mp-1)->size += mp->size;
134 (mp-1)->addr = mp->addr;
135 (mp-1)->size = mp->size;
140 if(addr+size == mp->addr && mp->size){
145 if(mp >= rmap->mapend){
146 print("mapfree: %s: losing 0x%luX, %ld\n",
147 rmap->name, addr, size);
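/*
 * Illustrative usage sketch, not in the original: because mapfree
 * coalesces with both neighbours, freeing two adjacent page-sized
 * ranges leaves a single 2*BY2PG entry in the map.  mapfreepair is a
 * hypothetical helper; pa is assumed to have come from mapalloc.
 */
static void
mapfreepair(RMap* rmap, ulong pa)
{
	mapfree(rmap, pa, BY2PG);
	mapfree(rmap, pa+BY2PG, BY2PG);	/* merges with the entry just freed */
}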
162 mapalloc(RMap* rmap, ulong addr, int size, int align)
168 for(mp = rmap->map; mp->size; mp++){
173 * A specific address range has been given:
174 * if the current map entry is greater, then
175 * the address is not in the map;
176 * if the current map entry does not overlap
177 * the beginning of the requested range then
178 * continue on to the next map entry;
179 * if the current map entry does not entirely
180 * contain the requested range then the range
185 if(mp->size < addr - maddr) /* maddr+mp->size < addr, but no overflow */
187 if(addr - maddr > mp->size - size) /* addr+size > maddr+mp->size, but no overflow */
193 maddr = ((maddr+align-1)/align)*align;
194 if(mp->addr+mp->size-maddr < size)
198 mp->addr = maddr+size;
199 mp->size -= maddr-oaddr+size;
203 (mp-1)->addr = mp->addr;
204 }while((mp-1)->size = mp->size);
209 mapfree(rmap, oaddr, maddr-oaddr);
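/*
 * Sketch, not part of the original: the alignment step in mapalloc,
 * ((maddr+align-1)/align)*align, simply rounds maddr up to the next
 * multiple of align, e.g. 0x9001 with align 0x1000 becomes 0xA000.
 * The same arithmetic as a hypothetical stand-alone helper:
 */
static ulong
alignup(ulong addr, int align)
{
	if(align <= 1)
		return addr;
	return ((addr+align-1)/align)*align;
}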
219 * Allocate from the ram map directly to make page tables.
220 * Called by mmuwalk during e820scan.
227 m = mapalloc(&rmapram, 0, BY2PG, BY2PG);
240 if((p = getconf("umbexclude")) == nil)
243 while(p && *p != '\0' && *p != '\n'){
245 addr = strtoul(p, &rptr, 0);
246 if(rptr == nil || rptr == p || *rptr != '-'){
247 print("umbexclude: invalid argument <%s>\n", op);
252 size = strtoul(p, &rptr, 0) - addr + 1;
254 print("umbexclude: bad range <%s>\n", op);
257 if(rptr != nil && *rptr == ',')
261 mapalloc(&rmapumb, addr, size, 0);
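/*
 * Illustrative note, not in the original: the loop above parses a
 * plan9.ini value of the form "base-limit[,base-limit...]", where the
 * bounds are inclusive and may be hex or decimal, e.g. (hypothetical):
 *
 *	umbexclude=0xD0000-0xD3FFF,0xE0000-0xEFFFF
 *
 * Each excluded range is pre-allocated from rmapumb so it can never be
 * handed out by umbmalloc later.
 */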
271 * Scan the Upper Memory Blocks (0xA0000->0xF0000) for pieces
272 * which aren't used; they can be used later for devices which
273 * want to allocate some virtual address space.
274 * Check for two things:
275 * 1) device BIOS ROM. This should start with a two-byte header
276 * of 0x55 0xAA, followed by a byte giving the size of the ROM
277 * in 512-byte chunks. These ROMs must start on a 2KB boundary.
278 * 2) device memory. This is read-write.
279 * There are some assumptions: there's VGA memory at 0xA0000 and
280 * the VGA BIOS ROM is at 0xC0000. Also, if there's no ROM signature
281 * at 0xE0000 then the whole 64KB up to 0xF0000 is theoretically up
282 * for grabs; check anyway.
285 while(p < (uchar*)KADDR(0xE0000)){
287 * Test for 0x55 0xAA before poking obtrusively;
288 * some machines (e.g. Thinkpad X20) seem to map
289 * something dynamic here (cardbus?) causing weird
290 * problems if it is changed.
292 if(p[0] == 0x55 && p[1] == 0xAA){
299 if(p[0] != 0xCC || p[2*KB-1] != 0xCC){
303 if(p[0] == 0x55 && p[1] == 0xAA){
307 if(p[0] == 0xFF && p[1] == 0xFF)
308 mapfree(&rmapumb, PADDR(p), 2*KB);
311 mapfree(&rmapumbrw, PADDR(p), 2*KB);
316 if(p[0] != 0x55 || p[1] != 0xAA){
319 if(p[0] != 0xCC && p[64*KB-1] != 0xCC)
320 mapfree(&rmapumb, PADDR(p), 64*KB);
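/*
 * Sketch, not part of the original: per the comment above, an expansion
 * ROM starts with 0x55 0xAA and its third byte gives the image size in
 * 512-byte units.  A hypothetical helper that turns the header into a
 * byte count (0 if there is no ROM):
 */
static ulong
romsize(uchar* p)
{
	if(p[0] != 0x55 || p[1] != 0xAA)
		return 0;
	return p[2]*512;
}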
327 checksum(void *v, int n)
339 sigscan(uchar *addr, int len, char *sig, int size, int step)
345 e = addr+len-(size > sl ? size : sl);
346 for(p = addr; p <= e; p += step){
347 if(memcmp(p, sig, sl) != 0)
349 if(size && checksum(p, size) != 0)
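/*
 * Sketch, not part of the original: the BIOS and ACPI structures looked
 * up with sigscan checksum to zero byte-wise, so checksum() can be read
 * as a plain byte sum; a hypothetical equivalent:
 */
static int
bytesum(void* v, int n)
{
	uchar *p, sum;

	sum = 0;
	for(p = v; n-- > 0; p++)
		sum += *p;
	return sum;	/* 0 means the structure is intact */
}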
363 top = ((bda[0x14]<<8) | bda[0x13])*KB;
365 if(top < 64*KB || top > 640*KB)
366 top = 640*KB; /* sanity */
368 /* reserved for bios tables (EBDA) */
375 sigsearch(char* signature, int size)
382 * Search for the data structure:
383 * 1) within the first KiB of the Extended BIOS Data Area (EBDA), or
384 * 2) within the last KiB of system base memory if the EBDA segment
385 * is undefined, or
386 * 3) within the BIOS ROM address space between 0xf0000 and 0xfffff
387 * (but will actually check 0xe0000 to 0xfffff).
390 if(memcmp(KADDR(0xfffd9), "EISA", 4) == 0){
391 if((p = (bda[0x0f]<<8)|bda[0x0e]) != 0){
392 if((r = sigscan(KADDR(p<<4), 1024, signature, size, 16)) != nil)
396 if((r = sigscan(KADDR(convmemsize()), 1024, signature, size, 16)) != nil)
399 /* hack for virtualbox: look in KiB below 0xa0000 */
400 if((r = sigscan(KADDR(0xa0000-1024), 1024, signature, size, 16)) != nil)
403 return sigscan(KADDR(0xe0000), 0x20000, signature, size, 16);
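/*
 * Illustrative usage sketch, not in the original: other legacy PC tables
 * are located the same way.  For example the Intel MP floating pointer
 * structure is the 16-byte, checksummed "_MP_" signature, so a
 * hypothetical caller would be:
 */
static void*
mpsearch(void)
{
	return sigsearch("_MP_", 16);
}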
409 static char signature[] = "RSD PTR ";
413 if((p = sigsearch(signature, 36)) != nil)
415 if((p = sigsearch(signature, 20)) != nil)
417 for(m = rmapacpi.map; m < rmapacpi.mapend && m->size; m++){
418 if(m->size > 0x7FFFFFFF)
420 if((v = vmap(m->addr, m->size)) != nil){
421 p = sigscan(v, m->size, signature, 36, 4);
423 p = sigscan(v, m->size, signature, 20, 4);
426 return vmap(m->addr + (p - v), 64);
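/*
 * Sketch, not part of the original: the "RSD PTR " structure searched
 * for above is the ACPI RSDP.  Its first 20 bytes are the ACPI 1.0 part
 * (the sigsearch(..., 20) fallback); ACPI 2.0 extends it to 36 bytes
 * with its own length and checksum (the 36 tried first).  Hypothetical
 * layout for reference:
 */
typedef struct Rsdp Rsdp;
struct Rsdp
{
	uchar	signature[8];	/* "RSD PTR " */
	uchar	checksum;	/* byte sum of first 20 bytes is 0 */
	uchar	oemid[6];
	uchar	revision;	/* 0 for ACPI 1.0, 2 for ACPI 2.0+ */
	uchar	rsdtaddr[4];	/* physical address of the RSDT */
	/* ACPI 2.0 and later only */
	uchar	length[4];	/* 36 */
	uchar	xsdtaddr[8];	/* physical address of the XSDT */
	uchar	xchecksum;	/* byte sum of all 36 bytes is 0 */
	uchar	reserved[3];
};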
438 * Initialise the memory bank information for conventional memory
439 * (i.e. less than 640KB). The base is the first location after the
440 * bootstrap processor MMU information and the limit is obtained from
441 * the BIOS data area.
446 mapfree(&rmapram, x, pa-x);
447 memset(KADDR(x), 0, pa-x); /* keep us honest */
450 x = PADDR(PGROUND((uintptr)end));
453 panic("kernel too big");
454 mapfree(&rmapram, x, pa-x);
455 memset(KADDR(x), 0, pa-x); /* keep us honest */
459 ramscan(ulong maxmem)
461 ulong *k0, kzero, map, maxkpa, maxpa, pa, *pte, *table, *va, vbase, x;
462 int nvalid[NMemType];
465 * The bootstrap code has created a prototype page
466 * table which maps the first MemMin of physical memory to KZERO.
467 * The page directory is at m->pdb and the first page of
468 * free memory is after the per-processor MMU information.
473 * Check if the extended memory size can be obtained from the CMOS.
474 * If it's 0 then it's either not known or >= 64MB. Always check
475 * at least 24MB in case there's a memory gap (up to 8MB) below 16MB;
476 * in this case the memory from the gap is remapped to the top of
478 * The value in CMOS is supposed to be the number of KB above 1MB.
481 x = (nvramread(0x18)<<8)|nvramread(0x17);
482 if(x == 0 || x >= (63*KB))
490 maxkpa = (u32int)-KZERO; /* 2^32 - KZERO */
493 * March up memory from MemMin to maxpa 1MB at a time,
494 * mapping the first page and checking the page can
495 * be written and read correctly. The page tables are created here
496 * on the fly, allocating from low memory as necessary.
498 k0 = (ulong*)KADDR(0);
502 memset(nvalid, 0, sizeof(nvalid));
505 * Can't map memory to KADDR(pa) when we're walking because
506 * we can only use KADDR for relatively low addresses.
507 * Instead, map each 4MB we scan to the virtual address range
508 * MemMin->MemMin+4MB while we are scanning.
513 * Map the page. Use mapalloc(&rmapram, ...) to make
514 * the page table if necessary; it will be returned to the
515 * pool later if it isn't needed. Map in a fixed range (the second 4M)
516 * because high physical addresses cannot be passed to KADDR.
518 va = (void*)(vbase + pa%(4*MB));
519 table = &m->pdb[PDX(va)];
521 if(map == 0 && (map = mapalloc(&rmapram, 0, BY2PG, BY2PG)) == 0)
523 memset(KADDR(map), 0, BY2PG);
524 *table = map|PTEWRITE|PTEVALID;
525 memset(nvalid, 0, sizeof(nvalid));
527 table = KADDR(PPN(*table));
528 pte = &table[PTX(va)];
530 *pte = pa|PTEWRITE|PTEUNCACHED|PTEVALID;
531 mmuflushtlb(PADDR(m->pdb));
533 * Write a pattern to the page and write a different
534 * pattern to a possible mirror at KZERO. If the data
535 * reads back correctly the chunk is some type of RAM (possibly
536 * a linearly-mapped VGA framebuffer, for instance...) and
537 * can be cleared and added to the memory pool. If not, the
538 * chunk is marked uncached and added to the UMB pool if <16MB
539 * or is marked invalid and added to the UPA pool.
544 nvalid[MemRAM] += MB/BY2PG;
545 mapfree(&rmapram, pa, MB);
548 *pte++ = pa|PTEWRITE|PTEVALID;
551 mmuflushtlb(PADDR(m->pdb));
552 /* memset(va, 0, MB); so damn slow to memset all of memory */
555 nvalid[MemUMB] += MB/BY2PG;
556 mapfree(&rmapumb, pa, MB);
559 *pte++ = pa|PTEWRITE|PTEUNCACHED|PTEVALID;
564 nvalid[MemUPA] += MB/BY2PG;
565 mapfree(&rmapupa, pa, MB);
571 * Done with this 4MB chunk, review the options:
572 * 1) not physical memory and >=16MB - invalidate the PDB entry;
573 * 2) physical memory - use the 4MB page extension if possible;
574 * 3) not physical memory and <16MB - use the 4MB page extension
575 * if possible, marking the chunk uncached;
576 * 4) mixed or no 4MB page extension - commit the already
577 * initialised space for the page table.
579 if(pa%(4*MB) == 0 && pa >= 32*MB && nvalid[MemUPA] == (4*MB)/BY2PG){
581 * If we encounter a 4MB chunk of missing memory
582 * at a sufficiently high offset, call it the end of
583 * memory. Otherwise we run the risk of thinking
584 * that video memory is real RAM.
588 if(pa <= maxkpa && pa%(4*MB) == 0){
589 table = &m->pdb[PDX(KADDR(pa - 4*MB))];
590 if(nvalid[MemUPA] == (4*MB)/BY2PG)
592 else if(nvalid[MemRAM] == (4*MB)/BY2PG && (m->cpuiddx & Pse))
593 *table = (pa - 4*MB)|PTESIZE|PTEWRITE|PTEVALID;
594 else if(nvalid[MemUMB] == (4*MB)/BY2PG && (m->cpuiddx & Pse))
595 *table = (pa - 4*MB)|PTESIZE|PTEWRITE|PTEUNCACHED|PTEVALID;
597 *table = map|PTEWRITE|PTEVALID;
601 mmuflushtlb(PADDR(m->pdb));
605 * If we didn't reach the end of the 4MB chunk, that part won't
606 * be mapped. Commit the already initialised space for the page table.
608 if(pa % (4*MB) && pa <= maxkpa){
609 m->pdb[PDX(KADDR(pa))] = map|PTEWRITE|PTEVALID;
613 mapfree(&rmapram, map, BY2PG);
615 m->pdb[PDX(vbase)] = 0;
616 mmuflushtlb(PADDR(m->pdb));
618 mapfree(&rmapupa, pa, (u32int)-pa);
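/*
 * Sketch, not part of the original: the probe described above, reduced
 * to a single page.  Write one pattern through the scan mapping and a
 * different one through the KZERO alias of physical page 0; if the
 * first pattern survives, something RAM-like backs the page and it is
 * not just a wrapped-around alias of low memory.  probepage and the
 * patterns are hypothetical.
 */
static int
probepage(ulong* va, ulong* k0)
{
	ulong old, oldk0;

	old = *va;
	oldk0 = *k0;
	*va = 0x12345678;
	*k0 = 0x87654321;
	if(*va != 0x12345678){
		*k0 = oldk0;
		return 0;	/* not writable RAM */
	}
	*k0 = oldk0;
	*va = old;
	return 1;
}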
622 typedef struct Emap Emap;
629 static Emap emap[128];
633 emapcmp(const void *va, const void *vb)
643 if(a->base < b->base)
645 if(a->base > b->base)
651 map(ulong base, ulong len, int type)
654 ulong *table, flags, maxkpa;
657 * Split any call crossing MemMin to make the code below simpler.
659 if(base < MemMin && len > MemMin-base){
662 map(MemMin, len-n, type);
666 * Let lowraminit and umbscan hash out the low MemMin.
672 * Any non-memory below 16*MB is used as upper mem blocks.
674 if(type == MemUPA && base < 16*MB && len > 16*MB-base){
675 map(base, 16*MB-base, MemUMB);
676 map(16*MB, len-(16*MB-base), MemUPA);
681 * Memory below CPU0END is reserved for the kernel
682 * and already mapped.
684 if(base < PADDR(CPU0END)){
685 n = PADDR(CPU0END) - base;
688 map(PADDR(CPU0END), len-n, type);
693 * Memory between KTZERO and end is the kernel itself
694 * and is already mapped.
696 if(base < PADDR(KTZERO) && len > PADDR(KTZERO)-base){
697 map(base, PADDR(KTZERO)-base, type);
700 if(PADDR(KTZERO) < base && base < PADDR(PGROUND((ulong)end))){
701 n = PADDR(PGROUND((ulong)end));
704 map(PADDR(PGROUND((ulong)end)), len-n, type);
709 * Now we have a simple case.
711 // print("map %.8lux %.8lux %d\n", base, base+len, type);
714 mapfree(&rmapram, base, len);
715 flags = PTEWRITE|PTEVALID;
718 mapfree(&rmapumb, base, len);
719 flags = PTEWRITE|PTEUNCACHED|PTEVALID;
722 mapfree(&rmapupa, base, len);
726 mapfree(&rmapacpi, base, len);
736 * bottom MemMin is already mapped - just twiddle flags.
737 * (not currently used - see above)
740 table = KADDR(PPN(m->pdb[PDX(base)]));
743 for(; base<e; base+=BY2PG)
744 table[PTX(base)] |= flags;
749 * Only map from KZERO to 2^32.
755 if(len > maxkpa-base)
757 pdbmap(m->pdb, base|flags, base+KZERO, len);
764 ulong base, len, last;
769 /* passed by bootloader */
770 if((s = getconf("*e820")) == nil)
771 if((s = getconf("e820")) == nil)
774 while(nemap < nelem(emap)){
781 if(s[1] == ' '){ /* new format */
782 e->type = s[0] - '0';
785 e->base = strtoull(s, &s, 16);
788 e->top = strtoull(s, &s, 16);
789 if(*s != ' ' && *s != 0)
796 qsort(emap, nemap, sizeof emap[0], emapcmp);
798 for(i=0; i<nemap; i++){
801 * pull out the info but only about the low 32 bits...
803 if(e->base >= (1ULL<<32))
811 if(e->top > (1ULL<<32))
816 * If the map skips addresses, mark them available.
819 map(last, base-last, MemUPA);
823 map(base, len, MemRAM);
826 map(base, len, MemACPI);
829 map(base, len, MemReserved);
837 map(last, -last, MemUPA);
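/*
 * Illustrative note, not in the original: the *e820= value parsed above
 * is a space-separated list of entries, each a type digit followed by
 * the 64-bit base and top addresses in hex, e.g. (hypothetical values):
 *
 *	*e820=1 0000000000000000 000000000009fc00 2 00000000000f0000 0000000000100000 1 0000000000100000 000000007ffe0000
 *
 * Usable entries end up in rmapram via map(..., MemRAM), ACPI data in
 * rmapacpi, and everything else is treated as reserved.
 */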
851 if(p = getconf("*maxmem"))
852 maxmem = strtoul(p, 0, 0);
857 * Set special attributes for memory between 640KB and 1MB:
858 * VGA memory is writethrough;
859 * BIOS ROMs/UMBs are uncached;
860 * then scan for useful memory.
862 for(pa = 0xA0000; pa < 0xC0000; pa += BY2PG){
863 pte = mmuwalk(m->pdb, (ulong)KADDR(pa), 2, 0);
866 for(pa = 0xC0000; pa < 0x100000; pa += BY2PG){
867 pte = mmuwalk(m->pdb, (ulong)KADDR(pa), 2, 0);
870 mmuflushtlb(PADDR(m->pdb));
878 * Set the conf entries describing banks of allocatable memory.
880 for(i=0; i<nelem(mapram) && i<nelem(conf.mem); i++){
881 mp = &rmapram.map[i];
884 cm->npage = mp->size/BY2PG;
888 for(; i<nelem(mapram); i++)
889 lost += rmapram.map[i].size;
891 print("meminit - lost %lud bytes\n", lost);
898 * Allocate memory from the upper memory blocks.
901 umbmalloc(ulong addr, int size, int align)
905 if(a = mapalloc(&rmapumb, addr, size, align))
906 return (ulong)KADDR(a);
912 umbfree(ulong addr, int size)
914 mapfree(&rmapumb, PADDR(addr), size);
918 umbrwmalloc(ulong addr, int size, int align)
923 if(a = mapalloc(&rmapumbrw, addr, size, align))
924 return(ulong)KADDR(a);
927 * Perhaps the memory wasn't visible before
928 * the interface was initialised, so try again.
930 if((a = umbmalloc(addr, size, align)) == 0)
935 if(p[0] == 0xCC && p[size-1] == 0xCC)
943 umbrwfree(ulong addr, int size)
945 mapfree(&rmapumbrw, PADDR(addr), size);
949 * Give out otherwise-unused physical address space
950 * for use in configuring devices. Note that upaalloc
951 * does not map the physical address into virtual memory.
952 * Call vmap to do that.
955 upaalloc(int size, int align)
959 a = mapalloc(&rmapupa, 0, size, align);
961 print("out of physical address space allocating %d\n", size);
968 upafree(ulong pa, int size)
970 mapfree(&rmapupa, pa, size);
974 upareserve(ulong pa, int size)
978 a = mapalloc(&rmapupa, pa, size, 0);
981 * This can happen when we're using the E820
982 * map, which might have already reserved some
983 * of the regions claimed by the pci devices.
985 // print("upareserve: cannot reserve pa=%#.8lux size=%d\n", pa, size);
987 mapfree(&rmapupa, a, size);
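/*
 * Illustrative usage sketch, not in the original: as the comment above
 * upaalloc notes, the physical range it returns still has to be mapped
 * with vmap before it can be touched.  upamap and its page-aligned
 * allocation are hypothetical.
 */
static void*
upamap(int size)
{
	ulong pa;

	if((pa = upaalloc(size, BY2PG)) == 0)
		return nil;
	return vmap(pa, size);
}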