2 #include "../port/lib.h"
/*
 * Memory-map entry types passed to memmapadd()/memmapalloc()
 * (fragment: the enclosing enum declaration is not visible here).
 */
10 MemUPA = 0, /* unbacked physical address */
11 MemUMB = 1, /* upper memory block (<16MB) */
12 MemRAM = 2, /* physical memory */
13 MemACPI = 3, /* ACPI tables */
14 MemReserved = 4, /* don't allocate */
/* lowest RAM address not already mapped by the bootstrap (set by l.s) */
19 u32int MemMin; /* set by l.s */
/*
 * Fragment of rampage(): allocate one page-aligned page of RAM,
 * for early page tables. Once conf.mem[] has been populated the
 * normal kernel allocator (xspanalloc) is used instead.
 */
26 if(conf.mem[0].npage != 0)
27 return xspanalloc(BY2PG, BY2PG, 0);
30 * Allocate from the map directly to make page tables.
/* pa == -1 presumably asks memmapalloc to pick any MemRAM range — confirm against memmap code */
32 pa = memmapalloc(-1, BY2PG, BY2PG, MemRAM);
/* must also be reachable through the KZERO window (cankaddr) to be usable */
33 if(pa == -1 || cankaddr(pa) == 0)
34 panic("rampage: out of memory\n");
/*
 * Fragment of mapkzero(): map physical [base, base+len) into the
 * kernel's KZERO window. A range straddling MemMin is split and the
 * low half handled by recursion, so each pmap/punmap call stays on
 * one side of the already-bootstrapped boundary.
 */
39 mapkzero(uintptr base, uintptr len, int type)
43 if(base < MemMin && base+len > MemMin){
44 mapkzero(base, MemMin-base, type);
45 len = base+len-MemMin;
/* default mapping: writable, cached */
61 flags = PTEWRITE|PTEVALID;
/* NOTE(review): this path appears to remap an existing range uncached
 * (punmap then PTEUNCACHED) — the selecting condition is not visible here */
65 punmap(base+KZERO, len);
66 flags = PTEWRITE|PTEUNCACHED|PTEVALID;
/* flags are OR'ed into the low bits of the (page-aligned) physical address */
72 pmap(base|flags, base+KZERO, len);
/*
 * Fragment of ebdaseg(): return the physical address of the Extended
 * BIOS Data Area, or 0 if the BIOS does not advertise one. Only
 * trusted when the ROM carries the "EISA" signature at f000:ffd9.
 */
80 if(memcmp(KADDR(0xfffd9), "EISA", 4) != 0)
/* bda presumably points at the BIOS Data Area (0x400); the word at
 * offset 0x0e/0x0f is the EBDA segment, <<4 converts segment to
 * physical address — confirm bda initialization (not visible) */
83 return ((bda[0x0f]<<8)|bda[0x0e]) << 4;
/*
 * Fragment of convmemsize(): size of conventional (base) memory in
 * bytes, read from the BIOS Data Area word at offset 0x13/0x14
 * (reported in KB). Clamped to 640KB when the BIOS value is absurd.
 */
93 top = ((bda[0x14]<<8) | bda[0x13])*KB;
95 if(top < 64*KB || top > 640*KB)
96 top = 640*KB; /* sanity */
/*
 * Fragment of lowraminit(): build the memory map for everything
 * below 1MB — conventional RAM, the EBDA, the VGA aperture, option
 * ROMs and the system BIOS ROM.
 */
98 /* Reserved for BIOS tables */
107 uintptr base, pa, len;
111 * Discover the memory bank information for conventional memory
112 * (i.e. less than 640KB). The base is the first location after the
113 * bootstrap processor MMU information and the limit is obtained from
114 * the BIOS data area.
116 base = PADDR(CPU0END);
/* pa here is presumably convmemsize() — the assignment is not visible */
119 memmapadd(base, pa-base, MemRAM);
121 /* Reserve BIOS tables */
122 memmapadd(pa, 1*KB, MemReserved);
/* the EBDA, when present, also holds BIOS tables */
125 if((pa = ebdaseg()) != 0)
126 memmapadd(pa, 1*KB, MemReserved);
/* last KB below the VGA window, often used by the BIOS as well */
127 memmapadd(0xA0000-1*KB, 1*KB, MemReserved);
129 /* Reserve the VGA frame buffer */
130 umballoc(0xA0000, 128*KB, 0);
132 /* Reserve VGA ROM */
133 memmapadd(0xC0000, 64*KB, MemReserved);
136 * Scan the Upper Memory Blocks (0xD0000->0xF0000) for device BIOS ROMs.
137 * This should start with a two-byte header of 0x55 0xAA, followed by a
138 * byte giving the size of the ROM in 512-byte chunks.
139 * These ROM's must start on a 2KB boundary.
141 for(p = (uchar*)KADDR(0xD0000); p < (uchar*)KADDR(0xF0000); p += len){
143 if(p[0] == 0x55 && p[1] == 0xAA){
/* len is presumably p[2]*512 here — the assignment is not visible */
146 memmapadd(PADDR(p), len, MemReserved);
/* advance to the next possible 2KB-aligned ROM header */
147 len = ROUND(len, 2*KB);
151 /* Reserve BIOS ROM */
152 memmapadd(0xF0000, 64*KB, MemReserved);
/*
 * Fragments of checksum() and sigscan().
 * checksum(): byte-sum of n bytes at v (body not visible; BIOS/ACPI
 * tables are valid when the sum is 0).
 * sigscan(): scan len bytes from addr, every step bytes, for the
 * string sig; when size != 0 also require the size-byte structure to
 * checksum to 0. Returns a pointer to the match, presumably nil on
 * failure (return statements not visible).
 */
156 checksum(void *v, int n)
168 sigscan(uchar *addr, int len, char *sig, int size, int step)
/* stop early enough that both the signature and the checksummed
 * structure fit inside the scanned window */
174 e = addr+len-(size > sl ? size : sl);
175 for(p = addr; p <= e; p += step){
176 if(memcmp(p, sig, sl) != 0)
178 if(size && checksum(p, size) != 0)
/*
 * Fragment of sigsearch(): locate a BIOS table by its signature,
 * searching the standard places in order (EBDA, top of base memory,
 * BIOS ROM space), plus a VirtualBox-specific location.
 */
186 sigsearch(char* signature, int size)
192 * Search for the data structure:
193 * 1) within the first KiB of the Extended BIOS Data Area (EBDA), or
194 * 2) within the last KiB of system base memory if the EBDA segment
196 * 3) within the BIOS ROM address space between 0xf0000 and 0xfffff
197 * (but will actually check 0xe0000 to 0xfffff).
199 if((p = ebdaseg()) != 0){
200 if((r = sigscan(KADDR(p), 1*KB, signature, size, 16)) != nil)
/* NOTE(review): scans the KB starting AT convmemsize(); the comment
 * above says "last KiB of system base memory" — confirm intent */
203 if((r = sigscan(KADDR(convmemsize()), 1*KB, signature, size, 16)) != nil)
206 /* hack for virtualbox: look in KiB below 0xa0000 */
207 if((r = sigscan(KADDR(0xA0000-1*KB), 1*KB, signature, size, 16)) != nil)
210 return sigscan(KADDR(0xE0000), 128*KB, signature, size, 16);
/*
 * Fragment of rsdsearch(): find the ACPI RSDP ("RSD PTR ") table.
 * First try the legacy BIOS locations via sigsearch() — checksum
 * over 36 bytes (ACPI 2.0+ RSDP) before falling back to 20 bytes
 * (ACPI 1.0) — then scan any MemACPI ranges from the memory map
 * (e.g. ranges reported by an E820/UEFI map).
 */
216 static char signature[] = "RSD PTR ";
220 if((p = sigsearch(signature, 36)) != nil)
222 if((p = sigsearch(signature, 20)) != nil)
225 for(base = memmapnext(-1, MemACPI); base != -1; base = memmapnext(base, MemACPI)){
/* skip empty ranges and anything too large to vmap in one go */
226 size = memmapsize(base, 0);
227 if(size == 0 || size > 0x7fffffff)
229 if((v = vmap(base, size)) != nil){
230 p = sigscan(v, size, signature, 36, 4);
232 p = sigscan(v, size, signature, 20, 4);
/* return a fresh mapping of just the table itself; the scan
 * mapping is presumably vunmap'ed elsewhere — not visible here */
235 return vmap(base + (p - v), 64);
/*
 * Fragment of upaalloc().
 */
242 * Give out otherwise-unused physical address space
243 * for use in configuring devices. Note that upaalloc
244 * does not map the physical address into virtual memory.
245 * Call vmap to do that.
248 upaalloc(ulong pa, ulong size, ulong align)
/* pa == -1UL means "any address"; widen to -1ULL so the sentinel
 * survives the ulong->uvlong conversion instead of zero-extending */
250 return (ulong)memmapalloc(pa == -1UL ? -1ULL : (uvlong)pa, size, align, MemUPA);
/*
 * Fragment of upafree(): return address space taken by upaalloc()
 * to the MemUPA pool.
 */
254 upafree(ulong pa, ulong size)
256 memmapfree(pa, size, MemUPA);
/*
 * Fragment of umballoc().
 */
260 * Allocate memory from the upper memory blocks.
263 umballoc(ulong pa, ulong size, ulong align)
/* same -1 "any address" sentinel handling as upaalloc() */
265 return (ulong)memmapalloc(pa == -1UL ? -1ULL : (uvlong)pa, size, align, MemUMB);
/*
 * Fragment of umbfree(): return an upper memory block taken by
 * umballoc() to the MemUMB pool.
 */
269 umbfree(ulong pa, ulong size)
271 memmapfree(pa, size, MemUMB);
/*
 * Fragment of umbexclude(): parse the "umbexclude" configuration
 * variable — a comma-separated list of inclusive "start-end" ranges —
 * and allocate each range out of the UMB pool so it is never handed
 * out by umballoc().
 */
280 if((p = getconf("umbexclude")) == nil)
283 while(p && *p != '\0' && *p != '\n'){
285 pa = strtoul(p, &rptr, 0);
/* each range must be "start-end"; op presumably saves the start of
 * the current token for the error messages — assignment not visible */
286 if(rptr == nil || rptr == p || *rptr != '-'){
287 print("umbexclude: invalid argument <%s>\n", op);
/* +1: the range is inclusive of its end address */
292 size = strtoul(p, &rptr, 0) - pa + 1;
294 print("umbexclude: bad range <%s>\n", op);
297 if(rptr != nil && *rptr == ',')
/* claim the range so nobody else can allocate it */
301 memmapalloc(pa, size, 0, MemUMB);
/*
 * Fragment of e820scan(): parse the BIOS E820 memory map handed over
 * by the bootloader in the "*e820"/"e820" configuration string
 * ("type base top ..." hex tuples), populate the memory map, then
 * map all discovered RAM into the KZERO window.
 */
308 uvlong base, top, size;
312 /* passed by bootloader */
313 if((s = getconf("*e820")) == nil)
314 if((s = getconf("e820")) == nil)
/* new format carries an explicit type field before each range */
323 if(s[1] == ' '){ /* new format */
327 base = strtoull(s, &s, 16);
330 top = strtoull(s, &s, 16);
331 if(*s != ' ' && *s != 0)
/* classify each range; the type switch itself is not visible here */
337 memmapadd(base, top - base, MemRAM);
340 memmapadd(base, top - base, MemACPI);
343 memmapadd(base, top - base, MemReserved);
/* map each whole-page RAM run at KZERO */
347 for(base = memmapnext(-1, MemRAM); base != -1; base = memmapnext(base, MemRAM)){
348 size = memmapsize(base, BY2PG) & ~(BY2PG-1);
350 mapkzero(PGROUND(base), size, MemRAM);
/*
 * Fragment of ramscan(): probe physical memory between pa and top in
 * chunk-sized steps by writing and reading back test patterns, when
 * no trustworthy BIOS map is available. Chunks that hold their
 * pattern are added as MemRAM and mapped at KZERO; chunks below
 * 16MB that fail become UMB space.
 */
357 ramscan(uintptr pa, uintptr top, uintptr chunk)
359 ulong save, pat, seed, *v, *k0;
/* n: words per chunk; w: words per page — test one word per page */
366 n = chunk/sizeof(*v);
367 w = BY2PG/sizeof(*v);
373 for(; pa < top; pa += chunk){
/* first pass: temporarily map the chunk and write the pattern */
376 if((v = vmap(pa, chunk)) == nil)
378 for(i = 0; i < n; i += w){
/* second pass: remap and verify the pattern survived */
389 if((v = vmap(pa, chunk)) == nil)
391 for(i = 0; i < n; i += w){
398 memmapadd(pa, chunk, MemRAM);
399 mapkzero(pa, chunk, MemRAM);
/* failed chunks below 16MB are still usable as ISA UMB space */
405 if(pa+chunk <= 16*MB)
406 memmapadd(pa, chunk, MemUMB);
409 * If we encounter a chunk of missing memory
410 * at a sufficiently high offset, call it the end of
411 * memory. Otherwise we run the risk of thinking
412 * that video memory is real RAM.
/*
 * Fragment of meminit0().
 */
422 * Sort out initial memory map and discover RAM.
428 * Add the already mapped memory after the kernel.
/* l.s must have mapped at least up to the end of the kernel image */
430 if(MemMin < PADDR(PGROUND((uintptr)end)))
431 panic("kernel too big");
432 memmapadd(PADDR(PGROUND((uintptr)end)), MemMin-PADDR(PGROUND((uintptr)end)), MemRAM);
435 * Memory between KTZERO and end is the kernel itself.
437 memreserve(PADDR(KTZERO), PADDR(PGROUND((uintptr)end))-PADDR(KTZERO));
440 * Memory below CPU0END is reserved for the kernel.
442 memreserve(0, PADDR(CPU0END));
445 * Addresses below 16MB default to be upper
446 * memory blocks usable for ISA devices.
448 memmapadd(0, 16*MB, MemUMB);
451 * Everything between 16MB and 4GB defaults
452 * to unbacked physical addresses usable for
/* (u32int)-16*MB == 4GB - 16MB: the rest of the 32-bit space */
455 memmapadd(16*MB, (u32int)-16*MB, MemUPA);
458 * On 386, reserve >= 4G as we have no PAE support.
460 if(sizeof(void*) == 4)
461 memmapadd((u32int)-BY2PG, -((uvlong)((u32int)-BY2PG)), MemReserved);
464 * Discover conventional RAM, ROMs and UMBs.
469 * Discover more RAM and map to KZERO.
/* probe from MemMin to the top of the address space in 4MB chunks */
472 ramscan(MemMin, -((uintptr)MemMin), 4*MB);
/*
 * Fragment of memreserve().
 */
476 * Until the memory map is finalized by meminit(),
477 * archinit() should reserve memory of discovered BIOS
478 * and ACPI tables by calling memreserve() to prevent
479 * them from getting allocated and trashed.
480 * This is due to the UEFI and BIOS memory map being
481 * unreliable and sometimes marking these ranges as RAM.
484 memreserve(uintptr pa, uintptr size)
/* only legal before meminit() hands the map over to conf.mem[] */
486 assert(conf.mem[0].npage == 0);
/* expand to whole pages: grow size by pa's page offset (pa itself is
 * presumably rounded down afterwards — that line is not visible) */
488 size += (pa & BY2PG-1);
491 memmapadd(pa, size, MemReserved);
/*
 * Fragment of meminit().
 */
495 * Finalize the memory map:
496 * (re-)map the upper memory blocks
497 * allocate all usable ram to the conf.mem[] banks
/* give the UMB ranges (uncached) KZERO mappings */
506 for(base = memmapnext(-1, MemUMB); base != -1; base = memmapnext(base, MemUMB)){
507 size = memmapsize(base, BY2PG) & ~(BY2PG-1);
509 mapkzero(PGROUND(base), size, MemUMB);
/* hand every whole-page RAM run to a conf.mem[] bank */
513 for(base = memmapnext(-1, MemRAM); base != -1; base = memmapnext(base, MemRAM)){
514 size = memmapsize(base, BY2PG) & ~(BY2PG-1);
/* claim the range from the map so nothing else can allocate it */
517 cm->base = memmapalloc(base, size, BY2PG, MemRAM);
521 cm->npage = size/BY2PG;
/* stop once all conf.mem[] banks are in use */
522 if(++cm >= &conf.mem[nelem(conf.mem)])