2 #include "../port/lib.h"
6 #include "../port/error.h"
/*
 * Table of attachable segment classes, searched by name from
 * segattach()/addphysseg()/isphysseg() below and guarded by
 * physseglock.  NOTE(review): this extract is line-sampled --
 * additional initializer entries and the closing "};" are not
 * visible here.
 */
11 * Attachable segment types
13 static Physseg physseg[10] = {
14 { SG_SHARED, "shared", 0, SEGMAXSIZE },
15 { SG_BSS, "memory", 0, SEGMAXSIZE },
19 static Lock physseglock;
/*
 * Image allocator: a fixed pool of Image structs with a free list and
 * a hash keyed on qid path; ihash() maps a qid path to its chain head.
 * NOTE(review): struct fields between the sampled lines (e.g. the
 * free-list and pool pointers) are missing from this extract.
 */
22 #define ihash(s) imagealloc.hash[s%IHASHSIZE]
23 static struct Imagealloc
28 Image *hash[IHASHSIZE];
29 QLock ireclaim; /* mutex on reclaiming free images */
/* optional hook for attaching global segments; nil when absent (checked in segattach) */
32 Segment* (*_globalsegattach)(Proc*, char*);
/*
 * initseg() body fragment: allocate the Image pool (conf.nimage
 * entries) and thread the first conf.nimage-1 entries into the free
 * list.  The for-loop body is not visible in this extract --
 * presumably it links i->next = i+1, with xalloc's zeroing leaving
 * the last entry's next nil; TODO confirm against full source.
 */
39 imagealloc.list = xalloc(conf.nimage*sizeof(Image));
40 if(imagealloc.list == nil)
41 panic("initseg: no memory for Image");
42 ie = &imagealloc.list[conf.nimage-1];
43 for(i = imagealloc.list; i < ie; i++)
46 imagealloc.free = imagealloc.list;
/*
 * newseg(type, base, size): allocate and initialize a Segment of
 * `size` pages at virtual address `base`.  Requests larger than
 * SEGMAPSIZE*PTEPERTAB pages are rejected (the error path is not
 * visible in this extract).  The semaphore queue is initialized to an
 * empty circular list.  The Pte* map is heap-allocated only when the
 * required map does not fit in the inline ssegmap array.
 */
50 newseg(int type, uintptr base, ulong size)
55 if(size > (SEGMAPSIZE*PTEPERTAB))
58 s = malloc(sizeof(Segment));
/* top is exclusive: base + size pages */
64 s->top = base+(size*BY2PG);
66 s->sema.prev = &s->sema;
67 s->sema.next = &s->sema;
/* one Pte covers PTEPERTAB pages; round up to whole tables */
69 mapsize = ROUND(size, PTEPERTAB)/PTEPERTAB;
70 if(mapsize > nelem(s->ssegmap)){
71 s->map = malloc(mapsize*sizeof(Pte*));
/* else: small segment, use the inline map (s->map = s->ssegmap not visible here) */
80 s->mapsize = nelem(s->ssegmap);
/*
 * Fragment of segment release (presumably putseg -- the signature is
 * not visible in this extract): drop a reference and, on the last
 * one, free every Pte in the map, free a heap-allocated map (but not
 * the inline ssegmap), and free the profile buffer if present.
 */
106 } else if(decref(s) != 0)
109 emap = &s->map[s->mapsize];
110 for(pte = s->map; pte < emap; pte++)
/* only free the map if it was heap-allocated in newseg/ibrk */
114 if(s->map != s->ssegmap)
116 if(s->profile != nil)
/*
 * relocateseg(s, offset): walk every allocated Pte in the segment map
 * and every page within it, applying `offset` (the per-page update
 * statement itself is not visible in this extract -- presumably it
 * adjusts each Page's va; TODO confirm).
 */
123 relocateseg(Segment *s, uintptr offset)
128 emap = &s->map[s->mapsize];
129 for(pte = s->map; pte < emap; pte++) {
133 for(pg = (*pte)->first; pg <= pe; pg++) {
/*
 * dupseg(seg, segno, share): duplicate seg[segno] for a new process,
 * dispatching on the segment type.  Visible policy: SG_TEXT shares
 * the pte set; SG_BSS is plain copy-on-write; SG_DATA is
 * copy-on-write plus demand-load state (fstart copied; flen
 * presumably copied on a missing line).  Each case builds a new
 * segment of identical type/base/size, then ptes are duplicated via
 * ptecpy() and the flushme flag is propagated.  Many interior lines
 * (sharing fast path, locking, error handling) are not visible in
 * this extract.
 */
141 dupseg(Segment **seg, int segno, int share)
155 switch(s->type&SG_TYPE) {
156 case SG_TEXT: /* New segment shares pte set */
163 n = newseg(s->type, s->base, s->size);
166 case SG_BSS: /* Just copy on write */
169 n = newseg(s->type, s->base, s->size);
172 case SG_DATA: /* Copy on write plus demand load info */
182 n = newseg(s->type, s->base, s->size);
186 n->fstart = s->fstart;
/* duplicate the page tables copy-on-write */
191 for(i = 0; i < size; i++)
192 if((pte = s->map[i]) != nil)
193 n->map[i] = ptecpy(pte);
195 n->flushme = s->flushme;
/*
 * segpage(s, p): install page p into segment s at p->va.  An address
 * outside [s->base, s->top) is an error (the error statement itself
 * is on a line not visible here).  The covering Pte is allocated on
 * demand; the final store of p into *pg is also on a missing line.
 */
210 segpage(Segment *s, Page *p)
216 if(p->va < s->base || p->va >= s->top)
/* byte offset of the page within the segment */
219 soff = p->va - s->base;
220 pte = &s->map[soff/PTEMAPMEM];
221 if((etp = *pte) == nil)
222 *pte = etp = ptealloc();
/* slot for this page within its Pte */
224 pg = &etp->pages[(soff&(PTEMAPMEM-1))/BY2PG];
/*
 * attachimage(type, c, base, len): find or create the cached Image
 * for channel c and give it a segment of `len` pages at `base`.
 * Cache hits are matched first by qid path, then fully verified with
 * eqchantdqid() plus qid.type.  When the free list is empty, pages of
 * inactive images are reclaimed (and broken procs freed) until an
 * Image becomes available.  Locking and the return path are on lines
 * not visible in this extract.
 */
233 attachimage(int type, Chan *c, uintptr base, ulong len)
240 * Search the image cache for remains of the text from a previous
241 * or currently running incarnation
243 for(i = ihash(c->qid.path); i; i = i->hash) {
244 if(c->qid.path == i->qid.path) {
246 if(eqchantdqid(c, i->type, i->dev, i->qid, 0) && c->qid.type == i->qid.type)
252 /* dump pages of inactive images to free image structures */
253 while((i = imagealloc.free) == nil) {
255 if(imagereclaim(1000) == 0 && imagealloc.free == nil){
256 freebroken(); /* can use the memory */
/* block until resources are freed, then retry */
257 resrcwait("no image after reclaim");
/* unlink the Image from the free list and hash it under c's qid path */
262 imagealloc.free = i->next;
269 l = &ihash(c->qid.path);
288 i->s = newseg(type, base, len);
298 extern int pagereclaim(Image*, int); /* page.c */
/*
 * imagereclaim(min): reclaim at least `min` cached pages from images,
 * serialized by imagealloc.ireclaim (eqlock, so it can be
 * interrupted).  The static cursor `i` persists across calls so
 * successive reclaims rotate through the pool instead of always
 * starting at the front.  While free Image structs exist, any image
 * may be trimmed; once the free list is empty, only inactive images
 * (ref == pgref, i.e. all remaining refs are page-cache refs) are
 * touched -- see the comment at original lines 318-319.
 */
301 imagereclaim(int min)
303 static Image *i, *ie;
306 eqlock(&imagealloc.ireclaim);
309 ie = &imagealloc.list[conf.nimage];
312 for(j = 0; j < conf.nimage; j++, i++){
318 * if there are no free image structures, only
319 * reclaim pages from inactive images.
321 if(imagealloc.free != nil || i->ref == i->pgref){
322 n += pagereclaim(i, min - n);
327 qunlock(&imagealloc.ireclaim);
/*
 * Fragment of image release (presumably putimage -- the signature is
 * not visible in this extract): when only page-cache references
 * remain, poison the qid so future cache lookups cannot match,
 * unlink the Image from its hash chain, push it onto the free list,
 * and close the underlying channel without blocking.
 */
349 * all remaining references to this image are from the
350 * page cache, so close the chan.
356 l = &ihash(i->qid.path);
/* invalidate the qid: no future attachimage() lookup can match it */
357 mkqid(&i->qid, ~0, ~0, QTFILE);
361 for(f = *l; f != nil; f = f->hash) {
368 i->next = imagealloc.free;
374 ccloseq(c); /* does not block */
/*
 * ibrk(addr, seg): grow or shrink segment `seg` of the current proc
 * so that its top becomes PGROUND(addr).  Shrinking is refused for
 * segments shared with other procs (see the comment at original
 * lines 408-410); otherwise the vacated pages are freed with
 * mfreeseg().  Growing checks that the new top does not collide with
 * any other segment and does not exceed the maximum segment size,
 * then enlarges the Pte* map if needed, preserving the old entries.
 * Locking, error raises, and several interior statements are on
 * lines not visible in this extract.
 */
378 ibrk(uintptr addr, int seg)
395 /* We may start with the bss overlapping the data */
397 if(seg != BSEG || up->seg[DSEG] == nil || addr < up->seg[DSEG]->base) {
404 newtop = PGROUND(addr);
405 newsize = (newtop-s->base)/BY2PG;
406 if(newtop < s->top) {
408 * do not shrink a segment shared with other procs, as the
409 * to-be-freed address space may have been passed to the kernel
410 * already by another proc and is past the validaddr stage.
416 mfreeseg(s, newtop, (s->top-newtop)/BY2PG);
/* growing: refuse if the new top would overlap any other segment */
424 for(i = 0; i < NSEG; i++) {
426 if(ns == nil || ns == s)
428 if(newtop >= ns->base && newtop < ns->top) {
434 if(newsize > (SEGMAPSIZE*PTEPERTAB)) {
/* enlarge the Pte map, copying existing entries; free the old map
 * only if it was heap-allocated (not the inline ssegmap) */
438 mapsize = ROUND(newsize, PTEPERTAB)/PTEPERTAB;
439 if(mapsize > s->mapsize){
440 map = smalloc(mapsize*sizeof(Pte*));
441 memmove(map, s->map, s->mapsize*sizeof(Pte*));
442 if(s->map != s->ssegmap)
445 s->mapsize = mapsize;
/*
 * mcountseg(s): walk every allocated Pte and page slot of the
 * segment -- presumably counting resident pages (the accumulation
 * statement is on a line not visible here; TODO confirm).  Physical
 * segments are excluded.
 */
455 * called with s locked
458 mcountseg(Segment *s)
464 if((s->type&SG_TYPE) == SG_PHYSICAL)
468 emap = &s->map[s->mapsize];
469 for(pte = s->map; pte < emap; pte++){
473 for(pg = (*pte)->first; pg <= pe; pg++)
/*
 * mfreeseg(s, start, pages): release `pages` pages of the segment
 * beginning at va `start`.  Dispatches on segment type (the per-type
 * handling is on lines not visible here); for mapped types, other
 * processors must flush stale TLB entries before a page is freed
 * (comment at original lines 500-501).  The walk starts at the Pte
 * covering `start`, with `off` the page index inside that first Pte,
 * reset to 0 for each subsequent Pte.
 */
481 * called with s locked
484 mfreeseg(Segment *s, uintptr start, ulong pages)
493 switch(s->type&SG_TYPE){
500 * we have to make sure other processors flush the
501 * entry from their TLBs before the page is freed.
507 pte = &s->map[off/PTEMAPMEM];
508 off = (off&(PTEMAPMEM-1))/BY2PG;
509 for(emap = &s->map[s->mapsize]; pte < emap; pte++, off = 0) {
/* number of page slots remaining in this Pte from `off` onward */
511 off = PTEPERTAB - off;
517 pg = &(*pte)->pages[off];
518 for(pe = &(*pte)->pages[PTEPERTAB]; pg < pe; pg++) {
/*
 * isoverlap(p, va, len): scan p's segments for one intersecting
 * [va, va+len) -- presumably returning the overlapping Segment* or
 * nil (return statements are on lines not visible in this extract).
 */
530 isoverlap(Proc *p, uintptr va, uintptr len)
537 for(i = 0; i < NSEG; i++) {
541 if((newtop > ns->base && newtop <= ns->top) ||
542 (va >= ns->base && va < ns->top))
/*
 * addphysseg(new): register a new attachable physical segment class
 * in the physseg table, under physseglock (the lock() call is on a
 * line not visible here).  Fails if the name is already present or
 * if the table lacks room for the entry plus the terminating null
 * entry (hence nelem(physseg)-2).
 */
549 addphysseg(Physseg* new)
554 * Check not already entered and there is room
555 * for a new entry and the terminating null entry.
558 for(ps = physseg; ps->name; ps++){
559 if(strcmp(ps->name, new->name) == 0){
560 unlock(&physseglock);
564 if(ps-physseg >= nelem(physseg)-2){
565 unlock(&physseglock);
569 unlock(&physseglock);
/*
 * isphysseg(name): report whether `name` is a registered physical
 * segment class, scanning the physseg table under physseglock (the
 * lock() call and return values are on lines not visible here).
 */
575 isphysseg(char *name)
581 for(ps = physseg; ps->name; ps++){
582 if(strcmp(ps->name, name) == 0){
587 unlock(&physseglock);
/*
 * segattach(p, attr, name, va, len): attach a named segment class to
 * process p at virtual address va (or pick a hole below the stack
 * when va == 0).  Validates the user-supplied name, finds a free
 * process segment slot (ESEG is reserved), tries the global-segment
 * hook first, rounds va/len to page boundaries, rejects addresses
 * overlapping existing segments or above USTKTOP, then matches the
 * name against the physseg table and builds the segment with the
 * class's default attributes.  Error raises and several interior
 * statements are on lines not visible in this extract.
 */
592 segattach(Proc *p, ulong attr, char *name, uintptr va, uintptr len)
598 if(va != 0 && va >= USTKTOP)
/* name comes from user space: check it is mapped and NUL-terminated */
601 validaddr((uintptr)name, 1, 0);
602 vmemchr(name, 0, ~0);
604 for(sno = 0; sno < NSEG; sno++)
605 if(p->seg[sno] == nil && sno != ESEG)
612 * first look for a global segment with the
615 if(_globalsegattach != nil){
616 s = (*_globalsegattach)(p, name);
623 /* round up va+len */
624 len += va & (BY2PG-1);
631 * Find a hole in the address space.
632 * Starting at the lowest possible stack address - len,
633 * check for an overlapping segment, and repeat at the
634 * base of that segment - len until either a hole is found
635 * or the address space is exhausted. Ensure that we don't
639 for (os = p->seg[SSEG]; os != nil; os = isoverlap(p, va, len)) {
/* reject a nil/out-of-range/wrapping address range */
648 if(va == 0 || (va+len) > USTKTOP || (va+len) < va)
651 if(isoverlap(p, va, len) != nil)
654 for(ps = physseg; ps->name; ps++)
655 if(strcmp(name, ps->name) == 0)
663 attr &= ~SG_TYPE; /* Turn off what is not allowed */
664 attr |= ps->attr; /* Copy in defaults */
666 s = newseg(attr, va, len/BY2PG);
/*
 * syssegflush: system call -- flush instruction/data caches for the
 * user range [from, from+len).  Iterates segment by segment (seg()
 * looks up the segment containing `from`), clamping each pass to the
 * segment top and to one Pte's span, and marks each resident page
 * with txtflush = ~0 so every processor flushes it.  Locking, the
 * MMU flush, and the loop tail are on lines not visible in this
 * extract.  NOTE(review): `to` is read with va_arg(list, ulong) while
 * declared uintptr -- verify against the kernel's syscall argument
 * conventions before changing.
 */
674 syssegflush(va_list list)
676 uintptr from, to, off, len;
681 from = va_arg(list, uintptr);
682 to = va_arg(list, ulong);
691 s = seg(up, from, 1);
/* clamp this pass to the current segment's top */
697 len = (s->top < to ? s->top : to) - from;
699 pte = s->map[off/PTEMAPMEM];
/* do not run past the end of this Pte's span */
701 if(off+len > PTEMAPMEM)
705 pg = &pte->pages[off/BY2PG];
709 (*pg)->txtflush = ~0;
715 if(from < to && from < s->top)
/*
 * Fragment of profiling tick accounting (presumably segclock -- the
 * signature is not visible in this extract): slot 0 of the profile
 * buffer accumulates total time; ticks whose pc lies inside the
 * segment are also charged to the bucket for pc>>LRESPROF.
 */
730 if(s == nil || s->profile == nil)
733 s->profile[0] += TK2MS(1);
734 if(pc >= s->base && pc < s->top) {
736 s->profile[pc>>LRESPROF] += TK2MS(1);
/*
 * Two segment-conversion fragments (signatures not visible in this
 * extract -- presumably txt2data and data2txt): each builds a new
 * segment of the opposite type with the same base/size and carries
 * over the backing image and file start offset.
 */
745 ps = newseg(SG_DATA, s->base, s->size);
746 ps->image = s->image;
748 ps->fstart = s->fstart;
762 ps = newseg(SG_TEXT, s->base, s->size);
763 ps->image = s->image;
765 ps->fstart = s->fstart;