2 #include "../port/lib.h"
6 #include "../port/error.h"
8 static void imagereclaim(void);
/*
 * Table of segment classes a process may attach via segattach().
 * NOTE(review): initializer is elided in this view — only the first two
 * entries and the closing of the array are missing; do not assume the
 * table ends here. physseglock serializes addphysseg()/isphysseg() scans.
 */
13 * Attachable segment types
15 static Physseg physseg[10] = {
16 { SG_SHARED, "shared", 0, SEGMAXSIZE, 0, 0 },
17 { SG_BSS, "memory", 0, SEGMAXSIZE, 0, 0 },
21 static Lock physseglock;
/*
 * Cache of Image structures for executable texts, hashed by qid.path
 * (see attachimage).  ireclaim is a mutex so only one process runs
 * imagereclaim at a time.
 * NOTE(review): struct members between lines 25 and 29 (e.g. the free
 * list head used by initseg/attachimage) are elided from this view.
 */
24 #define ihash(s) imagealloc.hash[s%IHASHSIZE]
25 static struct Imagealloc
29 Image *hash[IHASHSIZE];
30 QLock ireclaim; /* mutex on reclaiming free images */
/* Optional per-port hook consulted by segattach() before the physseg
   table; nil when the port provides no global segments. */
33 Segment* (*_globalsegattach)(Proc*, char*);
/*
 * Fragment of initseg(): allocate conf.nimage Images at boot and chain
 * them onto imagealloc.free.  xalloc failure is fatal (panic) — boot time.
 * NOTE(review): the loop body linking i->next (and the terminating link
 * for the last element) is elided from this view.
 */
40 imagealloc.free = xalloc(conf.nimage*sizeof(Image));
41 if(imagealloc.free == nil)
42 panic("initseg: no memory for Image");
43 ie = &imagealloc.free[conf.nimage-1];
44 for(i = imagealloc.free; i < ie; i++)
/*
 * Fragment of newseg(): allocate and initialize a Segment of `size`
 * pages at virtual address `base`.  Rejects sizes beyond the largest
 * representable map (SEGMAPSIZE*PTEPERTAB pages).  Small segments use
 * the inline ssegmap array; larger ones get a heap-allocated Pte* map.
 * NOTE(review): error raise, ref/type/base init, and the if/else
 * structure around the two map choices are elided from this view.
 */
50 newseg(int type, ulong base, ulong size)
55 if(size > (SEGMAPSIZE*PTEPERTAB))
58 s = smalloc(sizeof(Segment));
62 s->top = base+(size*BY2PG);
/* empty circular semaphore list */
64 s->sema.prev = &s->sema;
65 s->sema.next = &s->sema;
/* one map slot per PTEPERTAB pages, rounded up */
67 mapsize = ROUND(size, PTEPERTAB)/PTEPERTAB;
68 if(mapsize > nelem(s->ssegmap)){
70 if(mapsize > (SEGMAPSIZE*PTEPERTAB))
71 mapsize = (SEGMAPSIZE*PTEPERTAB);
72 s->map = smalloc(mapsize*sizeof(Pte*));
/* small-segment path: use the embedded map */
77 s->mapsize = nelem(s->ssegmap);
/*
 * Fragment of segment teardown (presumably putseg — header elided):
 * walk the whole Pte map freeing entries, then release a heap-allocated
 * map (but never the embedded ssegmap).
 * NOTE(review): image detach logic and ref-count handling around line 96
 * are elided; `i` here appears to be the segment's backing Image.
 */
96 if(i->s == s && s->ref == 1)
114 emap = &s->map[s->mapsize];
115 for(pp = s->map; pp < emap; pp++)
120 if(s->map != s->ssegmap)
/*
 * Fragment of relocateseg(): slide every mapped page's virtual address
 * by `offset`, iterating map slots then the first..last page window of
 * each Pte.
 * NOTE(review): the per-page adjustment inside the inner loop is elided.
 */
128 relocateseg(Segment *s, ulong offset)
131 Pte *pte, **p, **endpte;
133 endpte = &s->map[s->mapsize];
134 for(p = s->map; p < endpte; p++) {
138 for(pg = pte->first; pg <= pte->last; pg++) {
/*
 * Fragment of dupseg(): duplicate segment `segno` of a process for fork,
 * dispatching on segment type — text shares the pte set, bss is plain
 * copy-on-write, data additionally carries demand-load info (fstart).
 * NOTE(review): the share-vs-copy decisions, locking, and the sreg/size
 * setup feeding the ptecpy loop are elided from this view.
 */
146 dupseg(Segment **seg, int segno, int share)
160 switch(s->type&SG_TYPE) {
161 case SG_TEXT: /* New segment shares pte set */
167 n = newseg(s->type, s->base, s->size);
170 case SG_BSS: /* Just copy on write */
173 n = newseg(s->type, s->base, s->size);
176 case SG_DATA: /* Copy on write plus demand load info */
186 n = newseg(s->type, s->base, s->size);
190 n->fstart = s->fstart;
/* copy page tables entry by entry into the new segment */
195 for(i = 0; i < size; i++)
197 n->map[i] = ptecpy(pte);
199 n->flushme = s->flushme;
/*
 * Fragment of segpage(): install page p into segment s at p->va.
 * The va must lie inside [s->base, s->top); the Pte slot is found by
 * PTEMAPMEM-sized chunk, and the Pte's first/last window is widened to
 * cover the new page.
 * NOTE(review): the panic on the range check, Pte allocation when the
 * slot is empty, and the actual store of `p` are elided.
 */
214 segpage(Segment *s, Page *p)
220 if(p->va < s->base || p->va >= s->top)
223 off = p->va - s->base;
224 pte = &s->map[off/PTEMAPMEM];
228 pg = &(*pte)->pages[(off&(PTEMAPMEM-1))/BY2PG];
230 if(pg < (*pte)->first)
232 if(pg > (*pte)->last)
/*
 * Fragment of attachimage(): find or create the Image caching the text
 * served by channel c.  Cache hit requires full identity — qid, mqid,
 * mount channel, and channel type — not just the hashed qid.path.
 * On a miss, spin reclaiming pages (imagereclaim) until a free Image
 * appears, resorting to freebroken()/resrcwait() when reclaim yields
 * nothing, then hash the new Image and give it a fresh text segment.
 * NOTE(review): locking, ref counting, and the incref/goto-found paths
 * are elided from this view.
 */
237 attachimage(int type, Chan *c, ulong base, ulong len)
244 * Search the image cache for remains of the text from a previous
245 * or currently running incarnation
247 for(i = ihash(c->qid.path); i; i = i->hash) {
248 if(c->qid.path == i->qid.path) {
/* hash matched; verify the full identity before reusing */
250 if(eqqid(c->qid, i->qid) &&
251 eqqid(c->mqid, i->mqid) &&
252 c->mchan == i->mchan &&
253 c->type == i->type) {
261 * imagereclaim dumps pages from the free list which are cached by image
262 * structures. This should free some image structures.
264 while(!(i = imagealloc.free)) {
267 if(!imagealloc.free){
268 freebroken(); /* can use the memory */
269 resrcwait("no image after reclaim");
/* unlink the Image we claimed from the free list */
274 imagealloc.free = i->next;
284 l = &ihash(c->qid.path);
297 i->s = newseg(type, base, len);
/*
 * irstats: instrumentation for imagereclaim (call/loop counts and
 * fastticks timing of the main scan).
 */
308 int calls; /* times imagereclaim was called */
309 int loops; /* times the main loop was run */
310 uvlong ticks; /* total time in the main loop */
311 uvlong maxt; /* longest time in main loop */
/*
 * Fragment of imagereclaim(): free image-cached pages from the tail of
 * the page free list.  canqlock makes the whole pass best-effort — if
 * someone else holds ireclaim they are already doing the work.  The
 * scan stops after ~1000 pages unless the Image free list is still
 * empty.  Double-check of p->ref under canlock(p) guards against a
 * racing allocation of the page.
 * NOTE(review): palloc locking and the uncachepage call inside the
 * innermost if are elided from this view.
 */
322 /* Somebody is already cleaning the page cache */
323 if(!canqlock(&imagealloc.ireclaim))
327 ticks = fastticks(nil);
330 * All the pages with images backing them are at the
331 * end of the list (see putpage) so start there and work
334 for(p = palloc.tail; p && p->image && (n<1000 || !imagealloc.free); p = p->prev) {
335 if(p->ref == 0 && canlock(p)) {
336 if(p->ref == 0 && p->image && !p->image->notext) {
343 ticks = fastticks(nil) - ticks;
346 irstats.ticks += ticks;
347 if(ticks > irstats.maxt)
348 irstats.maxt = ticks;
349 //print("T%llud+", ticks);
350 qunlock(&imagealloc.ireclaim);
/*
 * Fragment of image release (presumably putimage — header elided):
 * unhash the Image, invalidate its qid so no future attachimage can
 * match it, return it to imagealloc.free, and close its channel.
 * NOTE(review): the unlink inside the hash-chain walk and the locking
 * are elided from this view.
 */
364 l = &ihash(i->qid.path);
/* poison the qid so the cache entry can never be re-matched */
365 mkqid(&i->qid, ~0, ~0, QTFILE);
370 for(f = *l; f; f = f->hash) {
378 i->next = imagealloc.free;
382 ccloseq(c); /* does not block */
/*
 * Fragment of ibrk(): grow or shrink segment `seg` of the current
 * process so it ends at PGROUND(addr).  Shrinking frees the trimmed
 * pages via mfreeseg (refused for shared segments — see comment at
 * lines 418-420).  Growing checks for collision with every other
 * segment, enforces the SEGMAPSIZE*PTEPERTAB hard limit, and grows the
 * Pte map in place with smalloc+memmove, freeing the old heap map but
 * never the embedded ssegmap.
 * NOTE(review): error raises, locking, and the top/size commit after
 * the map swap are elided from this view.
 */
389 ibrk(ulong addr, int seg)
392 ulong newtop, newsize;
405 /* We may start with the bss overlapping the data */
407 if(seg != BSEG || up->seg[DSEG] == 0 || addr < up->seg[DSEG]->base) {
414 newtop = PGROUND(addr);
415 newsize = (newtop-s->base)/BY2PG;
416 if(newtop < s->top) {
418 * do not shrink a segment shared with other procs, as the
419 * to-be-freed address space may have been passed to the kernel
420 * already by another proc and is past the validaddr stage.
426 mfreeseg(s, newtop, (s->top-newtop)/BY2PG);
/* growing: make sure the new top hits no other segment */
434 for(i = 0; i < NSEG; i++) {
436 if(ns == 0 || ns == s)
438 if(newtop >= ns->base && newtop < ns->top) {
444 if(newsize > (SEGMAPSIZE*PTEPERTAB)) {
448 mapsize = ROUND(newsize, PTEPERTAB)/PTEPERTAB;
449 if(mapsize > s->mapsize){
450 map = smalloc(mapsize*sizeof(Pte*));
451 memmove(map, s->map, s->mapsize*sizeof(Pte*));
452 if(s->map != s->ssegmap)
455 s->mapsize = mapsize;
/*
 * Fragment of mcountseg(): count resident pages in a segment by walking
 * every map slot's PTEPERTAB page pointers.  Caller holds s->lk.
 * NOTE(review): the nil-slot skip and the per-page counting expression
 * are elided from this view.
 */
465 * called with s->lk locked
468 mcountseg(Segment *s)
474 for(i = 0; i < s->mapsize; i++){
477 map = s->map[i]->pages;
478 for(j = 0; j < PTEPERTAB; j++)
/*
 * Fragment of mfreeseg(): free `pages` pages starting at va `start`.
 * Caller holds s->lk.  Pages are not released immediately: entries are
 * zeroed and collected onto a local list, other processes sharing the
 * segment get their TLBs flushed (procflushseg), and only then is
 * putpage called on the list — the rationale is spelled out in the
 * original comment at lines 512-520.
 * NOTE(review): the size computation, the list-link statements, the
 * procflushseg call, and the putpage loop body are elided.
 */
486 * called with s->lk locked
489 mfreeseg(Segment *s, ulong start, int pages)
496 soff = start-s->base;
/* j = index of the first page within its PTEPERTAB chunk */
497 j = (soff&(PTEMAPMEM-1))/BY2PG;
501 for(i = soff/PTEMAPMEM; i < size; i++) {
505 pages -= PTEPERTAB-j;
509 while(j < PTEPERTAB) {
510 pg = s->map[i]->pages[j];
512 * We want to zero s->map[i]->page[j] and putpage(pg),
513 * but we have to make sure other processors flush the
514 * entry from their TLBs before the page is freed.
515 * We construct a list of the pages to be freed, zero
516 * the entries, then (below) call procflushseg, and call
517 * putpage on the whole list.
519 * Swapped-out pages don't appear in TLBs, so it's okay
520 * to putswap those pages before procflushseg.
529 s->map[i]->pages[j] = 0;
538 /* flush this seg in all other processes */
543 for(pg = list; pg != nil; pg = list){
/*
 * Fragment of isoverlap(): scan p's segments for one intersecting
 * [va, va+len); the visible condition tests both ends of the candidate
 * range against each segment.  Presumably returns the overlapping
 * Segment* or nil — the return statements are elided from this view.
 */
550 isoverlap(Proc *p, ulong va, int len)
557 for(i = 0; i < NSEG; i++) {
561 if((newtop > ns->base && newtop <= ns->top) ||
562 (va >= ns->base && va < ns->top))
/*
 * Fragment of addphysseg(): register a new attachable segment class.
 * Under physseglock, reject a duplicate name and reject overflow —
 * nelem-2 keeps room for the new entry plus the terminating null entry.
 * NOTE(review): the lock() call, return values, and the store of *new
 * into the table are elided from this view.
 */
569 addphysseg(Physseg* new)
574 * Check not already entered and there is room
575 * for a new entry and the terminating null entry.
578 for(ps = physseg; ps->name; ps++){
579 if(strcmp(ps->name, new->name) == 0){
580 unlock(&physseglock);
584 if(ps-physseg >= nelem(physseg)-2){
585 unlock(&physseglock);
590 unlock(&physseglock);
/*
 * Fragment of isphysseg(): report whether `name` is a registered
 * physseg entry, scanning the table under physseglock.
 * NOTE(review): the lock() call and both return paths are elided.
 */
596 isphysseg(char *name)
602 for(ps = physseg; ps->name; ps++){
603 if(strcmp(ps->name, name) == 0){
608 unlock(&physseglock);
/*
 * Fragment of segattach(): attach a named segment to process p at va
 * (0 = kernel chooses) with length len.  Order of business visible
 * here: validate va and the user-supplied name, find a free proc
 * segment slot (never ESEG), try the port's _globalsegattach hook,
 * then — for va==0 — search downward from the stack base for a hole
 * via repeated isoverlap, finally match `name` against the physseg
 * table and build the segment with the table's default attributes.
 * NOTE(review): error raises, the va alignment/rounding steps, and the
 * slot assignment after newseg are elided from this view.
 */
613 segattach(Proc *p, ulong attr, char *name, ulong va, ulong len)
619 if(va != 0 && va >= USTKTOP)
622 validaddr((ulong)name, 1, 0);
623 vmemchr(name, 0, ~0);
625 for(sno = 0; sno < NSEG; sno++)
626 if(p->seg[sno] == nil && sno != ESEG)
633 * first look for a global segment with the
636 if(_globalsegattach != nil){
637 s = (*_globalsegattach)(p, name);
649 * Find a hole in the address space.
650 * Starting at the lowest possible stack address - len,
651 * check for an overlapping segment, and repeat at the
652 * base of that segment - len until either a hole is found
653 * or the address space is exhausted. Ensure that we don't
657 for (os = p->seg[SSEG]; os != nil; os = isoverlap(p, va, len)) {
666 if(va == 0 || va >= USTKTOP)
670 if(isoverlap(p, va, len) != nil)
673 for(ps = physseg; ps->name; ps++)
674 if(strcmp(name, ps->name) == 0)
682 attr &= ~SG_TYPE; /* Turn off what is not allowed */
683 attr |= ps->attr; /* Copy in defaults */
685 s = newseg(attr, va, len/BY2PG);
/*
 * Fragment of pteflush(): mark pages [s, e) of a Pte for text-cache
 * flush by setting every cachectl slot to PG_TXTFLUSH.
 * NOTE(review): the page lookup / validity check between the loop
 * header and the memset is elided from this view.
 */
693 pteflush(Pte *pte, int s, int e)
698 for(i = s; i < e; i++) {
701 memset(p->cachectl, PG_TXTFLUSH, sizeof(p->cachectl));
/*
 * Fragment of syssegflush(): system call to flush cached text for an
 * address range.  Works chunk by chunk: look up the containing segment,
 * index its Pte by PTEMAPMEM chunk, round pe up to a page boundary,
 * mark the pages via pteflush, and loop while the remaining length
 * still falls inside the segment.
 * NOTE(review): argument unpacking, locking, ps/pe/chunk computation,
 * and the flushme/procflushseg finish are elided from this view.
 */
706 syssegflush(ulong *arg)
711 int chunk, ps, pe, len;
717 s = seg(up, addr, 1);
728 pte = s->map[ps/PTEMAPMEM];
/* round the end of the chunk up to a full page */
733 pe = (pe+BY2PG-1)&~(BY2PG-1);
741 pteflush(pte, ps/BY2PG, pe/BY2PG);
747 if(len > 0 && addr < s->top)
/*
 * Fragment of the profiling clock tick (header elided — presumably
 * segclock(pc)): when the text segment has a profile buffer, charge
 * one tick's milliseconds to bucket 0 (total) and, if pc lies inside
 * the segment, to the bucket for pc at LRESPROF resolution.
 * NOTE(review): the pc -= s->base adjustment expected before the
 * indexed bump is elided from this view — confirm against the full file.
 */
762 if(s == 0 || s->profile == 0)
765 s->profile[0] += TK2MS(1);
766 if(pc >= s->base && pc < s->top) {
768 s->profile[pc>>LRESPROF] += TK2MS(1);