2 #include "../port/lib.h"
6 #include "../port/error.h"
/* Forward declarations for the image-cache reclamation helpers defined below. */
8 static void imagereclaim(void);
9 static void imagechanreclaim(void);
14 * Attachable segment types
16 static Physseg physseg[10] = {
17 { SG_SHARED, "shared", 0, SEGMAXSIZE, 0, 0 },
18 { SG_BSS, "memory", 0, SEGMAXSIZE, 0, 0 },
/* NOTE(review): further initializers and the terminating null entry are
 * elided in this excerpt; addphysseg below relies on a null terminator. */
22 static Lock physseglock; /* guards scans/updates of physseg (see addphysseg/isphysseg) */
/*
 * Allocator and hash cache for Image structures.
 * NOTE(review): the free-list pointer and lock fields declared between
 * these lines are elided in this excerpt.
 */
26 #define ihash(s) imagealloc.hash[s%IHASHSIZE] /* bucket keyed on qid.path (see attachimage) */
27 static struct Imagealloc
31 Image *hash[IHASHSIZE];
32 QLock ireclaim; /* mutex on reclaiming free images */
34 Chan **freechan; /* free image channels */
35 int nfreechan; /* number of free channels */
36 int szfreechan; /* size of freechan array */
37 QLock fcreclaim; /* mutex on reclaiming free channels */
/* Hook for attaching global segments by name; called from segattach when
 * non-nil.  NOTE(review): presumably set by a device driver elsewhere —
 * confirm the setter in the full source. */
40 Segment* (*_globalsegattach)(Proc*, char*);
/* (initseg, excerpted) allocate the Image arena and the deferred
 * free-channel array at boot. */
47 imagealloc.free = xalloc(conf.nimage*sizeof(Image));
48 if (imagealloc.free == nil)
49 panic("initseg: no memory");
50 ie = &imagealloc.free[conf.nimage-1];
/* chain all but the last Image onto the free list (loop body elided) */
51 for(i = imagealloc.free; i < ie; i++)
54 imagealloc.freechan = malloc(NFREECHAN * sizeof(Chan*));
55 imagealloc.szfreechan = NFREECHAN;
59 newseg(int type, ulong base, ulong size)
/* Create a Segment of `size` pages starting at va `base` (excerpted). */
64 if(size > (SEGMAPSIZE*PTEPERTAB))
67 s = smalloc(sizeof(Segment));
71 s->top = base+(size*BY2PG); /* size is in pages, top is a va */
/* empty circular semaphore list */
73 s->sema.prev = &s->sema;
74 s->sema.next = &s->sema;
/* one Pte* map entry covers PTEPERTAB pages; round up */
76 mapsize = ROUND(size, PTEPERTAB)/PTEPERTAB;
77 if(mapsize > nelem(s->ssegmap)){
/* NOTE(review): this clamp compares mapsize (Pte* entries) against
 * SEGMAPSIZE*PTEPERTAB (a page count) — looks inconsistent with the
 * size check above; verify against the full source before changing. */
79 if(mapsize > (SEGMAPSIZE*PTEPERTAB))
80 mapsize = (SEGMAPSIZE*PTEPERTAB);
81 s->map = smalloc(mapsize*sizeof(Pte*));
/* small segments use the embedded ssegmap instead of an allocation */
86 s->mapsize = nelem(s->ssegmap);
/* (excerpted — presumably putseg; enclosing signature elided, TODO confirm) */
105 if(i->s == s && s->ref == 1)
/* release every Pte in the map, then the map itself if heap-allocated */
123 emap = &s->map[s->mapsize];
124 for(pp = s->map; pp < emap; pp++)
129 if(s->map != s->ssegmap)
137 relocateseg(Segment *s, ulong offset)
/* Walk every resident page of s, adjusting by `offset` (body elided). */
140 Pte *pte, **p, **endpte;
142 endpte = &s->map[s->mapsize];
143 for(p = s->map; p < endpte; p++) {
/* iterate only over the pte's populated first..last range */
147 for(pg = pte->first; pg <= pte->last; pg++) {
155 dupseg(Segment **seg, int segno, int share)
/* Duplicate segment `segno` for a child process; the copying strategy
 * depends on the segment type (excerpted). */
169 switch(s->type&SG_TYPE) {
170 case SG_TEXT: /* New segment shares pte set */
176 n = newseg(s->type, s->base, s->size);
179 case SG_BSS: /* Just copy on write */
182 n = newseg(s->type, s->base, s->size);
185 case SG_DATA: /* Copy on write plus demand load info */
194 n = newseg(s->type, s->base, s->size);
198 n->fstart = s->fstart;
/* copy the page-table map entry by entry; ptecpy presumably sets up
 * copy-on-write sharing — confirm against ptecpy's definition */
203 for(i = 0; i < size; i++)
205 n->map[i] = ptecpy(pte);
207 n->flushme = s->flushme;
222 segpage(Segment *s, Page *p)
/* Install page p into segment s at p->va (error paths elided). */
228 if(p->va < s->base || p->va >= s->top)
231 off = p->va - s->base;
232 pte = &s->map[off/PTEMAPMEM]; /* PTEMAPMEM bytes per map entry */
236 pg = &(*pte)->pages[(off&(PTEMAPMEM-1))/BY2PG];
/* widen the pte's first/last resident range to include this slot */
238 if(pg < (*pte)->first)
240 if(pg > (*pte)->last)
245 attachimage(int type, Chan *c, ulong base, ulong len)
/* Find or create the cached Image backing text channel c (excerpted). */
249 /* reclaim any free channels from reclaimed segments */
250 if(imagealloc.nfreechan)
256 * Search the image cache for remains of the text from a previous
257 * or currently running incarnation
259 for(i = ihash(c->qid.path); i; i = i->hash) {
260 if(c->qid.path == i->qid.path) {
/* full identity check: qid, mount qid, mount channel and device type */
262 if(eqqid(c->qid, i->qid) &&
263 eqqid(c->mqid, i->mqid) &&
264 c->mchan == i->mchan &&
265 c->type == i->type) {
273 * imagereclaim dumps pages from the free list which are cached by image
274 * structures. This should free some image structures.
276 while(!(i = imagealloc.free)) {
283 imagealloc.free = i->next;
/* link the new image at the head of its hash chain (lines elided) */
292 l = &ihash(c->qid.path);
299 /* Disaster after commit in exec */
304 i->s = newseg(type, base, len);
/* Instrumentation counters for imagereclaim. */
316 int calls; /* times imagereclaim was called */
317 int loops; /* times the main loop was run */
318 uvlong ticks; /* total time in the main loop */
319 uvlong maxt; /* longest time in main loop */
/* (imagereclaim, excerpted) free image-backed pages from the tail of the
 * page free list so their Image structures can be released. */
330 /* Somebody is already cleaning the page cache */
331 if(!canqlock(&imagealloc.ireclaim))
335 ticks = fastticks(nil);
338 * All the pages with images backing them are at the
339 * end of the list (see putpage) so start there and work
/* bounded scan: stop after 1000 pages or at the first non-image page */
342 for(p = palloc.tail; p && p->image && n<1000; p = p->prev) {
343 if(p->ref == 0 && canlock(p)) {
351 ticks = fastticks(nil) - ticks;
354 irstats.ticks += ticks;
355 if(ticks > irstats.maxt)
356 irstats.maxt = ticks;
357 //print("T%llud+", ticks);
358 qunlock(&imagealloc.ireclaim);
362 * since close can block, this has to be called outside of
/* Close the Chans queued on imagealloc.freechan by putimage; deferred
 * here because closing can block (see comment above). */
366 imagechanreclaim(void)
370 /* Somebody is already cleaning the image chans */
371 if(!canqlock(&imagealloc.fcreclaim))
375 * We don't have to recheck that nfreechan > 0 after we
376 * acquire the lock, because we're the only ones who decrement
377 * it (the other lock contender increments it), and there's only
378 * one of us thanks to the qlock above.
380 while(imagealloc.nfreechan > 0){
382 imagealloc.nfreechan--;
383 c = imagealloc.freechan[imagealloc.nfreechan];
388 qunlock(&imagealloc.fcreclaim);
/* (putimage, excerpted) release an Image: unhash it, neutralise its qid,
 * return it to the free list, and queue its channel for deferred close. */
402 l = &ihash(i->qid.path);
403 mkqid(&i->qid, ~0, ~0, QTFILE); /* make it unfindable in the cache */
408 for(f = *l; f; f = f->hash) {
416 i->next = imagealloc.free;
419 /* defer freeing channel till we're out of spin lock's */
420 if(imagealloc.nfreechan == imagealloc.szfreechan){
/* grow freechan by NFREECHAN entries; NOTE(review): the malloc-failure
 * path is elided in this excerpt — verify in the full source */
421 imagealloc.szfreechan += NFREECHAN;
422 cp = malloc(imagealloc.szfreechan*sizeof(Chan*));
425 memmove(cp, imagealloc.freechan, imagealloc.nfreechan*sizeof(Chan*));
426 free(imagealloc.freechan);
427 imagealloc.freechan = cp;
429 imagealloc.freechan[imagealloc.nfreechan++] = c;
438 ibrk(ulong addr, int seg)
/* Grow or shrink segment `seg` of the current process so its top becomes
 * PGROUND(addr) (excerpted). */
441 ulong newtop, newsize;
454 /* We may start with the bss overlapping the data */
456 if(seg != BSEG || up->seg[DSEG] == 0 || addr < up->seg[DSEG]->base) {
463 newtop = PGROUND(addr);
464 newsize = (newtop-s->base)/BY2PG;
465 if(newtop < s->top) {
467 * do not shrink a segment shared with other procs, as the
468 * to-be-freed address space may have been passed to the kernel
469 * already by another proc and is past the validaddr stage.
475 mfreeseg(s, newtop, (s->top-newtop)/BY2PG);
/* growing: refuse if the new top would run into another segment */
483 for(i = 0; i < NSEG; i++) {
485 if(ns == 0 || ns == s)
487 if(newtop >= ns->base && newtop < ns->top) {
493 if(newsize > (SEGMAPSIZE*PTEPERTAB)) {
/* enlarge the Pte* map when the grown segment needs more entries */
497 mapsize = ROUND(newsize, PTEPERTAB)/PTEPERTAB;
498 if(mapsize > s->mapsize){
499 map = smalloc(mapsize*sizeof(Pte*));
500 memmove(map, s->map, s->mapsize*sizeof(Pte*));
501 if(s->map != s->ssegmap)
504 s->mapsize = mapsize;
514 * called with s->lk locked
/* Count resident pages in segment s (body partially elided). */
517 mcountseg(Segment *s)
523 for(i = 0; i < s->mapsize; i++){
526 map = s->map[i]->pages;
527 for(j = 0; j < PTEPERTAB; j++)
535 * called with s->lk locked
/* Free `pages` pages of segment s starting at va `start` (excerpted). */
538 mfreeseg(Segment *s, ulong start, int pages)
545 soff = start-s->base;
546 j = (soff&(PTEMAPMEM-1))/BY2PG; /* first page slot within its Pte */
550 for(i = soff/PTEMAPMEM; i < size; i++) {
554 pages -= PTEPERTAB-j;
558 while(j < PTEPERTAB) {
559 pg = s->map[i]->pages[j];
561 * We want to zero s->map[i]->page[j] and putpage(pg),
562 * but we have to make sure other processors flush the
563 * entry from their TLBs before the page is freed.
564 * We construct a list of the pages to be freed, zero
565 * the entries, then (below) call procflushseg, and call
566 * putpage on the whole list.
568 * Swapped-out pages don't appear in TLBs, so it's okay
569 * to putswap those pages before procflushseg.
578 s->map[i]->pages[j] = 0;
587 /* flush this seg in all other processes */
592 for(pg = list; pg != nil; pg = list){
599 isoverlap(Proc *p, ulong va, int len)
/* Return a segment of p overlapping [va, va+len), else nil (excerpted). */
606 for(i = 0; i < NSEG; i++) {
610 if((newtop > ns->base && newtop <= ns->top) ||
611 (va >= ns->base && va < ns->top))
618 addphysseg(Physseg* new)
/* Register `new` in the physseg table unless the name already exists or
 * the table is full (excerpted; lock acquisition elided). */
623 * Check not already entered and there is room
624 * for a new entry and the terminating null entry.
627 for(ps = physseg; ps->name; ps++){
628 if(strcmp(ps->name, new->name) == 0){
629 unlock(&physseglock);
633 if(ps-physseg >= nelem(physseg)-2){
634 unlock(&physseglock);
639 unlock(&physseglock);
645 isphysseg(char *name)
/* Report whether `name` is a registered physical segment (excerpted;
 * lock acquisition elided). */
651 for(ps = physseg; ps->name; ps++){
652 if(strcmp(ps->name, name) == 0){
657 unlock(&physseglock);
662 segattach(Proc *p, ulong attr, char *name, ulong va, ulong len)
/* Attach a named global or physical segment to process p at va
 * (excerpted). */
668 if(va != 0 && va >= USTKTOP)
671 validaddr((ulong)name, 1, 0);
672 vmemchr(name, 0, ~0); /* name must be NUL-terminated in user space */
/* find a free segment slot, reserving ESEG */
674 for(sno = 0; sno < NSEG; sno++)
675 if(p->seg[sno] == nil && sno != ESEG)
682 * first look for a global segment with the
685 if(_globalsegattach != nil){
686 s = (*_globalsegattach)(p, name);
698 * Find a hole in the address space.
699 * Starting at the lowest possible stack address - len,
700 * check for an overlapping segment, and repeat at the
701 * base of that segment - len until either a hole is found
702 * or the address space is exhausted. Ensure that we don't
706 for (os = p->seg[SSEG]; os != nil; os = isoverlap(p, va, len)) {
715 if(va == 0 || va >= USTKTOP)
719 if(isoverlap(p, va, len) != nil)
/* look the requested name up in the physseg table */
722 for(ps = physseg; ps->name; ps++)
723 if(strcmp(name, ps->name) == 0)
731 attr &= ~SG_TYPE; /* Turn off what is not allowed */
732 attr |= ps->attr; /* Copy in defaults */
734 s = newseg(attr, va, len/BY2PG);
742 pteflush(Pte *pte, int s, int e)
/* Mark page slots [s, e) of pte for cache/TLB flush (excerpted). */
747 for(i = s; i < e; i++) {
750 memset(p->cachectl, PG_TXTFLUSH, sizeof(p->cachectl));
755 syssegflush(ulong *arg)
/* System call: flush cached text over a user address range, walking it
 * one segment/pte chunk at a time (excerpted). */
760 int chunk, ps, pe, len;
766 s = seg(up, addr, 1);
777 pte = s->map[ps/PTEMAPMEM];
782 pe = (pe+BY2PG-1)&~(BY2PG-1); /* round end up to a page boundary */
790 pteflush(pte, ps/BY2PG, pe/BY2PG);
/* continue with the remainder of the range, possibly in the next segment */
796 if(len > 0 && addr < s->top)
811 if(s == 0 || s->profile == 0)
814 s->profile[0] += TK2MS(1);
815 if(pc >= s->base && pc < s->top) {
817 s->profile[pc>>LRESPROF] += TK2MS(1);