2 #include "../port/lib.h"
6 #include "../port/error.h"
/* NOTE(review): this file is a non-contiguous excerpt; the number at the
 * start of each line is the original source line number and gaps between
 * numbers mark elided lines.  Code left byte-identical; comments only. */
8 static void imagereclaim(void);
/*
13 * Attachable segment types
 */
/* Table of user-attachable physical segment classes; addphysseg() appends
 * to it at runtime, leaving room for a terminating null entry. */
15 static Physseg physseg[10] = {
16 { SG_SHARED, "shared", 0, SEGMAXSIZE, 0, 0 },
17 { SG_BSS, "memory", 0, SEGMAXSIZE, 0, 0 },
/* serializes updates/scans of physseg[] (see addphysseg, isphysseg) */
21 static Lock physseglock;
/* image-cache hash chain head, keyed by qid path modulo table size */
24 #define ihash(s) imagealloc.hash[s%IHASHSIZE]
25 static struct Imagealloc
29 Image *hash[IHASHSIZE];
30 QLock ireclaim; /* mutex on reclaiming free images */
/* optional port-specific hook consulted by segattach() before normal
 * segment creation — presumably set during machine init; confirm */
33 Segment* (*_globalsegattach)(Proc*, char*);
/* initseg() fragment: allocate the fixed pool of Image structures and
 * chain it onto imagealloc.free.  NOTE(review): function header and the
 * loop body are elided in this view. */
40 imagealloc.free = xalloc(conf.nimage*sizeof(Image));
41 if(imagealloc.free == nil)
42 panic("initseg: no memory for Image");
/* link all but the last entry; presumably the last entry's next pointer
 * is terminated in an elided line — confirm in the full source */
43 ie = &imagealloc.free[conf.nimage-1];
44 for(i = imagealloc.free; i < ie; i++)
/*
 * newseg() fragment: allocate and initialize a Segment of `size' pages
 * based at virtual address `base'.  NOTE(review): declarations, ref-count
 * setup and the error path are elided in this view.
 */
50 newseg(int type, ulong base, ulong size)
/* refuse segments larger than the maximum mappable size */
55 if(size > (SEGMAPSIZE*PTEPERTAB))
58 s = smalloc(sizeof(Segment));
62 s->top = base+(size*BY2PG);
/* initialize the semaphore queue as an empty circular list */
64 s->sema.prev = &s->sema;
65 s->sema.next = &s->sema;
/* one Pte covers PTEPERTAB pages; big segments need a heap-allocated map */
67 mapsize = ROUND(size, PTEPERTAB)/PTEPERTAB;
68 if(mapsize > nelem(s->ssegmap)){
/* NOTE(review): the clamp bound is SEGMAPSIZE*PTEPERTAB map entries, not
 * SEGMAPSIZE — the growth step between these lines is elided; confirm
 * the intended bound against the full source */
70 if(mapsize > (SEGMAPSIZE*PTEPERTAB))
71 mapsize = (SEGMAPSIZE*PTEPERTAB);
72 s->map = smalloc(mapsize*sizeof(Pte*));
/* small segments use the inline ssegmap instead of a heap map */
77 s->mapsize = nelem(s->ssegmap);
/* putseg()/freeseg() fragment (original lines 96-120).
 * NOTE(review): heavily elided; looks like the image-detach test in the
 * segment release path followed by the Pte-freeing loop of the segment
 * destructor — confirm against the full source. */
96 if(i->s == s && s->ref == 1)
/* free every allocated Pte in the map, then release the map itself if it
 * was heap-allocated (i.e. not the inline ssegmap) */
114 emap = &s->map[s->mapsize];
115 for(pp = s->map; pp < emap; pp++)
120 if(s->map != s->ssegmap)
/*
 * relocateseg() fragment: adjust the cached virtual address of every
 * resident page in segment s by `offset'.  NOTE(review): the per-page
 * update statement itself is elided in this view.
 */
128 relocateseg(Segment *s, ulong offset)
131 Pte *pte, **p, **endpte;
133 endpte = &s->map[s->mapsize];
134 for(p = s->map; p < endpte; p++) {
/* walk only the populated [first, last] range of each Pte */
138 for(pg = pte->first; pg <= pte->last; pg++) {
/*
 * dupseg() fragment: duplicate segment *seg for process creation; the
 * strategy depends on the segment type.  NOTE(review): the case bodies
 * and the sharing fast path are largely elided in this view.
 */
146 dupseg(Segment **seg, int segno, int share)
160 switch(s->type&SG_TYPE) {
161 case SG_TEXT: /* New segment shares pte set */
167 n = newseg(s->type, s->base, s->size);
170 case SG_BSS: /* Just copy on write */
173 n = newseg(s->type, s->base, s->size);
176 case SG_DATA: /* Copy on write plus demand load info */
186 n = newseg(s->type, s->base, s->size);
190 n->fstart = s->fstart;
/* share the page tables copy-on-write via ptecpy() */
195 for(i = 0; i < size; i++)
197 n->map[i] = ptecpy(pte);
199 n->flushme = s->flushme;
/*
 * segpage() fragment: install page p into segment s at p->va.
 * NOTE(review): the out-of-range panic and the on-demand Pte allocation
 * are elided in this view.
 */
214 segpage(Segment *s, Page *p)
/* p->va must lie inside [s->base, s->top) */
220 if(p->va < s->base || p->va >= s->top)
223 off = p->va - s->base;
224 pte = &s->map[off/PTEMAPMEM];
228 pg = &(*pte)->pages[(off&(PTEMAPMEM-1))/BY2PG];
/* widen the populated [first, last] window of the Pte as needed */
230 if(pg < (*pte)->first)
232 if(pg > (*pte)->last)
/*
 * attachimage() fragment: find or create the cached Image for channel c
 * and give it a text segment.  NOTE(review): locking, reference counting
 * and several statements are elided in this view.
 */
237 attachimage(int type, Chan *c, ulong base, ulong len)
/*
244 * Search the image cache for remains of the text from a previous
245 * or currently running incarnation
 */
247 for(i = ihash(c->qid.path); i; i = i->hash) {
/* cheap qid-path check first, then the full channel/device comparison */
248 if(c->qid.path == i->qid.path) {
250 if(eqchantdqid(c, i->type, i->dev, i->qid, 0) && c->qid.type == i->qid.type)
/*
257 * imagereclaim dumps pages from the free list which are cached by image
258 * structures. This should free some image structures.
 */
/* no free Image: reclaim the page cache, then broken procs, then wait */
260 while(!(i = imagealloc.free)) {
263 if(!imagealloc.free){
264 freebroken(); /* can use the memory */
265 resrcwait("no image after reclaim");
270 imagealloc.free = i->next;
/* insert the fresh Image on its hash chain and build its text segment */
277 l = &ihash(c->qid.path);
296 i->s = newseg(type, base, len);
/* instrumentation counters for imagereclaim() */
307 int calls; /* times imagereclaim was called */
308 int loops; /* times the main loop was run */
309 uvlong ticks; /* total time in the main loop */
310 uvlong maxt; /* longest time in main loop */
/*
 * imagereclaim() fragment: evict image-backed pages from the tail of the
 * free page list so their Image structures can be recycled.
 * NOTE(review): declarations and the actual page-dropping call are
 * elided in this view.
 */
321 /* Somebody is already cleaning the page cache */
322 if(!canqlock(&imagealloc.ireclaim))
326 ticks = fastticks(nil);
/*
329 * All the pages with images backing them are at the
330 * end of the list (see putpage) so start there and work
 */
/* bound the scan (~1000 pages) unless the free Image list is still empty */
333 for(p = palloc.tail; p && p->image && (n<1000 || !imagealloc.free); p = p->prev) {
/* re-check ref under the page lock; skip pages whose image is not a
 * cacheable text (notext) */
334 if(p->ref == 0 && canlock(p)) {
335 if(p->ref == 0 && p->image && !p->image->notext) {
342 ticks = fastticks(nil) - ticks;
345 irstats.ticks += ticks;
346 if(ticks > irstats.maxt)
347 irstats.maxt = ticks;
348 //print("T%llud+", ticks);
349 qunlock(&imagealloc.ireclaim);
/*
 * putimage() fragment: drop a reference to Image i; when only page-cache
 * references remain, release the channel and recycle the Image.
 * NOTE(review): locking and the fast path are elided in this view.
 */
363 if(--i->ref == i->pgref){
/*
365 * all remaining references to this image are from the
366 * page cache now. close the channel as we can reattach
367 * the chan on attachimage()
 */
/* unhash it and invalidate the qid so future cache lookups miss */
373 l = &ihash(i->qid.path);
374 mkqid(&i->qid, ~0, ~0, QTFILE);
378 for(f = *l; f; f = f->hash) {
/* return the Image to the free list; close the chan outside the locks */
385 i->next = imagealloc.free;
391 ccloseq(c); /* does not block */
/*
 * ibrk() fragment: grow or shrink segment `seg' of the current process
 * so it ends at `addr'.  NOTE(review): declarations, locking, error
 * raising and the final commit of the new top are elided in this view.
 */
395 ibrk(ulong addr, int seg)
398 ulong newtop, newsize;
411 /* We may start with the bss overlapping the data */
413 if(seg != BSEG || up->seg[DSEG] == 0 || addr < up->seg[DSEG]->base) {
420 newtop = PGROUND(addr);
421 newsize = (newtop-s->base)/BY2PG;
/* shrinking */
422 if(newtop < s->top) {
/*
424 * do not shrink a segment shared with other procs, as the
425 * to-be-freed address space may have been passed to the kernel
426 * already by another proc and is past the validaddr stage.
 */
432 mfreeseg(s, newtop, (s->top-newtop)/BY2PG);
/* growing: the new top must not collide with any other segment */
440 for(i = 0; i < NSEG; i++) {
442 if(ns == 0 || ns == s)
444 if(newtop >= ns->base && newtop < ns->top) {
450 if(newsize > (SEGMAPSIZE*PTEPERTAB)) {
/* enlarge the Pte map when the segment outgrows the current one,
 * preserving existing entries and freeing a previous heap map */
454 mapsize = ROUND(newsize, PTEPERTAB)/PTEPERTAB;
455 if(mapsize > s->mapsize){
456 map = smalloc(mapsize*sizeof(Pte*));
457 memmove(map, s->map, s->mapsize*sizeof(Pte*));
458 if(s->map != s->ssegmap)
461 s->mapsize = mapsize;
/*
471 * called with s->lk locked
 */
/* mcountseg() fragment: count the resident pages of segment s by walking
 * every Pte slot.  NOTE(review): the accumulator, the per-page test and
 * the return are elided in this view. */
474 mcountseg(Segment *s)
480 for(i = 0; i < s->mapsize; i++){
483 map = s->map[i]->pages;
484 for(j = 0; j < PTEPERTAB; j++)
/*
492 * called with s->lk locked
 */
/*
 * mfreeseg() fragment: free `pages' pages of s starting at va `start'.
 * Entries are zeroed and collected on a list first so other processors'
 * TLBs can be flushed before the pages are actually released.
 * NOTE(review): list construction, procflushseg and the final putpage
 * loop body are elided in this view.
 */
495 mfreeseg(Segment *s, ulong start, int pages)
502 soff = start-s->base;
/* j: page index within the first Pte covered by start */
503 j = (soff&(PTEMAPMEM-1))/BY2PG;
507 for(i = soff/PTEMAPMEM; i < size; i++) {
511 pages -= PTEPERTAB-j;
515 while(j < PTEPERTAB) {
516 pg = s->map[i]->pages[j];
/*
518 * We want to zero s->map[i]->page[j] and putpage(pg),
519 * but we have to make sure other processors flush the
520 * entry from their TLBs before the page is freed.
521 * We construct a list of the pages to be freed, zero
522 * the entries, then (below) call procflushseg, and call
523 * putpage on the whole list.
525 * Swapped-out pages don't appear in TLBs, so it's okay
526 * to putswap those pages before procflushseg.
 */
535 s->map[i]->pages[j] = 0;
544 /* flush this seg in all other processes */
549 for(pg = list; pg != nil; pg = list){
/*
 * isoverlap() fragment: return the first segment of process p that
 * overlaps [va, va+len), or nil.  NOTE(review): declarations, the
 * newtop computation and the returns are elided in this view.
 */
556 isoverlap(Proc *p, ulong va, int len)
563 for(i = 0; i < NSEG; i++) {
/* overlap if the range's end falls inside ns, or its start does */
567 if((newtop > ns->base && newtop <= ns->top) ||
568 (va >= ns->base && va < ns->top))
/*
 * addphysseg() fragment: append `new' to the physseg[] table under
 * physseglock.  NOTE(review): the lock acquisition, the error returns
 * and the actual copy into the table are elided in this view.
 */
575 addphysseg(Physseg* new)
/*
580 * Check not already entered and there is room
581 * for a new entry and the terminating null entry.
 */
584 for(ps = physseg; ps->name; ps++){
/* duplicate name: reject */
585 if(strcmp(ps->name, new->name) == 0){
586 unlock(&physseglock);
/* need space for the new entry plus the null terminator */
590 if(ps-physseg >= nelem(physseg)-2){
591 unlock(&physseglock);
596 unlock(&physseglock);
/*
 * isphysseg() fragment: report whether `name' matches an entry of the
 * physseg[] table.  NOTE(review): the lock acquisition and the return
 * statements are elided in this view.
 */
602 isphysseg(char *name)
608 for(ps = physseg; ps->name; ps++){
609 if(strcmp(ps->name, name) == 0){
614 unlock(&physseglock);
/*
 * segattach() fragment: attach a named physical segment to process p at
 * va (finding a hole below the stack when va == 0).  NOTE(review): many
 * statements, including error raises and the final attach into
 * p->seg[sno], are elided in this view.
 */
619 segattach(Proc *p, ulong attr, char *name, ulong va, ulong len)
625 if(va != 0 && va >= USTKTOP)
/* name comes from user space: validate the pointer and bound the scan */
628 validaddr((ulong)name, 1, 0);
629 vmemchr(name, 0, ~0);
/* find a free segment slot; ESEG is reserved */
631 for(sno = 0; sno < NSEG; sno++)
632 if(p->seg[sno] == nil && sno != ESEG)
/*
639 * first look for a global segment with the
 */
642 if(_globalsegattach != nil){
643 s = (*_globalsegattach)(p, name);
/*
655 * Find a hole in the address space.
656 * Starting at the lowest possible stack address - len,
657 * check for an overlapping segment, and repeat at the
658 * base of that segment - len until either a hole is found
659 * or the address space is exhausted. Ensure that we don't
 */
663 for (os = p->seg[SSEG]; os != nil; os = isoverlap(p, va, len)) {
672 if(va == 0 || va >= USTKTOP)
676 if(isoverlap(p, va, len) != nil)
/* look the requested name up in the physical segment table */
679 for(ps = physseg; ps->name; ps++)
680 if(strcmp(name, ps->name) == 0)
688 attr &= ~SG_TYPE; /* Turn off what is not allowed */
689 attr |= ps->attr; /* Copy in defaults */
691 s = newseg(attr, va, len/BY2PG);
/*
 * pteflush() fragment: mark pages [s, e) of a Pte so their caches get
 * flushed (PG_TXTFLUSH on every cachectl slot).  NOTE(review): the
 * per-page existence test is elided in this view.
 */
699 pteflush(Pte *pte, int s, int e)
704 for(i = s; i < e; i++) {
707 memset(p->cachectl, PG_TXTFLUSH, sizeof(p->cachectl));
/*
 * syssegflush() fragment: flush caches for a user address range, chunk
 * by chunk, crossing into following segments as needed.  NOTE(review):
 * argument decoding, locking and the chunk arithmetic are partly elided
 * in this view.
 */
712 syssegflush(ulong *arg)
717 int chunk, ps, pe, len;
723 s = seg(up, addr, 1);
734 pte = s->map[ps/PTEMAPMEM];
/* round the end of the chunk up to a page boundary */
739 pe = (pe+BY2PG-1)&~(BY2PG-1);
747 pteflush(pte, ps/BY2PG, pe/BY2PG);
/* more bytes left but past this segment: continue with the next one */
753 if(len > 0 && addr < s->top)
/*
 * segclock() fragment: clock-tick profiling hook — charges a tick to the
 * segment's total bucket and, when pc lies in [s->base, s->top), to the
 * bucket for pc.  NOTE(review): the function header is elided and the
 * body runs past the end of this view.
 */
768 if(s == 0 || s->profile == 0)
771 s->profile[0] += TK2MS(1);
772 if(pc >= s->base && pc < s->top) {
774 s->profile[pc>>LRESPROF] += TK2MS(1);