2 #include "../port/lib.h"
6 #include "../port/error.h"
/* Forward declaration: page-cache reclaimer invoked when the Image pool is empty. */
8 static void imagereclaim(void);
13 * Attachable segment types
15 static Physseg physseg[10] = {
16 { SG_SHARED, "shared", 0, SEGMAXSIZE, 0, 0 },
17 { SG_BSS, "memory", 0, SEGMAXSIZE, 0, 0 },
/* Serialises scans/updates of the physseg[] table (see addphysseg, isphysseg). */
21 static Lock physseglock;
/* Map an image qid.path to its hash bucket in the image cache. */
24 #define ihash(s) imagealloc.hash[s%IHASHSIZE]
/* Pool and cache of Image structures, keyed by channel qid (some fields elided here). */
25 static struct Imagealloc
29 Image *hash[IHASHSIZE];
30 QLock ireclaim; /* mutex on reclaiming free images */
/* Optional per-port hook consulted by segattach to supply named global segments. */
33 Segment* (*_globalsegattach)(Proc*, char*);
/*
 * Fragment of image-pool setup (the enclosing function header is not
 * visible here): allocate conf.nimage Image structs in one xalloc block
 * and chain them onto the free list.
 */
40 imagealloc.free = xalloc(conf.nimage*sizeof(Image));
41 if(imagealloc.free == nil)
42 panic("initseg: no memory for Image");
/* Link entries [0 .. nimage-2]; the last element's list terminator is
 * presumably set on an elided line — verify against the full source. */
43 ie = &imagealloc.free[conf.nimage-1];
44 for(i = imagealloc.free; i < ie; i++)
/*
 * newseg: allocate a Segment of `size` pages of the given type based at
 * `base` (body partially elided in this view).  Small segments use the
 * Pte* map embedded in the Segment (ssegmap); larger ones get a
 * heap-allocated map.
 */
50 newseg(int type, ulong base, ulong size)
/* reject segments larger than the maximum mappable size */
55 if(size > (SEGMAPSIZE*PTEPERTAB))
58 s = smalloc(sizeof(Segment));
/* top is the first address past the segment: base + size pages */
62 s->top = base+(size*BY2PG);
/* empty self-linked semaphore list */
64 s->sema.prev = &s->sema;
65 s->sema.next = &s->sema;
/* number of Pte tables needed to cover `size` pages */
67 mapsize = ROUND(size, PTEPERTAB)/PTEPERTAB;
68 if(mapsize > nelem(s->ssegmap)){
/* NOTE(review): mapsize counts Pte tables, yet the cap below is in
 * pages (SEGMAPSIZE*PTEPERTAB); the size check above already bounds
 * the segment, so this is preserved as-is — confirm units against the
 * full source. */
70 if(mapsize > (SEGMAPSIZE*PTEPERTAB))
71 mapsize = (SEGMAPSIZE*PTEPERTAB);
72 s->map = smalloc(mapsize*sizeof(Pte*));
/* small segment: use the map embedded in the Segment itself */
77 s->mapsize = nelem(s->ssegmap);
/*
 * Fragment of segment teardown (enclosing function header elided):
 * checks whether the backing image points at this segment and this is
 * the last reference (the action taken is on elided lines), then walks
 * and releases the Pte map.
 */
96 if(i->s == s && s->ref == 1)
/* walk the whole Pte map; per-entry freeing is on elided lines */
114 emap = &s->map[s->mapsize];
115 for(pp = s->map; pp < emap; pp++)
/* only a heap-allocated map is freed; the embedded ssegmap is not */
120 if(s->map != s->ssegmap)
/*
 * relocateseg: walk every Pte in the segment's map and every cached
 * page between pte->first and pte->last, applying `offset`.  The
 * per-page update itself is on elided lines — presumably rebasing each
 * page's virtual address; confirm against the full source.
 */
128 relocateseg(Segment *s, ulong offset)
131 Pte *pte, **p, **endpte;
133 endpte = &s->map[s->mapsize];
134 for(p = s->map; p < endpte; p++) {
/* inner walk over the valid [first, last] window of this Pte */
138 for(pg = pte->first; pg <= pte->last; pg++) {
/*
 * dupseg: duplicate segment `segno` of `*seg` (body partially elided).
 * Behaviour depends on the segment type, per the case comments below:
 * text shares the pte set, bss/data are copy-on-write, and data also
 * carries demand-load information.
 */
146 dupseg(Segment **seg, int segno, int share)
160 switch(s->type&SG_TYPE) {
161 case SG_TEXT: /* New segment shares pte set */
167 n = newseg(s->type, s->base, s->size);
170 case SG_BSS: /* Just copy on write */
173 n = newseg(s->type, s->base, s->size);
176 case SG_DATA: /* Copy on write plus demand load info */
186 n = newseg(s->type, s->base, s->size);
/* carry over the demand-load file start (other fields may be copied on
 * elided lines) */
190 n->fstart = s->fstart;
/* copy each Pte table into the new segment */
195 for(i = 0; i < size; i++)
197 n->map[i] = ptecpy(pte);
199 n->flushme = s->flushme;
/*
 * segpage: enter page `p` into segment `s` at p->va (the actual store
 * through *pg is on elided lines).
 */
214 segpage(Segment *s, Page *p)
/* the page's va must lie inside [base, top); out-of-range handling is elided */
220 if(p->va < s->base || p->va >= s->top)
223 off = p->va - s->base;
/* locate the Pte covering this va (allocation of a missing Pte, if any,
 * is on elided lines) */
224 pte = &s->map[off/PTEMAPMEM];
228 pg = &(*pte)->pages[(off&(PTEMAPMEM-1))/BY2PG];
/* widen the [first, last] window of valid entries in this Pte */
230 if(pg < (*pte)->first)
232 if(pg > (*pte)->last)
/*
 * attachimage: find or create the Image cache entry for channel `c` and
 * give a fresh one a backing segment of `len` pages at `base` (much of
 * the body, including locking and the cache-hit return path, is elided).
 */
237 attachimage(int type, Chan *c, ulong base, ulong len)
244 * Search the image cache for remains of the text from a previous
245 * or currently running incarnation
247 for(i = ihash(c->qid.path); i; i = i->hash) {
248 if(c->qid.path == i->qid.path) {
/* full identity check: qid, mount qid, mount channel and device type */
250 if(eqqid(c->qid, i->qid) &&
251 eqqid(c->mqid, i->mqid) &&
252 c->mchan == i->mchan &&
253 c->type == i->type) {
261 * imagereclaim dumps pages from the free list which are cached by image
262 * structures. This should free some image structures.
/* no free Image: reclaim cached pages and retry until one appears */
264 while(!(i = imagealloc.free)) {
/* pop the new Image off the free list */
271 imagealloc.free = i->next;
/* insert into its hash bucket (link manipulation is on elided lines) */
281 l = &ihash(c->qid.path);
/* fresh image: create the backing segment */
294 i->s = newseg(type, base, len);
/* Instrumentation counters for imagereclaim (struct header/name elided). */
305 int calls; /* times imagereclaim was called */
306 int loops; /* times the main loop was run */
307 uvlong ticks; /* total time in the main loop */
308 uvlong maxt; /* longest time in main loop */
/*
 * Body fragment of imagereclaim: free image-backed pages from the tail
 * of the page free list so Image structures can be recycled.  Bails out
 * immediately if another process already holds the reclaim lock.
 */
319 /* Somebody is already cleaning the page cache */
320 if(!canqlock(&imagealloc.ireclaim))
/* time the scan with the fast tick counter */
324 ticks = fastticks(nil);
327 * All the pages with images backing them are at the
328 * end of the list (see putpage) so start there and work
/* scan backwards from the tail, at most 1000 pages per call */
331 for(p = palloc.tail; p && p->image && n<1000; p = p->prev) {
/* only take unreferenced pages we can lock without blocking */
332 if(p->ref == 0 && canlock(p)) {
/* record timing statistics for this pass */
340 ticks = fastticks(nil) - ticks;
343 irstats.ticks += ticks;
344 if(ticks > irstats.maxt)
345 irstats.maxt = ticks;
346 //print("T%llud+", ticks);
347 qunlock(&imagealloc.ireclaim);
/*
 * Fragment of image release (enclosing function header elided): unhash
 * the Image, reset its qid, return it to the free list and close its
 * channel.
 */
361 l = &ihash(i->qid.path);
/* reset the qid to an impossible value (~0) so the cache entry can no
 * longer match a real file */
362 mkqid(&i->qid, ~0, ~0, QTFILE);
/* walk the hash chain to find this image (the unlink itself is elided) */
367 for(f = *l; f; f = f->hash) {
/* push back onto the free list */
375 i->next = imagealloc.free;
379 ccloseq(c); /* does not block */
/*
 * ibrk: move the top of segment `seg` of the current process to `addr`
 * (much of the body, including locking and error paths, is elided).
 * Shrinks via mfreeseg; grows by checking for collisions with other
 * segments and extending the Pte map when necessary.
 */
386 ibrk(ulong addr, int seg)
389 ulong newtop, newsize;
402 /* We may start with the bss overlapping the data */
404 if(seg != BSEG || up->seg[DSEG] == 0 || addr < up->seg[DSEG]->base) {
/* round the requested top up to a page boundary */
411 newtop = PGROUND(addr);
412 newsize = (newtop-s->base)/BY2PG;
/* shrinking */
413 if(newtop < s->top) {
415 * do not shrink a segment shared with other procs, as the
416 * to-be-freed address space may have been passed to the kernel
417 * already by another proc and is past the validaddr stage.
423 mfreeseg(s, newtop, (s->top-newtop)/BY2PG);
/* growing: the new top must not run into any other segment */
431 for(i = 0; i < NSEG; i++) {
433 if(ns == 0 || ns == s)
435 if(newtop >= ns->base && newtop < ns->top) {
/* refuse growth past the maximum mappable segment size */
441 if(newsize > (SEGMAPSIZE*PTEPERTAB)) {
/* extend the Pte map if the new size no longer fits the current one */
445 mapsize = ROUND(newsize, PTEPERTAB)/PTEPERTAB;
446 if(mapsize > s->mapsize){
447 map = smalloc(mapsize*sizeof(Pte*));
448 memmove(map, s->map, s->mapsize*sizeof(Pte*));
/* free the old map unless it was the embedded ssegmap */
449 if(s->map != s->ssegmap)
452 s->mapsize = mapsize;
/*
 * mcountseg: walk every pages[] slot of every Pte in the segment.  The
 * per-slot accumulation and return are on elided lines — presumably
 * counting resident pages; confirm against the full source.
 */
462 * called with s->lk locked
465 mcountseg(Segment *s)
471 for(i = 0; i < s->mapsize; i++){
474 map = s->map[i]->pages;
475 for(j = 0; j < PTEPERTAB; j++)
/*
 * mfreeseg: release `pages` pages of `s` starting at virtual address
 * `start` (partially elided).  Map entries are zeroed first, the
 * segment is flushed from other processes' TLBs, and only then are the
 * collected pages freed — see the embedded comment below for why.
 */
483 * called with s->lk locked
486 mfreeseg(Segment *s, ulong start, int pages)
/* offset of `start` within the segment, and the first page slot inside
 * its Pte table */
493 soff = start-s->base;
494 j = (soff&(PTEMAPMEM-1))/BY2PG;
/* one iteration per Pte table covering the range */
498 for(i = soff/PTEMAPMEM; i < size; i++) {
502 pages -= PTEPERTAB-j;
506 while(j < PTEPERTAB) {
507 pg = s->map[i]->pages[j];
509 * We want to zero s->map[i]->page[j] and putpage(pg),
510 * but we have to make sure other processors flush the
511 * entry from their TLBs before the page is freed.
512 * We construct a list of the pages to be freed, zero
513 * the entries, then (below) call procflushseg, and call
514 * putpage on the whole list.
516 * Swapped-out pages don't appear in TLBs, so it's okay
517 * to putswap those pages before procflushseg.
526 s->map[i]->pages[j] = 0;
535 /* flush this seg in all other processes */
/* finally release the collected pages (loop body elided) */
540 for(pg = list; pg != nil; pg = list){
/*
 * isoverlap: return the first of p's segments intersecting
 * [va, va+len), or nil if none (the newtop computation and the returns
 * are on elided lines; callers compare the result against nil).
 */
547 isoverlap(Proc *p, ulong va, int len)
554 for(i = 0; i < NSEG; i++) {
/* a segment overlaps if either endpoint of the range falls inside it */
558 if((newtop > ns->base && newtop <= ns->top) ||
559 (va >= ns->base && va < ns->top))
/*
 * addphysseg: register a new attachable physical segment type in
 * physseg[] under physseglock.  Fails (error paths elided) when the
 * name is already present or the table is full.
 */
566 addphysseg(Physseg* new)
571 * Check not already entered and there is room
572 * for a new entry and the terminating null entry.
575 for(ps = physseg; ps->name; ps++){
/* duplicate name: reject */
576 if(strcmp(ps->name, new->name) == 0){
577 unlock(&physseglock);
/* need room for the new entry plus the terminating null entry */
581 if(ps-physseg >= nelem(physseg)-2){
582 unlock(&physseglock);
587 unlock(&physseglock);
/*
 * isphysseg: report whether `name` names a registered physical segment
 * (the lock acquisition and return values are on elided lines).
 */
593 isphysseg(char *name)
599 for(ps = physseg; ps->name; ps++){
600 if(strcmp(ps->name, name) == 0){
605 unlock(&physseglock);
/*
 * segattach: attach a named segment of `len` bytes to process `p` at
 * `va`.  When va is 0 the hole-search below appears to choose an
 * address.  Large parts of the body, including error paths and
 * alignment handling, are elided.
 */
610 segattach(Proc *p, ulong attr, char *name, ulong va, ulong len)
/* a user-chosen address must lie below the top of the user stack */
616 if(va != 0 && va >= USTKTOP)
/* `name` comes from user space: validate the pointer and bound the string */
619 validaddr((ulong)name, 1, 0);
620 vmemchr(name, 0, ~0);
/* find a free segment slot; ESEG is reserved */
622 for(sno = 0; sno < NSEG; sno++)
623 if(p->seg[sno] == nil && sno != ESEG)
630 * first look for a global segment with the
633 if(_globalsegattach != nil){
634 s = (*_globalsegattach)(p, name);
646 * Find a hole in the address space.
647 * Starting at the lowest possible stack address - len,
648 * check for an overlapping segment, and repeat at the
649 * base of that segment - len until either a hole is found
650 * or the address space is exhausted. Ensure that we don't
654 for (os = p->seg[SSEG]; os != nil; os = isoverlap(p, va, len)) {
/* no hole found, or the search ran off the top of user space */
663 if(va == 0 || va >= USTKTOP)
/* the chosen range must still be free */
667 if(isoverlap(p, va, len) != nil)
/* look the name up in the physical segment table */
670 for(ps = physseg; ps->name; ps++)
671 if(strcmp(name, ps->name) == 0)
679 attr &= ~SG_TYPE; /* Turn off what is not allowed */
680 attr |= ps->attr; /* Copy in defaults */
682 s = newseg(attr, va, len/BY2PG);
/*
 * pteflush: mark page slots [s, e) of `pte` for cache flushing by
 * setting every per-processor cachectl entry to PG_TXTFLUSH (the page
 * lookup and any validity guards are on elided lines).
 */
690 pteflush(Pte *pte, int s, int e)
695 for(i = s; i < e; i++) {
698 memset(p->cachectl, PG_TXTFLUSH, sizeof(p->cachectl));
/*
 * syssegflush: system call — flush the cache for a user address range,
 * walking it chunk by chunk through the owning segment's Pte map
 * (argument decoding, locking and bounds handling are elided).
 */
703 syssegflush(ulong *arg)
708 int chunk, ps, pe, len;
/* find the segment containing addr; the third argument presumably
 * requests fault-on-missing — confirm against seg()'s definition */
714 s = seg(up, addr, 1);
/* Pte table covering the start of this chunk */
725 pte = s->map[ps/PTEMAPMEM];
/* round the chunk end up to a page boundary */
730 pe = (pe+BY2PG-1)&~(BY2PG-1);
738 pteflush(pte, ps/BY2PG, pe/BY2PG);
/* keep going while the range extends further into the segment */
744 if(len > 0 && addr < s->top)
/*
 * Fragment of the profiling clock tick (enclosing function header
 * elided): charge one tick's worth of milliseconds to the segment's
 * profile buckets, if profiling is enabled for it.
 */
759 if(s == 0 || s->profile == 0)
/* bucket 0 accumulates total time */
762 s->profile[0] += TK2MS(1);
/* and the bucket for the current pc, when it lies inside this segment */
763 if(pc >= s->base && pc < s->top) {
765 s->profile[pc>>LRESPROF] += TK2MS(1);