2 #include "../port/lib.h"
9 * tlb entry 0 is used only by mmuswitch() to set the current tlb pid.
11 * It is apparently assumed that user tlb entries are not
12 * overwritten during start-up, so ...
13 * During system start-up (before up first becomes non-nil),
14 * Kmap entries start at tlb index 1 and work their way up until
15 * kmapinval() removes them. They then restart at 1. As long as there
16 * are few kmap entries they will not pass tlbroff (the WIRED tlb entry
17 * limit) and interfere with user tlb entries.
18 * Once start-up is over, we combine the kernel and user tlb pools into one,
19 * in the hope of making better use of the tlb on systems with small ones.
21 * All invalidations of the tlb are via indexed entries. The virtual
22 * address used is always 'KZERO | (x<<(PGSHIFT+1)) | currentpid' where
23 * 'x' is the index into the tlb. This ensures that the current pid doesn't
24 * change and that no two invalidated entries have matching virtual
25 * addresses just in case SGI/MIPS ever makes a chip that cares (as
26 * they keep threatening). These entries should never be used in
27 * lookups since accesses to KZERO addresses don't go through the tlb
28 * (actually only true of KSEG0 and KSEG1; KSEG2 and KSEG3 do go
32 #define TLBINVAL(x, pid) puttlbx(x, KZERO|((x)<<(PGSHIFT+1))|(pid), 0, 0, PGSZ)
39 static ulong ktime[8]; /* only for first 8 cpus */
54 static int minfree = KPTESIZE;
56 static int tlbroff = TLBROFF;
65 for(k=kmapfree; k; k=k->next)
68 iprint("%d free\n", i);
81 klast = &kpte[KPTESIZE-1];
82 for(k=kpte; k<klast; k++)
95 for(i=0; i<KPTESIZE; i++)
96 iprint("%d: %lud pc=%#lux\n", i, kpte[i].ref, kpte[i].pc);
106 virt = k->virt & ~BY2PG | TLBPID(tlbvirt());
107 x = gettlbp(virt, tlbent);
109 if (up) { /* startup just ended? */
111 setwired(tlbroff); /* share all-but-one entries */
113 } else if (x < 0) { /* no such entry? use next */
115 if(m->ktlbnext >= tlbroff)
116 m->ktlbnext = TLBOFF;
118 if (x < 0) x = getrandom(); /* no entry for va? overwrite random one */
119 puttlbx(x, virt, k->phys0, k->phys1, PGSZ);
125 * Arrange that the KMap'd virtual address will hit the same
126 * primary cache line as pg->va by making bits 14...12 of the
127 * tag the same as virtual address. These bits are the index
128 * into the primary cache and are checked whenever accessing
129 * the secondary cache through the primary. Violation causes
145 kmapinval(); /* try and free some */
151 /* using iprint here we get mixed up with other prints */
152 print("%d KMAP RETRY %#lux ktime %ld %ld %ld %ld %ld %ld %ld %ld\n",
153 m->machno, getcallerpc(&pg),
154 ktime[0], ktime[1], ktime[2], ktime[3],
155 ktime[4], ktime[5], ktime[6], ktime[7]);
169 * One for the allocation,
172 k->pc = getcallerpc(&pg);
174 k->konmach[m->machno] = m->kactive;
178 /* bits 14..12 form the secondary-cache virtual index */
180 virt |= KMAPADDR | ((k-kpte)<<KMAPSHIFT);
183 pte = PPN(pg->pa)|PTECACHABILITY|PTEGLOBL|PTEWRITE|PTEVALID;
185 k->phys0 = PTEGLOBL | PTECACHABILITY;
190 k->phys1 = PTEGLOBL | PTECACHABILITY;
222 kfault(Ureg *ur) /* called from trap() */
228 index = (addr & ~KSEGM) >> KMAPSHIFT;
229 if(index >= KPTESIZE)
230 panic("kmapfault: va=%#lux", addr);
234 panic("kmapfault: unmapped %#lux", addr);
236 for(f = m->kactive; f; f = f->konmach[m->machno])
241 k->konmach[m->machno] = m->kactive;
254 if(m->machno < nelem(ktime))
255 ktime[m->machno] = MACHP(0)->ticks;
259 curpid = PTEPID(TLBPID(tlbvirt()));
261 for(i = 0; i < NTLB; i++, ktlbx++){
269 for(k = m->kactive; k; k = next) {
270 next = k->konmach[mno];
275 m->ktlbnext = TLBOFF;
279 * Process must be splhi
289 for(s = 0; s < NTLBPID; s++) {
303 p->pidonmach[m->machno] = i;
313 static char lasttext[32];
315 if(Debugswitch && !p->kp){
316 if(strncmp(lasttext, p->text, sizeof lasttext) != 0)
317 iprint("[%s]", p->text);
318 strncpy(lasttext, p->text, sizeof lasttext);
322 memset(p->pidonmach, 0, sizeof p->pidonmach);
325 tp = p->pidonmach[m->machno];
328 puttlbx(0, KZERO|PTEPID(tp), 0, 0, PGSZ);
334 memset(p->pidonmach, 0, sizeof p->pidonmach);
338 /* tlbvirt also has TLBPID() in its low byte as the asid */
340 putstlb(ulong tlbvirt, ulong tlbphys)
345 /* identical calculation in l.s/utlbmiss */
346 entry = &m->stb[stlbhash(tlbvirt)];
347 odd = tlbvirt & BY2PG; /* even/odd bit */
348 tlbvirt &= ~BY2PG; /* zero even/odd bit */
349 if(entry->virt != tlbvirt) { /* not my entry? overwrite it */
350 if(entry->virt != 0) {
353 iprint("putstlb: hash collision: %#lx old virt "
354 "%#lux new virt %#lux page %#lux\n",
355 entry - m->stb, entry->virt, tlbvirt,
356 tlbvirt >> (PGSHIFT+1));
358 entry->virt = tlbvirt;
364 entry->phys1 = tlbphys;
366 entry->phys0 = tlbphys;
368 if(entry->phys0 == 0 && entry->phys1 == 0)
375 putmmu(ulong tlbvirt, ulong tlbphys, Page *pg)
383 tp = up->pidonmach[m->machno];
387 tlbvirt |= PTEPID(tp);
388 if((tlbphys & PTEALGMASK) != PTEUNCACHED) {
389 tlbphys &= ~PTEALGMASK;
390 tlbphys |= PTECACHABILITY;
393 entry = putstlb(tlbvirt, tlbphys);
394 x = gettlbp(tlbvirt, tlbent);
395 if(x < 0) x = getrandom();
396 puttlbx(x, entry->virt, entry->phys0, entry->phys1, PGSZ);
397 if(pg->txtflush & (1<<m->machno)){
398 icflush((void*)pg->va, BY2PG);
399 pg->txtflush &= ~(1<<m->machno);
409 Softtlb *entry, *etab;
414 * find all pid entries that are no longer used by processes
417 pidproc = m->pidproc;
418 for(i=1; i<NTLBPID; i++) {
420 if(sp && sp->pidonmach[mno] != i)
425 * shoot down the one we want
429 sp->pidonmach[mno] = 0;
433 * clean out all dead pids from the stlb;
436 for(etab = &entry[STLBSIZE]; entry < etab; entry++)
437 if(pidproc[TLBPID(entry->virt)] == 0)
441 * clean up the hardware
443 for(i=tlbroff; i<NTLB; i++)
444 if(pidproc[TLBPID(gettlbvirt(i))] == 0)
460 checkmmu(ulong, ulong)
465 countpagerefs(ulong*, int)
470 * Return the number of bytes that can be accessed via KADDR(pa).
471 * If pa is not a valid argument to KADDR, return 0.