/*
 * memory-type region registers.
 *
 * due to the possibility of extended addresses (for PAE)
 * as large as 36 bits coming from the e820 memory map and the like,
 * we'll use vlongs to hold addresses and lengths, even though we don't
 * implement PAE in Plan 9.
 */
10 #include "../port/lib.h"
18 * MTRR Physical base/mask are indexed by
19 * MTRRPhys{Base|Mask}N = MTRRPhys{Base|Mask}0 + 2*N
21 MTRRPhysBase0 = 0x200,
22 MTRRPhysMask0 = 0x201,
23 MTRRDefaultType = 0x2FF,
27 /* cpuid extended function codes */
28 Exthighfunc = 1ul << 31,
42 CR4PageGlobalEnable = 1 << 7,
43 CR0CacheDisable = 1 << 30,
57 Capvcnt = 0xff, /* mask: # of variable-range MTRRs we have */
58 Capwc = 1<<8, /* flag: have write combining? */
59 Capfix = 1<<10, /* flag: have fixed MTRRs? */
60 Deftype = 0xff, /* default MTRR type */
61 Deffixena = 1<<10, /* fixed-range MTRR enable */
62 Defena = 1<<11, /* MTRR enable */
65 typedef struct Mtrreg Mtrreg;
66 typedef struct Mtrrop Mtrrop;
77 static char *types[] = {
87 static Mtrrop *postedop;
93 if(type < 0 || type >= nelem(types))
103 for(p = types; *p != nil; p++)
104 if (strcmp(str, *p) == 0)
113 static vlong mask = -1;
117 cpuid(Exthighfunc, regs);
118 if(regs[0] >= Extaddrsz) { /* ax */
119 cpuid(Extaddrsz, regs);
120 mask = (1LL << (regs[0] & 0xFF)) - 1; /* ax */
122 mask &= Paerange - 1; /* x86 sanity */
126 /* limit physical addresses to 36 bits on the x86 */
130 mtrr->base &= Paerange - 1;
131 mtrr->mask &= Paerange - 1;
137 return (ul & (ul - 1)) == 0;
140 /* true if mtrr is valid */
142 mtrrdec(Mtrreg *mtrr, uvlong *ptr, uvlong *size, int *type)
145 *ptr = mtrr->base & ~(BY2PG-1);
146 *type = mtrr->base & 0xff;
147 *size = (physmask() ^ (mtrr->mask & ~(BY2PG-1))) + 1;
148 return (mtrr->mask >> 11) & 1;
152 mtrrenc(Mtrreg *mtrr, uvlong ptr, uvlong size, int type, int ok)
154 mtrr->base = ptr | (type & 0xff);
155 mtrr->mask = (physmask() & ~(size - 1)) | (ok? 1<<11: 0);
160 * i is the index of the MTRR, and is multiplied by 2 because
161 * mask and base offsets are interleaved.
164 mtrrget(Mtrreg *mtrr, uint i)
167 error("mtrr index out of range");
168 rdmsr(MTRRPhysBase0 + 2*i, &mtrr->base);
169 rdmsr(MTRRPhysMask0 + 2*i, &mtrr->mask);
174 mtrrput(Mtrreg *mtrr, uint i)
177 error("mtrr index out of range");
179 wrmsr(MTRRPhysBase0 + 2*i, mtrr->base);
180 wrmsr(MTRRPhysMask0 + 2*i, mtrr->mask);
189 static long bar1, bar2;
191 s = splhi(); /* avoid race with mtrrclock */
194 * wait for all CPUs to sync here, so that the MTRR setup gets
195 * done at roughly the same time on all processors.
198 while(bar1 < conf.nmach)
202 putcr4(cr4 & ~CR4PageGlobalEnable);
205 putcr0(cr0 | CR0CacheDisable);
207 rdmsr(MTRRDefaultType, &def);
208 wrmsr(MTRRDefaultType, def & ~(vlong)Defena);
210 mtrrput((*op)->reg, (*op)->slot);
213 wrmsr(MTRRDefaultType, def);
218 * wait for all CPUs to sync up again, so that we don't continue
219 * executing while the MTRRs are still being set up.
222 while(bar2 < conf.nmach)
234 mtrrclock(void) /* called from clock interrupt */
240 /* if there's an operation still pending, keep sleeping */
244 return postedop == nil;
248 mtrr(uvlong base, uvlong size, char *tstr)
250 int i, vcnt, slot, type, mtype, mok;
258 if(!(m->cpuiddx & Mtrr))
259 error("mtrrs not supported");
260 if(base & (BY2PG-1) || size & (BY2PG-1) || size == 0)
261 error("mtrr base or size not 4k aligned or zero size");
262 if(base + size >= Paerange)
263 error("mtrr range exceeds 36 bits");
265 error("mtrr size not power of 2");
266 if(base & (size - 1))
267 error("mtrr base not naturally aligned");
269 if((type = str2type(tstr)) == -1)
270 error("mtrr bad type");
272 rdmsr(MTRRCap, &cap);
273 rdmsr(MTRRDefaultType, &def);
277 error("mtrr unknown type");
281 error("mtrr type wc (write combining) unsupported");
292 vcnt = cap & Capvcnt;
295 for(i = 0; i < vcnt; i++){
297 mok = mtrrdec(&mtrr, &mp, &msize, &mtype);
298 if(slot == -1 && (!mok || mtype == (def & Deftype)))
299 slot = i; /* good, but look further for exact match */
300 if(mok && mp == base && msize == size){
306 error("no free mtrr slots");
308 while(postedop != nil)
309 sleep(&oprend, opavail, 0);
310 mtrrenc(&entry, base, size, type, 1);
320 mtrrprint(char *buf, long bufsize)
329 if(!(m->cpuiddx & Mtrr))
331 rdmsr(MTRRCap, &cap);
332 rdmsr(MTRRDefaultType, &def);
333 n += snprint(buf+n, bufsize-n, "cache default %s\n",
334 type2str(def & Deftype));
335 vcnt = cap & Capvcnt;
338 for(i = 0; i < vcnt; i++){
340 if (mtrrdec(&mtrr, &base, &size, &type))
341 n += snprint(buf+n, bufsize-n,
342 "cache 0x%llux %llud %s\n",
343 base, size, type2str(type));