/* Global knob: when zero, scachemiss (below) skips loading arena summaries from disk. */
5 int icacheprefetch = 1;
/*
 * Forward typedefs: ICache is the index-entry cache, IHash a hash
 * table of IEntries (per the comment below), ISum an arena-summary
 * cache entry (per the "Arena summary cache" section below).
 */
7 typedef struct ICache ICache;
8 typedef struct IHash IHash;
9 typedef struct ISum ISum;
35 * Hash table of IEntries
/*
 * Allocate the IHash header and its bucket array in one zeroed chunk;
 * the table sits immediately after the struct in memory.
 * NOTE(review): presumably the body of mkihash (called from initicache
 * below) -- the function header is not visible in this view.
 */
59 ih = vtmallocz(sizeof(IHash)+size*sizeof(ih->table[0]));
60 ih->table = (IEntry**)(ih+1);
/*
 * Find the entry for score in ih, walking the bucket chain.
 * type == -1 matches any type; otherwise the entry's ia.type must
 * match as well. (Interior and return lines missing from this view;
 * presumably returns the matching IEntry* or nil.)
 */
67 ihashlookup(IHash *ih, u8int score[VtScoreSize], int type)
72 h = hashbits(score, ih->bits);
73 for(ie=ih->table[h]; ie; ie=ie->nexthash)
74 if((type == -1 || type == ie->ia.type) && scorecmp(score, ie->score) == 0)
/*
 * Unlink ie from its bucket chain in ih.  An entry that is not found
 * is only a warning, not fatal; `what' labels the caller in the
 * diagnostic. (Interior lines missing from this view.)
 */
80 ihashdelete(IHash *ih, IEntry *ie, char *what)
85 h = hashbits(ie->score, ih->bits);
86 for(l=&ih->table[h]; *l; l=&(*l)->nexthash)
91 fprint(2, "warning: %s %V not found in ihashdelete\n", what, ie->score);
/*
 * Push ie onto the front of its bucket chain.
 * (Remaining lines, presumably storing ie back into table[h],
 * are missing from this view.)
 */
95 ihashinsert(IHash *ih, IEntry *ie)
99 h = hashbits(ie->score, ih->bits);
100 ie->nexthash = ih->table[h];
/*
 * Remove ie from its doubly-linked list; an entry with both links nil
 * is presumably already off every list, so removal is skipped.
 * NOTE(review): presumably the body of popout (called from poplast
 * below) -- the function header is not visible in this view.
 */
112 if(ie->prev == nil && ie->next == nil)
114 ie->prev->next = ie->next;
115 ie->next->prev = ie->prev;
/*
 * Detach and return the tail of a circular list headed at `list'.
 * list->prev == list means the list is empty (presumably returning
 * nil on the missing line between these two).
 */
122 poplast(IEntry *list)
124 if(list->prev == list)
126 return popout(list->prev);
/*
 * Insert ie just after the head of circular list `list'
 * (i.e. make it the most-recently-used entry).
 * (Remaining link updates are missing from this view.)
 */
130 pushfirst(IEntry *list, IEntry *ie)
134 ie->next = list->next;
141 * Arena summary cache.
/*
 * Find the cached arena summary whose [addr, limit) range covers addr.
 * On a hit the pointer array is rotated (memmove) so the found summary
 * moves to the front -- move-to-front ordering for the LRU scan.
 */
156 scachelookup(u64int addr)
161 for(i=0; i<icache.nsum; i++){
163 if(s->addr <= addr && addr < s->limit){
165 memmove(icache.sum+1, icache.sum, i*sizeof icache.sum[0]);
/*
 * Remove every entry of summary s from the summary hash (shash).
 * NOTE(review): presumably part of a summary-clearing helper -- the
 * function header is not visible in this view.
 */
179 for(i=0; i<s->nentries; i++)
180 ihashdelete(icache.shash, &s->entries[i], "scache");
/*
 * Scan summaries from coldest (highest index) toward front; take the
 * first one whose lock can be acquired without blocking (canqlock),
 * and rotate it to the front of the array.
 * NOTE(review): presumably a summary-eviction helper -- the function
 * header is not visible in this view.
 */
195 for(i=icache.nsum-1; i>=0; i--){
197 if(canqlock(&s->lock)){
199 memmove(icache.sum+1, icache.sum, i*sizeof icache.sum[0]);
/* Record a hit on addr: the lookup's only purpose here is its move-to-front side effect. */
210 scachehit(u64int addr)
212 scachelookup(addr); /* for move-to-front */
/*
 * Bind summary s to the arena covering addr: amapitoag maps the index
 * address to an arena (plus base address, limit, and clump group g).
 * (Field assignments on the missing lines presumably record these.)
 */
216 scachesetup(ISum *s, u64int addr)
221 s->arena = amapitoag(mainindex, addr, &addr0, &limit, &g);
/*
 * Load up to ArenaCIGSize summary entries for clump group s->g from
 * disk (asumload), convert each entry's address from arena-relative to
 * absolute by adding s->addr, and insert them into the summary hash.
 * NOTE(review): fragment of the summary-load routine; header and
 * surrounding lines are not visible in this view.
 */
233 n = asumload(s->arena, s->g, s->entries, ArenaCIGSize);
235 * n can be less then ArenaCIGSize, either if the clump group
236 * is the last in the arena and is only partially filled, or if there
237 * are corrupt clumps in the group -- those are not returned.
240 s->entries[i].ia.addr += s->addr;
241 ihashinsert(icache.shash, &s->entries[i]);
243 //fprint(2, "%T scacheload %s %d - %d entries\n", s->arena->name, s->g, n);
244 addstat(StatScachePrefetch, n);
/*
 * Handle a summary-cache miss for addr in two phases: the first miss
 * only creates a placeholder entry (scachesetup) without touching
 * disk; a second miss on an entry not yet loaded triggers the actual
 * disk load -- unless prefetching is disabled (icacheprefetch == 0)
 * or the entry is already loaded.  On the load path, returns s with
 * its lock held (see the "locked" comment); caller must unlock.
 */
249 scachemiss(u64int addr)
255 s = scachelookup(addr);
257 /* first time: make an entry in the cache but don't populate it yet */
261 scachesetup(s, addr);
266 /* second time: load from disk */
268 if(s->loaded || !icacheprefetch){
273 return s; /* locked */
/*
 * Initialize the index cache from a memory budget of mem0 bytes.
 * 7/8 of the budget goes to index entries (each costing one IEntry
 * plus one hash-table pointer); the remaining 1/8 funds the summary
 * cache, in units of ArenaCIGSize entries per summary.
 */
281 initicache(u32int mem0)
284 int i, entries, scache;
/* icache.full is a rendezvous tied to the main cache lock; icacheinsert sleeps on it when no entry can be freed. */
286 icache.full.l = &icache.lock;
289 entries = mem / (sizeof(IEntry)+sizeof(IEntry*));
290 scache = (entries/8) / ArenaCIGSize;
291 entries -= entries/8;
298 fprint(2, "icache %,d bytes = %,d entries; %d scache\n", mem0, entries, scache);
/* The three LRU lists are circular with a sentinel head: empty means head points at itself. */
300 icache.clean.prev = icache.clean.next = &icache.clean;
301 icache.dirty.prev = icache.dirty.next = &icache.dirty;
302 icache.free.prev = icache.free.next = &icache.free;
304 icache.hash = mkihash(entries);
305 icache.nentries = entries;
306 setstat(StatIcacheSize, entries);
307 icache.entries = vtmallocz(entries*sizeof icache.entries[0]);
/* Flushing is triggered once half the entries are dirty (see insertscore). */
308 icache.maxdirty = entries / 2;
309 for(i=0; i<entries; i++)
310 pushfirst(&icache.free, &icache.entries[i]);
/*
 * Summary cache: one allocation holds all ISum structs (hung off
 * sum[0]); sum[i] then points at the i'th struct, and each summary
 * gets its own ArenaCIGSize slice of the shared sentries array.
 */
312 icache.nsum = scache;
313 icache.sum = vtmallocz(scache*sizeof icache.sum[0]);
314 icache.sum[0] = vtmallocz(scache*sizeof icache.sum[0][0]);
315 icache.nsentries = scache * ArenaCIGSize;
316 icache.sentries = vtmallocz(scache*ArenaCIGSize*sizeof icache.sentries[0]);
317 icache.shash = mkihash(scache*ArenaCIGSize);
318 for(i=0; i<scache; i++){
319 icache.sum[i] = icache.sum[0] + i;
320 icache.sum[i]->entries = icache.sentries + i*ArenaCIGSize;
/*
 * Evict the least-recently-used clean entry: take it off the clean
 * list tail and remove it from the main hash.
 * NOTE(review): fragment of evictlru (named in its own ihashdelete
 * diagnostic); header and return are not visible in this view.
 */
330 ie = poplast(&icache.clean);
333 ihashdelete(icache.hash, ie, "evictlru");
/*
 * Insert (score, ia) into the index cache in the given state
 * (IEClean or IEDirty).  Obtains an IEntry from the free list or by
 * evicting a clean LRU entry; if neither is available, records a
 * stall and sleeps on icache.full until icacheclean frees entries.
 */
338 icacheinsert(u8int score[VtScoreSize], IAddr *ia, int state)
342 if((ie = poplast(&icache.free)) == nil && (ie = evictlru()) == nil){
343 addstat(StatIcacheStall, 1);
344 while((ie = poplast(&icache.free)) == nil && (ie = evictlru()) == nil){
345 // Could safely return here if state == IEClean.
346 // But if state == IEDirty, have to wait to make
347 // sure we don't lose an index write.
348 // Let's wait all the time.
351 rsleep(&icache.full);
353 addstat(StatIcacheStall, -1);
356 memmove(ie->score, score, VtScoreSize);
/* Clean insertions count as prefetches; dirty ones as pending index writes. */
359 if(state == IEClean){
360 addstat(StatIcachePrefetch, 1);
361 pushfirst(&icache.clean, ie);
363 addstat(StatIcacheWrite, 1);
364 assert(state == IEDirty);
366 setstat(StatIcacheDirty, icache.ndirty);
368 pushfirst(&icache.dirty, ie);
370 ihashinsert(icache.hash, ie);
/*
 * Look up (score, type) -- first in the main index cache, then in the
 * summary cache.  A main-cache hit on a clean entry is moved to the
 * front of the clean list (LRU refresh).  A summary-cache hit is
 * promoted into the main cache as a clean entry and also refreshes
 * the summary's move-to-front position via scachehit.
 * (Return statements are missing from this view; per the caller in
 * lookupscore, >= 0 presumably signals a hit.)
 */
374 icachelookup(u8int score[VtScoreSize], int type, IAddr *ia)
379 addstat(StatIcacheLookup, 1);
380 if((ie = ihashlookup(icache.hash, score, type)) != nil){
382 if(ie->state == IEClean)
383 pushfirst(&icache.clean, ie);
384 addstat(StatIcacheHit, 1);
385 qunlock(&icache.lock);
389 if((ie = ihashlookup(icache.shash, score, type)) != nil){
391 icacheinsert(score, &ie->ia, IEClean);
392 scachehit(ie->ia.addr);
393 addstat(StatScacheHit, 1);
394 qunlock(&icache.lock);
397 addstat(StatIcacheMiss, 1);
398 qunlock(&icache.lock);
/*
 * Public entry: insert (score, ia) into the index cache.  A dirty
 * insertion must carry the arena state `as' (asserted/diagnosed
 * below) and must never move the arena address aa backward.  May
 * kick off a summary prefetch (scachemiss) and, when the dirty count
 * reaches maxdirty, presumably triggers a flush on a missing line.
 */
404 insertscore(u8int score[VtScoreSize], IAddr *ia, int state, AState *as)
409 icacheinsert(score, ia, state);
/* toload is the summary to populate; it is returned locked (see scachemiss) and unlocked below. */
411 toload = scachemiss(ia->addr);
413 assert(state == IEDirty);
416 fprint(2, "%T insertscore IEDirty without as; called from %#p\n",
417 getcallerpc(&score));
419 if(icache.as.aa > as->aa)
420 fprint(2, "%T insertscore: aa moving backward: %#llux -> %#llux\n", icache.as.aa, as->aa);
424 qunlock(&icache.lock);
427 qunlock(&toload->lock);
430 if(icache.ndirty >= icache.maxdirty)
434 * It's okay not to do this under icache.lock.
435 * Calling insertscore only happens when we hold
436 * the lump, meaning any searches for this block
437 * will hit in the lump cache until after we return.
440 markbloomfilter(mainindex->bloom, score);
/*
 * Resolve (score, type) to an index address: try the cache first; on
 * a miss, read the entry from the on-disk index (loadientry) and
 * insert it as clean (no AState needed).  Read timing is recorded in
 * the StatIcacheRead/StatIcacheReadTime stats.
 */
446 lookupscore(u8int score[VtScoreSize], int type, IAddr *ia)
451 if(icachelookup(score, type, ia) >= 0){
452 addstat(StatIcacheRead, 1);
457 addstat(StatIcacheFill, 1);
458 if(loadientry(mainindex, score, type, &d) < 0)
462 insertscore(score, &d.ia, IEClean, nil);
465 addstat2(StatIcacheRead, 1, StatIcacheReadTime, msec() - ms);
/*
 * Fold the first four score bytes into a 32-bit value (big-endian
 * order); a missing line presumably shifts it down to `bits' bits.
 */
470 hashbits(u8int *sc, int bits)
474 v = (sc[0] << 24) | (sc[1] << 16) | (sc[2] << 8) | sc[3];
/* Dirty fraction of the cache as a fixed-point value scaled by IcacheFrac (vlong cast avoids 32-bit overflow in the product). */
481 icachedirtyfrac(void)
483 return (vlong)icache.ndirty*IcacheFrac / icache.nentries;
487 * Return a singly-linked list of dirty index entries,
488 * with 32-bit hash numbers between lo and hi
489 * and address < limit.
/*
 * Build a singly-linked list (via nextdirty) of dirty entries whose
 * 32-bit score hash lies in [lo, hi] and whose address is <= limit.
 * NOTE(review): the header comment above says "address < limit" but
 * the test below is `<=' -- confirm which bound is intended.
 */
492 icachedirty(u32int lo, u32int hi, u64int limit)
498 trace(TraceProc, "icachedirty enter");
500 for(ie = icache.dirty.next; ie != &icache.dirty; ie=ie->next){
501 if(ie->state == IEDirty && ie->ia.addr <= limit){
502 h = hashbits(ie->score, 32);
503 if(lo <= h && h <= hi){
504 ie->nextdirty = dirty;
509 qunlock(&icache.lock);
510 trace(TraceProc, "icachedirty exit");
523 qunlock(&icache.lock);
528 * The singly-linked non-circular list of index entries ie
529 * has been written to disk. Move them to the clean list.
/*
 * The entries on the nextdirty-linked list `ie' have been written to
 * disk: move each from the dirty list to the clean list, update the
 * dirty-count stat, and wake everyone sleeping in icacheinsert
 * waiting for a free entry.
 */
532 icacheclean(IEntry *ie)
536 trace(TraceProc, "icacheclean enter");
539 assert(ie->state == IEDirty);
/* Save the link before relisting; popout/pushfirst reuse prev/next. */
540 next = ie->nextdirty;
542 popout(ie); /* from icache.dirty */
545 pushfirst(&icache.clean, ie);
547 setstat(StatIcacheDirty, icache.ndirty);
548 rwakeupall(&icache.full);
549 qunlock(&icache.lock);
550 trace(TraceProc, "icacheclean exit");
/*
 * Drain the cache: evict every clean entry back onto the free list,
 * then visit each cached arena summary.
 * NOTE(review): fragment of a cache-emptying routine -- its header
 * and the per-summary body are not visible in this view.
 */
561 while((ie = evictlru()) != nil)
562 pushfirst(&icache.free, ie);
563 for(i=0; i<icache.nsum; i++){
569 qunlock(&icache.lock);