5 /* #define CHECK(x) x */
/* forward declaration; struct body follows below */
8 typedef struct LumpCache LumpCache;
/* hash table is a power of two so HashMask can extract a bucket index from hashbits() */
13 HashSize = 1<<HashLog,
14 HashMask = HashSize - 1,
/*
 * LumpCache bookkeeping: a fixed pool of Lump descriptors, found by a
 * score hash table (heads) and evicted via a heap ordered by age
 * (heap/nheap).  NOTE(review): the struct header and any earlier fields
 * (e.g. the lock and the "full" rendez used below) are elided from this view.
 */
21 Lump *free; /* list of available lumps */
22 u32int allowed; /* total allowable space for packets */
23 u32int avail; /* remaining space for packets */
24 u32int now; /* ticks for usage timestamps */
25 Lump **heads; /* hash table for finding address */
26 int nheap; /* number of available victims */
27 Lump **heap; /* heap for locating victims */
28 int nblocks; /* number of blocks allocated */
29 Lump *blocks; /* array of block descriptors */
/* the single global cache instance; all access below is under lumpcache.lock */
32 static LumpCache lumpcache;
/* eviction-heap maintenance helpers, defined later in the file */
34 static void delheap(Lump *db);
35 static int downheap(int i, Lump *b);
36 static void fixheap(int i, Lump *b);
37 static int upheap(int i, Lump *b);
/* evict one unused lump; returns nil when nothing can be evicted */
38 static Lump *bumplump(void);
/*
 * Set up the global lump cache: "size" bytes of packet space shared by
 * "nblocks" block descriptors.  Allocates the hash table, the eviction
 * heap, and the descriptor array, and threads all descriptors onto the
 * free list.  NOTE(review): several lines of this function (locals,
 * per-block initialization, loop body) are elided from this view.
 */
41 initlumpcache(u32int size, u32int nblocks)
/* the "full" rendezvous sleeps/wakes on the cache lock */
46 lumpcache.full.l = &lumpcache.lock;
47 lumpcache.nblocks = nblocks;
48 lumpcache.allowed = size;
49 lumpcache.avail = size;
/* MKNZ: allocate zeroed arrays — presumably a project allocation macro; TODO confirm */
50 lumpcache.heads = MKNZ(Lump*, HashSize);
51 lumpcache.heap = MKNZ(Lump*, nblocks);
52 lumpcache.blocks = MKNZ(Lump, nblocks);
53 setstat(StatLcacheSize, lumpcache.nblocks);
/* link every descriptor onto the free list (list construction elided) */
56 for(i = 0; i < nblocks; i++){
57 b = &lumpcache.blocks[i];
63 lumpcache.free = last;
/*
 * Find (or create a descriptor for) the lump with the given score and
 * type.  On a hit the cached block is returned; on a miss a descriptor
 * is taken from the free list — evicting old blocks via bumplump() and
 * sleeping on lumpcache.full when everything is in use — and rechained
 * onto the right hash bucket.  NOTE(review): return statements, the
 * hit-path epilogue and several locals are elided from this view.
 */
68 lookuplump(u8int *score, int type)
75 trace(TraceLump, "lookuplump enter");
/* bucket index derived from the score */
77 h = hashbits(score, HashLog);
80 * look for the block in the cache
82 qlock(&lumpcache.lock);
83 CHECK(checklumpcache());
/* hit path: walk the hash chain comparing score and type */
85 for(b = lumpcache.heads[h]; b != nil; b = b->next){
86 if(scorecmp(score, b->score)==0 && type == b->type){
87 addstat(StatLcacheHit, 1);
88 trace(TraceLump, "lookuplump hit");
93 trace(TraceLump, "lookuplump miss");
96 * missed: locate the block with the oldest second to last use.
97 * remove it from the heap, and fix up the heap.
/* keep evicting until a free descriptor appears; if nothing is evictable,
 * sleep until putlump()/bumplump() signals lumpcache.full */
99 while(lumpcache.free == nil){
100 trace(TraceLump, "lookuplump bump");
101 CHECK(checklumpcache());
102 if(bumplump() == nil){
103 CHECK(checklumpcache());
104 logerr(EAdmin, "all lump cache blocks in use");
105 addstat(StatLcacheStall, 1);
106 CHECK(checklumpcache());
107 rsleep(&lumpcache.full);
108 CHECK(checklumpcache());
109 addstat(StatLcacheStall, -1);
112 CHECK(checklumpcache());
115 /* start timer on cache miss to avoid system call on cache hit */
118 addstat(StatLcacheMiss, 1);
/* pop the descriptor off the free list */
120 lumpcache.free = b->next;
123 * the new block has no last use, so assume it happens sometime in the middle
124 ZZZ this is not reasonable
126 b->used = (b->used2 + lumpcache.now) / 2;
129 * rechain the block on the correct hash chain
131 b->next = lumpcache.heads[h];
132 lumpcache.heads[h] = b;
/* record the identity of the lump this descriptor now holds */
137 scorecp(b->score, score);
/* bump the usage clock; TWID32 marks "not in the heap" */
145 b->used = lumpcache.now++;
146 if(b->heap != TWID32)
148 CHECK(checklumpcache());
149 qunlock(&lumpcache.lock);
/* elided: presumably waits on the per-lump lock here — TODO confirm */
152 addstat(StatLumpStall, 1);
154 addstat(StatLumpStall, -1);
156 trace(TraceLump, "lookuplump exit");
/* ms is set only on the miss path (see line-115 comment), so hits report 0 */
157 addstat2(StatLcacheRead, 1, StatLcacheReadTime, ms ? msec()-ms : 0);
/*
 * Attach packet p's data to cached lump b, evicting old blocks until
 * the cache has room for packetasize(p) bytes and charging that size
 * against lumpcache.avail.  Sleeps on lumpcache.full when nothing can
 * be evicted.  NOTE(review): the actual attach of p to b and the
 * function's locals are elided from this view.
 */
162 insertlump(Lump *b, Packet *p)
167 * look for the block in the cache
169 trace(TraceLump, "insertlump enter");
170 qlock(&lumpcache.lock);
171 CHECK(checklumpcache());
174 addstat(StatLcacheWrite, 1);
177 * missed: locate the block with the oldest second to last use.
178 * remove it from the heap, and fix up the heap.
/* evict until the packet fits; same stall protocol as lookuplump */
180 size = packetasize(p);
181 while(lumpcache.avail < size){
182 trace(TraceLump, "insertlump bump");
183 CHECK(checklumpcache());
184 if(bumplump() == nil){
185 logerr(EAdmin, "all lump cache blocks in use");
186 addstat(StatLcacheStall, 1);
187 CHECK(checklumpcache());
188 rsleep(&lumpcache.full);
189 CHECK(checklumpcache());
190 addstat(StatLcacheStall, -1);
193 CHECK(checklumpcache());
/* reserve the space for this packet */
197 lumpcache.avail -= size;
198 CHECK(checklumpcache());
199 qunlock(&lumpcache.lock);
200 trace(TraceLump, "insertlump exit");
/*
 * NOTE(review): fragment of putlump — the signature, the reference-count
 * decrement, and the condition guarding this path are elided from this view.
 * When a lump becomes unreferenced it is pushed onto the eviction heap
 * (TWID32 means "not currently in the heap") and sleepers waiting for
 * cache space are woken.
 */
209 trace(TraceLump, "putlump");
211 qlock(&lumpcache.lock);
212 CHECK(checklumpcache());
214 if(b->heap == TWID32)
215 upheap(lumpcache.nheap++, b);
216 trace(TraceLump, "putlump wakeup");
/* wake every waiter: any of them may now be able to evict this block */
217 rwakeupall(&lumpcache.full);
219 CHECK(checklumpcache());
220 qunlock(&lumpcache.lock);
224 * remove some lump from use and update the free list and counters
/*
 * NOTE(review): the signature (static Lump *bumplump(void)) and parts of
 * the body — the heap pop, the referenced-block skip, data release, and
 * the return — are elided from this view.  Called with lumpcache.lock held.
 */
233 * remove blocks until we find one that is unused
234 * referenced blocks are left in the heap even though
235 * they can't be scavenged; this is simple a speed optimization
237 CHECK(checklumpcache());
/* nothing evictable at all */
239 if(lumpcache.nheap == 0){
240 trace(TraceLump, "bumplump emptyheap");
/* heap[0] is the eviction candidate (oldest second-to-last use) */
243 b = lumpcache.heap[0];
246 trace(TraceLump, "bumplump wakeup");
247 rwakeupall(&lumpcache.full);
255 trace(TraceLump, "bumplump unchain");
/* remove the victim from its hash chain; head case checked explicitly */
257 h = hashbits(b->score, HashLog);
258 if(lumpcache.heads[h] != b)
259 sysfatal("bad hash chains in lump cache");
260 lumpcache.heads[h] = b->next;
262 b->prev->next = b->next;
264 b->next->prev = b->prev;
/* give the victim's packet space back to the cache */
269 lumpcache.avail += b->size;
/* return the descriptor to the free list */
274 b->next = lumpcache.free;
277 CHECK(checklumpcache());
278 trace(TraceLump, "bumplump exit");
/*
 * NOTE(review): fragment of an elided function (possibly emptylumpcache
 * or a stats routine) — only the lock/unlock pair is visible here.
 */
285 qlock(&lumpcache.lock);
288 qunlock(&lumpcache.lock);
292 * delete an arbitrary block from the heap
/*
 * NOTE(review): delheap's signature and the line clearing db->heap are
 * elided.  The last heap element replaces the deleted slot and is then
 * sifted to its proper place by fixheap.
 */
297 fixheap(db->heap, lumpcache.heap[--lumpcache.nheap]);
302 * push an element up or down to it's correct new location
/*
 * NOTE(review): the downheap fallback branch of this function is elided;
 * if the element did not move up, it is presumably pushed down instead.
 */
305 fixheap(int i, Lump *b)
307 if(upheap(i, b) == i)
/*
 * Sift b up from slot i toward the heap root while its age (used2,
 * compared relative to "now" so timestamp wraparound is handled) is
 * older than its parent's; returns the final slot.  NOTE(review): the
 * parent-index computation, the per-slot heap backpointer updates, and
 * the return are elided from this view.
 */
312 upheap(int i, Lump *b)
319 for(; i != 0; i = p){
321 bb = lumpcache.heap[p];
/* wraparound-safe age comparison: stop when b is no older than parent */
322 if(b->used2 - now >= bb->used2 - now)
324 lumpcache.heap[i] = bb;
328 lumpcache.heap[i] = b;
/*
 * Sift b down from slot i, swapping with the older of its two children
 * until heap order is restored; same wraparound-safe used2 comparison
 * as upheap.  NOTE(review): the child-index computation, heap
 * backpointer updates, and the return are elided from this view.
 */
334 downheap(int i, Lump *b)
/* past the last child: b has found its slot */
343 if(k >= lumpcache.nheap)
/* pick the older of the two children */
345 if(k + 1 < lumpcache.nheap && lumpcache.heap[k]->used2 - now > lumpcache.heap[k + 1]->used2 - now)
347 bb = lumpcache.heap[k];
348 if(b->used2 - now <= bb->used2 - now)
350 lumpcache.heap[i] = bb;
354 lumpcache.heap[i] = b;
/*
 * NOTE(review): fragment of an elided consistency-check routine — walks
 * bb's hash chain verifying prev links and dies if the block is missing
 * from its bucket.  Enclosing signature and loop body are not visible.
 */
366 h = hashbits(bb->score, HashLog);
367 for(b = lumpcache.heads[h]; b != nil; b = b->next){
369 sysfatal("bad prev link");
374 sysfatal("block score=%V type=%#x missing from hash table", bb->score, bb->type);
/*
 * NOTE(review): fragment of checklumpcache (the debug invariant checker
 * enabled via the CHECK macro) — the signature, loop headers for the
 * child checks, and several accumulations (size, refed, nfree) are
 * elided from this view.  Dies via sysfatal on any violated invariant.
 */
381 u32int size, now, nfree;
/* heap invariants: backpointers match slots; parent no younger than child */
385 for(i = 0; i < lumpcache.nheap; i++){
386 if(lumpcache.heap[i]->heap != i)
387 sysfatal("lc: mis-heaped at %d: %d", i, lumpcache.heap[i]->heap);
388 if(i > 0 && lumpcache.heap[(i - 1) >> 1]->used2 - now > lumpcache.heap[i]->used2 - now)
389 sysfatal("lc: bad heap ordering");
/* check both children (k, then presumably k+1 — index setup elided) */
391 if(k < lumpcache.nheap && lumpcache.heap[i]->used2 - now > lumpcache.heap[k]->used2 - now)
392 sysfatal("lc: bad heap ordering");
394 if(k < lumpcache.nheap && lumpcache.heap[i]->used2 - now > lumpcache.heap[k]->used2 - now)
395 sysfatal("lc: bad heap ordering");
/* per-descriptor invariants across the whole block array */
400 for(i = 0; i < lumpcache.nblocks; i++){
401 b = &lumpcache.blocks[i];
402 if(b->data == nil && b->size != 0)
403 sysfatal("bad size: %d data=%p", b->size, b->data);
/* referenced blocks must not sit in the eviction heap */
404 if(b->ref && b->heap == TWID32)
406 if(b->type != TWID8){
411 && lumpcache.heap[b->heap] != b)
412 sysfatal("lc: spurious heap value");
/* space accounting: avail + in-use sizes must equal allowed */
414 if(lumpcache.avail != lumpcache.allowed - size){
415 fprint(2, "mismatched available=%d and allowed=%d - used=%d space", lumpcache.avail, lumpcache.allowed, size);
/* free-list entries must be fully reset (no type, not in heap) */
420 for(b = lumpcache.free; b != nil; b = b->next){
421 if(b->type != TWID8 || b->heap != TWID32)
422 sysfatal("lc: bad free list");
/* every descriptor is exactly one of: heaped, free, or referenced */
426 if(lumpcache.nheap + nfree + refed != lumpcache.nblocks)
427 sysfatal("lc: missing blocks: %d %d %d %d", lumpcache.nheap, refed, nfree, lumpcache.nblocks);