 * Manage tree of VtFiles stored in the block cache.
 *
 * The single point of truth for the info about the VtFiles themselves
 * is the block data. Because of this, there is no explicit locking of
 * VtFile structures, and indeed there may be more than one VtFile
 * structure for a given Venti file. They synchronize through the
 * block cache.
 *
 * This is a bit simpler than fossil because there are no epochs
 * or tags or anything else. Just mutable local blocks and immutable
 * Venti blocks.
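
/*
 * Illustrative usage sketch (not from the original source): the calls
 * below are meant to be used under the lock/operate/unlock discipline
 * described above. The cache c, the buffer buf/n, and the 8192-byte
 * block sizes are placeholders for this example only; vtfileflush
 * writes the dirty local blocks to Venti before the tree is unlocked.
 *
 *	VtFile *f;
 *
 *	f = vtfilecreateroot(c, 8192, 8192, VtDataType);
 *	if(f == nil)
 *		sysfatal("vtfilecreateroot: %r");
 *	if(vtfilelock(f, VtORDWR) < 0)
 *		sysfatal("vtfilelock: %r");
 *	if(vtfilewrite(f, buf, n, 0) != n)
 *		sysfatal("vtfilewrite: %r");
 *	if(vtfileflush(f) < 0)
 *		sysfatal("vtfileflush: %r");
 *	vtfileunlock(f);
 *	vtfileclose(f);
 */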

#define MaxBlock (1UL<<31)

static char ENotDir[] = "walk in non-directory";
static char ETooBig[] = "file too big";
/* static char EBadAddr[] = "bad address"; */
static char ELabelMismatch[] = "label mismatch";

static int sizetodepth(uvlong s, int psize, int dsize);
static VtBlock *fileload(VtFile *r, VtEntry *e);
static int shrinkdepth(VtFile*, VtBlock*, VtEntry*, int);
static int shrinksize(VtFile*, VtEntry*, uvlong);
static int growdepth(VtFile*, VtBlock*, VtEntry*, int);

#define ISLOCKED(r) ((r)->b != nil)
#define DEPTH(t) ((t)&VtTypeDepthMask)
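
/*
 * A note on types (sketch of the convention the code below relies on):
 * a block or entry type encodes a base type plus the depth of its
 * pointer tree, which is what DEPTH() extracts. vtfilealloc tests the
 * base with VtTypeBaseMask, and growdepth/shrinkdepth move between
 * depths with e->type+1 and e->type--. For example, a data file with
 * three levels of pointer blocks has
 *
 *	e.type == VtDataType+3
 *	DEPTH(e.type) == 3
 *	(e.type & VtTypeBaseMask) == VtDataType
 */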

vtfilealloc(VtCache *c, VtBlock *b, VtFile *p, u32int offset, int mode)
	assert(p==nil || ISLOCKED(p));
	epb = p->dsize / VtEntrySize;
	if(b->type != VtDirType){
		werrstr("bad block type %#uo", b->type);
	 * a non-active entry is the only thing that
	 * can legitimately happen here. all the others
	if(vtentryunpack(&e, b->data, offset % epb) < 0){
		fprint(2, "vtentryunpack failed: %r (%.*H)\n", VtEntrySize, b->data+VtEntrySize*(offset%epb));
	if(!(e.flags & VtEntryActive)){
		werrstr("entry not active");
	if(DEPTH(e.type) < sizetodepth(e.size, e.psize, e.dsize)){
		fprint(2, "depth %ud size %llud psize %ud dsize %ud\n",
			DEPTH(e.type), e.size, e.psize, e.dsize);
	size = vtcacheblocksize(c);
	if(e.dsize > size || e.psize > size){
		werrstr("block sizes %ud, %ud bigger than cache block size %ud",
			e.psize, e.dsize, size);
	r = vtmallocz(sizeof(VtFile));
	r->dir = (e.type & VtTypeBaseMask) == VtDirType;
	assert(mode == VtOREAD || p->mode == VtORDWR);
	assert(b->addr != NilBlock);
	memmove(r->score, b->score, VtScoreSize);

vtfileroot(VtCache *c, u32int addr, int mode)
	b = vtcachelocal(c, addr, VtDirType);
	r = vtfilealloc(c, b, nil, 0, mode);

vtfileopenroot(VtCache *c, VtEntry *e)
	b = vtcacheallocblock(c, VtDirType);
	vtentrypack(e, b->data, 0);
	f = vtfilealloc(c, b, nil, 0, VtORDWR);

vtfilecreateroot(VtCache *c, int psize, int dsize, int type)
	memset(&e, 0, sizeof e);
	e.flags = VtEntryActive;
	memmove(e.score, vtzeroscore, VtScoreSize);
	return vtfileopenroot(c, &e);

vtfileopen(VtFile *r, u32int offset, int mode)
	bn = offset/(r->dsize/VtEntrySize);
	b = vtfileblock(r, bn, mode);
	r = vtfilealloc(r->c, b, r, offset, mode);

vtfilecreate(VtFile *r, int psize, int dsize, int type)
	return _vtfilecreate(r, -1, psize, dsize, type);

_vtfilecreate(VtFile *r, int o, int psize, int dsize, int type)
	assert(psize <= VtMaxLumpSize);
	assert(dsize <= VtMaxLumpSize);
	assert(type == VtDirType || type == VtDataType);
	epb = r->dsize/VtEntrySize;
	size = vtfilegetdirsize(r);
	 * look at a random block to see if we can find an empty entry
	offset = lnrand(size+1);
	offset -= offset % epb;
	/* try the given block and then try the last block */
	b = vtfileblock(r, bn, VtORDWR);
	for(i=offset%r->epb; i<epb; i++){
		if(vtentryunpack(&e, b->data, i) < 0)
		if((e.flags&VtEntryActive) == 0 && e.gen != ~0)
	fprint(2, "vtfilecreate: cannot happen\n");
	werrstr("vtfilecreate: cannot happen");
	/* found an entry - gen already set */
	e.flags = VtEntryActive;
	memmove(e.score, vtzeroscore, VtScoreSize);
	vtentrypack(&e, b->data, i);
	if(vtfilesetdirsize(r, offset+1) < 0){
	rr = vtfilealloc(r->c, b, r, offset, VtORDWR);
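
/*
 * Example (sketch, not from the original source): creating a new file
 * inside a directory tree. dir is a VtDirType VtFile the caller holds
 * locked; the 8192-byte sizes are placeholders. As described above,
 * vtfilecreate probes a randomly chosen directory block for a free
 * entry and falls back to the end of the directory.
 *
 *	VtFile *f;
 *
 *	if(vtfilelock(dir, VtORDWR) < 0)
 *		sysfatal("vtfilelock: %r");
 *	f = vtfilecreate(dir, 8192, 8192, VtDataType);
 *	if(f == nil)
 *		sysfatal("vtfilecreate: %r");
 *	vtfileunlock(dir);
 */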

vtfilekill(VtFile *r, int doremove)
	if(doremove==0 && e.size == 0){
		/* already truncated */
	e.flags &= ~VtEntryLocal;
	memmove(e.score, vtzeroscore, VtScoreSize);
	vtentrypack(&e, b->data, r->offset % r->epb);

vtfileremove(VtFile *r)
	return vtfilekill(r, 1);

vtfiletruncate(VtFile *r)
	return vtfilekill(r, 0);

vtfilegetsize(VtFile *r)

shrinksize(VtFile *r, VtEntry *e, uvlong size)
	int i, depth, type, isdir, ppb;
	uchar score[VtScoreSize];

	b = vtcacheglobal(r->c, e->score, e->type);
	ppb = e->psize/VtScoreSize;
	for(i=0; i+1<depth; i++)
	if(b->addr == NilBlock){
		/* not worth copying the block just so we can zero some of it */
	 * invariant: each pointer in the tree rooted at b accounts for ptrsz bytes
	/* zero the pointers to unnecessary blocks */
	i = (size+ptrsz-1)/ptrsz;
	memmove(b->data+i*VtScoreSize, vtzeroscore, VtScoreSize);
	/* recurse (go around again) on the partially necessary block */
	memmove(score, b->data+i*VtScoreSize, VtScoreSize);
	b = vtcacheglobal(r->c, score, type);
	if(b->addr == NilBlock){
	 * No one ever truncates directory (VtDirType) blocks.
	if(depth==0 && !isdir && e->dsize > size)
		memset(b->data+size, 0, e->dsize-size);
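
/*
 * Worked example of the shrink arithmetic above (illustrative numbers):
 * with psize = dsize = 8192 and ppb = 8192/VtScoreSize = 409, a depth-2
 * tree has ptrsz = 8192*409 = 3350528 bytes per top-level pointer.
 * Truncating to size = 10000000 then gives
 *
 *	(size+ptrsz-1)/ptrsz = 3	zero root pointers 3..ppb-1
 *	size/ptrsz = 2			recurse into root pointer 2
 *	size % ptrsz = 3298944		bytes still needed below it
 */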

vtfilesetsize(VtFile *r, u64int size)
		return vtfiletruncate(r);
	if(size > VtMaxFileSize || size > ((uvlong)MaxBlock)*r->dsize){
	depth = sizetodepth(size, e.psize, e.dsize);
	edepth = DEPTH(e.type);
		if(shrinkdepth(r, b, &e, depth) < 0){
	}else if(depth > edepth){
		if(growdepth(r, b, &e, depth) < 0){
	shrinksize(r, &e, size);
	vtentrypack(&e, b->data, r->offset % r->epb);

vtfilesetdirsize(VtFile *r, u32int ds)
	epb = r->dsize/VtEntrySize;
	size = (uvlong)r->dsize*(ds/epb);
	size += VtEntrySize*(ds%epb);
	return vtfilesetsize(r, size);

vtfilegetdirsize(VtFile *r)
	epb = r->dsize/VtEntrySize;
	size = vtfilegetsize(r);
	ds = epb*(size/r->dsize);
	ds += (size%r->dsize)/VtEntrySize;
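
/*
 * Example of the directory-size arithmetic above (illustrative numbers):
 * with dsize = 8192 and VtEntrySize = 40, epb = 204 entries fit in each
 * block, so a directory holding ds = 500 entries occupies
 *
 *	size = 8192*(500/204) + 40*(500%204)
 *	     = 8192*2 + 40*92 = 20064 bytes
 *
 * vtfilesetdirsize and vtfilegetdirsize convert between the two forms.
 */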

vtfilegetentry(VtFile *r, VtEntry *e)

vtfilesetentry(VtFile *r, VtEntry *e)
	b = fileload(r, &ee);
	vtentrypack(e, b->data, r->offset % r->epb);

blockwalk(VtBlock *p, int index, VtCache *c, int mode, VtEntry *e)
	score = p->data+index*VtScoreSize;
	/*print("walk from %V/%d ty %d to %V ty %d\n", p->score, index, p->type, score, type); */
	if(mode == VtOWRITE && vtglobaltolocal(score) == NilBlock){
		b = vtcacheallocblock(c, type);
		b = vtcacheglobal(c, score, type);
	if(b == nil || mode == VtOREAD)
	if(vtglobaltolocal(b->score) != NilBlock)
	e->flags |= VtEntryLocal;
	b = vtblockcopy(b/*, e->tag, fs->ehi, fs->elo*/);
	if(p->type == VtDirType){
		memmove(e->score, b->score, VtScoreSize);
		vtentrypack(e, p->data, index);
		memmove(p->data+index*VtScoreSize, b->score, VtScoreSize);

 * Change the depth of the VtFile r.
 * The entry e for r is contained in block p.
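
/*
 * For reference (illustrative arithmetic, not from the original source):
 * with np = psize/VtScoreSize scores per pointer block, a tree of depth d
 * addresses up to np^d data blocks, i.e. np^d * dsize bytes. With
 * psize = dsize = 8192, np = 409:
 *
 *	depth 0:	8192 bytes
 *	depth 1:	409*8192, about 3.2 MB
 *	depth 2:	409*409*8192, about 1.3 GB
 *
 * sizetodepth (below) picks the smallest such depth for a given size;
 * growdepth and shrinkdepth move an existing tree between depths.
 */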

growdepth(VtFile *r, VtBlock *p, VtEntry *e, int depth)
	assert(depth <= VtPointerDepth);
	b = vtcacheglobal(r->c, e->score, e->type);
	 * Keep adding layers until we get to the right depth
	 * or an error occurs.
	while(DEPTH(e->type) < depth){
		bb = vtcacheallocblock(r->c, e->type+1);
		memmove(bb->data, b->score, VtScoreSize);
		memmove(e->score, bb->score, VtScoreSize);
		e->flags |= VtEntryLocal;
	vtentrypack(e, p->data, r->offset % r->epb);
	if(DEPTH(e->type) == depth)

shrinkdepth(VtFile *r, VtBlock *p, VtEntry *e, int depth)
	VtBlock *b, *nb, *ob, *rb;

	assert(depth <= VtPointerDepth);
	rb = vtcacheglobal(r->c, e->score, e->type);
	for(; DEPTH(e->type) > depth; e->type--){
		nb = vtcacheglobal(r->c, b->data, e->type-1);
		if(ob!=nil && ob!=rb)
	 * Right now, e points at the root block rb, b is the new root block,
	 * and ob points at b. To update:
	 *	(i) change e to point at b
	 *	(ii) zero the pointer ob -> b
	 *	(iii) free the root block
	 * p (the block containing e) must be written before
	memmove(e->score, b->score, VtScoreSize);
	vtentrypack(e, p->data, r->offset % r->epb);
	memmove(ob->data, vtzeroscore, VtScoreSize);
	if(ob!=nil && ob!=rb)
	if(DEPTH(e->type) == depth)

mkindices(VtEntry *e, u32int bn, int *index)
	memset(index, 0, (VtPointerDepth+1)*sizeof(int));
	np = e->psize/VtScoreSize;
	for(i=0; bn > 0; i++){
		if(i >= VtPointerDepth){
			werrstr("bad address 0x%lux", (ulong)bn);

vtfileblock(VtFile *r, u32int bn, int mode)
	int index[VtPointerDepth+1];

	assert(bn != NilBlock);
	i = mkindices(&e, bn, index);
	if(i > DEPTH(e.type)){
		werrstr("bad address 0x%lux", (ulong)bn);
		if(growdepth(r, b, &e, i) < 0)
	assert(b->type == VtDirType);
	index[DEPTH(e.type)] = r->offset % r->epb;
	/* mode for intermediate block */
	for(i=DEPTH(e.type); i>=0; i--){
		bb = blockwalk(b, index[i], r->c, i==0 ? mode : m, &e);
	b->pc = getcallerpc(&r);
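
/*
 * Worked example of the index computation (illustrative numbers): with
 * np = psize/VtScoreSize = 409, block number bn = 5000 gives
 *
 *	index[0] = 5000 % 409 = 92
 *	index[1] = 5000 / 409 = 12
 *
 * and mkindices returns 2. vtfileblock then sets index[DEPTH(e.type)]
 * to the entry's slot in its directory block and walks one level per
 * iteration: through the entry to the root pointer block, then slot 12,
 * then slot 92, reaching the data block.
 */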

vtfileblockscore(VtFile *r, u32int bn, uchar score[VtScoreSize])
	int index[VtPointerDepth+1];

	assert(bn != NilBlock);
	if(DEPTH(e.type) == 0){
		memmove(score, e.score, VtScoreSize);
	i = mkindices(&e, bn, index);
	if(i > DEPTH(e.type)){
		memmove(score, vtzeroscore, VtScoreSize);
	index[DEPTH(e.type)] = r->offset % r->epb;
	for(i=DEPTH(e.type); i>=1; i--){
		bb = blockwalk(b, index[i], r->c, VtOREAD, &e);
		if(memcmp(b->score, vtzeroscore, VtScoreSize) == 0)
	memmove(score, b->data+index[0]*VtScoreSize, VtScoreSize);

vtfileincref(VtFile *r)

vtfileclose(VtFile *r)
	vtfileclose(r->parent);
	memset(r, ~0, sizeof(*r));

 * Retrieve the block containing the entry for r.
 * If a snapshot has happened, we might need
 * to get a new copy of the block. We avoid this
 * in the common case by caching the score for
 * the block and the last epoch in which it was valid.
 *
 * We use r->mode to tell the difference between active
 * file system VtFiles (VtORDWR) and VtFiles for the
 * snapshot file system (VtOREAD).

fileloadblock(VtFile *r, int mode)
	assert(r->mode == VtORDWR);
	b = vtcacheglobal(r->c, r->score, VtDirType);
	b->pc = getcallerpc(&r);
	assert(r->parent != nil);
	if(vtfilelock(r->parent, VtORDWR) < 0)
	b = vtfileblock(r->parent, r->offset/r->epb, VtORDWR);
	vtfileunlock(r->parent);
	memmove(r->score, b->score, VtScoreSize);

	werrstr("read/write lock of read-only file");
	addr = vtglobaltolocal(r->score);
	return vtcacheglobal(r->c, r->score, VtDirType);
	b = vtcachelocal(r->c, addr, VtDirType);
	 * If it failed because the epochs don't match, the block has been
	 * archived and reclaimed. Rewalk from the parent and get the
	 * new pointer. This can't happen in the VtORDWR case
	 * above because blocks in the current epoch don't get
	 * reclaimed. The fact that we're VtOREAD means we're
	 * a snapshot. (Or else the file system is read-only, but then
	 * the archiver isn't going around deleting blocks.)
	rerrstr(e, sizeof e);
	if(strcmp(e, ELabelMismatch) == 0){
		if(vtfilelock(r->parent, VtOREAD) < 0)
		b = vtfileblock(r->parent, r->offset/r->epb, VtOREAD);
		vtfileunlock(r->parent);
		fprint(2, "vtfilealloc: lost %V found %V\n",
		memmove(r->score, b->score, VtScoreSize);

vtfilelock(VtFile *r, int mode)
	b = fileloadblock(r, mode);
	 * The fact that we are holding b serves as the
	 * lock entitling us to write to r->b.
	b->pc = getcallerpc(&r);

 * Lock two (usually sibling) VtFiles. This needs special care
 * because the Entries for both VtFiles might be in the same block.
 * We also try to lock blocks in left-to-right order within the tree.

vtfilelock2(VtFile *r, VtFile *rr, int mode)
		return vtfilelock(r, mode);
	if(r->parent==rr->parent && r->offset/r->epb == rr->offset/rr->epb){
		b = fileloadblock(r, mode);
	}else if(r->parent==rr->parent || r->offset > rr->offset){
		bb = fileloadblock(rr, mode);
		b = fileloadblock(r, mode);
		b = fileloadblock(r, mode);
		bb = fileloadblock(rr, mode);
	if(b == nil || bb == nil){
	 * The fact that we are holding b and bb serves
	 * as the lock entitling us to write to r->b and rr->b.
	b->pc = getcallerpc(&r);
	bb->pc = getcallerpc(&r);
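
/*
 * Example (sketch, not from the original source): locking two files
 * whose entries may live in the same directory block, e.g. before
 * copying data from one to the other. f1 and f2 are placeholders.
 *
 *	if(vtfilelock2(f1, f2, VtORDWR) < 0)
 *		sysfatal("vtfilelock2: %r");
 *	... read from f1, write to f2 ...
 *	vtfileunlock(f1);
 *	vtfileunlock(f2);
 *
 * Each file is still unlocked individually with vtfileunlock.
 */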

vtfileunlock(VtFile *r)
	fprint(2, "vtfileunlock: already unlocked\n");

fileload(VtFile *r, VtEntry *e)
	if(vtentryunpack(e, b->data, r->offset % r->epb) < 0)

sizetodepth(uvlong s, int psize, int dsize)
	/* determine pointer depth */
	np = psize/VtScoreSize;
	s = (s + dsize - 1)/dsize;
	for(d = 0; s > 1; d++)

vtfileread(VtFile *f, void *data, long count, vlong offset)
	assert(ISLOCKED(f));
	vtfilegetentry(f, &e);
	if(count < 0 || offset < 0){
		werrstr("vtfileread: bad offset or count");
	if(offset >= e.size)
	if(offset+count > e.size)
		count = e.size - offset;
	frag = offset % e.dsize;
	if(frag+count > e.dsize)
		count = e.dsize - frag;
	b = vtfileblock(f, offset/e.dsize, VtOREAD);
	memmove(data, b->data+frag, count);

filewrite1(VtFile *f, void *data, long count, vlong offset)
	vtfilegetentry(f, &e);
	if(count < 0 || offset < 0){
		werrstr("vtfilewrite: bad offset or count");
	frag = offset % e.dsize;
	if(frag+count > e.dsize)
		count = e.dsize - frag;
	if(frag == 0 && count == e.dsize)
	b = vtfileblock(f, offset/e.dsize, m);
	memmove(b->data+frag, data, count);
	if(m == VtOWRITE && frag+count < e.dsize)
		memset(b->data+frag+count, 0, e.dsize-frag-count);
	if(offset+count > e.size){
		vtfilegetentry(f, &e);
		e.size = offset+count;
		vtfilesetentry(f, &e);

vtfilewrite(VtFile *f, void *data, long count, vlong offset)
	assert(ISLOCKED(f));
		m = filewrite1(f, (char*)data+tot, count-tot, offset+tot);
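
/*
 * Example of the splitting above (illustrative numbers): with
 * e.dsize = 8192, a write of 10000 bytes at offset 4096 becomes two
 * filewrite1 calls:
 *
 *	offset 4096:	frag = 4096, count clipped to 4096 (fills block 0)
 *	offset 8192:	frag = 0, count = 5904 (partial block 1)
 *
 * Only a full, aligned block (frag == 0 and count == dsize) is opened
 * with VtOWRITE, since its old contents are overwritten entirely.
 */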

flushblock(VtCache *c, VtBlock *bb, uchar score[VtScoreSize], int ppb, int epb,
	addr = vtglobaltolocal(score);
	if(addr == NilBlock)
		if(memcmp(b->score, score, VtScoreSize) != 0)
		if((b = vtcachelocal(c, addr, type)) == nil)
		for(i=0; i<epb; i++){
			if(vtentryunpack(&e, b->data, i) < 0)
			if(!(e.flags&VtEntryActive))
			if(flushblock(c, nil, e.score, e.psize/VtScoreSize, e.dsize/VtEntrySize,
			vtentrypack(&e, b->data, i);
	default: /* VtPointerTypeX */
		for(i=0; i<ppb; i++){
			if(flushblock(c, nil, b->data+VtScoreSize*i, ppb, epb, type-1) < 0)
	if(vtblockwrite(b) < 0)
	memmove(score, b->score, VtScoreSize);

vtfileflush(VtFile *f)
	assert(ISLOCKED(f));
	b = fileload(f, &e);
	if(!(e.flags&VtEntryLocal)){
	ret = flushblock(f->c, nil, e.score, e.psize/VtScoreSize, e.dsize/VtEntrySize,
	vtentrypack(&e, b->data, f->offset % f->epb);

vtfileflushbefore(VtFile *r, u64int offset)
	int i, base, depth, ppb, epb, doflush;
	int index[VtPointerDepth+1], j, ret;
	VtBlock *bi[VtPointerDepth+2];

	assert(ISLOCKED(r));
	b = fileload(r, &e);
	 * compute path through tree for the last written byte and the next one.
	memset(bi, 0, sizeof bi);
	depth = DEPTH(e.type);
	i = mkindices(&e, (offset-1)/e.dsize, index);
	ppb = e.psize / VtScoreSize;
	epb = e.dsize / VtEntrySize;
	 * load the blocks along the last written byte
	index[depth] = r->offset % r->epb;
	for(i=depth; i>=0; i--){
		bb = blockwalk(b, index[i], r->c, VtORDWR, &e);
	 * walk up the path from leaf to root, flushing anything that
	 * has been finished.
	base = e.type&~VtTypeDepthMask;
	for(i=0; i<=depth; i++){
		/* leaf: data or dir block */
		if(offset%e.dsize == 0)
		 * interior node: pointer blocks.
		 * specifically, b = bi[i] is a block whose index[i-1]'th entry
		 * points at bi[i-1].
		 * the index entries up to but not including index[i-1] point at
		 * finished blocks, so flush them for sure.
		for(j=0; j<index[i-1]; j++)
			if(flushblock(r->c, nil, b->data+j*VtScoreSize, ppb, epb, base+i-1) < 0)
		 * if index[i-1] is the last entry in the block and is global
		 * (i.e. the kid is flushed), then we can flush this block.
		if(j==ppb-1 && vtglobaltolocal(b->data+j*VtScoreSize)==NilBlock)
		score = bi[i+1]->data+index[i]*VtScoreSize;
		if(flushblock(r->c, bi[i], score, ppb, epb, base+i) < 0)
	/* top: entry. do this always so that the score is up-to-date */
	vtentrypack(&e, bi[depth+1]->data, index[depth]);
	for(i=0; i<nelem(bi); i++)
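
/*
 * Example (sketch, not from the original source): one natural streaming
 * pattern for large writes is to write sequentially and flush everything
 * before the write pointer as you go, so only the rightmost spine of the
 * tree stays dirty in the cache. fd, buf, n and off are placeholders.
 *
 *	for(off=0; (n = read(fd, buf, sizeof buf)) > 0; off += n){
 *		if(vtfilewrite(f, buf, n, off) != n)
 *			sysfatal("vtfilewrite: %r");
 *		if(vtfileflushbefore(f, off+n) < 0)
 *			sysfatal("vtfileflushbefore: %r");
 *	}
 *	if(vtfileflush(f) < 0)
 *		sysfatal("vtfileflush: %r");
 */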