/* plan9front: sys/src/cmd/9660srv/iobuf.c */
#include <u.h>
#include <libc.h>
#include <auth.h>
#include <fcall.h>
#include "dat.h"
#include "fns.h"

/*
 * We used to use 100 i/o buffers of size 2kb (Sectorsize).
 * Unfortunately, reading 2kb at a time while hopping around
 * the disk doesn't let us get near the disk bandwidth.
 *
 * Based on a trace of iobuf address accesses taken while
 * tarring up a Plan 9 distribution CD, we now use a small number
 * of 128kb buffers (NCLUST of them, currently 64).  This works
 * for ISO9660 because data is required to be laid out contiguously;
 * effectively we're doing aggressive readahead.  Because the
 * buffers are so big and the typical disk accesses so concentrated,
 * it's okay that we have so few of them.
 *
 * If this is used to access multiple discs at once, it's not clear
 * how gracefully the scheme degrades, but I'm not convinced
 * it's worth worrying about.		-rsc
 */
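/*
 * Typical use, in sketch form (variable names here are illustrative;
 * the actual callers live in the rest of 9660srv): ask for one 2kb
 * sector by absolute sector number and a cache tag, use the returned
 * pointer into the 128kb cluster that was read in as a unit, then
 * release it so the cluster can be recycled:
 *
 *	Iobuf *b;
 *
 *	b = getbuf(dev, sectno, tag);
 *	memmove(dst, b->iobuf, Sectorsize);
 *	putbuf(b);
 */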

#define	BUFPERCLUST	64	/* sectors/cluster; 64*Sectorsize = 128kb */
#define	NCLUST		64

int nclust = NCLUST;

static Ioclust*	iohead;
static Ioclust*	iotail;

static Ioclust*	getclust(Xdata*, long, ulong);
static void	putclust(Ioclust*);
static void	xread(Ioclust*);

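/*
 * Carve the whole cache out of a single sbrk arena: for each of the
 * nclust clusters, an Ioclust header, its BUFPERCLUST Iobuf headers,
 * and the sector memory they point into.  Clusters are chained onto
 * the iohead/iotail list that getclust and putclust keep in LRU
 * order, most recently used at the head.
 */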
void
iobuf_init(void)
{
	int i, j, n;
	Ioclust *c;
	Iobuf *b;
	uchar *mem;

	n = nclust*sizeof(Ioclust) +
		nclust*BUFPERCLUST*(sizeof(Iobuf)+Sectorsize);
	mem = sbrk(n);
	if(mem == (void*)-1)
		panic(0, "iobuf_init");
	memset(mem, 0, n);

	for(i=0; i<nclust; i++){
		c = (Ioclust*)mem;
		mem += sizeof(Ioclust);

		/*
		 * On an ISO9660 filesystem, data is usually laid out
		 * sequentially, but directory information sits at the end
		 * of the disk.  To avoid evicting directory information
		 * when reading large sequential files, we keep it tagged
		 * in the cache; for now, an eighth of the clusters are
		 * reserved for metadata.
		 */
		c->tag = i <= (nclust/8);

		c->addr = -1;
		c->prev = iotail;
		if(iotail)
			iotail->next = c;
		iotail = c;
		if(iohead == nil)
			iohead = c;

		c->buf = (Iobuf*)mem;
		mem += BUFPERCLUST*sizeof(Iobuf);
		c->iobuf = mem;
		mem += BUFPERCLUST*Sectorsize;
		for(j=0; j<BUFPERCLUST; j++){
			b = &c->buf[j];
			b->clust = c;
			b->addr = -1;
			b->iobuf = c->iobuf+j*Sectorsize;
		}
	}
}

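/*
 * Invalidate every cluster caching data from dev, forgetting its
 * contents and clearing its busy count.
 */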
void
purgebuf(Xdata *dev)
{
	Ioclust *p;

	for(p=iohead; p!=nil; p=p->next)
		if(p->dev == dev){
			p->addr = -1;
			p->busy = 0;
		}
}

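/*
 * Return the cluster holding the BUFPERCLUST-sector run starting at
 * addr on dev, bumping its busy count.  On a miss, recycle the least
 * recently used idle cluster carrying the same tag and read it in;
 * if the read fails, the cluster is released and marked invalid
 * before the error propagates.
 */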
static Ioclust*
getclust(Xdata *dev, long addr, ulong tag)
{
	Ioclust *c, *f;

	f = nil;
	for(c=iohead; c; c=c->next){
		if(!c->busy && c->tag == tag)
			f = c;
		if(c->addr == addr && c->dev == dev){
			c->busy++;
			return c;
		}
	}

	if(f == nil)
		panic(0, "out of buffers");

	f->addr = addr;
	f->dev = dev;
	f->busy++;
	if(waserror()){
		f->addr = -1;	/* stop caching */
		putclust(f);
		nexterror();
	}
	xread(f);
	poperror();
	return f;
}

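/*
 * Drop a reference to a cluster and move it to the head of the list,
 * so that least recently used clusters drift toward the tail, where
 * getclust recycles them.
 */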
static void
putclust(Ioclust *c)
{
	if(c->busy <= 0)
		panic(0, "putbuf");
	c->busy--;

	/* Link onto head for LRU */
	if(c == iohead)
		return;
	c->prev->next = c->next;

	if(c->next)
		c->next->prev = c->prev;
	else
		iotail = c->prev;

	c->prev = nil;
	c->next = iohead;
	iohead->prev = c;
	iohead = c;
}

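/*
 * Look up a single sector: map the absolute sector address to its
 * cluster and offset, fault the cluster in if necessary, and return
 * the Iobuf for that sector.  An error is raised if the sector lies
 * beyond what the cluster's read returned.  The caller must release
 * the buffer with putbuf.
 */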
Iobuf*
getbuf(Xdata *dev, ulong addr, ulong tag)
{
	int off;
	Ioclust *c;

	off = addr%BUFPERCLUST;
	c = getclust(dev, addr - off, tag);
	if(c->nbuf < off){
		c->busy--;
		error("I/O read error");
	}
	return &c->buf[off];
}

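/*
 * Release a sector obtained with getbuf by releasing the cluster
 * it belongs to.
 */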
void
putbuf(Iobuf *b)
{
	putclust(b->clust);
}

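/*
 * Fill a cluster: read up to BUFPERCLUST sectors starting at the
 * cluster's sector address.  A short read is tolerated (nbuf records
 * how many sectors are valid), but fewer than one full sector is an
 * i/o error.
 */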
static void
xread(Ioclust *c)
{
	int n;
	Xdata *dev;

	dev = c->dev;
	seek(dev->dev, (vlong)c->addr * Sectorsize, 0);
	n = readn(dev->dev, c->iobuf, BUFPERCLUST*Sectorsize);
	if(n < Sectorsize)
		error("I/O read error");
	c->nbuf = n/Sectorsize;
}