2 #include "../port/lib.h"
/* Forward declarations for the i8237 DMA driver's structures. */
9 typedef struct DMAport DMAport;
10 typedef struct DMA DMA;
11 typedef struct DMAxfer DMAxfer;
/*
 * DMAxfer: per-channel state of one DMA transfer.  Holds the sub-16MB
 * bounce buffer (physical + virtual address and length) and the caller's
 * real buffer, so dmaend() can copy data back after a device->memory
 * transfer.  NOTE(review): struct header/closing lines are outside this
 * excerpt; field list below may be incomplete.
 */
14  * state of a dma transfer
18 ulong bpa; /* bounce buffer physical address */
19 void* bva; /* bounce buffer virtual address */
20 int blen; /* bounce buffer length */
21 void* va; /* virtual address destination/src */
22 long len; /* bytes to be transferred */
/*
 * DMAport: the I/O-port layout of one 8237 DMA controller (4 channels).
 * Each field names the port used for the corresponding controller
 * register.  NOTE(review): the struct header and any trailing fields
 * (e.g. a shift for 16-bit channels, referenced later as dp->shift)
 * are outside this excerpt.
 */
27  * the dma controllers. the first half of this structure specifies
28 * the I/O ports used by the DMA controllers.
32 uchar addr[4]; /* current address (4 channels) */
33 uchar count[4]; /* current count (4 channels) */
34 uchar page[4]; /* page registers (4 channels) */
35 uchar cmd; /* command status register */
36 uchar req; /* request registers */
37 uchar sbm; /* single bit mask register */
38 uchar mode; /* mode register */
39 uchar cbp; /* clear byte pointer */
40 uchar mc; /* master clear */
41 uchar cmask; /* clear mask register */
42 uchar wam; /* write all mask register bit */
/*
 * Port assignments for the two cascaded 8237 controllers.
 * First table: controller 0 (8-bit channels 0-3) at ports 0x00-0x0f,
 * page registers 0x87/0x83/0x81/0x82.
 * Second table: controller 1 (16-bit channels 4-7) at ports 0xc0-0xde,
 * page registers 0x8f/0x8b/0x89/0x8a.
 * NOTE(review): the enclosing array/struct initializer syntax and any
 * trailing fields are outside this excerpt.
 */
54 { 0x00, 0x02, 0x04, 0x06,
55 0x01, 0x03, 0x05, 0x07,
56 0x87, 0x83, 0x81, 0x82,
57 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
60 { 0xc0, 0xc4, 0xc8, 0xcc,
61 0xc2, 0xc6, 0xca, 0xce,
62 0x8f, 0x8b, 0x89, 0x8a,
63 0xd0, 0xd2, 0xd4, 0xd6, 0xd8, 0xda, 0xdc, 0xde,
/* Virtual addresses of the per-controller 64KB bounce buffers. */
68 static void* i8237bva[2];
/*
 * Allocate the DMA bounce buffers.  ISA DMA can only address the first
 * 16MB of physical memory, so this runs early (before memory above 16MB
 * is likely to be handed out) and reserves 64KB-aligned buffers below
 * that limit.  NOTE(review): function signature and surrounding lines
 * are outside this excerpt.
 */
72 * DMA must be in the first 16MB. This gets called early by the
73 * initialisation routines of any devices which require DMA to ensure
74 * the allocated bounce buffers are below the 16MB limit.
86 bva = xspanalloc(64*1024*i8237dma, BY2PG, 64*1024);
/* Reject the allocation if it landed at or above the 16MB ISA DMA limit. */
87 if(bva == nil || PADDR(bva)+64*1024*i8237dma > 16*MB){
89 * This will panic with the current
90 * implementation of xspanalloc().
/* Second controller's buffer is the second 64KB of the span. */
99 i8237bva[1] = ((uchar*)i8237bva[0])+64*1024;
/*
 * Debug helper: read back and print a channel's current address, count
 * and status.  'c' tags the printout with the call site (e.g. 'I', 'X',
 * 'S', 'E' seen elsewhere in this file).
 */
103 dmastatus(DMA *dp, int chan, char c)
/* Current address: two successive reads of the addr port give low then
 * high byte (the 8237 flip-flop), then the page register supplies bits
 * 16-23 and the EISA-style high-page register at 0x400|page bits 24-31. */
109 a = inb(dp->addr[chan]);
110 a |= inb(dp->addr[chan])<<8;
111 a |= inb(dp->page[chan])<<16;
112 a |= inb(0x400|dp->page[chan])<<24;
/* Current count, low byte then high byte. */
114 l = inb(dp->count[chan]);
115 l |= inb(dp->count[chan])<<8;
118 print("%c: addr %uX len %uX stat %uX\n", c, a, l, s);
/*
 * Initialise DMA channel 'chan' for transfers of up to 'maxtransfer'
 * bytes, assigning it one of the pre-allocated bounce buffers.
 * NOTE(review): many interior lines are outside this excerpt.
 */
122 dmainit(int chan, int maxtransfer)
/* Claim the I/O port ranges of both controllers and the page registers. */
129 if(ioalloc(0x00, 0x10, 0, "dma") < 0
130 || ioalloc(0x80, 0x10, 0, "dma") < 0
131 || ioalloc(0xd0, 0x10, 0, "dma") < 0)
/* Clear the channel masks on both controllers; mode 0xC0 on the second
 * controller presumably sets its channel 0 to cascade mode for chaining
 * controller 0 — TODO confirm against the 8237 datasheet. */
135 outb(dma[0].cmask, 0);
136 outb(dma[1].cmask, 0);
137 outb(dma[1].mode, 0xC0);
/* A single 8237 transfer cannot exceed 64KB. */
141 if(maxtransfer > 64*1024)
142 maxtransfer = 64*1024;
/* Channels 0-3 live on controller 0, channels 4-7 on controller 1. */
144 dp = &dma[(chan>>2)&1];
148 if(xp->blen < maxtransfer)
152 //dmastatus(dp, chan, 'I');
/* Hand out the next free bounce buffer; fail loudly if none remain. */
154 if(i8237used >= i8237dma || i8237bva[i8237used] == nil){
155 print("no i8237 DMA bounce buffer < 16MB\n");
158 xp->bva = i8237bva[i8237used++];
159 xp->bpa = PADDR(xp->bva);
160 xp->blen = maxtransfer;
/* Fragment of another routine: selects the controller for 'chan' and
 * dumps its status with tag 'X'.  NOTE(review): the enclosing function's
 * name and signature are outside this excerpt — identify before editing. */
172 dp = &dma[(chan>>2)&1];
175 dmastatus(dp, chan, 'X');
179 * setup a dma transfer. if the destination is not in kernel
180 * memory, allocate a page for the transfer.
182 * we assume BIOS has set up the command register before we
185 * return the updated transfer length (we can't transfer across 64k
/*
 * Program channel 'chan' to move 'len' bytes to/from 'va'.
 * 'isread' selects device->memory (read) vs memory->device.
 * Returns the (possibly reduced) transfer length.
 */
189 dmasetup(int chan, void *va, long len, int isread)
196 dp = &dma[(chan>>2)&1];
199 //print("va%lux+", va);
/* Fallback when no PCI address translation macro exists. */
202 #define PCIWADDR(va) PADDR(va)
203 #endif /* PCIWADDR */
207 * if this isn't kernel memory or crossing 64k boundary or above 16 meg
208 * use the bounce buffer.
/* Bounce-buffer test: not a KZERO (kernel) address, or the transfer
 * would cross a 64KB boundary the 8237 cannot span. */
211 if((((ulong)va)&0xF0000000) != KZERO
212 || (pa&0xFFFF0000) != ((pa+len)&0xFFFF0000)
/* Pre-load the bounce buffer for memory->device transfers. */
219 memmove(xp->bva, va, len);
233 if((((ulong)va)&0xF0000000) != KZERO){
239 memmove(xp->bva, va, len);
243 pa = PCIWADDR(xp->bva);
250 * this setup must be atomic
/* Mode: single-transfer mode; 0x44 = write-to-memory (device read),
 * 0x48 = read-from-memory — TODO confirm bit layout vs 8237 datasheet. */
252 mode = (isread ? 0x44 : 0x48) | chan;
254 outb(dp->cbp, 0); /* set count & address to their first byte */
255 outb(dp->mode, mode); /* single mode dma (give CPU a chance at mem) */
/* Address bytes via the flip-flop (low then high), then the page
 * register (bits 16-23) and high-page register at 0x400|page.
 * dp->shift presumably halves addresses/counts for 16-bit channels
 * — TODO confirm from the struct definition outside this excerpt. */
256 outb(dp->addr[chan], pa>>dp->shift); /* set address */
257 outb(dp->addr[chan], pa>>(8+dp->shift));
258 outb(dp->page[chan], pa>>16);
260 outb(0x400|dp->page[chan], pa>>24);
262 outb(dp->cbp, 0); /* set count & address to their first byte */
/* The 8237 count register holds (transfer units - 1). */
263 outb(dp->count[chan], (len>>dp->shift)-1); /* set count */
264 outb(dp->count[chan], ((len>>dp->shift)-1)>>8);
265 outb(dp->sbm, chan); /* enable the channel */
267 //dmastatus(dp, chan, 'S');
/* Fragment of a completion test: the low nibble of the 8237 status
 * register has one terminal-count bit per channel; non-zero result
 * means the channel's transfer has completed. */
277 dp = &dma[(chan>>2)&1];
280 return inb(dp->cmd) & (1<<chan);
284 * this must be called after a dma has been completed.
286 * if a page has been allocated for the dma,
287 * copy the data into the actual destination
296 dp = &dma[(chan>>2)&1];
299 //dmastatus(dp, chan, 'E');
301 * disable the channel
/* Setting bit 2 of the single-mask register masks (disables) 'chan'. */
304 outb(dp->sbm, 4|chan);
/* Only device->memory transfers need the bounce data copied back. */
308 if(xp->len == 0 || !xp->isread)
312 * copy out of temporary page
314 memmove(xp->va, xp->bva, xp->len);
/*
 * Fragment of the residual-count query: reads the channel's count
 * register (low byte then high byte via the flip-flop) and converts
 * the hardware's (units-1) value back to bytes using dp->shift.
 */
325 dp = &dma[(chan>>2)&1];
327 retval = inb(dp->count[chan]);
328 retval |= inb(dp->count[chan]) << 8;
329 return((retval<<dp->shift)+1);