2 * Xen virtual network interface frontend
6 #include "../port/lib.h"
11 #include "../port/error.h"
12 #include "../port/netif.h"
13 #include "../port/etherif.h"
23 typedef struct Ctlr Ctlr;
24 typedef union Txframe Txframe;
25 typedef union Rxframe Rxframe;
36 netif_tx_front_ring_t txring;
37 netif_rx_front_ring_t rxring;
70 * conversions to machine page numbers, pages and addresses
72 #define MFN(pa) (patomfn[(pa)>>PGSHIFT])	/* pseudo-physical frame number -> machine frame number */
73 #define MFNPG(pa) (MFN(pa)<<PGSHIFT)	/* machine address of the page containing pa */
74 #define PA2MA(pa) (MFNPG(pa) | PGOFF(pa))	/* physical -> machine address, preserving the page offset */
75 #define VA2MA(va) PA2MA(PADDR(va))	/* kernel virtual -> machine address */
/*
 * puttxrequest: queue one transmit request on the shared tx ring.
 * Copies *tr into the slot at the private producer index, advances the
 * index and pushes the update so the backend can see it.
 * RING_PUSH_REQUESTS_AND_CHECK_NOTIFY sets 'notify' when the backend
 * must be signalled via the event channel.
 * NOTE(review): the declarations of i/notify and the return of the
 * notify flag are in lines elided from this view -- confirm against
 * the full file.
 */
78 puttxrequest(Ctlr *ctlr, netif_tx_request_t *tr)
80 netif_tx_request_t *req;
83 LOG(dprint("puttxrequest id %d ref %d size %d\n", tr->id, tr->gref, tr->size);)
84 i = ctlr->txring.req_prod_pvt;
85 req = RING_GET_REQUEST(&ctlr->txring, i);
86 memmove(req, tr, sizeof(*req));	/* copy the request into the shared-ring slot */
87 ctlr->txring.req_prod_pvt = i+1;
88 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&ctlr->txring, notify);
/*
 * putrxrequest: post one receive-buffer request on the shared rx ring.
 * Copies *rr into the slot at the private producer index, advances the
 * index and pushes the update to the backend.
 * NOTE(review): as with puttxrequest, the i/notify declarations and the
 * return of the notify flag are in lines elided from this view.
 */
93 putrxrequest(Ctlr *ctlr, netif_rx_request_t *rr)
95 netif_rx_request_t *req;
99 LOG(dprint("putrxrequest %d %d\n", rr->id, rr->gref);)
100 i = ctlr->rxring.req_prod_pvt;
101 req = RING_GET_REQUEST(&ctlr->rxring, i);
102 memmove(req, rr, sizeof(*req));	/* copy the request into the shared-ring slot */
103 ctlr->rxring.req_prod_pvt = i+1;
104 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&ctlr->rxring, notify);
/*
 * gettxresponse: consume one transmit-completion response from the tx
 * ring, copying it out for the caller and advancing the consumer index.
 * RING_FINAL_CHECK_FOR_RESPONSES sets 'avail'; the early return taken
 * when no response is pending (and the copy into *tr) are in lines
 * elided from this view -- confirm against the full file.
 */
109 gettxresponse(Ctlr *ctlr, netif_tx_response_t *tr)
112 netif_tx_response_t *rx;
114 RING_FINAL_CHECK_FOR_RESPONSES(&ctlr->txring, avail);
117 i = ctlr->txring.rsp_cons;
118 rx = RING_GET_RESPONSE(&ctlr->txring, i);
119 LOG(dprint("gettxresponse id %d status %d\n", rx->id, rx->status);)
123 ctlr->txring.rsp_cons = ++i;	/* slot consumed; backend may reuse it */
/*
 * getrxresponse: consume one received-packet response from the rx ring,
 * copying it out for the caller and advancing the consumer index.
 * NOTE(review): the no-response early return and the copy into *rr are
 * in lines elided from this view, mirroring gettxresponse.
 */
128 getrxresponse(Ctlr *ctlr, netif_rx_response_t* rr)
131 netif_rx_response_t *rx;
133 RING_FINAL_CHECK_FOR_RESPONSES(&ctlr->rxring, avail);
136 i = ctlr->rxring.rsp_cons;
137 rx = RING_GET_RESPONSE(&ctlr->rxring, i);
138 LOG(dprint("getrxresponse id %d offset %d flags %ux status %d\n", rx->id, rx->offset, rx->flags, rx->status);)
140 ctlr->rxring.rsp_cons = ++i;	/* slot consumed; backend may reuse it */
/*
 * ringinit: lay the tx and rx shared rings out in two consecutive pages
 * starting at 'a', initialise the shared and front-ring bookkeeping,
 * and grant the backend domain write access to each page (shareframe
 * with final argument 1).  Grant references are kept in the Ctlr for
 * the xenstore handshake in backendconnect.
 * NOTE(review): the tx page is memset to 0 here but no memset of the
 * rx page is visible -- an elided line between the two sections may
 * clear it; confirm against the full file.  The function's return
 * value (consumed byte count, per the caller's 'p += ringinit(...)')
 * is also in elided lines.
 */
145 ringinit(Ctlr *ctlr, char *a)
147 netif_tx_sring_t *txr;
148 netif_rx_sring_t *rxr;
150 txr = (netif_tx_sring_t*)a;
151 memset(txr, 0, BY2PG);
152 SHARED_RING_INIT(txr);
153 FRONT_RING_INIT(&ctlr->txring, txr, BY2PG);
154 ctlr->txringref = shareframe(ctlr->backend, txr, 1);
156 rxr = (netif_rx_sring_t*)(a+BY2PG);	/* rx ring occupies the page after the tx ring */
157 SHARED_RING_INIT(rxr);
158 FRONT_RING_INIT(&ctlr->rxring, rxr, BY2PG);
159 ctlr->rxringref = shareframe(ctlr->backend, rxr, 1);
/*
 * vifsend: transmit one ethernet frame.  Takes a free tx frame off the
 * controller's free list (under txlock), builds a netif_tx_request
 * referring to that frame's pre-granted page, copies the packet
 * payload into the frame and queues the request; returns
 * puttxrequest's notify indication to the caller.
 * NOTE(review): the assignments of tr.id and tr.size (and the
 * declarations of tx/id, plus any freeing of bp) are in lines elided
 * from this view -- confirm against the full file.
 */
165 vifsend(Ctlr *ctlr, Block *bp)
167 netif_tx_request_t tr;
171 ilock(&ctlr->txlock);
172 tx = ctlr->freetxframe;	/* pop head of the free-frame list */
173 ctlr->freetxframe = tx->tf.next;
174 iunlock(&ctlr->txlock);
175 id = tx - ctlr->txframes;	/* frame index doubles as the request id */
176 tr.gref = ctlr->txrefs[id];	/* grant ref issued for this frame at attach time */
177 tr.offset = tx->tf.data - (char*)tx;
178 tr.flags = 0; // XXX checksum?
181 memmove(tx->tf.data, bp->rp, tr.size);
182 return puttxrequest(ctlr, &tr);
/*
 * vifsenddone: handle a transmit-completion response by returning the
 * corresponding tx frame to the free list (under txlock) so vifsend
 * can reuse it.  The id-validity check is still outstanding (XXX).
 * NOTE(review): any status accounting and the function's return value
 * (the caller treats it as "wake tx waiters") are in lines elided from
 * this view.
 */
186 vifsenddone(Ctlr *ctlr, netif_tx_response_t *tr)
190 tx = &ctlr->txframes[tr->id]; // XXX check validity of id
191 ilock(&ctlr->txlock);
192 tx->tf.next = ctlr->freetxframe;	/* push frame back on the free list */
193 ctlr->freetxframe = tx;
194 iunlock(&ctlr->txlock);
/*
 * vifrecv: hand a receive frame to the backend by posting an rx
 * request for it.  In rx-copy mode the grant ref issued at attach time
 * is reused (line 207); otherwise the page is donated to the backend
 * (donateframe, line 209) and the fresh ref recorded.
 * NOTE(review): the if/else that selects between those two lines, and
 * the rr.id/rr.gref assignments, are in lines elided from this view --
 * presumably keyed on ctlr->rxcopy; confirm against the full file.
 */
199 vifrecv(Ctlr *ctlr, Rxframe *rx)
201 netif_rx_request_t rr;
205 id = rx - ctlr->rxframes;	/* frame index doubles as the request id */
207 ref = ctlr->rxrefs[id];
209 ref = donateframe(ctlr->backend, rx);
210 ctlr->rxrefs[id] = ref;
214 return putrxrequest(ctlr, &rr);
/*
 * vifrecvdone: process one received-packet response.  Reclaims the
 * granted/donated page (acceptframe), drops error/empty packets
 * (status <= 0), copies the payload into a freshly allocated Block and
 * marks hardware-validated checksums before the (elided) hand-off to
 * the ether layer.  The id-validity check is still outstanding (XXX).
 * NOTE(review): the error paths after the status and allocation checks,
 * the re-posting of the frame via vifrecv, and the etheriq() delivery
 * are in lines elided from this view -- confirm against the full file.
 */
218 vifrecvdone(Ether *ether, netif_rx_response_t *rr)
226 rx = &ctlr->rxframes[rr->id]; // XXX check validity of id
228 acceptframe(ctlr->rxrefs[rr->id], rx);	/* take the page back from the backend */
229 if ((len = rr->status) <= 0) {
234 if(len > sizeof(Etherpkt) || (bp = iallocb(sizeof(Etherpkt))) == nil) {
241 memmove(bp->base, rx->page + rr->offset, len);	/* copy payload out of the shared page */
245 bp->wp = bp->rp + len;
249 if (rr->flags & NETRXF_data_validated)
250 bp->flag |= Btcpck|Budpck;	/* backend already checked TCP/UDP checksums */
/* sleep condition for the tx kproc: true when a free tx frame is available ('a' is the Ctlr) */
258 return ((struct Ctlr*)a)->freetxframe != 0;
/* sleep condition for the tx kproc: true when the output queue has a block to send ('a' is the Ether) */
264 return qcanread(((struct Ether*)a)->oq);
/*
 * etherxenproc: per-interface transmit kernel process.  Loops (loop
 * construct in elided lines) waiting until both a free tx frame and an
 * outbound block are available, sends the block, and signals the
 * backend over the event channel when vifsend says a notify is needed.
 * NOTE(review): the enclosing for(;;), the freeing of bp and the test
 * of 'notify' guarding xenchannotify are in lines elided from this
 * view -- confirm against the full file.
 */
268 etherxenproc(void *a)
271 Ctlr *ctlr = ether->ctlr;
276 while (ctlr->freetxframe == 0)
277 sleep(&ctlr->wtxframe, wtxframe, ctlr);	/* wait for a completion to free a frame */
278 while ((bp = qget(ether->oq)) == 0)
279 sleep(&ctlr->wtxblock, wtxblock, ether);	/* wait for etherxentransmit to queue a block */
280 notify = vifsend(ctlr, bp);
283 xenchannotify(ctlr->evtchn);	/* kick the backend's event channel */
/*
 * etherxentransmit: ether-layer transmit hook.  The blocks themselves
 * sit on ether->oq; this just wakes the tx kproc sleeping in
 * etherxenproc on the wtxblock rendezvous.
 */
288 etherxentransmit(Ether *ether)
294 wakeup(&ctlr->wtxblock);
/*
 * etherxenintr: event-channel interrupt handler.  Drains all pending
 * rx responses (delivering packets via vifrecvdone), then all tx
 * responses (recycling frames via vifsenddone), and wakes the tx kproc
 * when a frame was freed.
 * NOTE(review): the body of the tx while-loop between lines 311 and
 * 315 (presumably setting a flag tested before the wakeup) is elided
 * from this view -- confirm against the full file.
 */
298 etherxenintr(Ureg*, void *a)
301 Ctlr *ctlr = ether->ctlr;
303 netif_tx_response_t tr;
304 netif_rx_response_t rr;
308 while (getrxresponse(ctlr, &rr))
309 vifrecvdone(ether, &rr);
310 while (gettxresponse(ctlr, &tr)) {
311 if (vifsenddone(ctlr, &tr))
315 wakeup(&ctlr->wtxframe);	/* a tx frame came free; unblock etherxenproc */
/*
 * etherxenctl: ctl-file handler.  Accepts "ea <macaddr>" to set the
 * interface's ethernet address (both ether->ea and ether->addr);
 * anything else raises an error in elided lines.
 * NOTE(review): the declarations of cb/ea, the freeing of cb, the
 * error path and the success return are all in lines elided from this
 * view -- confirm cb is freed on every path in the full file.
 */
319 etherxenctl(Ether *ether, void *buf, long n)
324 cb = parsecmd(buf, n);
326 && strcmp(cb->f[0], "ea")==0
327 && parseether(ea, cb->f[1]) == 0){
329 memmove(ether->ea, ea, Eaddrlen);
330 memmove(ether->addr, ether->ea, Eaddrlen);
335 return -1; /* not reached */
/*
 * backendconnect: perform the xenstore handshake with the backend
 * driver domain.  Publishes our ring grant references and event
 * channel under device/vif/<n>/, advances our state to Connected, then
 * polls the backend's state node every 50ms until it reports
 * Connected.
 * NOTE(review): line 351 sets request-rx-copy unconditionally while
 * line 349 prints ctlr->rxcopy; the elided line 350 presumably guards
 * it with if(ctlr->rxcopy) -- confirm against the full file.  No
 * timeout on the wait loop: a dead backend blocks attach forever.
 */
339 backendconnect(Ctlr *ctlr)
344 sprint(dir, "device/vif/%d/", ctlr->vifno);
345 xenstore_setd(dir, "state", XenbusStateInitialising);
346 xenstore_setd(dir, "tx-ring-ref", ctlr->txringref);
347 xenstore_setd(dir, "rx-ring-ref", ctlr->rxringref);
348 xenstore_setd(dir, "event-channel", ctlr->evtchn);
349 print("etherxen: request-rx-copy=%d\n", ctlr->rxcopy);
351 xenstore_setd(dir, "request-rx-copy", 1);
352 xenstore_setd(dir, "state", XenbusStateConnected);
353 xenstore_gets(dir, "backend", buf, sizeof buf);	/* path of the backend's xenstore directory */
354 sprint(dir, "%s/", buf);
356 xenstore_gets(dir, "state", buf, sizeof buf);
357 while (strtol(buf, 0, 0) != XenbusStateConnected) {
358 print("etherxen: waiting for vif %d to connect\n", ctlr->vifno);
359 tsleep(&up->sleep, return0, 0, 50);	/* poll backend state at 50ms intervals */
360 xenstore_gets(dir, "state", buf, sizeof buf);
/*
 * etherxenattach: first-open initialisation, serialised by attachlock
 * and done only once (attached flag).  Allocates one contiguous span
 * of pages: 2 for the tx/rx shared rings, Ntb for transmit frames and
 * Nrb for receive frames.  Tx frames are chained into a free list and
 * granted read-only to the backend (shareframe(..., 0)); rx frames are
 * granted writably (shareframe(..., 1)) and posted immediately via
 * vifrecv.  Finally allocates the event channel, hooks the interrupt,
 * starts the tx kproc and completes the xenstore handshake.
 * NOTE(review): the malloc results for txrefs/rxrefs are not
 * nil-checked in the visible lines, and the setting of ctlr->attached
 * plus loop-interior lines (387-393) are elided -- confirm against the
 * full file.
 */
365 etherxenattach(Ether *ether)
372 LOG(dprint("etherxenattach\n");)
374 qlock(&ctlr->attachlock);
375 if (ctlr->attached) {
376 qunlock(&ctlr->attachlock);
380 npage = 2 + Ntb + Nrb;	/* ring pages + tx frames + rx frames */
381 p = (char*)xspanalloc(npage<<PGSHIFT, BY2PG, 0);
382 p += ringinit(ctlr, p);	/* rings consume the first pages of the span */
383 ctlr->txrefs = malloc(Ntb*sizeof(int));
384 ctlr->rxrefs = malloc(Nrb*sizeof(int));
385 ctlr->txframes = (Txframe*)p;
386 for (i = 0; i < Ntb; i++, p += BY2PG) {
389 tx->tf.next = tx + 1;	/* chain frames into the free list */
392 ctlr->txrefs[i] = shareframe(ctlr->backend, tx, 0);	/* read-only grant: backend only reads tx data */
394 ctlr->freetxframe = ctlr->txframes;
395 ctlr->rxframes = (Rxframe*)p;
396 for (i = 0; i < Nrb; i++, p += BY2PG) {
398 ctlr->rxrefs[i] = shareframe(ctlr->backend, (Rxframe*)p, 1);	/* writable grant: backend fills rx pages */
399 vifrecv(ctlr, (Rxframe*)p);	/* post the buffer to the backend right away */
402 ctlr->evtchn = xenchanalloc(ctlr->backend);
403 intrenable(ctlr->evtchn, etherxenintr, ether, BUSUNKNOWN, "vif");
405 kproc("vif", etherxenproc, ether);
406 backendconnect(ctlr);
408 qunlock(&ctlr->attachlock);
/*
 * etherxenmulticast: multicast hook required by the ether interface.
 * Body is in lines elided from this view -- presumably a no-op, since
 * the vif has no hardware multicast filter; confirm against the full
 * file.
 */
412 etherxenmulticast(void* arg, uchar* addr, int on)
/*
 * ifstat: format driver statistics (interrupt, tx/rx and error
 * counters) into a scratch buffer and copy the requested window of it
 * to the user via readstr.
 * NOTE(review): the error raise on malloc failure, the free(p) and the
 * return of 'len' are in lines elided from this view -- confirm p is
 * freed on every path in the full file.
 */
418 ifstat(Ether* ether, void* a, long n, ulong offset)
427 if((p = malloc(READSTR)) == nil)
429 l = snprint(p, READSTR, "intr: %lud\n", ctlr->interrupts);
430 l += snprint(p+l, READSTR-l, "transmits: %lud\n", ctlr->transmits);
431 l += snprint(p+l, READSTR-l, "receives: %lud\n", ctlr->receives);
432 l += snprint(p+l, READSTR-l, "txerrors: %lud\n", ctlr->txerrors);
433 l += snprint(p+l, READSTR-l, "rxerrors: %lud\n", ctlr->rxerrors);
434 snprint(p+l, READSTR-l, "rxoverflows: %lud\n", ctlr->rxoverflows);	/* last line: no need to advance l */
437 len = readstr(offset, buf, n, p);
/*
 * Interior of the probe routine (function header elided from this
 * view): for each vif directory in xenstore, read the backend domain
 * id, MAC address, backend path and feature-rx-copy flag; allocate and
 * zero a Ctlr; and fill in the Ether callbacks for the ether driver
 * framework.
 * NOTE(review): the malloc at line 468 is not nil-checked in the
 * visible lines, and line 486 enables the interrupt here while
 * etherxenattach also calls intrenable with the event channel --
 * confirm which registration is live (line 486 may be inside an
 * elided conditional or dead path) in the full file.
 */
454 sprint(dir, "device/vif/%d/", nvif);
455 if (xenstore_gets(dir, "backend-id", buf, sizeof buf) <= 0)
457 domid = strtol(buf, 0, 0);	/* domain id of the backend driver domain */
458 if (xenstore_gets(dir, "mac", buf, sizeof buf) <= 0)
460 if (parseether(ea, buf) < 0)
462 if (xenstore_gets(dir, "backend", buf, sizeof buf) <= 0)
464 sprint(dir, "%s/", buf);
466 if (xenstore_gets(dir, "feature-rx-copy", buf, sizeof buf) >= 0)
467 rxcopy = strtol(buf, 0, 0);	/* backend supports copying rx data into our pages */
468 ether->ctlr = ctlr = malloc(sizeof(Ctlr));
469 memset(ctlr, 0, sizeof(Ctlr));
470 ctlr->backend = domid;
471 ctlr->vifno = nvif++;
472 ctlr->rxcopy = rxcopy;
474 memmove(ether->ea, ea, sizeof ether->ea);
475 ether->mbps = 100; // XXX what speed?
476 ether->attach = etherxenattach;
477 ether->transmit = etherxentransmit;
479 ether->tbdf = BUSUNKNOWN;	/* virtual device: no PCI bus/dev/fn */
480 ether->ifstat = ifstat;
481 ether->ctl = etherxenctl;
482 ether->promiscuous = nil;	/* vif has no promiscuous mode */
483 ether->multicast = etherxenmulticast;
486 intrenable(ether->irq, etherxenintr, ether, ether->tbdf, ether->name);
/* register the "xen" card probe with the ether driver framework (interior of the link function) */
494 addethercard("xen", pnp);