2 * marvell kirkwood gigabit ethernet (88e1116 and 88e1121) driver
3 * (as found in the sheevaplug, openrd and guruplug).
4 * the main difference is the flavour of phy kludgery necessary.
6 * from /public/doc/marvell/88f61xx.kirkwood.pdf,
7 * /public/doc/marvell/88e1116.pdf, and
8 * /public/doc/marvell/88e1121r.pdf.
12 #include "../port/lib.h"
17 #include "../port/error.h"
18 #include "../port/netif.h"
24 #define MIIDBG if(0)iprint
26 #define WINATTR(v) (((v) & MASK(8)) << 8)
27 #define WINSIZE(v) (((v)/(64*1024) - 1) << 16)
33 Rxblklen = 2+1522, /* ifc. supplies first 2 bytes as padding */
35 Maxrxintrsec = 20*1000, /* max. rx intrs. / sec */
36 Etherstuck = 70, /* must send or receive a packet in this many sec.s */
41 Pass = 1, /* accept packets */
43 Qno = 0, /* do everything on queue zero */
46 typedef struct Ctlr Ctlr;
47 typedef struct Gbereg Gbereg;
48 typedef struct Mibstats Mibstats;
57 /* hardware receive buffer descriptor */
60 ulong countsize; /* bytes, buffer size */
61 ulong buf; /* phys. addr. of packet buffer */
62 ulong next; /* phys. addr. of next Rx */
65 /* hardware transmit buffer descriptor */
68 ulong countchk; /* bytes, checksum */
69 ulong buf; /* phys. addr. of packet buffer */
70 ulong next; /* phys. addr. of next Tx */
/* fixed by hw; part of Gberegs */
	/* byte and packet counters; hardware registers clear on read */
	uvlong	rxby;		/* good bytes rcv'd */
	ulong	badrxby;	/* bad bytes rcv'd */
	ulong	mactxerr;	/* tx err pkts */
	ulong	rxpkt;		/* good pkts rcv'd */
	ulong	badrxpkt;	/* bad pkts rcv'd */
	ulong	rxbcastpkt;	/* b'cast pkts rcv'd */
	ulong	rxmcastpkt;	/* m'cast pkts rcv'd */
	/* received-size histogram */
	ulong	rx64;		/* pkts <= 64 bytes */
	ulong	rx65_127;	/* pkts 65-127 bytes */
	ulong	rx128_255;	/* pkts 128-255 bytes */
	ulong	rx256_511;	/* pkts 256-511 bytes */
	ulong	rx512_1023;	/* pkts 512-1023 bytes */
	ulong	rx1024_max;	/* pkts >= 1024 bytes */
	uvlong	txby;		/* good bytes sent */
	ulong	txpkt;		/* good pkts sent */
	/* half-duplex: pkts dropped due to excessive collisions */
	ulong	txmcastpkt;	/* m'cast pkts sent */
	ulong	txbcastpkt;	/* b'cast pkts sent */
	/* flow-control (mac control) frame counters */
	ulong	badmacctlpkts;	/* bad mac ctl pkts */
	ulong	txflctl;	/* flow-control pkts sent */
	ulong	rxflctl;	/* good flow-control pkts rcv'd */
	ulong	badrxflctl;	/* bad flow-control pkts rcv'd */
	/* receive error counters */
	ulong	rxundersized;	/* runts */
	ulong	rxfrags;	/* fragments rcv'd */
	ulong	rxtoobig;	/* oversized pkts rcv'd */
	ulong	rxjabber;	/* jabber pkts rcv'd */
	ulong	rxerr;		/* rx error events */
	ulong	crcerr;		/* crc error events */
	ulong	collisions;	/* collision events */
	ulong	latecoll;	/* late collisions */
132 Rx *rx; /* receive descriptors */
133 Block *rxb[Nrx]; /* blocks belonging to the descriptors */
134 int rxhead; /* descr ethernet will write to next */
135 int rxtail; /* next descr that might need a buffer */
136 Rendez rrendez; /* interrupt wakes up read process */
141 int txhead; /* next descr we can use for new packet */
142 int txtail; /* next descr to reclaim on tx complete */
159 #define Rxqon(q) (1<<(q))
160 #define Txqon(q) (1<<(q))
166 /* sdma config, sdc bits */
172 SDCrifb = 1<<0, /* rx intr on pkt boundaries */
173 #define SDCrxburst(v) ((v)<<1)
174 SDCrxnobyteswap = 1<<4,
175 SDCtxnobyteswap = 1<<5,
176 SDCswap64byte = 1<<6,
177 #define SDCtxburst(v) ((v)<<22)
178 /* rx intr ipg (inter packet gap) */
179 #define SDCipgintrx(v) ((((v)>>15) & 1)<<25) | (((v) & MASK(15))<<7)
182 PCFGupromisc = 1<<0, /* unicast promiscuous mode */
183 #define Rxqdefault(q) ((q)<<1)
184 #define Rxqarp(q) ((q)<<4)
185 PCFGbcrejectnoiparp = 1<<7,
186 PCFGbcrejectip = 1<<8,
187 PCFGbcrejectarp = 1<<9,
188 PCFGamnotxes = 1<<12, /* auto mode, no summary update on tx */
189 PCFGtcpq = 1<<14, /* capture tcp frames to tcpq */
190 PCFGudpq = 1<<15, /* capture udp frames to udpq */
191 #define Rxqtcp(q) ((q)<<16)
192 #define Rxqudp(q) ((q)<<19)
193 #define Rxqbpdu(q) ((q)<<22)
194 PCFGrxcs = 1<<25, /* rx tcp checksum mode with header */
198 PCFGXcrcoff = 1<<2, /* no ethernet crc */
200 /* port serial control0, psc0 bits */
202 PSC0forcelinkup = 1<<1,
203 PSC0an_dplxoff = 1<<2, /* an_ = auto. negotiate */
204 PSC0an_flctloff = 1<<3,
205 PSC0an_pauseadv = 1<<4,
206 PSC0nofrclinkdown = 1<<10,
207 PSC0an_spdoff = 1<<13,
208 PSC0dteadv = 1<<14, /* dte advertise */
210 /* max. input pkt size */
211 #define PSC0mru(v) ((v)<<17)
212 PSC0mrumask = PSC0mru(MASK(3)),
213 PSC0mru1518 = 0, /* 1500+2* 6(addrs) +2 + 4(crc) */
214 PSC0mru1522, /* 1518 + 4(vlan tags) */
215 PSC0mru1552, /* `baby giant' */
216 PSC0mru9022, /* `jumbo' */
217 PSC0mru9192, /* bigger jumbo */
218 PSC0mru9700, /* still bigger jumbo */
220 PSC0fd_frc = 1<<21, /* force full duplex */
221 PSC0flctlfrc = 1<<22,
222 PSC0gmiispd_gbfrc = 1<<23,
223 PSC0miispdfrc100mbps = 1<<24,
225 /* port status 0, ps0 bits */
227 PS0fd = 1<<2, /* full duplex */
230 PS0mii100mbps = 1<<5,
232 PS0txfifoempty = 1<<10,
233 PS0rxfifo1empty = 1<<11,
234 PS0rxfifo2empty = 1<<12,
236 /* port serial control 1, psc1 bits */
239 PSC1rgmii = 1<<3, /* enable RGMII */
240 PSC1portreset = 1<<4,
241 PSC1clockbypass = 1<<5,
243 PSC1iban_bypass = 1<<7,
244 PSC1iban_restart= 1<<8,
246 PSC1encolonbp = 1<<15, /* "collision during back-pressure mib counting" */
247 PSC1coldomlimmask= MASK(6)<<16,
248 #define PSC1coldomlim(v) (((v) & MASK(6))<<16)
249 PSC1miiallowoddpreamble = 1<<22,
251 /* port status 1, ps1 bits */
255 PS1syncfail10ms = 1<<3,
257 PS1inbandan_bypassed = 1<<5,
258 PS1serdesplllocked = 1<<6,
263 /* rx buf returned to cpu ownership, or frame reception finished */
265 Iextend = 1<<1, /* IEsum of irqe set */
266 #define Irxbufferq(q) (1<<((q)+2)) /* rx buf returned to cpu ownership */
267 Irxerr = 1<<10, /* input ring full, usually */
268 #define Irxerrq(q) (1<<((q)+11))
269 #define Itxendq(q) (1<<((q)+19)) /* tx dma stopped for q */
272 /* irq extended, irqe bits */
273 #define IEtxbufferq(q) (1<<((q)+0)) /* tx buf returned to cpu ownership */
274 #define IEtxerrq(q) (1<<((q)+8))
278 IEtxunderrun = 1<<19,
280 IEintaddrerr = 1<<23,
284 /* tx fifo urgent threshold (tx interrupt coalescing), pxtfut */
285 #define TFUTipginttx(v) (((v) & MASK(16))<<4);
287 /* minimal frame size, mfs */
296 /* receive descriptor status */
303 RCSl4chkmask = MASK(16),
313 RCSip4headok = 1<<25,
316 RCSunknownaddr = 1<<28,
317 RCSenableintr = 1<<29,
321 /* transmit descriptor status */
328 TCSl4chkmode = 1<<10,
329 TCSipv4hdlenshift= 11,
337 TCSenableintr = 1<<23,
344 PhysmiTimeout = 10000, /* what units? in ms. */
345 Physmidataoff = 0, /* Data */
346 Physmidatamask = 0xffff<<Physmidataoff,
348 Physmiaddroff = 16, /* PHY device addr */
349 Physmiaddrmask = 0x1f << Physmiaddroff,
352 Physmiopmask = 3<<Physmiop,
353 PhysmiopWr = 0<<Physmiop,
354 PhysmiopRd = 1<<Physmiop,
356 PhysmiReadok = 1<<27,
359 SmiRegaddroff = 21, /* PHY device register addr */
360 SmiRegaddrmask = 0x1f << SmiRegaddroff,
364 ulong phy; /* PHY address */
365 ulong smi; /* serial mgmt. interface */
366 ulong euda; /* ether default address */
367 ulong eudid; /* ether default id */
368 uchar _pad0[0x80-0x10];
371 ulong euirq; /* interrupt cause */
372 ulong euirqmask; /* interrupt mask */
373 uchar _pad1[0x94-0x88];
374 ulong euea; /* error address */
375 ulong euiae; /* internal error address */
376 uchar _pad2[0xb0-0x9c];
377 ulong euc; /* control */
378 uchar _pad3[0x200-0xb4];
380 ulong base; /* window base */
381 ulong size; /* window size */
383 uchar _pad4[0x280-0x230];
384 ulong harr[4]; /* high address remap */
385 ulong bare; /* base address enable */
386 ulong epap; /* port access protect */
387 uchar _pad5[0x400-0x298];
389 ulong portcfg; /* port configuration */
390 ulong portcfgx; /* port config. extend */
391 ulong mii; /* mii serial parameters */
393 ulong evlane; /* vlan ether type */
394 ulong macal; /* mac address low */
395 ulong macah; /* mac address high */
396 ulong sdc; /* sdma config. */
397 ulong dscp[7]; /* ip diff. serv. code point -> pri */
398 ulong psc0; /* port serial control 0 */
399 ulong vpt2p; /* vlan priority tag -> pri */
400 ulong ps0; /* ether port status 0 */
401 ulong tqc; /* transmit queue command */
402 ulong psc1; /* port serial control 1 */
403 ulong ps1; /* ether port status 1 */
404 ulong mvhdr; /* marvell header */
408 ulong irq; /* interrupt cause; some rw0c bits */
409 ulong irqe; /* " " extended; some rw0c bits */
410 ulong irqmask; /* interrupt mask (actually enable) */
411 ulong irqemask; /* " " extended */
414 ulong pxtfut; /* port tx fifo urgent threshold */
416 ulong pxmfs; /* port rx minimum frame size */
420 * # of input frames discarded by addr filtering or lack of resources;
423 ulong pxdfc; /* port rx discard frame counter */
424 ulong pxofc; /* port overrun frame counter */
426 ulong piae; /* port internal address error */
427 uchar _pad13[0x4bc-0x498];
428 ulong etherprio; /* ether type priority */
429 uchar _pad14[0x4dc-0x4c0];
430 ulong tqfpc; /* tx queue fixed priority config. */
431 ulong pttbrc; /* port tx token-bucket rate config. */
432 ulong tqc1; /* tx queue command 1 */
433 ulong pmtu; /* port maximum transmit unit */
434 ulong pmtbs; /* port maximum token bucket size */
435 uchar _pad15[0x600-0x4f0];
439 ulong r; /* phys. addr.: cur. rx desc. ptrs */
441 ulong rqc; /* rx queue command */
442 ulong tcsdp; /* phys. addr.: cur. tx desc. ptr */
443 uchar _pad16[0x6c0-0x688];
445 ulong tcqdp[8]; /* phys. addr.: cur. tx q. desc. ptr */
446 uchar _pad17[0x700-0x6e0];
449 ulong tbctr; /* queue tx token-bucket counter */
450 ulong tbcfg; /* tx queue token-bucket config. */
451 ulong acfg; /* tx queue arbiter config. */
454 ulong pttbc; /* port tx token-bucket counter */
455 uchar _pad18[0x7a8-0x784];
457 ulong ipg2; /* tx queue ipg */
461 ulong htlp; /* high token in low packet */
462 ulong htap; /* high token in async packet */
463 ulong ltap; /* low token in async packet */
465 ulong ts; /* tx speed */
466 uchar _pad22[0x1000-0x7d4];
468 /* mac mib counters: statistics */
470 uchar _pad23[0x1400-0x1080];
472 /* multicast filtering; each byte: Qno<<1 | Pass */
473 ulong dfsmt[64]; /* dest addr filter special m'cast table */
474 ulong dfomt[64]; /* dest addr filter other m'cast table */
475 /* unicast filtering */
476 ulong dfut[4]; /* dest addr filter unicast table */
479 static Ctlr *ctlrs[MaxEther];
480 static uchar zeroea[Eaddrlen];
482 static void getmibstats(Ctlr *);
487 /* freeb(b) will have previously decremented b->ref to 0; raise to 1 */
490 (uchar*)((uintptr)(b->lim - Rxblklen) & ~(Bufalign - 1));
491 assert(((uintptr)b->rp & (Bufalign - 1)) == 0);
495 b->next = freeblocks.head;
497 iunlock(&freeblocks);
508 freeblocks.head = b->next;
512 iunlock(&freeblocks);
	Gbereg *reg = ctlr->reg;

	/*
	 * if the hardware's current-rx-descriptor pointer has been cleared
	 * (e.g. by an error stop), point it back at our ring, then restart
	 * queue 0 if all rx queues have stopped.
	 */
	if (reg->crdp[Qno].r == 0)
		reg->crdp[Qno].r = PADDR(&ctlr->rx[ctlr->rxhead]);
	if ((reg->rqc & 0xff) == 0)		/* all queues are stopped? */
		reg->rqc = Rxqon(Qno);		/* restart */
	Gbereg *reg = ctlr->reg;

	/*
	 * mirror of the rx case: re-seed the hardware's tx queue descriptor
	 * pointer if it was cleared, then restart queue 0 if all tx queues
	 * have stopped.
	 */
	if (reg->tcqdp[Qno] == 0)
		reg->tcqdp[Qno] = PADDR(&ctlr->tx[ctlr->txhead]);
	if ((reg->tqc & 0xff) == 0)		/* all q's stopped? */
		reg->tqc = Txqon(Qno);		/* restart */
/*
 * refill empty slots of the rx ring with fresh buffers and hand their
 * descriptors back to the ethernet dma engine.
 */
rxreplenish(Ctlr *ctlr)
	while(ctlr->rxb[ctlr->rxtail] == nil) {
			iprint("#l%d: rxreplenish out of buffers\n",
				ctlr->ether->ctlrno);
		ctlr->rxb[ctlr->rxtail] = b;

		/* set up uncached receive descriptor */
		r = &ctlr->rx[ctlr->rxtail];
		assert(((uintptr)r & (Descralign - 1)) == 0);
		r->countsize = ROUNDUP(Rxblklen, 8);
		r->buf = PADDR(b->rp);
		/* hand ownership to the dma engine; interrupt on completion */
		r->cs = RCSdmaown | RCSenableintr;
		ctlr->rxtail = NEXT(ctlr->rxtail, Nrx);
572 dump(uchar *bp, long max)
576 for (; max > 0; max--, bp++)
577 iprint("%02.2ux ", *bp);
582 etheractive(Ether *ether)
584 ether->starttime = TK2MS(MACHP(0)->ticks)/1000;
/*
 * watchdog: complain if no packet has been sent or received within
 * Etherstuck seconds of activity (starttime is kept in seconds).
 */
ethercheck(Ether *ether)
	if (ether->starttime != 0 &&
	    TK2MS(MACHP(0)->ticks)/1000 - ether->starttime > Etherstuck) {
		if (ether->ctlrno == 0)	/* only complain about main ether */
			iprint("#l%d: ethernet stuck\n", ether->ctlrno);
599 receive(Ether *ether)
604 Ctlr *ctlr = ether->ctlr;
608 for (i = Nrx-2; i > 0; i--) {
609 r = &ctlr->rx[ctlr->rxhead]; /* *r is uncached */
610 assert(((uintptr)r & (Descralign - 1)) == 0);
611 if(r->cs & RCSdmaown) /* descriptor busy? */
614 b = ctlr->rxb[ctlr->rxhead]; /* got input buffer? */
616 panic("ether1116: nil ctlr->rxb[ctlr->rxhead] "
618 ctlr->rxb[ctlr->rxhead] = nil;
619 ctlr->rxhead = NEXT(ctlr->rxhead, Nrx);
621 if((r->cs & (RCSfirst|RCSlast)) != (RCSfirst|RCSlast)) {
622 ctlr->nofirstlast++; /* partial packet */
626 if(r->cs & RCSmacerr) {
631 n = r->countsize >> 16; /* TODO includes 2 pad bytes? */
632 assert(n >= 2 && n < 2048);
634 /* clear any cached packet or part thereof */
635 l2cacheuinvse(b->rp, n+2);
636 cachedinvse(b->rp, n+2);
639 * skip hardware padding intended to align ipv4 address
640 * in memory (mv-s104860-u0 §8.3.4.1)
643 etheriq(ether, b, 1);
645 if (i % (Nrx / 2) == 0) {
/*
 * reclaim descriptors of completed transmissions and free their blocks,
 * advancing txtail toward txhead; stop at the first descriptor still
 * owned by the dma engine.
 */
txreplenish(Ether *ether)			/* free transmitted packets */
	while(ctlr->txtail != ctlr->txhead) {
		/* ctlr->tx is uncached */
		if(ctlr->tx[ctlr->txtail].cs & TCSdmaown)
		if(ctlr->txb[ctlr->txtail] == nil)
			panic("no block for sent packet?!");
		freeb(ctlr->txb[ctlr->txtail]);
		ctlr->txb[ctlr->txtail] = nil;
		ctlr->txtail = NEXT(ctlr->txtail, Ntx);
675 * transmit strategy: fill the output ring as far as possible,
676 * perhaps leaving a few spare; kick off the output and take
677 * an interrupt only when the transmit queue is empty.
680 transmit(Ether *ether)
684 Ctlr *ctlr = ether->ctlr;
685 Gbereg *reg = ctlr->reg;
690 txreplenish(ether); /* reap old packets */
692 /* queue new packets; use at most half the tx descs to avoid livelock */
694 for (i = Ntx/2 - 2; i > 0; i--) {
695 t = &ctlr->tx[ctlr->txhead]; /* *t is uncached */
696 assert(((uintptr)t & (Descralign - 1)) == 0);
697 if(t->cs & TCSdmaown) { /* descriptor busy? */
702 b = qget(ether->oq); /* outgoing packet? */
706 if(len < ether->minmtu || len > ether->maxmtu) {
710 ctlr->txb[ctlr->txhead] = b;
712 /* make sure the whole packet is in memory */
713 cachedwbse(b->rp, len);
714 l2cacheuwbse(b->rp, len);
716 /* set up the transmit descriptor */
717 t->buf = PADDR(b->rp);
718 t->countchk = len << 16;
722 t->cs = TCSpadding | TCSfirst | TCSlast | TCSdmaown |
727 ctlr->txhead = NEXT(ctlr->txhead, Ntx);
732 reg->irqmask |= Itxendq(Qno);
733 reg->irqemask |= IEtxerrq(Qno) | IEtxunderrun;
739 dumprxdescs(Ctlr *ctlr)
742 Gbereg *reg = ctlr->reg;
744 iprint("\nrxhead %d rxtail %d; txcdp %#p rxcdp %#p\n",
745 ctlr->rxhead, ctlr->rxtail, reg->tcqdp[Qno], reg->crdp[Qno].r);
746 for (i = 0; i < Nrx; i++) {
747 iprint("rxb %d @ %#p: %#p\n", i, &ctlr->rxb[i], ctlr->rxb[i]);
750 for (i = 0; i < Nrx; i++) {
751 iprint("rx %d @ %#p: cs %#lux countsize %lud buf %#lux next %#lux\n",
752 i, &ctlr->rx[i], ctlr->rx[i].cs,
753 ctlr->rx[i].countsize >> 3, ctlr->rx[i].buf,
763 return ((Ctlr*)ctlr)->haveinput != 0;
767 * process any packets in the input ring.
768 * also sum mib stats frequently to avoid the overflow
769 * mentioned in the errata.
780 tsleep(&ctlr->rrendez, gotinput, ctlr, 10*1000);
783 if (ctlr->haveinput) {
793 interrupt(Ureg*, void *arg)
795 ulong irq, irqe, handled;
797 Ctlr *ctlr = ether->ctlr;
798 Gbereg *reg = ctlr->reg;
803 reg->irqe = 0; /* extinguish intr causes */
804 reg->irq = 0; /* extinguish intr causes */
807 if(irq & (Irx | Irxbufferq(Qno))) {
809 * letting a kproc process the input takes far less real time
810 * than doing it all at interrupt level.
813 wakeup(&ctlr->rrendez);
814 irq &= ~(Irx | Irxbufferq(Qno));
819 if(irq & Itxendq(Qno)) { /* transmit ring empty? */
820 reg->irqmask &= ~Itxendq(Qno); /* prevent more interrupts */
821 reg->irqemask &= ~(IEtxerrq(Qno) | IEtxunderrun);
823 irq &= ~Itxendq(Qno);
829 * IElinkchg appears to only be set when unplugging.
830 * autonegotiation is likely not done yet, so linkup not valid,
831 * thus we note the link change here, and check for
832 * that and autonegotiation done below.
834 if(irqe & IEphystschg) {
835 ether->link = (reg->ps0 & PS0linkup) != 0;
838 if(irqe & IEtxerrq(Qno))
840 if(irqe & IErxoverrun)
842 if(irqe & IEtxunderrun)
844 if(irqe & (IEphystschg | IEtxerrq(Qno) | IErxoverrun |
849 if (irq & Irxerr) { /* nil desc. ptr. or desc. owned by cpu */
850 ether->buffs++; /* approx. error */
852 /* if the input ring is full, drain it */
854 wakeup(&ctlr->rrendez);
856 if(irq & (Irxerr | Irxerrq(Qno)))
858 irq &= ~(Irxerr | Irxerrq(Qno));
861 if(ether->linkchg && (reg->ps1 & PS1an_done)) {
863 ether->link = (reg->ps0 & PS0linkup) != 0;
870 irqe &= ~IEtxbufferq(Qno);
871 if (irq == 0 && irqe == 0) {
872 /* seems to be triggered by continuous output */
873 // iprint("ether1116: spurious interrupt\n");
875 iprint("ether1116: interrupt cause unknown; "
876 "irq %#lux irqe %#lux\n", irq, irqe);
878 intrclear(Irqlo, ether->irq);
882 promiscuous(void *arg, int on)
885 Ctlr *ctlr = ether->ctlr;
886 Gbereg *reg = ctlr->reg;
891 reg->portcfg |= PCFGupromisc;
893 reg->portcfg &= ~PCFGupromisc;
898 multicast(void *, uchar *, int)
900 /* nothing to do; we always accept multicast */
903 static void quiesce(Gbereg *reg);
906 shutdown(Ether *ether)
909 Ctlr *ctlr = ether->ctlr;
910 Gbereg *reg = ctlr->reg;
914 reg->euc |= Portreset;
919 reg->euc &= ~Portreset;
923 reg->psc0 = 0; /* no PSC0porton */
924 reg->psc1 |= PSC1portreset;
927 reg->psc1 &= ~PSC1portreset;
930 for (i = 0; i < nelem(reg->tcqdp); i++)
932 for (i = 0; i < nelem(reg->crdp); i++)
943 static Cmdtab ctlmsg[] = {
948 ctl(Ether *e, void *p, long n)
952 Ctlr *ctlr = e->ctlr;
953 Gbereg *reg = ctlr->reg;
961 ct = lookupcmd(cb, ctlmsg, nelem(ctlmsg));
964 if(strcmp(cb->f[1], "on") == 0) {
965 /* incoming packet queue doesn't expect jumbo frames */
966 error("jumbo disabled");
967 reg->psc0 = (reg->psc0 & ~PSC0mrumask) |
968 PSC0mru(PSC0mru9022);
970 } else if(strcmp(cb->f[1], "off") == 0) {
971 reg->psc0 = (reg->psc0 & ~PSC0mrumask) |
972 PSC0mru(PSC0mru1522);
973 e->maxmtu = ETHERMAXTU;
/*
 * spin until the given SMI status bit (e.g. PhysmiBusy) clears,
 * giving up after PhysmiTimeout polls.
 */
smibusywait(Gbereg *reg, ulong waitbit)
	ulong timeout, smi_reg;

	timeout = PhysmiTimeout;
	/* wait till the SMI is not busy */
		/* read smi register */
		if (timeout-- == 0) {
			MIIDBG("SMI busy timeout\n");
	} while (smi_reg & waitbit);
/*
 * read PHY register ra of PHY pa via the SMI; returns the 16-bit value.
 * validates that pa and ra fit their register fields before issuing
 * the read.
 */
miird(Mii *mii, int pa, int ra)
	ulong smi_reg, timeout;

	reg = ((Ctlr*)mii->ctlr)->reg;

	/* reject addresses that would spill outside their bit fields */
	if ((pa<<Physmiaddroff) & ~Physmiaddrmask ||
	    (ra<<SmiRegaddroff) & ~SmiRegaddrmask)
	smibusywait(reg, PhysmiBusy);

	/* fill the phy address and register offset and read opcode */
	reg->smi = pa << Physmiaddroff | ra << SmiRegaddroff | PhysmiopRd;

	/* wait til read value is ready */
	timeout = PhysmiTimeout;
		if (timeout-- == 0) {
			MIIDBG("SMI read-valid timeout\n");
	} while (!(smi_reg & PhysmiReadok));

	/*
	 * Wait for the data to update in the SMI register.
	 * NOTE(review): this is a fixed settle delay after Readok is seen;
	 * presumably required by the hardware — confirm against the
	 * 88F61xx SMI register description.
	 */
	for (timeout = 0; timeout < PhysmiTimeout; timeout++)
	return reg->smi & Physmidatamask;
/*
 * write 16-bit value v to PHY register ra of PHY pa via the SMI.
 * validates that pa and ra fit their register fields before issuing
 * the write.
 */
miiwr(Mii *mii, int pa, int ra, int v)
	reg = ((Ctlr*)mii->ctlr)->reg;
	/* reject addresses that would spill outside their bit fields */
	if (((pa<<Physmiaddroff) & ~Physmiaddrmask) ||
	    ((ra<<SmiRegaddroff) & ~SmiRegaddrmask))
	smibusywait(reg, PhysmiBusy);

	/*
	 * fill in data, phy address and register offset; clearing the
	 * read bit selects the write opcode (PhysmiopWr == 0).
	 */
	smi_reg = v << Physmidataoff | pa << Physmiaddroff | ra << SmiRegaddroff;
	reg->smi = smi_reg & ~PhysmiopRd;
1066 #define MIIMODEL(idr2) (((idr2) >> 4) & MASK(6))
1072 Ouimarvell = 0x005043,
1074 /* idr2 mii/phy model numbers */
1075 Phy1000 = 0x00, /* 88E1000 Gb */
1076 Phy1011 = 0x02, /* 88E1011 Gb */
1077 Phy1000_3 = 0x03, /* 88E1000 Gb */
1078 Phy1000s = 0x04, /* 88E1000S Gb */
1079 Phy1000_5 = 0x05, /* 88E1000 Gb */
1080 Phy1000_6 = 0x06, /* 88E1000 Gb */
1081 Phy3082 = 0x08, /* 88E3082 10/100 */
1082 Phy1112 = 0x09, /* 88E1112 Gb */
1083 Phy1121r = 0x0b, /* says the 1121r manual */
1084 Phy1149 = 0x0b, /* 88E1149 Gb */
1085 Phy1111 = 0x0c, /* 88E1111 Gb */
1086 Phy1116 = 0x21, /* 88E1116 Gb */
1087 Phy1116r = 0x24, /* 88E1116R Gb */
1088 Phy1118 = 0x22, /* 88E1118 Gb */
1089 Phy3016 = 0x26, /* 88E3016 10/100 */
1092 static int hackflavour;
1095 * on openrd, ether0's phy has address 8, ether1's is ether0's 24.
1096 * on guruplug, ether0's is phy 0 and ether1's is ether0's phy 1.
1099 mymii(Mii* mii, int mask)
1103 int bit, ctlrno, oui, model, phyno, r, rmask;
1104 static int dualport, phyidx;
1105 static int phynos[NMiiPhy];
1108 ctlrno = ctlr->ether->ctlrno;
1110 /* first pass: figure out what kind of phy(s) we have. */
1113 for(phyno = 0; phyno < NMiiPhy; phyno++){
1115 if(!(mask & bit) || mii->mask & bit)
1117 if(mii->mir(mii, phyno, Bmsr) == -1)
1119 r = mii->mir(mii, phyno, Phyidr1);
1120 oui = (r & 0x3FFF)<<6;
1121 r = mii->mir(mii, phyno, Phyidr2);
1123 model = MIIMODEL(r);
1124 if (oui == 0xfffff && model == 0x3f)
1126 MIIDBG("ctlrno %d phy %d oui %#ux model %#ux\n",
1127 ctlrno, phyno, oui, model);
1128 if (oui == Ouimarvell &&
1129 (model == Phy1121r || model == Phy1116r))
1131 phynos[phyidx++] = phyno;
1133 hackflavour = dualport == 2 && phyidx == 2? Hackdual: Hacknone;
1134 MIIDBG("ether1116: %s-port phy\n",
1135 hackflavour == Hackdual? "dual": "single");
1139 * Probe through mii for PHYs in mask;
1140 * return the mask of those found in the current probe.
1141 * If the PHY has not already been probed, update
1142 * the Mii information.
1145 if (hackflavour == Hackdual && ctlrno < phyidx) {
1147 * openrd, guruplug or the like: use ether0's phys.
1148 * this is a nasty hack, but so is the hardware.
1150 MIIDBG("ctlrno %d using ctlrno 0's phyno %d\n",
1151 ctlrno, phynos[ctlrno]);
1152 ctlr->mii = mii = ctlrs[0]->mii;
1153 mask = 1 << phynos[ctlrno];
1156 for(phyno = 0; phyno < NMiiPhy; phyno++){
1160 if(mii->mask & bit){
1164 if(mii->mir(mii, phyno, Bmsr) == -1)
1166 r = mii->mir(mii, phyno, Phyidr1);
1167 oui = (r & 0x3FFF)<<6;
1168 r = mii->mir(mii, phyno, Phyidr2);
1170 if(oui == 0xFFFFF || oui == 0)
1173 if((miiphy = malloc(sizeof(MiiPhy))) == nil)
1177 miiphy->phyno = phyno;
1183 mii->phy[phyno] = miiphy;
1184 if(ctlrno == 0 || hackflavour != Hackdual && mii->curphy == nil)
1185 mii->curphy = miiphy;
1195 kirkwoodmii(Ether *ether)
1203 if((ctlr->mii = malloc(sizeof(Mii))) == nil)
1205 ctlr->mii->ctlr = ctlr;
1206 ctlr->mii->mir = miird;
1207 ctlr->mii->miw = miiwr;
1209 if(mymii(ctlr->mii, ~0) == 0 || (phy = ctlr->mii->curphy) == nil){
1210 print("#l%d: ether1116: init mii failure\n", ether->ctlrno);
1216 /* oui 005043 is marvell */
1217 MIIDBG("oui %#X phyno %d\n", phy->oui, phy->phyno);
1218 // TODO: does this make sense? shouldn't each phy be initialised?
1219 if((ctlr->ether->ctlrno == 0 || hackflavour != Hackdual) &&
1220 miistatus(ctlr->mii) < 0){
1221 miireset(ctlr->mii);
1222 MIIDBG("miireset\n");
1223 if(miiane(ctlr->mii, ~0, 0, ~0) < 0){
1224 iprint("miiane failed\n");
1227 MIIDBG("miistatus\n");
1228 miistatus(ctlr->mii);
1229 if(miird(ctlr->mii, phy->phyno, Bmsr) & BmsrLs){
1232 iprint("ether1116: autonegotiation failed\n");
1235 if(miird(ctlr->mii, phy->phyno, Bmsr) & BmsrAnc)
1239 if(miistatus(ctlr->mii) < 0)
1240 iprint("miistatus failed\n");
1242 iprint("ether1116: no link\n");
1243 phy->speed = 10; /* simple default */
1247 ether->mbps = phy->speed;
1248 MIIDBG("#l%d: kirkwoodmii: fd %d speed %d tfc %d rfc %d\n",
1249 ctlr->port, phy->fd, phy->speed, phy->tfc, phy->rfc);
1250 MIIDBG("mii done\n");
1254 enum { /* PHY register pages */
1267 miiregpage(Mii *mii, ulong dev, ulong page)
1269 miiwr(mii, dev, Eadr, page);
/*
 * one-time PHY setup: configure the LEDs, power up and configure the
 * RGMII interface (with the manual-mandated software reset after each
 * RGMII change), then set copper-page power/MDIX options.
 */
miiphyinit(Mii *mii)
	ctlr = (Ctlr*)mii->ctlr;
	MIIDBG("phy dev addr %lux\n", dev);

	/* leds link & activity */
	miiregpage(mii, dev, Pagled);
	/* low 4 bits == 1: on - link, blink - activity, off - no link */
	miiwr(mii, dev, Scr, (miird(mii, dev, Scr) & ~0xf) | 1);

	miiregpage(mii, dev, Pagrgmii);
	miiwr(mii, dev, Scr, miird(mii, dev, Scr) | Rgmiipwrup);
	/* must now do a software reset, says the manual */
	miireset(ctlr->mii);

	/*
	 * enable RGMII delay on Tx and Rx for CPU port.
	 * NOTE(review): `Rxtiming | Rxtiming` is a duplicate; given the
	 * intent stated above ("Tx and Rx"), this almost certainly should
	 * be `Txtiming | Rxtiming` — as written the Tx delay bit is never
	 * set.  Verify against the 88E1116 RGMII control register spec
	 * before changing.
	 */
	miiwr(mii, dev, Recr, miird(mii, dev, Recr) | Rxtiming | Rxtiming);
	/* must now do a software reset, says the manual */
	miireset(ctlr->mii);

	miiregpage(mii, dev, Pagcopper);
	miiwr(mii, dev, Scr,
		(miird(mii, dev, Scr) & ~(Pwrdown|Endetect)) | Mdix);
1311 quiesce(Gbereg *reg)
1317 reg->tqc = v << 8; /* stop active channels */
1320 reg->rqc = v << 8; /* stop active channels */
1321 /* wait for all queues to stop */
1322 while (reg->tqc & 0xFF || reg->rqc & 0xFF)
1327 p16(uchar *p, ulong v) /* convert big-endian short to bytes */
1334 p32(uchar *p, ulong v) /* convert big-endian long to bytes */
1343 * set ether->ea from hw mac address,
1344 * configure unicast filtering to accept it.
1347 archetheraddr(Ether *ether, Gbereg *reg, int rxqno)
1350 ulong nibble, ucreg, tbloff, regoff;
1353 p32(ea, reg->macah);
1354 p16(ea+4, reg->macal);
1355 if (memcmp(ea, zeroea, sizeof zeroea) == 0 && ether->ctlrno > 0) {
1356 /* hack: use ctlr[0]'s + ctlrno */
1357 memmove(ea, ctlrs[0]->ether->ea, Eaddrlen);
1358 ea[Eaddrlen-1] += ether->ctlrno;
1359 reg->macah = ea[0] << 24 | ea[1] << 16 | ea[2] << 8 | ea[3];
1360 reg->macal = ea[4] << 8 | ea[5];
1364 /* accept frames on ea */
1365 nibble = ea[5] & 0xf;
1366 tbloff = nibble / 4;
1367 regoff = nibble % 4;
1370 ucreg = reg->dfut[tbloff] & (0xff << regoff);
1371 ucreg |= (rxqno << 1 | Pass) << regoff;
1372 reg->dfut[tbloff] = ucreg;
1374 /* accept all multicast too. set up special & other tables. */
1375 memset(reg->dfsmt, Qno<<1 | Pass, sizeof reg->dfsmt);
1376 memset(reg->dfomt, Qno<<1 | Pass, sizeof reg->dfomt);
1381 cfgdramacc(Gbereg *reg)
1383 memset(reg->harr, 0, sizeof reg->harr);
1384 memset(reg->base, 0, sizeof reg->base);
1386 reg->bare = MASK(6) - MASK(2); /* disable wins 2-5 */
1387 /* this doesn't make any sense, but it's required */
1388 reg->epap = 3 << 2 | 3; /* full access for wins 0 & 1 */
1389 // reg->epap = 0; /* no access on access violation for all wins */
1392 reg->base[0].base = PHYSDRAM | WINATTR(Attrcs0) | Targdram;
1393 reg->base[0].size = WINSIZE(256*MB);
1394 reg->base[1].base = (PHYSDRAM + 256*MB) | WINATTR(Attrcs1) | Targdram;
1395 reg->base[1].size = WINSIZE(256*MB);
1400 ctlralloc(Ctlr *ctlr)
1408 for(i = 0; i < Nrxblks; i++) {
1409 b = iallocb(Rxblklen+Bufalign-1);
1411 iprint("ether1116: no memory for rx buffers\n");
1414 assert(b->ref == 1);
1415 b->wp = b->rp = (uchar*)
1416 ((uintptr)(b->lim - Rxblklen) & ~(Bufalign - 1));
1417 assert(((uintptr)b->rp & (Bufalign - 1)) == 0);
1419 b->next = freeblocks.head;
1420 freeblocks.head = b;
1422 iunlock(&freeblocks);
1425 * allocate uncached rx ring descriptors because rings are shared
1426 * with the ethernet controller and more than one fits in a cache line.
1428 ctlr->rx = ucallocalign(Nrx * sizeof(Rx), Descralign, 0);
1430 panic("ether1116: no memory for rx ring");
1431 for(i = 0; i < Nrx; i++) {
1433 assert(((uintptr)r & (Descralign - 1)) == 0);
1434 r->cs = 0; /* owned by software until r->buf is non-nil */
1436 r->next = PADDR(&ctlr->rx[NEXT(i, Nrx)]);
1439 ctlr->rxtail = ctlr->rxhead = 0;
1442 /* allocate uncached tx ring descriptors */
1443 ctlr->tx = ucallocalign(Ntx * sizeof(Tx), Descralign, 0);
1445 panic("ether1116: no memory for tx ring");
1446 for(i = 0; i < Ntx; i++) {
1448 assert(((uintptr)t & (Descralign - 1)) == 0);
1451 t->next = PADDR(&ctlr->tx[NEXT(i, Ntx)]);
1454 ctlr->txtail = ctlr->txhead = 0;
1458 ctlrinit(Ether *ether)
1461 Ctlr *ctlr = ether->ctlr;
1462 Gbereg *reg = ctlr->reg;
1463 static char name[KNAMELEN];
1464 static Ctlr fakectlr; /* bigger than 4K; keep off the stack */
1466 for (i = 0; i < nelem(reg->tcqdp); i++)
1468 for (i = 0; i < nelem(reg->crdp); i++)
1475 reg->tcqdp[Qno] = PADDR(&ctlr->tx[ctlr->txhead]);
1476 reg->crdp[Qno].r = PADDR(&ctlr->rx[ctlr->rxhead]);
1479 // dumprxdescs(ctlr);
1481 /* clear stats by reading them into fake ctlr */
1482 getmibstats(&fakectlr);
1484 reg->pxmfs = MFS40by; /* allow runts in */
1487 * ipg's (inter packet gaps) for interrupt coalescing,
1488 * values in units of 64 clock cycles. A full-sized
1489 * packet (1514 bytes) takes just over 12µs to transmit.
1491 if (CLOCKFREQ/(Maxrxintrsec*64) >= (1<<16))
1492 panic("rx coalescing value %d too big for short",
1493 CLOCKFREQ/(Maxrxintrsec*64));
1494 reg->sdc = SDCrifb | SDCrxburst(Burst16) | SDCtxburst(Burst16) |
1495 SDCrxnobyteswap | SDCtxnobyteswap |
1496 SDCipgintrx(CLOCKFREQ/(Maxrxintrsec*64));
1497 reg->pxtfut = 0; /* TFUTipginttx(CLOCKFREQ/(Maxrxintrsec*64)) */
1499 /* allow just these interrupts */
1500 /* guruplug generates Irxerr interrupts continually */
1501 reg->irqmask = Isum | Irx | Irxbufferq(Qno) | Irxerr | Itxendq(Qno);
1502 reg->irqemask = IEsum | IEtxerrq(Qno) | IEphystschg | IErxoverrun |
1510 /* send errors to end of memory */
1511 // reg->euda = PHYSDRAM + 512*MB - 8*1024;
1513 reg->eudid = Attrcs1 << 4 | Targdram;
1515 // archetheraddr(ether, ctlr->reg, Qno); /* 2nd location */
1517 reg->portcfg = Rxqdefault(Qno) | Rxqarp(Qno);
1522 * start the controller running.
1523 * turn the port on, kick the receiver.
1526 reg->psc1 = PSC1rgmii | PSC1encolonbp | PSC1coldomlim(0x23);
1527 /* do this only when the controller is quiescent */
1528 reg->psc0 = PSC0porton | PSC0an_flctloff |
1529 PSC0an_pauseadv | PSC0nofrclinkdown | PSC0mru(PSC0mru1522);
1531 for (i = 0; i < 4000; i++) /* magic delay */
1534 ether->link = (reg->ps0 & PS0linkup) != 0;
1536 /* set ethernet MTU for leaky bucket mechanism to 0 (disabled) */
1540 snprint(name, sizeof name, "#l%drproc", ether->ctlrno);
1541 kproc(name, rcvproc, ether);
1543 reg->rqc = Rxqon(Qno);
1548 attach(Ether* ether)
1550 Ctlr *ctlr = ether->ctlr;
1552 lock(&ctlr->initlock);
1553 if(ctlr->init == 0) {
1557 unlock(&ctlr->initlock);
1562 * mib registers clear on read.
/*
 * accumulate the hardware MIB counters into the soft copies in ctlr.
 * the hardware registers clear on read, so each pass adds only the
 * delta since the previous call.
 */
getmibstats(Ctlr *ctlr)
	Gbereg *reg = ctlr->reg;

	/*
	 * Marvell 88f6281 errata FE-ETH-120: high long of rxby and txby
	 * can't be read correctly, so read the low long frequently
	 * (every 30 seconds or less), thus avoiding overflow into high long.
	 */
	ctlr->rxby	+= reg->rxbylo;
	ctlr->txby	+= reg->txbylo;
	ctlr->badrxby	+= reg->badrxby;
	ctlr->mactxerr	+= reg->mactxerr;
	ctlr->rxpkt	+= reg->rxpkt;
	ctlr->badrxpkt	+= reg->badrxpkt;
	ctlr->rxbcastpkt+= reg->rxbcastpkt;
	ctlr->rxmcastpkt+= reg->rxmcastpkt;
	ctlr->rx64	+= reg->rx64;
	ctlr->rx65_127	+= reg->rx65_127;
	ctlr->rx128_255	+= reg->rx128_255;
	ctlr->rx256_511	+= reg->rx256_511;
	ctlr->rx512_1023+= reg->rx512_1023;
	ctlr->rx1024_max+= reg->rx1024_max;
	ctlr->txpkt	+= reg->txpkt;
	ctlr->txcollpktdrop+= reg->txcollpktdrop;
	ctlr->txmcastpkt+= reg->txmcastpkt;
	ctlr->txbcastpkt+= reg->txbcastpkt;
	ctlr->badmacctlpkts+= reg->badmacctlpkts;
	ctlr->txflctl	+= reg->txflctl;
	ctlr->rxflctl	+= reg->rxflctl;
	ctlr->badrxflctl+= reg->badrxflctl;
	ctlr->rxundersized+= reg->rxundersized;
	ctlr->rxfrags	+= reg->rxfrags;
	ctlr->rxtoobig	+= reg->rxtoobig;
	ctlr->rxjabber	+= reg->rxjabber;
	ctlr->rxerr	+= reg->rxerr;
	ctlr->crcerr	+= reg->crcerr;
	ctlr->collisions+= reg->collisions;
	ctlr->latecoll	+= reg->latecoll;
1609 ifstat(Ether *ether, void *a, long n, ulong off)
1611 Ctlr *ctlr = ether->ctlr;
1612 Gbereg *reg = ctlr->reg;
1615 buf = p = malloc(READSTR);
1621 ctlr->intrs += ctlr->newintrs;
1622 p = seprint(p, e, "interrupts: %lud\n", ctlr->intrs);
1623 p = seprint(p, e, "new interrupts: %lud\n", ctlr->newintrs);
1625 p = seprint(p, e, "tx underrun: %lud\n", ctlr->txunderrun);
1626 p = seprint(p, e, "tx ring full: %lud\n", ctlr->txringfull);
1628 ctlr->rxdiscard += reg->pxdfc;
1629 ctlr->rxoverrun += reg->pxofc;
1630 p = seprint(p, e, "rx discarded frames: %lud\n", ctlr->rxdiscard);
1631 p = seprint(p, e, "rx overrun frames: %lud\n", ctlr->rxoverrun);
1632 p = seprint(p, e, "no first+last flag: %lud\n", ctlr->nofirstlast);
1634 p = seprint(p, e, "duplex: %s\n", (reg->ps0 & PS0fd)? "full": "half");
1635 p = seprint(p, e, "flow control: %s\n", (reg->ps0 & PS0flctl)? "on": "off");
1636 /* p = seprint(p, e, "speed: %d mbps\n", ); */
1638 p = seprint(p, e, "received bytes: %llud\n", ctlr->rxby);
1639 p = seprint(p, e, "bad received bytes: %lud\n", ctlr->badrxby);
1640 p = seprint(p, e, "internal mac transmit errors: %lud\n", ctlr->mactxerr);
1641 p = seprint(p, e, "total received frames: %lud\n", ctlr->rxpkt);
1642 p = seprint(p, e, "received broadcast frames: %lud\n", ctlr->rxbcastpkt);
1643 p = seprint(p, e, "received multicast frames: %lud\n", ctlr->rxmcastpkt);
1644 p = seprint(p, e, "bad received frames: %lud\n", ctlr->badrxpkt);
1645 p = seprint(p, e, "received frames 0-64: %lud\n", ctlr->rx64);
1646 p = seprint(p, e, "received frames 65-127: %lud\n", ctlr->rx65_127);
1647 p = seprint(p, e, "received frames 128-255: %lud\n", ctlr->rx128_255);
1648 p = seprint(p, e, "received frames 256-511: %lud\n", ctlr->rx256_511);
1649 p = seprint(p, e, "received frames 512-1023: %lud\n", ctlr->rx512_1023);
1650 p = seprint(p, e, "received frames 1024-max: %lud\n", ctlr->rx1024_max);
1651 p = seprint(p, e, "transmitted bytes: %llud\n", ctlr->txby);
1652 p = seprint(p, e, "total transmitted frames: %lud\n", ctlr->txpkt);
1653 p = seprint(p, e, "transmitted broadcast frames: %lud\n", ctlr->txbcastpkt);
1654 p = seprint(p, e, "transmitted multicast frames: %lud\n", ctlr->txmcastpkt);
1655 p = seprint(p, e, "transmit frames dropped by collision: %lud\n", ctlr->txcollpktdrop);
1656 p = seprint(p, e, "misaligned buffers: %lud\n", ether->pktsmisaligned);
1658 p = seprint(p, e, "bad mac control frames: %lud\n", ctlr->badmacctlpkts);
1659 p = seprint(p, e, "transmitted flow control messages: %lud\n", ctlr->txflctl);
1660 p = seprint(p, e, "received flow control messages: %lud\n", ctlr->rxflctl);
1661 p = seprint(p, e, "bad received flow control messages: %lud\n", ctlr->badrxflctl);
1662 p = seprint(p, e, "received undersized packets: %lud\n", ctlr->rxundersized);
1663 p = seprint(p, e, "received fragments: %lud\n", ctlr->rxfrags);
1664 p = seprint(p, e, "received oversized packets: %lud\n", ctlr->rxtoobig);
1665 p = seprint(p, e, "received jabber packets: %lud\n", ctlr->rxjabber);
1666 p = seprint(p, e, "mac receive errors: %lud\n", ctlr->rxerr);
1667 p = seprint(p, e, "crc errors: %lud\n", ctlr->crcerr);
1668 p = seprint(p, e, "collisions: %lud\n", ctlr->collisions);
1669 p = seprint(p, e, "late collisions: %lud\n", ctlr->latecoll);
1673 n = readstr(off, a, n, buf);
1684 ether->ctlr = ctlr = malloc(sizeof *ctlr);
1685 switch(ether->ctlrno) {
1687 ether->irq = IRQ0gbe0sum;
1690 ether->irq = IRQ0gbe1sum;
1693 panic("ether1116: bad ether ctlr #%d", ether->ctlrno);
1695 ctlr->reg = (Gbereg*)soc.ether[ether->ctlrno];
1697 /* need this for guruplug, at least */
1698 *(ulong *)soc.iocfg |= 1 << 7 | 1 << 15; /* io cfg 0: 1.8v gbe */
1701 ctlr->ether = ether;
1702 ctlrs[ether->ctlrno] = ctlr;
1705 /* ensure that both interfaces are set to RGMII before calling mii */
1706 ((Gbereg*)soc.ether[0])->psc1 |= PSC1rgmii;
1707 ((Gbereg*)soc.ether[1])->psc1 |= PSC1rgmii;
1710 /* Set phy address of the port */
1711 ctlr->port = ether->ctlrno;
1712 ctlr->reg->phy = ether->ctlrno;
1714 ether->port = (uintptr)ctlr->reg;
1716 if(kirkwoodmii(ether) < 0){
1721 miiphyinit(ctlr->mii);
1722 archetheraddr(ether, ctlr->reg, Qno); /* original location */
1723 if (memcmp(ether->ea, zeroea, sizeof zeroea) == 0){
1724 iprint("ether1116: reset: zero ether->ea\n");
1727 return -1; /* no rj45 for this ether */
1730 ether->attach = attach;
1731 ether->transmit = transmit;
1732 ether->interrupt = interrupt;
1733 ether->ifstat = ifstat;
1734 ether->shutdown = shutdown;
1738 ether->promiscuous = promiscuous;
1739 ether->multicast = multicast;
1746 addethercard("88e1116", reset);