2 * marvell kirkwood gigabit ethernet (88e1116 and 88e1121) driver
3 * (as found in the sheevaplug, openrd and guruplug).
4 * the main difference is the flavour of phy kludgery necessary.
6 * from /public/doc/marvell/88f61xx.kirkwood.pdf,
7 * /public/doc/marvell/88e1116.pdf, and
8 * /public/doc/marvell/88e1121r.pdf.
12 #include "../port/lib.h"
17 #include "../port/error.h"
18 #include "../port/netif.h"
19 #include "../port/etherif.h"
/* mii debugging output: compiled out; change the 0 to 1 to enable */
#define MIIDBG if(0)iprint
/* address-window attribute byte, placed in bits 15-8 of a window base */
#define WINATTR(v) (((v) & MASK(8)) << 8)
/* encode a window size in bytes as (64KB units - 1) in bits 31-16 */
#define WINSIZE(v) (((v)/(64*1024) - 1) << 16)
33 Rxblklen = 2+1522, /* ifc. supplies first 2 bytes as padding */
35 Maxrxintrsec = 20*1000, /* max. rx intrs. / sec */
36 Etherstuck = 70, /* must send or receive a packet in this many sec.s */
41 Pass = 1, /* accept packets */
43 Qno = 0, /* do everything on queue zero */
46 typedef struct Ctlr Ctlr;
47 typedef struct Gbereg Gbereg;
48 typedef struct Mibstats Mibstats;
57 /* hardware receive buffer descriptor */
60 ulong countsize; /* bytes, buffer size */
61 ulong buf; /* phys. addr. of packet buffer */
62 ulong next; /* phys. addr. of next Rx */
65 /* hardware transmit buffer descriptor */
68 ulong countchk; /* bytes, checksum */
69 ulong buf; /* phys. addr. of packet buffer */
70 ulong next; /* phys. addr. of next Tx */
73 /* fixed by hw; part of Gberegs */
76 uvlong rxby; /* good bytes rcv'd */
82 ulong badrxby; /* bad bytes rcv'd */
83 ulong mactxerr; /* tx err pkts */
84 ulong rxpkt; /* good pkts rcv'd */
85 ulong badrxpkt; /* bad pkts rcv'd */
86 ulong rxbcastpkt; /* b'cast pkts rcv'd */
87 ulong rxmcastpkt; /* m'cast pkts rcv'd */
89 ulong rx64; /* pkts <= 64 bytes */
90 ulong rx65_127; /* pkts 65—127 bytes */
91 ulong rx128_255; /* pkts 128—255 bytes */
92 ulong rx256_511; /* pkts 256—511 bytes */
93 ulong rx512_1023; /* pkts 512—1023 bytes */
94 ulong rx1024_max; /* pkts >= 1024 bytes */
97 uvlong txby; /* good bytes sent */
103 ulong txpkt; /* good pkts sent */
104 /* half-duplex: pkts dropped due to excessive collisions */
106 ulong txmcastpkt; /* m'cast pkts sent */
107 ulong txbcastpkt; /* b'cast pkts sent */
109 ulong badmacctlpkts; /* bad mac ctl pkts */
110 ulong txflctl; /* flow-control pkts sent */
111 ulong rxflctl; /* good flow-control pkts rcv'd */
112 ulong badrxflctl; /* bad flow-control pkts rcv'd */
114 ulong rxundersized; /* runts */
115 ulong rxfrags; /* fragments rcv'd */
116 ulong rxtoobig; /* oversized pkts rcv'd */
117 ulong rxjabber; /* jabber pkts rcv'd */
118 ulong rxerr; /* rx error events */
119 ulong crcerr; /* crc error events */
120 ulong collisions; /* collision events */
121 ulong latecoll; /* late collisions */
132 Rx *rx; /* receive descriptors */
133 Block *rxb[Nrx]; /* blocks belonging to the descriptors */
134 int rxhead; /* descr ethernet will write to next */
135 int rxtail; /* next descr that might need a buffer */
136 Rendez rrendez; /* interrupt wakes up read process */
141 int txhead; /* next descr we can use for new packet */
142 int txtail; /* next descr to reclaim on tx complete */
147 int linkchg; /* link status changed? */
148 uvlong starttime; /* last activity time */
162 #define Rxqon(q) (1<<(q))
163 #define Txqon(q) (1<<(q))
169 /* sdma config, sdc bits */
175 SDCrifb = 1<<0, /* rx intr on pkt boundaries */
176 #define SDCrxburst(v) ((v)<<1)
177 SDCrxnobyteswap = 1<<4,
178 SDCtxnobyteswap = 1<<5,
179 SDCswap64byte = 1<<6,
180 #define SDCtxburst(v) ((v)<<22)
181 /* rx intr ipg (inter packet gap) */
/* whole expansion parenthesized so it combines safely with | & etc. at use sites */
#define SDCipgintrx(v) (((((v)>>15) & 1)<<25) | (((v) & MASK(15))<<7))
185 PCFGupromisc = 1<<0, /* unicast promiscuous mode */
186 #define Rxqdefault(q) ((q)<<1)
187 #define Rxqarp(q) ((q)<<4)
188 PCFGbcrejectnoiparp = 1<<7,
189 PCFGbcrejectip = 1<<8,
190 PCFGbcrejectarp = 1<<9,
191 PCFGamnotxes = 1<<12, /* auto mode, no summary update on tx */
192 PCFGtcpq = 1<<14, /* capture tcp frames to tcpq */
193 PCFGudpq = 1<<15, /* capture udp frames to udpq */
194 #define Rxqtcp(q) ((q)<<16)
195 #define Rxqudp(q) ((q)<<19)
196 #define Rxqbpdu(q) ((q)<<22)
197 PCFGrxcs = 1<<25, /* rx tcp checksum mode with header */
201 PCFGXcrcoff = 1<<2, /* no ethernet crc */
203 /* port serial control0, psc0 bits */
205 PSC0forcelinkup = 1<<1,
206 PSC0an_dplxoff = 1<<2, /* an_ = auto. negotiate */
207 PSC0an_flctloff = 1<<3,
208 PSC0an_pauseadv = 1<<4,
209 PSC0nofrclinkdown = 1<<10,
210 PSC0an_spdoff = 1<<13,
211 PSC0dteadv = 1<<14, /* dte advertise */
213 /* max. input pkt size */
214 #define PSC0mru(v) ((v)<<17)
215 PSC0mrumask = PSC0mru(MASK(3)),
216 PSC0mru1518 = 0, /* 1500+2* 6(addrs) +2 + 4(crc) */
217 PSC0mru1522, /* 1518 + 4(vlan tags) */
218 PSC0mru1552, /* `baby giant' */
219 PSC0mru9022, /* `jumbo' */
220 PSC0mru9192, /* bigger jumbo */
221 PSC0mru9700, /* still bigger jumbo */
223 PSC0fd_frc = 1<<21, /* force full duplex */
224 PSC0flctlfrc = 1<<22,
225 PSC0gmiispd_gbfrc = 1<<23,
226 PSC0miispdfrc100mbps = 1<<24,
228 /* port status 0, ps0 bits */
230 PS0fd = 1<<2, /* full duplex */
233 PS0mii100mbps = 1<<5,
235 PS0txfifoempty = 1<<10,
236 PS0rxfifo1empty = 1<<11,
237 PS0rxfifo2empty = 1<<12,
239 /* port serial control 1, psc1 bits */
242 PSC1rgmii = 1<<3, /* enable RGMII */
243 PSC1portreset = 1<<4,
244 PSC1clockbypass = 1<<5,
246 PSC1iban_bypass = 1<<7,
247 PSC1iban_restart= 1<<8,
249 PSC1encolonbp = 1<<15, /* "collision during back-pressure mib counting" */
250 PSC1coldomlimmask= MASK(6)<<16,
251 #define PSC1coldomlim(v) (((v) & MASK(6))<<16)
252 PSC1miiallowoddpreamble = 1<<22,
254 /* port status 1, ps1 bits */
258 PS1syncfail10ms = 1<<3,
260 PS1inbandan_bypassed = 1<<5,
261 PS1serdesplllocked = 1<<6,
266 /* rx buf returned to cpu ownership, or frame reception finished */
268 Iextend = 1<<1, /* IEsum of irqe set */
269 #define Irxbufferq(q) (1<<((q)+2)) /* rx buf returned to cpu ownership */
270 Irxerr = 1<<10, /* input ring full, usually */
271 #define Irxerrq(q) (1<<((q)+11))
272 #define Itxendq(q) (1<<((q)+19)) /* tx dma stopped for q */
275 /* irq extended, irqe bits */
276 #define IEtxbufferq(q) (1<<((q)+0)) /* tx buf returned to cpu ownership */
277 #define IEtxerrq(q) (1<<((q)+8))
281 IEtxunderrun = 1<<19,
283 IEintaddrerr = 1<<23,
287 /* tx fifo urgent threshold (tx interrupt coalescing), pxtfut */
/* tx interrupt-coalescing ipg field; stray trailing `;' removed so the
 * macro can be used inside an expression */
#define TFUTipginttx(v) (((v) & MASK(16))<<4)
290 /* minimal frame size, mfs */
299 /* receive descriptor status */
306 RCSl4chkmask = MASK(16),
316 RCSip4headok = 1<<25,
319 RCSunknownaddr = 1<<28,
320 RCSenableintr = 1<<29,
324 /* transmit descriptor status */
331 TCSl4chkmode = 1<<10,
332 TCSipv4hdlenshift= 11,
340 TCSenableintr = 1<<23,
347 PhysmiTimeout = 10000, /* what units? in ms. */
348 Physmidataoff = 0, /* Data */
349 Physmidatamask = 0xffff<<Physmidataoff,
351 Physmiaddroff = 16, /* PHY device addr */
352 Physmiaddrmask = 0x1f << Physmiaddroff,
355 Physmiopmask = 3<<Physmiop,
356 PhysmiopWr = 0<<Physmiop,
357 PhysmiopRd = 1<<Physmiop,
359 PhysmiReadok = 1<<27,
362 SmiRegaddroff = 21, /* PHY device register addr */
363 SmiRegaddrmask = 0x1f << SmiRegaddroff,
367 ulong phy; /* PHY address */
368 ulong smi; /* serial mgmt. interface */
369 ulong euda; /* ether default address */
370 ulong eudid; /* ether default id */
371 uchar _pad0[0x80-0x10];
374 ulong euirq; /* interrupt cause */
375 ulong euirqmask; /* interrupt mask */
376 uchar _pad1[0x94-0x88];
377 ulong euea; /* error address */
378 ulong euiae; /* internal error address */
379 uchar _pad2[0xb0-0x9c];
380 ulong euc; /* control */
381 uchar _pad3[0x200-0xb4];
383 ulong base; /* window base */
384 ulong size; /* window size */
386 uchar _pad4[0x280-0x230];
387 ulong harr[4]; /* high address remap */
388 ulong bare; /* base address enable */
389 ulong epap; /* port access protect */
390 uchar _pad5[0x400-0x298];
392 ulong portcfg; /* port configuration */
393 ulong portcfgx; /* port config. extend */
394 ulong mii; /* mii serial parameters */
396 ulong evlane; /* vlan ether type */
397 ulong macal; /* mac address low */
398 ulong macah; /* mac address high */
399 ulong sdc; /* sdma config. */
400 ulong dscp[7]; /* ip diff. serv. code point -> pri */
401 ulong psc0; /* port serial control 0 */
402 ulong vpt2p; /* vlan priority tag -> pri */
403 ulong ps0; /* ether port status 0 */
404 ulong tqc; /* transmit queue command */
405 ulong psc1; /* port serial control 1 */
406 ulong ps1; /* ether port status 1 */
407 ulong mvhdr; /* marvell header */
411 ulong irq; /* interrupt cause; some rw0c bits */
412 ulong irqe; /* " " extended; some rw0c bits */
413 ulong irqmask; /* interrupt mask (actually enable) */
414 ulong irqemask; /* " " extended */
417 ulong pxtfut; /* port tx fifo urgent threshold */
419 ulong pxmfs; /* port rx minimum frame size */
423 * # of input frames discarded by addr filtering or lack of resources;
426 ulong pxdfc; /* port rx discard frame counter */
427 ulong pxofc; /* port overrun frame counter */
429 ulong piae; /* port internal address error */
430 uchar _pad13[0x4bc-0x498];
431 ulong etherprio; /* ether type priority */
432 uchar _pad14[0x4dc-0x4c0];
433 ulong tqfpc; /* tx queue fixed priority config. */
434 ulong pttbrc; /* port tx token-bucket rate config. */
435 ulong tqc1; /* tx queue command 1 */
436 ulong pmtu; /* port maximum transmit unit */
437 ulong pmtbs; /* port maximum token bucket size */
438 uchar _pad15[0x600-0x4f0];
442 ulong r; /* phys. addr.: cur. rx desc. ptrs */
444 ulong rqc; /* rx queue command */
445 ulong tcsdp; /* phys. addr.: cur. tx desc. ptr */
446 uchar _pad16[0x6c0-0x688];
448 ulong tcqdp[8]; /* phys. addr.: cur. tx q. desc. ptr */
449 uchar _pad17[0x700-0x6e0];
452 ulong tbctr; /* queue tx token-bucket counter */
453 ulong tbcfg; /* tx queue token-bucket config. */
454 ulong acfg; /* tx queue arbiter config. */
457 ulong pttbc; /* port tx token-bucket counter */
458 uchar _pad18[0x7a8-0x784];
460 ulong ipg2; /* tx queue ipg */
464 ulong htlp; /* high token in low packet */
465 ulong htap; /* high token in async packet */
466 ulong ltap; /* low token in async packet */
468 ulong ts; /* tx speed */
469 uchar _pad22[0x1000-0x7d4];
471 /* mac mib counters: statistics */
473 uchar _pad23[0x1400-0x1080];
475 /* multicast filtering; each byte: Qno<<1 | Pass */
476 ulong dfsmt[64]; /* dest addr filter special m'cast table */
477 ulong dfomt[64]; /* dest addr filter other m'cast table */
478 /* unicast filtering */
479 ulong dfut[4]; /* dest addr filter unicast table */
482 static Ctlr *ctlrs[MaxEther];
483 static uchar zeroea[Eaddrlen];
485 static void getmibstats(Ctlr *);
491 (uchar*)((uintptr)(b->lim - Rxblklen) & ~(Bufalign - 1));
492 assert(((uintptr)b->rp & (Bufalign - 1)) == 0);
496 b->next = freeblocks.head;
498 iunlock(&freeblocks);
509 freeblocks.head = b->next;
513 iunlock(&freeblocks);
	Gbereg *reg = ctlr->reg;
	/* (fragment) restart the receiver if its ring pointer or queue is idle */
	if (reg->crdp[Qno].r == 0)
		reg->crdp[Qno].r = PADDR(&ctlr->rx[ctlr->rxhead]);
	if ((reg->rqc & 0xff) == 0) /* all queues are stopped? */
		reg->rqc = Rxqon(Qno); /* restart */
	Gbereg *reg = ctlr->reg;
	/* (fragment) restart the transmitter if its ring pointer or queue is idle */
	if (reg->tcqdp[Qno] == 0)
		reg->tcqdp[Qno] = PADDR(&ctlr->tx[ctlr->txhead]);
	if ((reg->tqc & 0xff) == 0) /* all q's stopped? */
		reg->tqc = Txqon(Qno); /* restart */
/*
 * (fragment) refill empty rx ring slots with fresh buffers and hand the
 * descriptors back to the hardware (RCSdmaown).
 */
rxreplenish(Ctlr *ctlr)
	while(ctlr->rxb[ctlr->rxtail] == nil) {
			iprint("#l%d: rxreplenish out of buffers\n",
				ctlr->ether->ctlrno);
		ctlr->rxb[ctlr->rxtail] = b;
		/* set up uncached receive descriptor */
		r = &ctlr->rx[ctlr->rxtail];
		assert(((uintptr)r & (Descralign - 1)) == 0);
		r->countsize = ROUNDUP(Rxblklen, 8);
		r->buf = PADDR(b->rp);
		/* hand descriptor to the controller; intr when it's filled */
		r->cs = RCSdmaown | RCSenableintr;
		ctlr->rxtail = NEXT(ctlr->rxtail, Nrx);
573 dump(uchar *bp, long max)
577 for (; max > 0; max--, bp++)
578 iprint("%02.2ux ", *bp);
/* note the time (in seconds) of the most recent ethernet activity */
etheractive(Ether *ether)
	Ctlr *ctlr = ether->ctlr;
	ctlr->starttime = TK2MS(MACHP(0)->ticks)/1000;
/*
 * (fragment) complain if no packet has been sent or received within
 * Etherstuck seconds of the last recorded activity.
 */
ethercheck(Ether *ether)
	Ctlr *ctlr = ether->ctlr;
	if (ctlr->starttime != 0 &&
	    TK2MS(MACHP(0)->ticks)/1000 - ctlr->starttime > Etherstuck) {
		if (ether->ctlrno == 0) /* only complain about main ether */
			iprint("#l%d: ethernet stuck\n", ether->ctlrno);
/*
 * (fragment) drain up to Nrx-2 completed frames from the rx ring,
 * invalidate the cpu caches over each buffer, and pass the frames up.
 * declarations and per-frame dispatch lines are elided in this view.
 */
receive(Ether *ether)
	Ctlr *ctlr = ether->ctlr;
	for (i = Nrx-2; i > 0; i--) {
		r = &ctlr->rx[ctlr->rxhead]; /* *r is uncached */
		assert(((uintptr)r & (Descralign - 1)) == 0);
		if(r->cs & RCSdmaown) /* descriptor busy? */
		b = ctlr->rxb[ctlr->rxhead]; /* got input buffer? */
			panic("ether1116: nil ctlr->rxb[ctlr->rxhead] "
		ctlr->rxb[ctlr->rxhead] = nil;
		ctlr->rxhead = NEXT(ctlr->rxhead, Nrx);
		if((r->cs & (RCSfirst|RCSlast)) != (RCSfirst|RCSlast)) {
			ctlr->nofirstlast++; /* partial packet */
		if(r->cs & RCSmacerr) {
		n = r->countsize >> 16; /* TODO includes 2 pad bytes? */
		assert(n >= 2 && n < 2048);
		/* clear any cached packet or part thereof */
		l2cacheuinvse(b->rp, n+2);
		cachedinvse(b->rp, n+2);
		/*
		 * skip hardware padding intended to align ipv4 address
		 * in memory (mv-s104860-u0 §8.3.4.1)
		 */
		if (i % (Nrx / 2) == 0) {
/*
 * (fragment) reclaim tx descriptors the hardware has finished with
 * and free the corresponding blocks.
 */
txreplenish(Ether *ether) /* free transmitted packets */
	while(ctlr->txtail != ctlr->txhead) {
		/* ctlr->tx is uncached */
		if(ctlr->tx[ctlr->txtail].cs & TCSdmaown)
		if(ctlr->txb[ctlr->txtail] == nil)
			panic("no block for sent packet?!");
		freeb(ctlr->txb[ctlr->txtail]);
		ctlr->txb[ctlr->txtail] = nil;
		ctlr->txtail = NEXT(ctlr->txtail, Ntx);
678 * transmit strategy: fill the output ring as far as possible,
679 * perhaps leaving a few spare; kick off the output and take
680 * an interrupt only when the transmit queue is empty.
/*
 * (fragment) queue outgoing packets on the tx ring, write packet
 * contents through both cache levels, then enable tx-end/err interrupts.
 * declarations and the tx kick are elided in this view.
 */
transmit(Ether *ether)
	Ctlr *ctlr = ether->ctlr;
	Gbereg *reg = ctlr->reg;
	txreplenish(ether); /* reap old packets */
	/* queue new packets; use at most half the tx descs to avoid livelock */
	for (i = Ntx/2 - 2; i > 0; i--) {
		t = &ctlr->tx[ctlr->txhead]; /* *t is uncached */
		assert(((uintptr)t & (Descralign - 1)) == 0);
		if(t->cs & TCSdmaown) { /* descriptor busy? */
		b = qget(ether->oq); /* outgoing packet? */
		if(len < ether->minmtu || len > ether->maxmtu) {
		ctlr->txb[ctlr->txhead] = b;
		/* make sure the whole packet is in memory */
		cachedwbse(b->rp, len);
		l2cacheuwbse(b->rp, len);
		/* set up the transmit descriptor */
		t->buf = PADDR(b->rp);
		t->countchk = len << 16;
		/* hand to hw: single-fragment frame, pad runts, intr on done */
		t->cs = TCSpadding | TCSfirst | TCSlast | TCSdmaown |
		ctlr->txhead = NEXT(ctlr->txhead, Ntx);
	/* let hw tell us when the whole queue has drained or erred */
	reg->irqmask |= Itxendq(Qno);
	reg->irqemask |= IEtxerrq(Qno) | IEtxunderrun;
742 dumprxdescs(Ctlr *ctlr)
745 Gbereg *reg = ctlr->reg;
747 iprint("\nrxhead %d rxtail %d; txcdp %#p rxcdp %#p\n",
748 ctlr->rxhead, ctlr->rxtail, reg->tcqdp[Qno], reg->crdp[Qno].r);
749 for (i = 0; i < Nrx; i++) {
750 iprint("rxb %d @ %#p: %#p\n", i, &ctlr->rxb[i], ctlr->rxb[i]);
753 for (i = 0; i < Nrx; i++) {
754 iprint("rx %d @ %#p: cs %#lux countsize %lud buf %#lux next %#lux\n",
755 i, &ctlr->rx[i], ctlr->rx[i].cs,
756 ctlr->rx[i].countsize >> 3, ctlr->rx[i].buf,
766 return ((Ctlr*)ctlr)->haveinput != 0;
770 * process any packets in the input ring.
771 * also sum mib stats frequently to avoid the overflow
772 * mentioned in the errata.
785 tsleep(&ctlr->rrendez, gotinput, ctlr, 10*1000);
788 if (ctlr->haveinput) {
/*
 * (fragment) interrupt service: acknowledge causes, defer rx work to the
 * receive kproc, mask tx-done interrupts once the ring drains, track link
 * changes, and count error causes.  the loads of irq/irqe from the
 * controller and several accounting lines are elided in this view.
 */
interrupt(Ureg*, void *arg)
	ulong irq, irqe, handled;
	Ctlr *ctlr = ether->ctlr;
	Gbereg *reg = ctlr->reg;
	reg->irqe = 0; /* extinguish intr causes */
	reg->irq = 0; /* extinguish intr causes */
	if(irq & (Irx | Irxbufferq(Qno))) {
		 * letting a kproc process the input takes far less real time
		 * than doing it all at interrupt level.
		wakeup(&ctlr->rrendez);
		irq &= ~(Irx | Irxbufferq(Qno));
	if(irq & Itxendq(Qno)) { /* transmit ring empty? */
		reg->irqmask &= ~Itxendq(Qno); /* prevent more interrupts */
		reg->irqemask &= ~(IEtxerrq(Qno) | IEtxunderrun);
		irq &= ~Itxendq(Qno);
	 * IElinkchg appears to only be set when unplugging.
	 * autonegotiation is likely not done yet, so linkup not valid,
	 * thus we note the link change here, and check for
	 * that and autonegotiation done below.
	if(irqe & IEphystschg) {
		ether->link = (reg->ps0 & PS0linkup) != 0;
	if(irqe & IEtxerrq(Qno))
	if(irqe & IErxoverrun)
	if(irqe & IEtxunderrun)
	if(irqe & (IEphystschg | IEtxerrq(Qno) | IErxoverrun |
	if (irq & Irxerr) { /* nil desc. ptr. or desc. owned by cpu */
		ether->buffs++; /* approx. error */
		/* if the input ring is full, drain it */
		wakeup(&ctlr->rrendez);
	if(irq & (Irxerr | Irxerrq(Qno)))
	irq &= ~(Irxerr | Irxerrq(Qno));
	if(ctlr->linkchg && (reg->ps1 & PS1an_done)) {
		ether->link = (reg->ps0 & PS0linkup) != 0;
	irqe &= ~IEtxbufferq(Qno);
	if (irq == 0 && irqe == 0) {
		/* seems to be triggered by continuous output */
// iprint("ether1116: spurious interrupt\n");
		iprint("ether1116: interrupt cause unknown; "
			"irq %#lux irqe %#lux\n", irq, irqe);
	intrclear(Irqlo, ether->irq);
/*
 * (fragment) turn unicast promiscuous mode on or off in the port
 * configuration register.
 */
promiscuous(void *arg, int on)
	Ctlr *ctlr = ether->ctlr;
	Gbereg *reg = ctlr->reg;
		reg->portcfg |= PCFGupromisc;
		reg->portcfg &= ~PCFGupromisc;
/* multicast filter hook; a no-op since the filter tables accept all m'cast */
multicast(void *, uchar *, int)
	/* nothing to do; we always accept multicast */
908 static void quiesce(Gbereg *reg);
/*
 * (fragment) reset the ethernet unit and serial port, then clear the
 * tx/rx descriptor pointer registers.  delays between register writes
 * are elided in this view.
 */
shutdown(Ether *ether)
	Ctlr *ctlr = ether->ctlr;
	Gbereg *reg = ctlr->reg;
	reg->euc |= Portreset;
	reg->euc &= ~Portreset;
	reg->psc0 = 0; /* no PSC0porton */
	reg->psc1 |= PSC1portreset;
	reg->psc1 &= ~PSC1portreset;
	for (i = 0; i < nelem(reg->tcqdp); i++)
	for (i = 0; i < nelem(reg->crdp); i++)
948 static Cmdtab ctlmsg[] = {
953 ctl(Ether *e, void *p, long n)
957 Ctlr *ctlr = e->ctlr;
958 Gbereg *reg = ctlr->reg;
966 ct = lookupcmd(cb, ctlmsg, nelem(ctlmsg));
969 if(strcmp(cb->f[1], "on") == 0) {
970 /* incoming packet queue doesn't expect jumbo frames */
971 error("jumbo disabled");
972 reg->psc0 = (reg->psc0 & ~PSC0mrumask) |
973 PSC0mru(PSC0mru9022);
975 } else if(strcmp(cb->f[1], "off") == 0) {
976 reg->psc0 = (reg->psc0 & ~PSC0mrumask) |
977 PSC0mru(PSC0mru1522);
978 e->maxmtu = ETHERMAXTU;
/*
 * (fragment) poll the SMI register until waitbit clears or
 * PhysmiTimeout iterations elapse.
 */
smibusywait(Gbereg *reg, ulong waitbit)
	ulong timeout, smi_reg;
	timeout = PhysmiTimeout;
	/* wait till the SMI is not busy */
	/* read smi register */
		if (timeout-- == 0) {
			MIIDBG("SMI busy timeout\n");
	} while (smi_reg & waitbit);
/*
 * (fragment) read phy register ra of phy pa over the SMI; returns the
 * 16-bit data field.  rejects addresses that overflow their bitfields.
 */
miird(Mii *mii, int pa, int ra)
	ulong smi_reg, timeout;
	reg = ((Ctlr*)mii->ctlr)->reg;
	/* validate the phy and register addresses fit their fields */
	if ((pa<<Physmiaddroff) & ~Physmiaddrmask ||
	    (ra<<SmiRegaddroff) & ~SmiRegaddrmask)
	smibusywait(reg, PhysmiBusy);
	/* fill the phy address and register offset and read opcode */
	reg->smi = pa << Physmiaddroff | ra << SmiRegaddroff | PhysmiopRd;
	/* wait til read value is ready */
	timeout = PhysmiTimeout;
		if (timeout-- == 0) {
			MIIDBG("SMI read-valid timeout\n");
	} while (!(smi_reg & PhysmiReadok));
	/* Wait for the data to update in the SMI register */
	for (timeout = 0; timeout < PhysmiTimeout; timeout++)
	return reg->smi & Physmidatamask;
/*
 * (fragment) write v to phy register ra of phy pa over the SMI.
 */
miiwr(Mii *mii, int pa, int ra, int v)
	reg = ((Ctlr*)mii->ctlr)->reg;
	/* validate the phy and register addresses fit their fields */
	if (((pa<<Physmiaddroff) & ~Physmiaddrmask) ||
	    ((ra<<SmiRegaddroff) & ~SmiRegaddrmask))
	smibusywait(reg, PhysmiBusy);
	/* fill in data, phy address and register offset; clearing the
	 * read opcode bit makes this a write operation */
	smi_reg = v << Physmidataoff | pa << Physmiaddroff | ra << SmiRegaddroff;
	reg->smi = smi_reg & ~PhysmiopRd;
1071 #define MIIMODEL(idr2) (((idr2) >> 4) & MASK(6))
1077 Ouimarvell = 0x005043,
1079 /* idr2 mii/phy model numbers */
1080 Phy1000 = 0x00, /* 88E1000 Gb */
1081 Phy1011 = 0x02, /* 88E1011 Gb */
1082 Phy1000_3 = 0x03, /* 88E1000 Gb */
1083 Phy1000s = 0x04, /* 88E1000S Gb */
1084 Phy1000_5 = 0x05, /* 88E1000 Gb */
1085 Phy1000_6 = 0x06, /* 88E1000 Gb */
1086 Phy3082 = 0x08, /* 88E3082 10/100 */
1087 Phy1112 = 0x09, /* 88E1112 Gb */
1088 Phy1121r = 0x0b, /* says the 1121r manual */
1089 Phy1149 = 0x0b, /* 88E1149 Gb */
1090 Phy1111 = 0x0c, /* 88E1111 Gb */
1091 Phy1116 = 0x21, /* 88E1116 Gb */
1092 Phy1116r = 0x24, /* 88E1116R Gb */
1093 Phy1118 = 0x22, /* 88E1118 Gb */
1094 Phy3016 = 0x26, /* 88E3016 10/100 */
1097 static int hackflavour;
1100 * on openrd, ether0's phy has address 8, ether1's is ether0's 24.
1101 * on guruplug, ether0's is phy 0 and ether1's is ether0's phy 1.
1104 mymii(Mii* mii, int mask)
1108 int bit, ctlrno, oui, model, phyno, r, rmask;
1109 static int dualport, phyidx;
1110 static int phynos[NMiiPhy];
1113 ctlrno = ctlr->ether->ctlrno;
1115 /* first pass: figure out what kind of phy(s) we have. */
1118 for(phyno = 0; phyno < NMiiPhy; phyno++){
1120 if(!(mask & bit) || mii->mask & bit)
1122 if(mii->mir(mii, phyno, Bmsr) == -1)
1124 r = mii->mir(mii, phyno, Phyidr1);
1125 oui = (r & 0x3FFF)<<6;
1126 r = mii->mir(mii, phyno, Phyidr2);
1128 model = MIIMODEL(r);
1129 if (oui == 0xfffff && model == 0x3f)
1131 MIIDBG("ctlrno %d phy %d oui %#ux model %#ux\n",
1132 ctlrno, phyno, oui, model);
1133 if (oui == Ouimarvell &&
1134 (model == Phy1121r || model == Phy1116r))
1136 phynos[phyidx++] = phyno;
1138 hackflavour = dualport == 2 && phyidx == 2? Hackdual: Hacknone;
1139 MIIDBG("ether1116: %s-port phy\n",
1140 hackflavour == Hackdual? "dual": "single");
1144 * Probe through mii for PHYs in mask;
1145 * return the mask of those found in the current probe.
1146 * If the PHY has not already been probed, update
1147 * the Mii information.
1150 if (hackflavour == Hackdual && ctlrno < phyidx) {
1152 * openrd, guruplug or the like: use ether0's phys.
1153 * this is a nasty hack, but so is the hardware.
1155 MIIDBG("ctlrno %d using ctlrno 0's phyno %d\n",
1156 ctlrno, phynos[ctlrno]);
1157 ctlr->mii = mii = ctlrs[0]->mii;
1158 mask = 1 << phynos[ctlrno];
1161 for(phyno = 0; phyno < NMiiPhy; phyno++){
1165 if(mii->mask & bit){
1169 if(mii->mir(mii, phyno, Bmsr) == -1)
1171 r = mii->mir(mii, phyno, Phyidr1);
1172 oui = (r & 0x3FFF)<<6;
1173 r = mii->mir(mii, phyno, Phyidr2);
1175 if(oui == 0xFFFFF || oui == 0)
1178 if((miiphy = malloc(sizeof(MiiPhy))) == nil)
1182 miiphy->phyno = phyno;
1188 mii->phy[phyno] = miiphy;
1189 if(ctlrno == 0 || hackflavour != Hackdual && mii->curphy == nil)
1190 mii->curphy = miiphy;
1200 kirkwoodmii(Ether *ether)
1208 if((ctlr->mii = malloc(sizeof(Mii))) == nil)
1210 ctlr->mii->ctlr = ctlr;
1211 ctlr->mii->mir = miird;
1212 ctlr->mii->miw = miiwr;
1214 if(mymii(ctlr->mii, ~0) == 0 || (phy = ctlr->mii->curphy) == nil){
1215 print("#l%d: ether1116: init mii failure\n", ether->ctlrno);
1221 /* oui 005043 is marvell */
1222 MIIDBG("oui %#X phyno %d\n", phy->oui, phy->phyno);
1223 // TODO: does this make sense? shouldn't each phy be initialised?
1224 if((ctlr->ether->ctlrno == 0 || hackflavour != Hackdual) &&
1225 miistatus(ctlr->mii) < 0){
1226 miireset(ctlr->mii);
1227 MIIDBG("miireset\n");
1228 if(miiane(ctlr->mii, ~0, 0, ~0) < 0){
1229 iprint("miiane failed\n");
1232 MIIDBG("miistatus\n");
1233 miistatus(ctlr->mii);
1234 if(miird(ctlr->mii, phy->phyno, Bmsr) & BmsrLs){
1237 iprint("ether1116: autonegotiation failed\n");
1240 if(miird(ctlr->mii, phy->phyno, Bmsr) & BmsrAnc)
1244 if(miistatus(ctlr->mii) < 0)
1245 iprint("miistatus failed\n");
1247 iprint("ether1116: no link\n");
1248 phy->speed = 10; /* simple default */
1252 ether->mbps = phy->speed;
1253 MIIDBG("#l%d: kirkwoodmii: fd %d speed %d tfc %d rfc %d\n",
1254 ctlr->port, phy->fd, phy->speed, phy->tfc, phy->rfc);
1255 MIIDBG("mii done\n");
1259 enum { /* PHY register pages */
1272 miiregpage(Mii *mii, ulong dev, ulong page)
1274 miiwr(mii, dev, Eadr, page);
1278 miiphyinit(Mii *mii)
1284 ctlr = (Ctlr*)mii->ctlr;
1287 MIIDBG("phy dev addr %lux\n", dev);
1289 /* leds link & activity */
1290 miiregpage(mii, dev, Pagled);
1291 /* low 4 bits == 1: on - link, blink - activity, off - no link */
1292 miiwr(mii, dev, Scr, (miird(mii, dev, Scr) & ~0xf) | 1);
1294 miiregpage(mii, dev, Pagrgmii);
1295 miiwr(mii, dev, Scr, miird(mii, dev, Scr) | Rgmiipwrup);
1296 /* must now do a software reset, says the manual */
1297 miireset(ctlr->mii);
1299 /* enable RGMII delay on Tx and Rx for CPU port */
1300 miiwr(mii, dev, Recr, miird(mii, dev, Recr) | Rxtiming | Rxtiming);
1301 /* must now do a software reset, says the manual */
1302 miireset(ctlr->mii);
1304 miiregpage(mii, dev, Pagcopper);
1305 miiwr(mii, dev, Scr,
1306 (miird(mii, dev, Scr) & ~(Pwrdown|Endetect)) | Mdix);
/*
 * (fragment) stop all active tx and rx queues and wait for them to
 * drain.  v is computed in lines elided from this view — presumably the
 * mask of active channels; TODO confirm.
 */
quiesce(Gbereg *reg)
	reg->tqc = v << 8; /* stop active channels */
	reg->rqc = v << 8; /* stop active channels */
	/* wait for all queues to stop */
	while (reg->tqc & 0xFF || reg->rqc & 0xFF)
1332 p16(uchar *p, ulong v) /* convert big-endian short to bytes */
1339 p32(uchar *p, ulong v) /* convert big-endian long to bytes */
1348 * set ether->ea from hw mac address,
1349 * configure unicast filtering to accept it.
/*
 * (fragment) derive ether->ea from the hardware mac address registers
 * (synthesizing one from ctlr 0's address if unset), then program the
 * unicast filter table to accept it and the multicast tables to accept
 * everything.
 */
archetheraddr(Ether *ether, Gbereg *reg, int rxqno)
	ulong nibble, ucreg, tbloff, regoff;
	p32(ea, reg->macah);
	p16(ea+4, reg->macal);
	if (memcmp(ea, zeroea, sizeof zeroea) == 0 && ether->ctlrno > 0) {
		/* hack: use ctlr[0]'s + ctlrno */
		memmove(ea, ctlrs[0]->ether->ea, Eaddrlen);
		ea[Eaddrlen-1] += ether->ctlrno;
		reg->macah = ea[0] << 24 | ea[1] << 16 | ea[2] << 8 | ea[3];
		reg->macal = ea[4] << 8 | ea[5];
	/* accept frames on ea */
	nibble = ea[5] & 0xf;
	tbloff = nibble / 4;
	regoff = nibble % 4;
	/*
	 * NOTE(review): dfut[] packs 4 byte-sized entries per ulong, so a
	 * shift by regoff (0-3) looks like it should be a byte offset
	 * (8*regoff), and `&' with the mask keeps the old entry rather
	 * than clearing it — verify against the dfut register layout.
	 */
	ucreg = reg->dfut[tbloff] & (0xff << regoff);
	ucreg |= (rxqno << 1 | Pass) << regoff;
	reg->dfut[tbloff] = ucreg;
	/* accept all multicast too. set up special & other tables. */
	memset(reg->dfsmt, Qno<<1 | Pass, sizeof reg->dfsmt);
	memset(reg->dfomt, Qno<<1 | Pass, sizeof reg->dfomt);
/*
 * (fragment) configure the ethernet unit's dram address windows:
 * two 256MB windows over the two dram chip selects, remaining
 * windows disabled.
 */
cfgdramacc(Gbereg *reg)
	memset(reg->harr, 0, sizeof reg->harr);
	memset(reg->base, 0, sizeof reg->base);
	reg->bare = MASK(6) - MASK(2); /* disable wins 2-5 */
	/* this doesn't make any sense, but it's required */
	reg->epap = 3 << 2 | 3; /* full access for wins 0 & 1 */
// reg->epap = 0; /* no access on access violation for all wins */
	reg->base[0].base = PHYSDRAM | WINATTR(Attrcs0) | Targdram;
	reg->base[0].size = WINSIZE(256*MB);
	reg->base[1].base = (PHYSDRAM + 256*MB) | WINATTR(Attrcs1) | Targdram;
	reg->base[1].size = WINSIZE(256*MB);
1405 ctlralloc(Ctlr *ctlr)
1413 for(i = 0; i < Nrxblks; i++) {
1414 b = iallocb(Rxblklen+Bufalign-1);
1416 iprint("ether1116: no memory for rx buffers\n");
1419 b->wp = b->rp = (uchar*)
1420 ((uintptr)(b->lim - Rxblklen) & ~(Bufalign - 1));
1421 assert(((uintptr)b->rp & (Bufalign - 1)) == 0);
1423 b->next = freeblocks.head;
1424 freeblocks.head = b;
1426 iunlock(&freeblocks);
1429 * allocate uncached rx ring descriptors because rings are shared
1430 * with the ethernet controller and more than one fits in a cache line.
1432 ctlr->rx = ucallocalign(Nrx * sizeof(Rx), Descralign, 0);
1434 panic("ether1116: no memory for rx ring");
1435 for(i = 0; i < Nrx; i++) {
1437 assert(((uintptr)r & (Descralign - 1)) == 0);
1438 r->cs = 0; /* owned by software until r->buf is non-nil */
1440 r->next = PADDR(&ctlr->rx[NEXT(i, Nrx)]);
1443 ctlr->rxtail = ctlr->rxhead = 0;
1446 /* allocate uncached tx ring descriptors */
1447 ctlr->tx = ucallocalign(Ntx * sizeof(Tx), Descralign, 0);
1449 panic("ether1116: no memory for tx ring");
1450 for(i = 0; i < Ntx; i++) {
1452 assert(((uintptr)t & (Descralign - 1)) == 0);
1455 t->next = PADDR(&ctlr->tx[NEXT(i, Ntx)]);
1458 ctlr->txtail = ctlr->txhead = 0;
1462 ctlrinit(Ether *ether)
1465 Ctlr *ctlr = ether->ctlr;
1466 Gbereg *reg = ctlr->reg;
1467 static char name[KNAMELEN];
1468 static Ctlr fakectlr; /* bigger than 4K; keep off the stack */
1470 for (i = 0; i < nelem(reg->tcqdp); i++)
1472 for (i = 0; i < nelem(reg->crdp); i++)
1479 reg->tcqdp[Qno] = PADDR(&ctlr->tx[ctlr->txhead]);
1480 reg->crdp[Qno].r = PADDR(&ctlr->rx[ctlr->rxhead]);
1483 // dumprxdescs(ctlr);
1485 /* clear stats by reading them into fake ctlr */
1486 getmibstats(&fakectlr);
1488 reg->pxmfs = MFS40by; /* allow runts in */
1491 * ipg's (inter packet gaps) for interrupt coalescing,
1492 * values in units of 64 clock cycles. A full-sized
1493 * packet (1514 bytes) takes just over 12µs to transmit.
1495 if (CLOCKFREQ/(Maxrxintrsec*64) >= (1<<16))
1496 panic("rx coalescing value %d too big for short",
1497 CLOCKFREQ/(Maxrxintrsec*64));
1498 reg->sdc = SDCrifb | SDCrxburst(Burst16) | SDCtxburst(Burst16) |
1499 SDCrxnobyteswap | SDCtxnobyteswap |
1500 SDCipgintrx(CLOCKFREQ/(Maxrxintrsec*64));
1501 reg->pxtfut = 0; /* TFUTipginttx(CLOCKFREQ/(Maxrxintrsec*64)) */
1503 /* allow just these interrupts */
1504 /* guruplug generates Irxerr interrupts continually */
1505 reg->irqmask = Isum | Irx | Irxbufferq(Qno) | Irxerr | Itxendq(Qno);
1506 reg->irqemask = IEsum | IEtxerrq(Qno) | IEphystschg | IErxoverrun |
1514 /* send errors to end of memory */
1515 // reg->euda = PHYSDRAM + 512*MB - 8*1024;
1517 reg->eudid = Attrcs1 << 4 | Targdram;
1519 // archetheraddr(ether, ctlr->reg, Qno); /* 2nd location */
1521 reg->portcfg = Rxqdefault(Qno) | Rxqarp(Qno);
1526 * start the controller running.
1527 * turn the port on, kick the receiver.
1530 reg->psc1 = PSC1rgmii | PSC1encolonbp | PSC1coldomlim(0x23);
1531 /* do this only when the controller is quiescent */
1532 reg->psc0 = PSC0porton | PSC0an_flctloff |
1533 PSC0an_pauseadv | PSC0nofrclinkdown | PSC0mru(PSC0mru1522);
1535 for (i = 0; i < 4000; i++) /* magic delay */
1538 ether->link = (reg->ps0 & PS0linkup) != 0;
1540 /* set ethernet MTU for leaky bucket mechanism to 0 (disabled) */
1544 snprint(name, sizeof name, "#l%drproc", ether->ctlrno);
1545 kproc(name, rcvproc, ether);
1547 reg->rqc = Rxqon(Qno);
/*
 * (fragment) first open of the interface: perform one-time controller
 * initialisation under initlock.
 */
attach(Ether* ether)
	Ctlr *ctlr = ether->ctlr;
	lock(&ctlr->initlock);
	if(ctlr->init == 0) {
	unlock(&ctlr->initlock);
1566 * mib registers clear on read.
1570 getmibstats(Ctlr *ctlr)
1572 Gbereg *reg = ctlr->reg;
1575 * Marvell 88f6281 errata FE-ETH-120: high long of rxby and txby
1576 * can't be read correctly, so read the low long frequently
1577 * (every 30 seconds or less), thus avoiding overflow into high long.
1579 ctlr->rxby += reg->rxbylo;
1580 ctlr->txby += reg->txbylo;
1582 ctlr->badrxby += reg->badrxby;
1583 ctlr->mactxerr += reg->mactxerr;
1584 ctlr->rxpkt += reg->rxpkt;
1585 ctlr->badrxpkt += reg->badrxpkt;
1586 ctlr->rxbcastpkt+= reg->rxbcastpkt;
1587 ctlr->rxmcastpkt+= reg->rxmcastpkt;
1588 ctlr->rx64 += reg->rx64;
1589 ctlr->rx65_127 += reg->rx65_127;
1590 ctlr->rx128_255 += reg->rx128_255;
1591 ctlr->rx256_511 += reg->rx256_511;
1592 ctlr->rx512_1023+= reg->rx512_1023;
1593 ctlr->rx1024_max+= reg->rx1024_max;
1594 ctlr->txpkt += reg->txpkt;
1595 ctlr->txcollpktdrop+= reg->txcollpktdrop;
1596 ctlr->txmcastpkt+= reg->txmcastpkt;
1597 ctlr->txbcastpkt+= reg->txbcastpkt;
1598 ctlr->badmacctlpkts+= reg->badmacctlpkts;
1599 ctlr->txflctl += reg->txflctl;
1600 ctlr->rxflctl += reg->rxflctl;
1601 ctlr->badrxflctl+= reg->badrxflctl;
1602 ctlr->rxundersized+= reg->rxundersized;
1603 ctlr->rxfrags += reg->rxfrags;
1604 ctlr->rxtoobig += reg->rxtoobig;
1605 ctlr->rxjabber += reg->rxjabber;
1606 ctlr->rxerr += reg->rxerr;
1607 ctlr->crcerr += reg->crcerr;
1608 ctlr->collisions+= reg->collisions;
1609 ctlr->latecoll += reg->latecoll;
1613 ifstat(Ether *ether, void *a, long n, ulong off)
1615 Ctlr *ctlr = ether->ctlr;
1616 Gbereg *reg = ctlr->reg;
1619 buf = p = malloc(READSTR);
1625 ctlr->intrs += ctlr->newintrs;
1626 p = seprint(p, e, "interrupts: %lud\n", ctlr->intrs);
1627 p = seprint(p, e, "new interrupts: %lud\n", ctlr->newintrs);
1629 p = seprint(p, e, "tx underrun: %lud\n", ctlr->txunderrun);
1630 p = seprint(p, e, "tx ring full: %lud\n", ctlr->txringfull);
1632 ctlr->rxdiscard += reg->pxdfc;
1633 ctlr->rxoverrun += reg->pxofc;
1634 p = seprint(p, e, "rx discarded frames: %lud\n", ctlr->rxdiscard);
1635 p = seprint(p, e, "rx overrun frames: %lud\n", ctlr->rxoverrun);
1636 p = seprint(p, e, "no first+last flag: %lud\n", ctlr->nofirstlast);
1638 p = seprint(p, e, "duplex: %s\n", (reg->ps0 & PS0fd)? "full": "half");
1639 p = seprint(p, e, "flow control: %s\n", (reg->ps0 & PS0flctl)? "on": "off");
1640 /* p = seprint(p, e, "speed: %d mbps\n", ); */
1642 p = seprint(p, e, "received bytes: %llud\n", ctlr->rxby);
1643 p = seprint(p, e, "bad received bytes: %lud\n", ctlr->badrxby);
1644 p = seprint(p, e, "internal mac transmit errors: %lud\n", ctlr->mactxerr);
1645 p = seprint(p, e, "total received frames: %lud\n", ctlr->rxpkt);
1646 p = seprint(p, e, "received broadcast frames: %lud\n", ctlr->rxbcastpkt);
1647 p = seprint(p, e, "received multicast frames: %lud\n", ctlr->rxmcastpkt);
1648 p = seprint(p, e, "bad received frames: %lud\n", ctlr->badrxpkt);
1649 p = seprint(p, e, "received frames 0-64: %lud\n", ctlr->rx64);
1650 p = seprint(p, e, "received frames 65-127: %lud\n", ctlr->rx65_127);
1651 p = seprint(p, e, "received frames 128-255: %lud\n", ctlr->rx128_255);
1652 p = seprint(p, e, "received frames 256-511: %lud\n", ctlr->rx256_511);
1653 p = seprint(p, e, "received frames 512-1023: %lud\n", ctlr->rx512_1023);
1654 p = seprint(p, e, "received frames 1024-max: %lud\n", ctlr->rx1024_max);
1655 p = seprint(p, e, "transmitted bytes: %llud\n", ctlr->txby);
1656 p = seprint(p, e, "total transmitted frames: %lud\n", ctlr->txpkt);
1657 p = seprint(p, e, "transmitted broadcast frames: %lud\n", ctlr->txbcastpkt);
1658 p = seprint(p, e, "transmitted multicast frames: %lud\n", ctlr->txmcastpkt);
1659 p = seprint(p, e, "transmit frames dropped by collision: %lud\n", ctlr->txcollpktdrop);
1661 p = seprint(p, e, "bad mac control frames: %lud\n", ctlr->badmacctlpkts);
1662 p = seprint(p, e, "transmitted flow control messages: %lud\n", ctlr->txflctl);
1663 p = seprint(p, e, "received flow control messages: %lud\n", ctlr->rxflctl);
1664 p = seprint(p, e, "bad received flow control messages: %lud\n", ctlr->badrxflctl);
1665 p = seprint(p, e, "received undersized packets: %lud\n", ctlr->rxundersized);
1666 p = seprint(p, e, "received fragments: %lud\n", ctlr->rxfrags);
1667 p = seprint(p, e, "received oversized packets: %lud\n", ctlr->rxtoobig);
1668 p = seprint(p, e, "received jabber packets: %lud\n", ctlr->rxjabber);
1669 p = seprint(p, e, "mac receive errors: %lud\n", ctlr->rxerr);
1670 p = seprint(p, e, "crc errors: %lud\n", ctlr->crcerr);
1671 p = seprint(p, e, "collisions: %lud\n", ctlr->collisions);
1672 p = seprint(p, e, "late collisions: %lud\n", ctlr->latecoll);
1676 n = readstr(off, a, n, buf);
1687 ether->ctlr = ctlr = malloc(sizeof *ctlr);
1688 switch(ether->ctlrno) {
1690 ether->irq = IRQ0gbe0sum;
1693 ether->irq = IRQ0gbe1sum;
1698 ctlr->reg = (Gbereg*)soc.ether[ether->ctlrno];
1700 /* need this for guruplug, at least */
1701 *(ulong *)soc.iocfg |= 1 << 7 | 1 << 15; /* io cfg 0: 1.8v gbe */
1704 ctlr->ether = ether;
1705 ctlrs[ether->ctlrno] = ctlr;
1708 /* ensure that both interfaces are set to RGMII before calling mii */
1709 ((Gbereg*)soc.ether[0])->psc1 |= PSC1rgmii;
1710 ((Gbereg*)soc.ether[1])->psc1 |= PSC1rgmii;
1713 /* Set phy address of the port */
1714 ctlr->port = ether->ctlrno;
1715 ctlr->reg->phy = ether->ctlrno;
1717 ether->port = (uintptr)ctlr->reg;
1719 if(kirkwoodmii(ether) < 0){
1724 miiphyinit(ctlr->mii);
1725 archetheraddr(ether, ctlr->reg, Qno); /* original location */
1726 if (memcmp(ether->ea, zeroea, sizeof zeroea) == 0){
1727 iprint("ether1116: reset: zero ether->ea\n");
1730 return -1; /* no rj45 for this ether */
1733 ether->attach = attach;
1734 ether->transmit = transmit;
1735 ether->ifstat = ifstat;
1736 ether->shutdown = shutdown;
1740 ether->promiscuous = promiscuous;
1741 ether->multicast = multicast;
1743 intrenable(Irqlo, ether->irq, interrupt, ether, ether->name);
1751 addethercard("88e1116", reset);