14 // protoID + src PeerID + channel number
20 // rawTypeSpilt + seqnum + chunk count + chunk number
21 SplitHdrSize = 1 + 2 + 2 + 2
23 // rawTypeRel + seqnum
30 MaxUnrelRawPktSize = MaxNetPktSize - MtHdrSize
31 MaxRelRawPktSize = MaxUnrelRawPktSize - RelHdrSize
33 MaxRelPktSize = (MaxRelRawPktSize - SplitHdrSize) * math.MaxUint16
34 MaxUnrelPktSize = (MaxUnrelRawPktSize - SplitHdrSize) * math.MaxUint16
// ErrPktTooBig is returned when a packet cannot fit in a single datagram
// and cannot be split into an encodable number of chunks.
37 var ErrPktTooBig = errors.New("can't send pkt: too big")
// ErrChNoTooBig is returned when a packet's channel number is out of range.
38 var ErrChNoTooBig = errors.New("can't send pkt: channel number >= ChannelCount")
40 // Send sends a packet to the Peer.
41 // It returns a channel that's closed when all chunks are acked or an error.
42 // The ack channel is nil if pkt.Unrel is true.
43 func (p *Peer) Send(pkt Pkt) (ack <-chan struct{}, err error) {
// Reject invalid channel numbers up front.
44 if pkt.ChNo >= ChannelCount {
45 return nil, ErrChNoTooBig
// If the packet plus headers exceeds one datagram, it must be split
// into chunks that all share a single split sequence number.
53 if hdrsize+OrigHdrSize+len(pkt.Data) > MaxNetPktSize {
54 c := &p.chans[pkt.ChNo]
// Each chunk leaves room for its own split header.
61 chunks := split(pkt.Data, MaxNetPktSize-(hdrsize+SplitHdrSize))
// The chunk-count field is a uint16, so more chunks cannot be encoded.
63 if len(chunks) > math.MaxUint16 {
64 return nil, ErrPktTooBig
// Encode each chunk as a split packet:
// type byte | split seqnum | total chunk count | chunk index | payload.
69 for i, chunk := range chunks {
70 data := make([]byte, SplitHdrSize+len(chunk))
71 data[0] = uint8(rawTypeSplit)
72 binary.BigEndian.PutUint16(data[1:3], uint16(sn))
73 binary.BigEndian.PutUint16(data[3:5], uint16(len(chunks)))
74 binary.BigEndian.PutUint16(data[5:7], uint16(i))
75 copy(data[SplitHdrSize:], chunk)
78 ack, err := p.sendRaw(rawPkt{
// Aggregate ack channel for the split send (close-only).
// NOTE(review): aggregation of the per-chunk acks happens in code not
// visible here — confirm against the full file.
100 ack := make(chan struct{})
// Small packet: send as a single "original" raw packet,
// prefixing only the type byte.
111 return p.sendRaw(rawPkt{
112 Data: append([]byte{uint8(rawTypeOrig)}, pkt.Data...),
118 // sendRaw sends a raw packet to the Peer.
119 func (p *Peer) sendRaw(pkt rawPkt) (ack <-chan struct{}, err error) {
// Same channel-number validation as Send.
120 if pkt.ChNo >= ChannelCount {
121 return nil, ErrChNoTooBig
// Sending on a closed peer fails with the standard net error.
129 return nil, net.ErrClosed
// Reliable packets take the reliable path (seqnums + ack tracking).
134 return p.sendRel(pkt)
// Prepend the transport header: protocol ID, then the peer ID the
// remote knows us by, then the channel/payload bytes.
137 data := make([]byte, MtHdrSize+len(pkt.Data))
138 binary.BigEndian.PutUint32(data[0:4], protoID)
139 binary.BigEndian.PutUint16(data[4:6], uint16(p.idOfPeer))
141 copy(data[MtHdrSize:], pkt.Data)
// Final size check after headers are added.
143 if len(data) > MaxNetPktSize {
144 return nil, ErrPktTooBig
147 _, err = p.Conn().WriteTo(data, p.Addr())
// A connected UDP socket rejects WriteTo with ErrWriteToConnected;
// fall back to plain Write on the connected conn.
148 if errors.Is(err, net.ErrWriteToConnected) {
149 conn, ok := p.Conn().(net.Conn)
153 _, err = conn.Write(data)
// A successful send counts as activity: push back the ping timer.
159 p.ping.Reset(PingTimeout)
164 // sendRel sends a reliable raw packet to the Peer.
165 func (p *Peer) sendRel(pkt rawPkt) (ack <-chan struct{}, err error) {
// Calling the reliable path with an unreliable packet is a programmer
// error, not a runtime condition.
167 panic("mt/rudp: sendRel: pkt.Unrel is true")
170 c := &p.chans[pkt.ChNo]
173 defer c.outrelmu.Unlock()
// Keep sn inside the 0x8000-wide reliable send window: wait for acks
// of the oldest outstanding seqnums, advancing the window as they land.
176 for ; sn-c.outrelwin >= 0x8000; c.outrelwin++ {
177 if ack, ok := c.ackchans.Load(c.outrelwin); ok {
178 <-ack.(chan struct{})
// Register the ack channel before sending so an early ack cannot race
// the registration.
183 rwack := make(chan struct{}) // close-only
184 c.ackchans.Store(sn, rwack)
// Reliable header: type byte + big-endian seqnum, then the payload.
187 reldata := make([]byte, RelHdrSize+len(pkt.Data))
188 reldata[0] = uint8(rawTypeRel)
189 binary.BigEndian.PutUint16(reldata[1:3], uint16(sn))
190 copy(reldata[RelHdrSize:], pkt.Data)
// On send failure, drop the pending ack registration so the seqnum
// is not left permanently outstanding.
197 if _, err := p.sendRaw(relpkt); err != nil {
198 c.ackchans.Delete(sn)
// Retransmit loop: resend if no ack arrives within the timeout.
// NOTE(review): time.After allocates a timer per iteration; the
// surrounding (elided) loop structure would determine whether a
// reusable time.Timer is worthwhile here.
206 case <-time.After(500 * time.Millisecond):
207 if _, err := p.sendRaw(relpkt); err != nil {
208 p.errs <- fmt.Errorf("failed to re-send timed out reliable seqnum: %d: %w", sn, err)
221 // SendDisco sends a disconnect packet to the Peer but does not close it.
222 // It returns a channel that's closed when it's acked or an error.
223 // The ack channel is nil if unrel is true.
224 func (p *Peer) SendDisco(chno uint8, unrel bool) (ack <-chan struct{}, err error) {
// A disconnect is a two-byte control packet: control type + disco subtype.
225 return p.sendRaw(rawPkt{
226 Data: []byte{uint8(rawTypeCtl), uint8(ctlDisco)},
// split partitions data into chunks of at most chunksize bytes;
// the final chunk may be shorter.
232 func split(data []byte, chunksize int) [][]byte {
// Preallocate ceil(len(data)/chunksize) chunk slots.
233 chunks := make([][]byte, 0, (len(data)+chunksize-1)/chunksize)
235 for i := 0; i < len(data); i += chunksize {
// Chunks alias data's backing array — no copies are made, so callers
// must not mutate data while the chunks are in use.
241 chunks = append(chunks, data[i:end])