package lib

import (
	"context"
	"errors"
	"io"
	"log"
	"net"
	"time"

	"git.torproject.org/pluggable-transports/snowflake.git/common/turbotunnel"
	"github.com/xtaci/kcp-go/v5"
	"github.com/xtaci/smux"
)

const (
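	// How long to wait after a failed collection attempt before trying to
	// collect another snowflake (see connectLoop).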
	ReconnectTimeout = 10 * time.Second
	SnowflakeTimeout = 20 * time.Second
	// How long to wait for the OnOpen callback on a DataChannel.
	DataChannelTimeout = 10 * time.Second
)

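// dummyAddr is a placeholder net.Addr. The KCP and smux layers here run over
// WebRTC data channels rather than directly over the network, so the local
// and remote addresses carry no meaningful information.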
type dummyAddr struct{}

func (addr dummyAddr) Network() string { return "dummy" }
func (addr dummyAddr) String() string  { return "dummy" }

// newSession returns a new smux.Session and the net.PacketConn it is running
// over. The net.PacketConn successively connects through Snowflake proxies
// pulled from snowflakes.
func newSession(snowflakes SnowflakeCollector) (net.PacketConn, *smux.Session, error) {
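	// The ClientID is written at the start of every WebRTC connection, so the
	// bridge can associate successive connections with the same client session.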
	clientID := turbotunnel.NewClientID()

	// We build a persistent KCP session on a sequence of ephemeral WebRTC
	// connections. This dialContext tells RedialPacketConn how to get a new
	// WebRTC connection when the previous one dies. Inside each WebRTC
	// connection, we use EncapsulationPacketConn to encode packets into a
	// stream.
	dialContext := func(ctx context.Context) (net.PacketConn, error) {
		log.Printf("redialing on same connection")
		// Obtain an available WebRTC remote. May block.
		conn := snowflakes.Pop()
		if conn == nil {
			return nil, errors.New("handler: Received invalid Snowflake")
		}
		log.Println("---- Handler: snowflake assigned ----")
		// Send the magic Turbo Tunnel token.
		_, err := conn.Write(turbotunnel.Token[:])
		if err != nil {
			return nil, err
		}
		// Send ClientID prefix.
		_, err = conn.Write(clientID[:])
		if err != nil {
			return nil, err
		}
		return NewEncapsulationPacketConn(dummyAddr{}, dummyAddr{}, conn), nil
	}
	pconn := turbotunnel.NewRedialPacketConn(dummyAddr{}, dummyAddr{}, dialContext)

	// conn is built on the underlying RedialPacketConn—when one WebRTC
	// connection dies, another one will be found to take its place. The
	// sequence of packets across multiple WebRTC connections drives the KCP
	// engine.
	conn, err := kcp.NewConn2(dummyAddr{}, nil, 0, 0, pconn)
	if err != nil {
		pconn.Close()
		return nil, nil, err
	}
	// Permit coalescing the payloads of consecutive sends.
	conn.SetStreamMode(true)
	// Disable the dynamic congestion window (limit only by the
	// maximum of local and remote static windows).
	conn.SetNoDelay(
		0, // default nodelay
		0, // default interval
		0, // default resend
		1, // nc=1 => congestion window off
	)
	// On the KCP connection we overlay an smux session and stream.
	smuxConfig := smux.DefaultConfig()
	smuxConfig.Version = 2
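	// A long keepalive, so the smux session is not dropped during gaps while
	// a replacement snowflake is being collected.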
	smuxConfig.KeepAliveTimeout = 10 * time.Minute
	sess, err := smux.Client(conn, smuxConfig)
	if err != nil {
		conn.Close()
		pconn.Close()
		return nil, nil, err
	}

	return pconn, sess, err
}

// Given an accepted SOCKS connection, establish a WebRTC connection to the
// remote peer and exchange traffic.
func Handler(socks net.Conn, tongue Tongue) error {
	// Prepare to collect remote WebRTC peers.
	snowflakes, err := NewPeers(tongue)
	if err != nil {
		return err
	}

	// Use a real logger to periodically output how much traffic is happening.
	snowflakes.BytesLogger = NewBytesSyncLogger()

	log.Printf("---- Handler: begin collecting snowflakes ---")
	go connectLoop(snowflakes)

	// Create a new smux session
	log.Printf("---- Handler: starting a new session ---")
	pconn, sess, err := newSession(snowflakes)
	if err != nil {
		return err
	}

	// On the smux session we overlay a stream.
	stream, err := sess.OpenStream()
	if err != nil {
		return err
	}
	defer stream.Close()

	// Begin exchanging data.
	log.Printf("---- Handler: begin stream %v ---", stream.ID())
	copyLoop(socks, stream)
	log.Printf("---- Handler: closed stream %v ---", stream.ID())
	snowflakes.End()
	log.Printf("---- Handler: end collecting snowflakes ---")
	pconn.Close()
	sess.Close()
	log.Printf("---- Handler: discarding finished session ---")
	return nil
}

// Maintain |SnowflakeCapacity| number of available WebRTC connections, to
// transfer to the Tor SOCKS handler when needed.
func connectLoop(snowflakes SnowflakeCollector) {
	for {
		_, err := snowflakes.Collect()
		if err != nil {
			log.Printf("WebRTC: %v  Retrying in %v...",
				err, ReconnectTimeout)
		}
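		// Wait ReconnectTimeout before the next collection attempt, unless
		// the collector is shut down (melted) first.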
		select {
		case <-time.After(ReconnectTimeout):
			continue
		case <-snowflakes.Melted():
			log.Println("ConnectLoop: stopped.")
			return
		}
	}
}

// Exchanges bytes between two ReadWriters.
// (In this case, between a SOCKS connection and smux stream.)
func copyLoop(socks, stream io.ReadWriter) {
	done := make(chan struct{}, 2)
	go func() {
		if _, err := io.Copy(socks, stream); err != nil {
			log.Printf("copying WebRTC to SOCKS resulted in error: %v", err)
		}
		done <- struct{}{}
	}()
	go func() {
		if _, err := io.Copy(stream, socks); err != nil {
			log.Printf("copying SOCKS to stream resulted in error: %v", err)
		}
		done <- struct{}{}
	}()
	// Return once either direction of the copy has finished.
	<-done
	log.Println("copy loop ended")
}