@@ -6,20 +6,14 @@
datadir/
broker/broker
client/client
server-webrtc/server-webrtc
server/server
proxy-go/proxy-go
proxy/proxy
probetest/probetest
snowflake.log
proxy/test
proxy/build
proxy/node_modules
proxy/snowflake-library.js
proxy/spec/support
proxy/webext/snowflake.js
proxy/webext/popup.js
proxy/webext/embed.html
proxy/webext/embed.css
proxy/webext/assets/
proxy/webext/_locales/
ignore/
npm-debug.log
# from running the vagrant setup
/.vagrant/
/sdk-tools-linux-*.zip*
/android-ndk-*
/tools/
\ No newline at end of file
image: golang:1.10-stretch
cache:
paths:
- .gradle/wrapper
- .gradle/caches
before_script:
# Create symbolic links under $GOPATH, this is needed for local build
- export src=$GOPATH/src
- mkdir -p $src/git.torproject.org/pluggable-transports
- mkdir -p $src/gitlab.com/$CI_PROJECT_NAMESPACE
- ln -s $CI_PROJECT_DIR $src/git.torproject.org/pluggable-transports/snowflake.git
- ln -s $CI_PROJECT_DIR $src/gitlab.com/$CI_PROJECT_PATH
build:
script:
- apt-get -qy update
- apt-get -qy install libx11-dev
- cd $src/gitlab.com/$CI_PROJECT_PATH/client
- go get ./...
- go build ./...
- go vet ./...
- go test -v -race ./...
after_script:
stages:
- test
- deploy
- container-build
- container-mirror
variables:
DEBIAN_FRONTEND: noninteractive
DEBIAN_OLD_STABLE: buster
DEBIAN_STABLE: bullseye
REPRODUCIBLE_FLAGS: -trimpath -ldflags=-buildid=
# set up apt for automated use
.apt-template: &apt-template
- export LC_ALL=C.UTF-8
- export DEBIAN_FRONTEND=noninteractive
- ln -fs /usr/share/zoneinfo/Etc/UTC /etc/localtime
- echo 'quiet "1";'
'APT::Install-Recommends "0";'
'APT::Install-Suggests "0";'
'APT::Acquire::Retries "20";'
'APT::Get::Assume-Yes "true";'
'Dpkg::Use-Pty "0";'
> /etc/apt/apt.conf.d/99gitlab
- apt-get update
- apt-get dist-upgrade
# Set things up to use the OS-native packages for Go. Anything that
# is downloaded by go during the `go fmt` stage is not coming from the
# Debian/Ubuntu repo. So those would need to be packaged for this to
# make it into Debian and/or Ubuntu.
.debian-native-template: &debian-native-template
variables:
GOPATH: /usr/share/gocode
before_script:
- apt-get update
- apt-get -qy install --no-install-recommends
build-essential
ca-certificates
git
golang
golang-github-cheekybits-genny-dev
golang-github-jtolds-gls-dev
golang-github-klauspost-reedsolomon-dev
golang-github-lucas-clemente-quic-go-dev
golang-github-smartystreets-assertions-dev
golang-github-smartystreets-goconvey-dev
golang-github-tjfoc-gmsm-dev
golang-github-xtaci-kcp-dev
golang-github-xtaci-smux-dev
golang-golang-x-crypto-dev
golang-golang-x-net-dev
golang-goptlib-dev
golang-golang-x-sys-dev
golang-golang-x-text-dev
golang-golang-x-xerrors-dev
# use Go installed as part of the official, Debian-based Docker images
.golang-docker-debian-template: &golang-docker-debian-template
before_script:
- apt-get update
- apt-get -qy install --no-install-recommends
ca-certificates
git
.go-test: &go-test
- gofmt -d .
- test -z "$(go fmt ./...)"
- go vet ./...
- go test -v -race ./...
- cd $CI_PROJECT_DIR/client/
- go get
- go build $REPRODUCIBLE_FLAGS
.test-template: &test-template
artifacts:
name: "${CI_PROJECT_PATH}_${CI_JOB_STAGE}_${CI_JOB_ID}_${CI_COMMIT_REF_NAME}_${CI_COMMIT_SHA}"
paths:
- client/*.aar
- client/*.jar
- client/client
expire_in: 1 week
when: on_success
after_script:
- echo "Download debug artifacts from https://gitlab.com/${CI_PROJECT_PATH}/-/jobs"
# this file changes every time but should not be cached
- rm -f $GRADLE_USER_HOME/caches/modules-2/modules-2.lock
- rm -fr $GRADLE_USER_HOME/caches/*/plugin-resolution/
- rm -rf $GRADLE_USER_HOME/caches/*/plugin-resolution/
# -- jobs ------------------------------------------------------------
android:
image: containers.torproject.org/tpo/anti-censorship/duplicatedcontainerimages:golang-1.23-$DEBIAN_STABLE
variables:
ANDROID_HOME: /usr/lib/android-sdk
LANG: C.UTF-8
cache:
paths:
- .gradle/wrapper
- .gradle/caches
<<: *test-template
before_script:
- *apt-template
- apt-get install
android-sdk-platform-23
android-sdk-platform-tools
build-essential
curl
default-jdk-headless
git
gnupg
unzip
wget
ca-certificates
- ndk=android-ndk-r21e-linux-x86_64.zip
- wget --continue --no-verbose https://dl.google.com/android/repository/$ndk
- echo "ad7ce5467e18d40050dc51b8e7affc3e635c85bd8c59be62de32352328ed467e $ndk" > $ndk.sha256
- sha256sum -c $ndk.sha256
- unzip -q $ndk
- rm ${ndk}*
- mv android-ndk-* $ANDROID_HOME/ndk-bundle/
- chmod -R a+rX $ANDROID_HOME
script:
- *go-test
- export GRADLE_USER_HOME=$CI_PROJECT_DIR/.gradle
- go version
- go env
- go get golang.org/x/mobile/cmd/gomobile
- go get golang.org/x/mobile/cmd/gobind
- go install golang.org/x/mobile/cmd/gobind
- go install golang.org/x/mobile/cmd/gomobile
- gomobile init
- cd $CI_PROJECT_DIR/client
# gomobile builds a shared library not a CLI executable
- sed -i 's,^package main$,package snowflakeclient,' *.go
- go get golang.org/x/mobile/bind
- gomobile bind -v -target=android $REPRODUCIBLE_FLAGS .
go-1.21:
image: containers.torproject.org/tpo/anti-censorship/duplicatedcontainerimages:golang-1.21-$DEBIAN_STABLE
<<: *golang-docker-debian-template
<<: *test-template
script:
- *go-test
go-1.23:
image: containers.torproject.org/tpo/anti-censorship/duplicatedcontainerimages:golang-1.23-$DEBIAN_STABLE
<<: *golang-docker-debian-template
<<: *test-template
script:
- *go-test
debian-testing:
image: debian:testing
<<: *debian-native-template
<<: *test-template
script:
- *go-test
shadow-integration:
image: containers.torproject.org/tpo/anti-censorship/duplicatedcontainerimages:golang-1.21-$DEBIAN_STABLE
variables:
SHADOW_VERSION: "193924aae0dab30ffda0abe29467f552949849fa"
TGEN_VERSION: "v1.1.2"
cache:
key: sf-integration-$SHADOW_VERSION-$TGEN_VERSION
paths:
- /opt/
artifacts:
paths:
- shadow.data.tar.gz
when: on_failure
tags:
- amd64
- tpa
script:
- apt-get update
- apt-get install -y git tor
- mkdir -p ~/.local/bin
- mkdir -p ~/.local/src
- export PATH=$PATH:$CI_PROJECT_DIR/opt/bin/
# Install shadow and tgen
- pushd ~/.local/src
- |
if [ ! -f opt/shadow/bin/shadow ]
then
echo "The required version of shadow was not cached, building from source"
git clone --shallow-since=2021-08-01 https://github.com/shadow/shadow.git
pushd shadow/
git checkout $SHADOW_VERSION
CONTAINER=debian:stable-slim ci/container_scripts/install_deps.sh
CC=gcc CONTAINER=debian:stable-slim ci/container_scripts/install_extra_deps.sh
export PATH="$HOME/.cargo/bin:${PATH}"
./setup build --jobs $(nproc) --prefix $CI_PROJECT_DIR/opt/
./setup install
popd
fi
- |
if [ ! -f opt/shadow/bin/tgen ]
then
echo "The required version of tgen was not cached, building from source"
git clone --branch $TGEN_VERSION --depth 1 https://github.com/shadow/tgen.git
pushd tgen/
apt-get install -y cmake libglib2.0-dev libigraph-dev
mkdir build && cd build
cmake .. -DCMAKE_INSTALL_PREFIX=$CI_PROJECT_DIR/opt/
make
make install
popd
fi
install $CI_PROJECT_DIR/opt/bin/tgen ~/.local/bin/tgen
- popd
# Apply snowflake patch(es)
- |
git clone --depth 1 https://github.com/cohosh/shadow-snowflake-minimal
git am -3 shadow-snowflake-minimal/*.patch
# Install snowflake binaries to .local folder
- |
for app in "proxy" "client" "server" "broker" "probetest"; do
pushd $app
go build
install $app ~/.local/bin/snowflake-$app
popd
done
# Install stun server
- GOBIN=~/.local/bin go install github.com/gortc/stund@latest
# Run a minimal snowflake shadow experiment
- pushd shadow-snowflake-minimal/
- shadow --log-level=debug --model-unblocked-syscall-latency=true snowflake-minimal.yaml > shadow.log
# Check to make sure streams succeeded
- |
if [ $(grep -c "stream-success" shadow.data/hosts/snowflakeclient/tgen.*.stdout) = 10 ]
then
echo "All streams in shadow completed successfully"
else
echo "Shadow simulation failed"
exit 1
fi
after_script:
- tar -czvf $CI_PROJECT_DIR/shadow.data.tar.gz shadow-snowflake-minimal/shadow.data/ shadow-snowflake-minimal/shadow.log
generate_tarball:
stage: deploy
image: golang:1.21-$DEBIAN_STABLE
rules:
- if: $CI_COMMIT_TAG
script:
- go mod vendor
- tar czf ${CI_PROJECT_NAME}-${CI_COMMIT_TAG}.tar.gz --transform "s,^,${CI_PROJECT_NAME}-${CI_COMMIT_TAG}/," *
after_script:
- echo TAR_JOB_ID=$CI_JOB_ID >> generate_tarball.env
artifacts:
paths:
- ${CI_PROJECT_NAME}-${CI_COMMIT_TAG}.tar.gz
reports:
dotenv: generate_tarball.env
release-job:
stage: deploy
image: registry.gitlab.com/gitlab-org/release-cli:latest
rules:
- if: $CI_COMMIT_TAG
needs:
- job: generate_tarball
artifacts: true
script:
- echo "running release_job"
release:
name: 'Release $CI_COMMIT_TAG'
description: 'Created using the release-cli'
tag_name: '$CI_COMMIT_TAG'
ref: '$CI_COMMIT_TAG'
assets:
links:
- name: '${CI_PROJECT_NAME}-${CI_COMMIT_TAG}.tar.gz'
url: '${CI_PROJECT_URL}/-/jobs/${TAR_JOB_ID}/artifacts/file/${CI_PROJECT_NAME}-${CI_COMMIT_TAG}.tar.gz'
# Build the container only if the commit is to main, or it is a tag.
# If the commit is to main, then the docker image tag should be set to `nightly`.
# If it is a tag, then the docker image tag should be set to the tag name.
build-container:
variables:
TAG: $CI_COMMIT_TAG # Will not be set on a non-tag build, will be set later
stage: container-build
parallel:
matrix:
- ARCH: amd64
- ARCH: arm64
- ARCH: s390x
tags:
- $ARCH
image:
name: gcr.io/kaniko-project/executor:debug
entrypoint: [""]
script:
- if [ $CI_COMMIT_REF_NAME == "main" ]; then export TAG='nightly'; fi
- >-
/kaniko/executor
--context "${CI_PROJECT_DIR}"
--dockerfile "${CI_PROJECT_DIR}/Dockerfile"
--destination "${CI_REGISTRY_IMAGE}:${TAG}_${ARCH}"
rules:
- if: $CI_COMMIT_REF_NAME == "main"
- if: $CI_COMMIT_TAG
merge-manifests:
variables:
TAG: $CI_COMMIT_TAG
stage: container-build
needs:
- job: build-container
artifacts: false
image:
name: containers.torproject.org/tpo/anti-censorship/duplicatedcontainerimages:mplatform-manifest-tool-alpine
entrypoint: [""]
script:
- if [ $CI_COMMIT_REF_NAME == "main" ]; then export TAG='nightly'; fi
- >-
manifest-tool
--username="${CI_REGISTRY_USER}"
--password="${CI_REGISTRY_PASSWORD}"
push from-args
--platforms linux/amd64,linux/arm64,linux/s390x
--template "${CI_REGISTRY_IMAGE}:${TAG}_ARCH"
--target "${CI_REGISTRY_IMAGE}:${TAG}"
rules:
- if: $CI_COMMIT_REF_NAME == "main"
when: always
- if: $CI_COMMIT_TAG
when: always
# If this is a tag, then we want to additionally tag the image as `latest`
tag-container-release:
stage: container-build
needs:
- job: merge-manifests
artifacts: false
image:
name: gcr.io/go-containerregistry/crane:debug
entrypoint: [""]
allow_failure: false
variables:
CI_REGISTRY: $CI_REGISTRY
IMAGE_TAG: $CI_REGISTRY_IMAGE:$CI_COMMIT_TAG
RELEASE_TAG: $CI_REGISTRY_IMAGE:latest
script:
- echo "Tagging docker image with stable tag with crane"
- echo -n "$CI_JOB_TOKEN" | crane auth login $CI_REGISTRY -u gitlab-ci-token --password-stdin
- crane cp $IMAGE_TAG $RELEASE_TAG
rules:
- if: $CI_COMMIT_TAG
when: always
clean-image-tags:
stage: container-build
needs:
- job: merge-manifests
artifacts: false
image: containers.torproject.org/tpo/tpa/base-images/debian:bookworm
before_script:
- *apt-template
- apt-get install -y jq curl
script:
- "REGISTRY_ID=$(curl --silent --request GET --header \"JOB-TOKEN: ${CI_JOB_TOKEN}\" \"https://gitlab.torproject.org/api/v4/projects/${CI_PROJECT_ID}/registry/repositories\" | jq '.[].id')"
- "curl --request DELETE --data \"name_regex_delete=(latest|${CI_COMMIT_TAG})_.*\" --header \"JOB-TOKEN: ${CI_JOB_TOKEN}\" \"https://gitlab.torproject.org/api/v4/projects/${CI_PROJECT_ID}/registry/repositories/${REGISTRY_ID}/tags\""
rules:
- if: $CI_COMMIT_REF_NAME == "main"
when: always
- if: $CI_COMMIT_TAG
when: always
mirror-image-to-dockerhub:
stage: container-mirror
variables:
DOCKERHUB_MIRROR_REPOURL: $DOCKERHUB_MIRROR_REPOURL
DOCKERHUB_USERNAME: $DOCKERHUB_MIRROR_USERNAME
DOCKERHUB_PASSWORD: $DOCKERHUB_MIRROR_PASSWORD
image:
name: gcr.io/go-containerregistry/crane:debug
entrypoint: [""]
rules:
- if: $CI_COMMIT_REF_NAME == "main"
when: always
- if: $CI_COMMIT_TAG
when: always
script:
- echo "$DOCKERHUB_PASSWORD" | crane auth login docker.io -u $DOCKERHUB_MIRROR_USERNAME --password-stdin
- crane cp -a containers.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake $DOCKERHUB_MIRROR_REPOURL
[submodule "proxy/translation"]
path = proxy/translation
url = https://git.torproject.org/translation.git
branch = snowflakeaddon-messages.json_completed
@@ -2,43 +2,12 @@ language: go
dist: xenial
go_import_path: git.torproject.org/pluggable-transports/snowflake.git
addons:
apt:
sources:
- ubuntu-toolchain-r-test
packages:
- g++-5
- gcc-5
go_import_path: git.torproject.org/pluggable-transports/snowflake.git/v2
go:
- 1.13.x
env:
- TRAVIS_NODE_VERSION="8" CC="gcc-5" CXX="g++-5"
before_install:
- nvm install $TRAVIS_NODE_VERSION
install:
- go get -u github.com/smartystreets/goconvey
- go get -u github.com/keroserene/go-webrtc
- go get -u github.com/pion/webrtc
- go get -u github.com/dchest/uniuri
- go get -u github.com/gorilla/websocket
- go get -u git.torproject.org/pluggable-transports/goptlib.git
- go get -u google.golang.org/appengine
- go get -u golang.org/x/crypto/acme/autocert
- go get -u golang.org/x/net/http2
- pushd proxy
- npm install
- popd
script:
- test -z "$(go fmt ./...)"
- go vet ./...
- go test -v -race ./...
- cd proxy
- npm run lint
- npm test
Changes in version v2.11.0 - 2025-03-18
- Fix data race warnings for tokens_t
- Fix race condition in proxy connection count stats
- Make NATPolicy thread-safe
- Fix race conditions with error scope
- Fix race condition with proxy isClosing variable
- Issue 40454: Update broker metrics to count matches, denials, and timeouts
- Add proxy event and metrics for failed connections
- Issue 40377: Create CI artifact if shadow fails
- Issue 40438: Copy base client config for each SOCKS connection
- Fix minor data race in Snowflake broker metrics
- Issue 40363: Process and read broker SQS messages more quickly
- Issue 40419: delay before calling dc.Close() to improve NAT test on proxy
- Add country stats to proxy prometheus metrics
- Issue 40381: Avoid snowflake client dependency in proxy
- Issue 40446: Lower broker ClientTimeout to 5 seconds in line with CDN77 defaults
- Refactor out utls library into ptutil/utls
- Issue 40414: Use /etc/localtime for CI
- Issue 40440: Add LE self-signed ISRG Root X1 to cert pool
- Proxy refactor to simplify tokens.ret() on error
- Clarify ephemeral-ports-range proxy option
- Issue 40417: Fixes and updates to CI containers
- Issue 40178: Handle unknown client type better
- Issue 40304: Update STUN server list
- Issue 40210: Remove proxy log when offer is nil
- Issue 40413: Log EventOnCurrentNATTypeDetermined for proxy
- Use named return for some functions to improve readability
- Issue 40271: Use pion SetIPFilter rather than our own StripLocalAddress
- Issue 40413: Suppress logs of proxy events by default
- Add IsLinkLocalUnicast in IsLocal
- Fix comments
- Bump versions of dependencies
Changes in version v2.10.1 - 2024-11-11
- Issue 40406: Update version string
Changes in version v2.10.0 - 2024-11-07
- Issue 40402: Add proxy event for when client has connected
- Issue 40405: Prevent panic for duplicate SnowflakeConn.Close() calls
- Enable local time for proxy logging
- Have proxy summary statistics log average transfer rate
- Issue 40210: Remove duplicate poll interval loop in proxy
- Issue 40371: Prevent broker and proxy from rejecting clients without ICE candidates
- Issue 40392: Allow the proxy and probetest to set multiple STUN URLs
- Issue 40387: Fix error in probetest NAT check
- Fix proxy panic on invalid relayURL
- Set empty pattern if broker bridge-list is empty
- Improve documentation of Ephemeral[Min,Max]Port
- Fix resource leak and NAT check in probetest
- Fix memory leak from failed NAT check
- Improve NAT check logging
- Issue 40230: Send answer even if ICE gathering is not complete
- Improve broker error message on unknown bridge fingerprint
- Don't proxy private IP addresses
- Only accept ws:// and wss:// relay addresses
- Issue 40373: Add cli flag and SnowflakeProxy field to modify proxy poll interval
- Use %w not %v in fmt.Errorf
- Updates to documentation
- Adjust copy buffer size to improve proxy performance
- Improve descriptions of cli flags
- Cosmetic changes for code readability
- Issue 40367: Deduplicate prometheus metrics names
- Report the version of snowflake to the tor process
- Issue 40365: Indicate whether the repo was modified in the version string
- Simplify NAT checking logic
- Issue 40354: Use ptutil library for safelog and prometheus metrics
- Add cli flag to set a listen address for proxy prometheus metrics
- Issue 40345: Integrate docker image with release process
- Bump versions of dependencies
Changes in version v2.9.2 - 2024-03-18
- Issue 40288: Add integration testing with Shadow
- Issue 40345: Automatically build and push containers to our registry
- Issue 40339: Fix client ID reuse bug in SQS rendezvous
- Issue 40341: Modify SQS rendezvous arguments to use b64 encoded parameters
- Issue 40330: Add new metrics at the broker for per-country rendezvous stats
- Issue 40345: Update docker container tags
- Bump versions of dependencies
Changes in version v2.9.1 - 2024-02-27
- Issue 40335: Fix release job
- Change deprecated io/ioutil package to io package
- Bump versions of dependencies
Changes in version v2.9.0 - 2024-02-05
- Issue 40285: Add vcs revision to version string
- Issue 40294: Update recommended torrc options in client README
- Issue 40306: Scrub space-separated IP addresses
- Add proxy commandline option for probe server URL
- Use SetNet setting in probetest to ignore net.Interfaces error
- Add probetest commandline option for STUN URL
- Issue 26151: Implement SQS rendezvous in client and broker
- Add broker metrics to track rendezvous method
- Cosmetic code quality fixes
- Bump versions of dependencies
Changes in version v2.8.1 - 2023-12-21
- Issue 40276: Reduce allocations in encapsulation.ReadData
- Issue 40310: Remove excessive logging for closed proxy connections
- Issue 40278: Add network fix for old version of android to proxy
- Bump versions of dependencies
Changes in version v2.8.0 - 2023-11-20
- Issue 40069: Add outbound proxy support
- Issue 40301: Fix for a bug in domain fronting configurations
- Issue 40302: Remove throughput summary from proxy logger
- Issue 40302: Change proxy stats logging to only log stats for traffic that occurred in the summary interval
- Update renovate bot configuration to use Go 1.21
- Bump versions of dependencies
Changes in version v2.7.0 - 2023-10-16
7142fa3 fix(proxy): Correctly close connection pipe when dealing with error
6393af6 Remove proxy churn measurements from broker.
a615e8b fix(proxy): remove _potential_ deadlock
d434549 Maintain backward compatibility with old clients
9fdfb3d Randomly select front domain from comma-separated list
5cdf52c Update dependencies
1559963 chore(deps): update module github.com/xtaci/kcp-go/v5 to v5.6.3
60e66be Remove Golang 1.20 from CI Testing
1d069ca Update CI targets to test android from golang 1.21
3a050c6 Use ShouldBeNil to check for nil values
e45e8e5 chore(deps): update module github.com/smartystreets/goconvey to v1.8.1
f47ca18 chore(deps): update module gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/goptlib to v1.5.0
106da49 chore(deps): update module github.com/pion/webrtc/v3 to v3.2.20
2844ac6 Update CI targets to include only Go 1.20 and 1.21
f4e1ab9 chore(deps): update module golang.org/x/net to v0.15.0
caaff70 Update module golang.org/x/sys to v0.12.0
Changes in version v2.6.1 - 2023-09-11
- a3bfc28 Update module golang.org/x/crypto to v0.12.0
- e37e15a Update golang Docker tag to v1.21
- b632c7d Workaround for shadow in lieu of AF_NETLINK support
- 0cb2975 Update module golang.org/x/net to v0.13.0 [SECURITY]
- f73fe6e Keep the 'v' from the tag on the released .tar.gz
- 8104732 Change DefaultRelayURL back to wss://snowflake.torproject.net/.
- d932cb2 feat: add option to expose the stats by using metrics
- af73ab7 Add renovate config
- aaeab3f Update dependencies
- 58c3121 Close temporary UDPSession in TestQueuePacketConnWriteToKCP.
- 80980a3 Fix a comment left over from turbotunnel-quic.
- 08d1c6d Bump minimum required version of go
Changes in version v2.6.0 - 2023-06-19
- Issue 40243: Implement datachannel flow control at proxy
- Issue 40087: Append Let's Encrypt ISRG Root X1 to cert pool
- Issue 40198: Use IP_BIND_ADDRESS_NO_PORT when dialing the ORPort on linux
- Move from gitweb to gitlab
- Add warning log at broker when proxy does not connect with client
- Fix unit tests after SDP validation
- Soften non-critical log from error to warning
- Issue 40231: Validate SDP offers and answers
- Add scanner error check to ClusterCounter.Count
- Fix server benchmark tests
- Issue 40260: Use a sync.Pool to reuse QueuePacketConn buffers
- Issue 40043: Restore ListenAndServe error in server
- Update pion webrtc library versions
- Issue 40108: Add outbound address config option to proxy
- Issue 40260: Fix a data race in the Snowflake server
- Issue 40216: Add utls-imitate, utls-nosni documentation to the README
- Fix up/down traffic stats in standalone proxy
- Issue 40226: Filter out ICE servers that are not STUN
- Issue 40226: Update README to reflect the type of ICE servers we support
- Issue 40226: Parse ICE servers using the pion/ice library function
- Bring client torrc up to date with Tor Browser
Changes in version v2.5.1 - 2023-01-18
- Issue 40249: Fix issue with Skip Hello Verify patch
Changes in version v2.5.0 - 2023-01-18
- Issue 40249: Apply Skip Hello Verify Migration
Changes in version v2.4.3 - 2023-01-16
- Fix version number in version.go
Changes in version v2.4.2 - 2023-01-13
- Issue 40208: Enhance help info for capacity flag
- Issue 40232: Update README and fix help output
- Issue 40173: Increase clientIDAddrMapCapacity
- Issue 40177: Manually unlock mutex in ClientMap.SendQueue
- Issue 40177: Have SnowflakeClientConn implement io.WriterTo
- Issue 40179: Reduce turbotunnel queueSize from 2048 to 512
- Issue 40187/40199: Take ownership of buffer in QueuePacketConn QueueIncoming/WriteTo
- Add more tests for URL encoded IPs (safelog)
- Fix server flag name
- Issue 40200: Use multiple parallel KCP state machines in the server
- Add a num-turbotunnel server transport option
- Issue 40241: Switch default proxy STUN server to stun.l.google.com
Changes in version v2.4.1 - 2022-12-01
- Issue 40224: Bug fix in utls roundtripper
Changes in version v2.4.0 - 2022-11-29
- Fix proxy command line help output
- Issue 40123: Reduce multicast DNS candidates
- Add ICE ephemeral ports range setting
- Reformat using Go 1.19
- Update CI tests to include latest and minimum Go versions
- Issue 40184: Use fixed unit for bandwidth logging
- Update gorilla/websocket to v1.5.0
- Issue 40175: Server performance improvements
- Issue 40183: Change snowflake proxy log verbosity
- Issue 40117: Display proxy NAT type in logs
- Issue 40198: Add a `orport-srcaddr` server transport option
- Add gofmt output to CI test
- Issue 40185: Change bandwidth type from int to int64 to prevent overflow
- Add version output support to snowflake
- Issue 40229: Change regexes for ipv6 addresses to catch url-encoded addresses
- Issue 40220: Close stale connections in standalone proxy
Changes in version v2.3.0 - 2022-06-23
- Issue 40146: Avoid performing two NAT probe tests at startup
- Issue 40134: Log messages from client NAT check failures are confusing
- Issue 34075: Implement metrics to measure snowflake churn
- Issue 28651: Prepare all pieces of the snowflake pipeline for a second snowflake bridge
- Issue 40129: Distributed Snowflake Server Support
Changes in version v2.2.0 - 2022-05-25
- Issue 40099: Initialize SnowflakeListener.closed
- Add connection failure events for proxy timeouts
- Issue 40103: Fix proxy logging verb tense
- Fix up and downstream metrics output for proxy
- Issue 40054: uTLS for broker negotiation
- Forward bridge fingerprint from client to broker (WIP, Issue 28651)
- Issue 40104: Make it easier to configure proxy type
- Remove version from ClientPollRequest
- Issue 40124: Move tor-specific code out of library
- Issue 40115: Scrub pt event logs
- Issue 40127: Bump webrtc and dtls library versions
- Bump version of webrtc and dtls to fix dtls CVEs
- Issue 40141: Ensure library calls of events can be scrubbed
Changes in version v2.1.0 - 2022-02-08
- Issue 40098: Remove support for legacy one shot mode
- Issue 40079: Make connection summary at proxy privacy preserving
- Issue 40076: Add snowflake event API for notifications of connection events
- Issue 40084: Increase capacity of client address map at the server
- Issue 40060: Further clean up snowflake server logs
- Issue 40089: Validate proxy and client supplied strings at broker
- Issue 40014: Update version of DTLS library to include fingerprinting fixes
- Issue 40075: Support recurring NAT type check in standalone proxy
Changes in version v2.0.0 - 2021-11-04
- Turn the standalone snowflake proxy code into a library
- Clean up and reworked the snowflake client and server library code
- Unify broker/bridge domains to *.torproject.net
- Updates to the snowflake library documentation
- New package functions to define and set a rendezvous method with the
broker
- Factor out the broker geoip code into its own external library
- Bug fix to check error calls in preparePeerConnection
- Bug fixes in snowflake tests
- Issue 40059: add the ability to pass in snowflake arguments through SOCKS
- Increase buffer sizes for sending and receiving snowflake data
- Issue 25985: rendezvous with the broker using AMP cache
- Issue 40055: wait for the full poll interval between proxy polls
Changes in version v1.1.0 - 2021-07-13
- Refactors of the Snowflake broker code
- Refactors of the Snowflake proxy code
- Issue 40048: assign proxies based on self-reported client load
- Issue 40052: fixed a memory leak in the server accept loop
- Version bump of kcp and smux libraries
- Bug fix to pass the correct client address to the Snowflake bridge metrics
counter
- Bug fixes to prevent race conditions in the Snowflake client
Changes in version v1.0.0 - 2021-06-07
- Initial release.
FROM docker.io/library/golang:1.23-bookworm AS build
# Set some labels
# The io.containers.autoupdate label instructs podman to reach out to the
# corresponding registry to check if the image has been updated. If an image
# must be updated, Podman pulls it down and restarts the systemd unit executing
# the container. See podman-auto-update(1) for more details, or
# https://docs.podman.io/en/latest/markdown/podman-auto-update.1.html
LABEL io.containers.autoupdate=registry
LABEL org.opencontainers.image.authors="anti-censorship-team@lists.torproject.org"
RUN apt-get update && apt-get install -y tor-geoipdb
ADD . /app
WORKDIR /app/proxy
RUN go get
RUN CGO_ENABLED=0 go build -o proxy -ldflags '-extldflags "-static" -w -s' .
FROM scratch
COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
COPY --from=build /usr/share/zoneinfo /usr/share/zoneinfo
COPY --from=build /usr/share/tor/geoip* /usr/share/tor/
COPY --from=build /app/proxy/proxy /bin/proxy
ENTRYPOINT [ "/bin/proxy" ]
@@ -3,7 +3,7 @@
================================================================================
Copyright (c) 2016, Serene Han, Arlo Breault
All rights reserved.
Copyright (c) 2019-2020, The Tor Project, Inc
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
......
# Snowflake
[![Build Status](https://travis-ci.org/keroserene/snowflake.svg?branch=master)](https://travis-ci.org/keroserene/snowflake)
Pluggable Transport using WebRTC, inspired by Flashproxy.
Snowflake is a censorship-evasion pluggable transport using WebRTC, inspired by Flashproxy.
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
**Table of Contents**
- [Structure of this Repository](#structure-of-this-repository)
- [Usage](#usage)
- [Dependencies](#dependencies)
- [More Info](#more-info)
- [Building](#building)
- [Test Environment](#test-environment)
- [Using Snowflake with Tor](#using-snowflake-with-tor)
- [Running a Snowflake Proxy](#running-a-snowflake-proxy)
- [Using the Snowflake Library with Other Applications](#using-the-snowflake-library-with-other-applications)
- [Test Environment](#test-environment)
- [FAQ](#faq)
- [Appendix](#appendix)
- [-- Testing directly via WebRTC Server --](#---testing-directly-via-webrtc-server---)
- [More info and links](#more-info-and-links)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
### Usage
```
cd client/
go get
go build
tor -f torrc
```
This should start the client plugin, bootstrapping to 100% using WebRTC.
#### Dependencies
Client:
- [pion/webrtc](https://github.com/pion/webrtc)
- Go 1.10+
Proxy:
- JavaScript
---
#### More Info
Tor can plug in the Snowflake client via a correctly configured `torrc`.
For example:
```
ClientTransportPlugin snowflake exec ./client \
-url https://snowflake-broker.azureedge.net/ \
-front ajax.aspnetcdn.com \
-ice stun:stun.l.google.com:19302
-max 3
```
### Structure of this Repository
The flags `-url` and `-front` allow the Snowflake client to speak to the Broker,
in order to get connected with some volunteer's browser proxy. `-ice` is a
comma-separated list of ICE servers, which are required for NAT traversal.
- `broker/` contains code for the Snowflake broker
- `doc/` contains Snowflake documentation and manpages
- `client/` contains the Tor pluggable transport client and client library code
- `common/` contains generic libraries used by multiple pieces of Snowflake
- `proxy/` contains code for the Go standalone Snowflake proxy
- `probetest/` contains code for a NAT probetesting service
- `server/` contains the Tor pluggable transport server and server library code
For logging, run `tail -F snowflake.log` in a second terminal.
You can modify the `torrc` to use your own broker:
```
ClientTransportPlugin snowflake exec ./client --meek
```
#### Building
### Usage
This describes how to build the in-browser snowflake. For the client, see Usage,
above.
Snowflake is currently deployed as a pluggable transport for Tor.
The client will only work if there are browser snowflakes available.
To run your own:
#### Using Snowflake with Tor
```
cd proxy/
npm run build
```
To use the Snowflake client with Tor, you will need to add the appropriate `Bridge` and `ClientTransportPlugin` lines to your [torrc](https://2019.www.torproject.org/docs/tor-manual.html.en) file. See the [client README](client) for more information on building and running the Snowflake client.
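As an illustrative sketch only (the bridge address below is a placeholder, since Snowflake does not route by it; the broker URL and front domain must be taken from the client README or an up-to-date Tor Browser torrc; the fingerprint shown is the default bridge fingerprint that also appears in this repository's bridge list):

```
UseBridges 1
ClientTransportPlugin snowflake exec ./client -url https://<broker URL>/ -front <front domain>
Bridge snowflake 192.0.2.3:80 2B280B23E1107BB62ABFC40DDCC8824814F80A72
```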
Then, start a local http server in the `proxy/build/` in any way you like.
For instance:
#### Running a Snowflake Proxy
```
cd build/
python -m http.server
```
You can contribute to Snowflake by running a Snowflake proxy. We have the option to run a proxy in your browser or as a standalone Go program. See our [community documentation](https://community.torproject.org/relay/setup/snowflake/) for more details.
Then, open a browser tab to `http://127.0.0.1:8000/embed.html` to view
the debug console of the snowflake.
So long as that tab is open, you are an ephemeral Tor bridge.
#### Using the Snowflake Library with Other Applications
Snowflake can be used as a Go API, and adheres to the [v2.1 pluggable transports specification](). For more information on using the Snowflake Go library, see the [Snowflake library documentation](doc/using-the-snowflake-library.md).
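For orientation, here is a rough sketch of what client-side use of the library can look like; the `ClientConfig` fields and values shown are illustrative and should be checked against the library documentation linked above:

```
package main

import (
	"log"

	sf "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/client/lib"
)

func main() {
	// Placeholder values; see doc/using-the-snowflake-library.md for the
	// full set of ClientConfig fields and their meaning.
	config := sf.ClientConfig{
		BrokerURL:    "https://<broker URL>/",
		ICEAddresses: []string{"stun:stun.l.google.com:19302"},
		Max:          1,
	}
	transport, err := sf.NewSnowflakeClient(config)
	if err != nil {
		log.Fatal(err)
	}
	// Dial returns a net.Conn whose traffic is tunneled through a Snowflake proxy.
	conn, err := transport.Dial()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	// ... use conn like any other net.Conn ...
}
```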
#### Test Environment
### Test Environment
There is a Docker-based test environment at https://github.com/cohosh/snowbox.
### FAQ
**Q: How does it work?**
In the Tor use-case:
1. Volunteers visit websites which host the "snowflake" proxy. (just
like flashproxy)
1. Volunteers visit websites that host the 'snowflake' proxy, run a snowflake [web extension](https://gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake-webext), or use a standalone proxy.
2. Tor clients automatically find available browser proxies via the Broker
(the domain fronted signaling channel).
3. Tor client and browser proxy establish a WebRTC peer connection.
@@ -132,22 +81,26 @@ manual port forwarding!
It utilizes the "ICE" negotiation via WebRTC, and also involves a great
abundance of ephemeral and short-lived (and special!) volunteer proxies...
### Appendix
### More info and links
We have more documentation in the [Snowflake wiki](https://gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/-/wikis/home) and at https://snowflake.torproject.org/.
##### -- Android AAR Reproducible Build Setup --
##### -- Testing with Standalone Proxy --
Using `gomobile` it is possible to build snowflake as shared libraries for all
the architectures supported by Android. This is in the _.gitlab-ci.yml_, which
runs in GitLab CI. It is also possible to run this setup in a Virtual Machine
using [vagrant](https://www.vagrantup.com/). Just run `vagrant up` and it will
create and provision the VM. `vagrant ssh` to get into the VM to use it as a
development environment.
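Outside of CI or the Vagrant VM, the corresponding steps can be run by hand. This is a minimal sketch mirroring the `android` job in the `.gitlab-ci.yml` above (NDK installation and exact tool versions are omitted):

```
go install golang.org/x/mobile/cmd/gobind@latest
go install golang.org/x/mobile/cmd/gomobile@latest
gomobile init
cd client/
# The CI job first rewrites `package main` to a library package name,
# because gomobile bind builds a shared library, not a CLI executable.
gomobile bind -v -target=android .
```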
```
cd proxy-go
go build
./proxy-go
```
##### uTLS Settings
##### -- Testing directly via WebRTC Server --
Snowflake communicates with the broker, which serves as its signaling server, over a TLS-based domain-fronted connection that may be identified by its use of the Go TLS stack.
See server-webrtc/README.md for information on connecting directly to a
WebRTC server transport plugin, bypassing the Broker and browser proxy.
uTLS is a software library that imitates the TLS Client Hello fingerprint of browsers and other popular software in order to evade censorship based on TLS Client Hello fingerprinting. Enable it with `-utls-imitate`; you can use `-version` to see a list of supported values.
More documentation on the way.
Depending on client and server configuration, it may not always work as expected, since not all extensions are correctly implemented.
Also available at:
[torproject.org/pluggable-transports/snowflake](https://gitweb.torproject.org/pluggable-transports/snowflake.git/)
You can also remove SNI (Server Name Indication) from the Client Hello with `-utls-nosni` to evade censorship; note that not all servers support this.
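For example, an illustrative client invocation combining these options might look like the following (the imitation value is only an example; consult the list of supported values as described above):

```
ClientTransportPlugin snowflake exec ./client \
    -url https://<broker URL>/ \
    -front <front domain> \
    -utls-imitate hellorandomizedalpn \
    -utls-nosni
```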
require 'pathname'
require 'tempfile'
require 'yaml'
srvpath = Pathname.new(File.dirname(__FILE__)).realpath
configfile = YAML.load_file(File.join(srvpath, "/.gitlab-ci.yml"))
remote_url = 'https://gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake'
# set up essential environment variables
env = configfile['variables']
env = env.merge(configfile['android']['variables'])
env['CI_PROJECT_DIR'] = '/builds/tpo/anti-censorship/pluggable-transports/snowflake'
env_file = Tempfile.new('env')
File.chmod(0644, env_file.path)
env.each do |k,v|
env_file.write("export #{k}='#{v}'\n")
end
env_file.rewind
sourcepath = '/etc/profile.d/env.sh'
header = "#!/bin/bash -ex\nsource #{sourcepath}\ncd $CI_PROJECT_DIR\n"
before_script_file = Tempfile.new('before_script')
File.chmod(0755, before_script_file.path)
before_script_file.write(header)
configfile['android']['before_script'].flatten.each do |line|
before_script_file.write(line)
before_script_file.write("\n")
end
before_script_file.rewind
script_file = Tempfile.new('script')
File.chmod(0755, script_file.path)
script_file.write(header)
configfile['android']['script'].flatten.each do |line|
script_file.write(line)
script_file.write("\n")
end
script_file.rewind
Vagrant.configure("2") do |config|
config.vm.box = "debian/bullseye64"
config.vm.synced_folder '.', '/vagrant', disabled: true
config.vm.provision "file", source: env_file.path, destination: 'env.sh'
config.vm.provision :shell, inline: <<-SHELL
set -ex
mv ~vagrant/env.sh #{sourcepath}
source #{sourcepath}
test -d /go || mkdir /go
mkdir -p $(dirname $CI_PROJECT_DIR)
chown -R vagrant.vagrant $(dirname $CI_PROJECT_DIR)
apt-get update
apt-get -qy install --no-install-recommends git
git clone #{remote_url} $CI_PROJECT_DIR
chmod -R a+rX,u+w /go $CI_PROJECT_DIR
chown -R vagrant.vagrant /go $CI_PROJECT_DIR
SHELL
config.vm.provision "file", source: before_script_file.path, destination: 'before_script.sh'
config.vm.provision "file", source: script_file.path, destination: 'script.sh'
config.vm.provision :shell, inline: '/home/vagrant/before_script.sh'
config.vm.provision :shell, privileged: false, inline: '/home/vagrant/script.sh'
# remove this or comment it out to use VirtualBox instead of libvirt
config.vm.provider :libvirt do |libvirt|
libvirt.memory = 1536
end
end
This component runs on Google App Engine. It reflects domain-fronted
requests from a client to the Snowflake broker.
You need the Go App Engine SDK in order to deploy the app.
https://cloud.google.com/sdk/docs/#linux
After unpacking, install the app-engine-go component:
google-cloud-sdk/bin/gcloud components install app-engine-go
To test locally, run
google-cloud-sdk/bin/dev_appserver.py app.yaml
The app will be running at http://127.0.0.1:8080/.
To deploy to App Engine, first create a new project and app. You have to
think of a unique name (marked as "<appname>" in the commands). You only
have to do the "create" step once; subsequent times you can go straight
to the "deploy" step. The "gcloud auth login" command will open a
browser window so you can log in to a Google account.
google-cloud-sdk/bin/gcloud auth login
google-cloud-sdk/bin/gcloud projects create <appname>
google-cloud-sdk/bin/gcloud app create --project=<appname>
Then to deploy the project, run:
google-cloud-sdk/bin/gcloud app deploy --project=<appname>
To configure the Snowflake client to talk to the App Engine app, provide
"https://<appname>.appspot.com/" as the --url option.
UseBridges 1
Bridge snowflake 0.0.2.0:1
ClientTransportPlugin snowflake exec ./client -url https://<appname>.appspot.com/ -front www.google.com
runtime: go
api_version: go1
handlers:
- url: /.*
script: _go_app
secure: always
// A web app for Google App Engine that proxies HTTP requests and responses to
// the Snowflake broker.
package reflect
import (
"context"
"io"
"net/http"
"net/url"
"time"
"google.golang.org/appengine"
"google.golang.org/appengine/log"
"google.golang.org/appengine/urlfetch"
)
const (
forwardURL = "https://snowflake-broker.bamsoftware.com/"
// A timeout of 0 means to use the App Engine default (5 seconds).
urlFetchTimeout = 20 * time.Second
)
var ctx context.Context
// Join two URL paths.
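// For example, pathJoin("https://example.com/reflect", "client") and
// pathJoin("https://example.com/reflect/", "/client") both yield
// "https://example.com/reflect/client".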
func pathJoin(a, b string) string {
if len(a) > 0 && a[len(a)-1] == '/' {
a = a[:len(a)-1]
}
if len(b) == 0 || b[0] != '/' {
b = "/" + b
}
return a + b
}
// We reflect only a whitelisted set of header fields. Otherwise, we may copy
// headers like Transfer-Encoding that interfere with App Engine's own
// hop-by-hop headers.
var reflectedHeaderFields = []string{
"Content-Type",
"X-Session-Id",
}
// Make a copy of r, with the URL being changed to be relative to forwardURL,
// and including only the headers in reflectedHeaderFields.
func copyRequest(r *http.Request) (*http.Request, error) {
u, err := url.Parse(forwardURL)
if err != nil {
return nil, err
}
// Append the requested path to the path in forwardURL, so that
// forwardURL can be something like "https://example.com/reflect".
u.Path = pathJoin(u.Path, r.URL.Path)
c, err := http.NewRequest(r.Method, u.String(), r.Body)
if err != nil {
return nil, err
}
for _, key := range reflectedHeaderFields {
values, ok := r.Header[key]
if ok {
for _, value := range values {
c.Header.Add(key, value)
}
}
}
return c, nil
}
func handler(w http.ResponseWriter, r *http.Request) {
ctx = appengine.NewContext(r)
fr, err := copyRequest(r)
if err != nil {
log.Errorf(ctx, "copyRequest: %s", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if urlFetchTimeout != 0 {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, urlFetchTimeout)
defer cancel()
}
// Use urlfetch.Transport directly instead of urlfetch.Client because we
// want only a single HTTP transaction, not following redirects.
transport := urlfetch.Transport{
Context: ctx,
}
resp, err := transport.RoundTrip(fr)
if err != nil {
log.Errorf(ctx, "RoundTrip: %s", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
defer resp.Body.Close()
for _, key := range reflectedHeaderFields {
values, ok := resp.Header[key]
if ok {
for _, value := range values {
w.Header().Add(key, value)
}
}
}
w.WriteHeader(resp.StatusCode)
n, err := io.Copy(w, resp.Body)
if err != nil {
log.Errorf(ctx, "io.Copy after %d bytes: %s", n, err)
}
}
func init() {
http.HandleFunc("/", handler)
}
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
**Table of Contents**
- [Overview](#overview)
- [Running your own](#running-your-own)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
This is the Broker component of Snowflake.
### Overview
......
package main
import (
"log"
"net/http"
"strings"
"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/amp"
"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/messages"
"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/util"
)
// ampClientOffers is the AMP-speaking endpoint for client poll messages,
// intended for access via an AMP cache. In contrast to the other clientOffers,
// the client's encoded poll message is stored in the URL path rather than the
// HTTP request body (because an AMP cache does not support POST), and the
// encoded client poll response is sent back as AMP-armored HTML.
func ampClientOffers(i *IPC, w http.ResponseWriter, r *http.Request) {
// The encoded client poll message immediately follows the /amp/client/
// path prefix, so this function unfortunately needs to be aware of and
// remove its own routing prefix.
path := strings.TrimPrefix(r.URL.Path, "/amp/client/")
if path == r.URL.Path {
// The path didn't start with the expected prefix. This probably
// indicates an internal bug.
log.Println("ampClientOffers: unexpected prefix in path")
w.WriteHeader(http.StatusInternalServerError)
return
}
var encPollReq []byte
var response []byte
var err error
encPollReq, err = amp.DecodePath(path)
if err == nil {
arg := messages.Arg{
Body: encPollReq,
RemoteAddr: util.GetClientIp(r),
RendezvousMethod: messages.RendezvousAmpCache,
}
err = i.ClientOffers(arg, &response)
} else {
response, err = (&messages.ClientPollResponse{
Error: "cannot decode URL path",
}).EncodePollResponse()
}
if err != nil {
// We couldn't even construct a JSON object containing an error
// message :( Nothing to do but signal an error at the HTTP
// layer. The AMP cache will translate this 500 status into a
// 404 status.
// https://amp.dev/documentation/guides-and-tutorials/learn/amp-caches-and-cors/amp-cache-urls/#redirect-%26-error-handling
log.Printf("ampClientOffers: %v", err)
w.WriteHeader(http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "text/html")
// Attempt to hint to an AMP cache not to waste resources caching this
// document. "The Google AMP Cache considers any document fresh for at
// least 15 seconds."
// https://developers.google.com/amp/cache/overview#google-amp-cache-updates
w.Header().Set("Cache-Control", "max-age=15")
w.WriteHeader(http.StatusOK)
enc, err := amp.NewArmorEncoder(w)
if err != nil {
log.Printf("amp.NewArmorEncoder: %v", err)
return
}
defer enc.Close()
if _, err := enc.Write(response); err != nil {
log.Printf("ampClientOffers: unable to write answer: %v", err)
}
}
/* (*BridgeListHolderFileBased).LoadBridgeInfo loads a Snowflake Server bridge info description file,
its format is as follows:
This file should be in newline-delimited JSON format (https://jsonlines.org/).
Each line should be a JSON object of the form:
{"displayName":"default", "webSocketAddress":"wss://snowflake.torproject.net/", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80A72"}
displayName:string is the name of this bridge. This value is not currently used programmatically.
webSocketAddress:string is the WebSocket URL of this bridge.
This is the address the proxy will use to connect to this snowflake server.
fingerprint:string is the identifier of the bridge.
This will be used by a client to identify the bridge it wishes to connect to.
The existence of ANY other fields is NOT permitted.
The file will be considered invalid if there is at least one invalid json record.
In this case, an error will be returned, and none of the records will be loaded.
*/
package main
import (
"bufio"
"bytes"
"encoding/json"
"errors"
"io"
"sync"
"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/bridgefingerprint"
)
var ErrBridgeNotFound = errors.New("bridge with requested fingerprint is unknown to the broker")
func NewBridgeListHolder() BridgeListHolderFileBased {
return &bridgeListHolder{}
}
type bridgeListHolder struct {
bridgeInfo map[bridgefingerprint.Fingerprint]BridgeInfo
accessBridgeInfo sync.RWMutex
}
type BridgeListHolder interface {
GetBridgeInfo(bridgefingerprint.Fingerprint) (BridgeInfo, error)
}
type BridgeListHolderFileBased interface {
BridgeListHolder
LoadBridgeInfo(reader io.Reader) error
}
type BridgeInfo struct {
DisplayName string `json:"displayName"`
WebSocketAddress string `json:"webSocketAddress"`
Fingerprint string `json:"fingerprint"`
}
func (h *bridgeListHolder) GetBridgeInfo(fingerprint bridgefingerprint.Fingerprint) (BridgeInfo, error) {
h.accessBridgeInfo.RLock()
defer h.accessBridgeInfo.RUnlock()
if bridgeInfo, ok := h.bridgeInfo[fingerprint]; ok {
return bridgeInfo, nil
}
return BridgeInfo{}, ErrBridgeNotFound
}
func (h *bridgeListHolder) LoadBridgeInfo(reader io.Reader) error {
bridgeInfoMap := map[bridgefingerprint.Fingerprint]BridgeInfo{}
inputScanner := bufio.NewScanner(reader)
for inputScanner.Scan() {
inputLine := inputScanner.Bytes()
bridgeInfo := BridgeInfo{}
decoder := json.NewDecoder(bytes.NewReader(inputLine))
decoder.DisallowUnknownFields()
if err := decoder.Decode(&bridgeInfo); err != nil {
return err
}
var bridgeFingerprint bridgefingerprint.Fingerprint
var err error
if bridgeFingerprint, err = bridgefingerprint.FingerprintFromHexString(bridgeInfo.Fingerprint); err != nil {
return err
}
bridgeInfoMap[bridgeFingerprint] = bridgeInfo
}
h.accessBridgeInfo.Lock()
defer h.accessBridgeInfo.Unlock()
h.bridgeInfo = bridgeInfoMap
return nil
}
package main
import (
"bytes"
"encoding/hex"
. "github.com/smartystreets/goconvey/convey"
"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/bridgefingerprint"
"testing"
)
const DefaultBridges = `{"displayName":"default", "webSocketAddress":"wss://snowflake.torproject.org", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80A72"}
`
const ImaginaryBridges = `{"displayName":"default", "webSocketAddress":"wss://snowflake.torproject.org", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80A72"}
{"displayName":"imaginary-1", "webSocketAddress":"wss://imaginary-1-snowflake.torproject.org", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80B00"}
{"displayName":"imaginary-2", "webSocketAddress":"wss://imaginary-2-snowflake.torproject.org", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80B01"}
{"displayName":"imaginary-3", "webSocketAddress":"wss://imaginary-3-snowflake.torproject.org", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80B02"}
{"displayName":"imaginary-4", "webSocketAddress":"wss://imaginary-4-snowflake.torproject.org", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80B03"}
{"displayName":"imaginary-5", "webSocketAddress":"wss://imaginary-5-snowflake.torproject.org", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80B04"}
{"displayName":"imaginary-6", "webSocketAddress":"wss://imaginary-6-snowflake.torproject.org", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80B05"}
{"displayName":"imaginary-7", "webSocketAddress":"wss://imaginary-7-snowflake.torproject.org", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80B06"}
{"displayName":"imaginary-8", "webSocketAddress":"wss://imaginary-8-snowflake.torproject.org", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80B07"}
{"displayName":"imaginary-9", "webSocketAddress":"wss://imaginary-9-snowflake.torproject.org", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80B08"}
{"displayName":"imaginary-10", "webSocketAddress":"wss://imaginary-10-snowflake.torproject.org", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80B09"}
`
func TestBridgeLoad(t *testing.T) {
Convey("load default list", t, func() {
bridgeList := NewBridgeListHolder()
So(bridgeList.LoadBridgeInfo(bytes.NewReader([]byte(DefaultBridges))), ShouldBeNil)
{
bridgeFingerprint := [20]byte{}
{
n, err := hex.Decode(bridgeFingerprint[:], []byte("2B280B23E1107BB62ABFC40DDCC8824814F80A72"))
So(n, ShouldEqual, 20)
So(err, ShouldBeNil)
}
Fingerprint, err := bridgefingerprint.FingerprintFromBytes(bridgeFingerprint[:])
So(err, ShouldBeNil)
bridgeInfo, err := bridgeList.GetBridgeInfo(Fingerprint)
So(err, ShouldBeNil)
So(bridgeInfo.DisplayName, ShouldEqual, "default")
So(bridgeInfo.WebSocketAddress, ShouldEqual, "wss://snowflake.torproject.org")
}
})
Convey("load imaginary list", t, func() {
bridgeList := NewBridgeListHolder()
So(bridgeList.LoadBridgeInfo(bytes.NewReader([]byte(ImaginaryBridges))), ShouldBeNil)
{
bridgeFingerprint := [20]byte{}
{
n, err := hex.Decode(bridgeFingerprint[:], []byte("2B280B23E1107BB62ABFC40DDCC8824814F80B07"))
So(n, ShouldEqual, 20)
So(err, ShouldBeNil)
}
Fingerprint, err := bridgefingerprint.FingerprintFromBytes(bridgeFingerprint[:])
So(err, ShouldBeNil)
bridgeInfo, err := bridgeList.GetBridgeInfo(Fingerprint)
So(err, ShouldBeNil)
So(bridgeInfo.DisplayName, ShouldEqual, "imaginary-8")
So(bridgeInfo.WebSocketAddress, ShouldEqual, "wss://imaginary-8-snowflake.torproject.org")
}
})
}
/*
This code is for loading database data that maps ip addresses to countries
for collecting and presenting statistics on snowflake use that might alert us
to censorship events.
The functions here are heavily based off of how tor maintains and searches their
geoip database
The tables used for geoip data must be structured as follows:
Recognized line format for IPv4 is:
INTIPLOW,INTIPHIGH,CC
where INTIPLOW and INTIPHIGH are IPv4 addresses encoded as big-endian 4-byte unsigned
integers, and CC is a country code.
Note that the IPv4 line format
"INTIPLOW","INTIPHIGH","CC","CC3","COUNTRY NAME"
is not currently supported.
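For example (an illustrative entry, not taken from any real database), the line
16777216,16777471,AU
would map the addresses 1.0.0.0 through 1.0.0.255 to the country code AU.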
Recognized line format for IPv6 is:
IPV6LOW,IPV6HIGH,CC
where IPV6LOW and IPV6HIGH are IPv6 addresses and CC is a country code.
It also recognizes, and skips over, blank lines and lines that start
with '#' (comments).
*/
package main
import (
"bufio"
"bytes"
"crypto/sha1"
"encoding/hex"
"fmt"
"io"
"log"
"net"
"os"
"sort"
"strconv"
"strings"
"sync"
)
type GeoIPTable interface {
parseEntry(string) (*GeoIPEntry, error)
Len() int
Append(GeoIPEntry)
ElementAt(int) GeoIPEntry
Lock()
Unlock()
}
type GeoIPEntry struct {
ipLow net.IP
ipHigh net.IP
country string
}
type GeoIPv4Table struct {
table []GeoIPEntry
lock sync.Mutex // synchronization for geoip table accesses and reloads
}
type GeoIPv6Table struct {
table []GeoIPEntry
lock sync.Mutex // synchronization for geoip table accesses and reloads
}
func (table *GeoIPv4Table) Len() int { return len(table.table) }
func (table *GeoIPv6Table) Len() int { return len(table.table) }
func (table *GeoIPv4Table) Append(entry GeoIPEntry) {
(*table).table = append(table.table, entry)
}
func (table *GeoIPv6Table) Append(entry GeoIPEntry) {
(*table).table = append(table.table, entry)
}
func (table *GeoIPv4Table) ElementAt(i int) GeoIPEntry { return table.table[i] }
func (table *GeoIPv6Table) ElementAt(i int) GeoIPEntry { return table.table[i] }
func (table *GeoIPv4Table) Lock() { (*table).lock.Lock() }
func (table *GeoIPv6Table) Lock() { (*table).lock.Lock() }
func (table *GeoIPv4Table) Unlock() { (*table).lock.Unlock() }
func (table *GeoIPv6Table) Unlock() { (*table).lock.Unlock() }
// Convert a geoip IP address represented as a big-endian unsigned integer to net.IP
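// For example, the string "16777216" (0x01000000) is converted to 1.0.0.0.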
func geoipStringToIP(ipStr string) (net.IP, error) {
ip, err := strconv.ParseUint(ipStr, 10, 32)
if err != nil {
return net.IPv4(0, 0, 0, 0), fmt.Errorf("error parsing IP %s", ipStr)
}
var bytes [4]byte
bytes[0] = byte(ip & 0xFF)
bytes[1] = byte((ip >> 8) & 0xFF)
bytes[2] = byte((ip >> 16) & 0xFF)
bytes[3] = byte((ip >> 24) & 0xFF)
return net.IPv4(bytes[3], bytes[2], bytes[1], bytes[0]), nil
}
//Parses a line in the provided geoip file that corresponds
//to an address range and a two character country code
func (table *GeoIPv4Table) parseEntry(candidate string) (*GeoIPEntry, error) {
if candidate[0] == '#' {
return nil, nil
}
parsedCandidate := strings.Split(candidate, ",")
if len(parsedCandidate) != 3 {
return nil, fmt.Errorf("provided geoip file is incorrectly formatted. Could not parse line:\n%s", parsedCandidate)
}
low, err := geoipStringToIP(parsedCandidate[0])
if err != nil {
return nil, err
}
high, err := geoipStringToIP(parsedCandidate[1])
if err != nil {
return nil, err
}
geoipEntry := &GeoIPEntry{
ipLow: low,
ipHigh: high,
country: parsedCandidate[2],
}
return geoipEntry, nil
}
//Parses a line in the provided geoip file that corresponds
//to an address range and a two character country code
func (table *GeoIPv6Table) parseEntry(candidate string) (*GeoIPEntry, error) {
if candidate[0] == '#' {
return nil, nil
}
parsedCandidate := strings.Split(candidate, ",")
if len(parsedCandidate) != 3 {
return nil, fmt.Errorf("")
}
low := net.ParseIP(parsedCandidate[0])
if low == nil {
return nil, fmt.Errorf("")
}
high := net.ParseIP(parsedCandidate[1])
if high == nil {
return nil, fmt.Errorf("")
}
geoipEntry := &GeoIPEntry{
ipLow: low,
ipHigh: high,
country: parsedCandidate[2],
}
return geoipEntry, nil
}
//Loads provided geoip file into our tables
//Entries are stored in a table
func GeoIPLoadFile(table GeoIPTable, pathname string) error {
//open file
geoipFile, err := os.Open(pathname)
if err != nil {
return err
}
defer geoipFile.Close()
hash := sha1.New()
table.Lock()
defer table.Unlock()
hashedFile := io.TeeReader(geoipFile, hash)
//read in strings and call parse function
scanner := bufio.NewScanner(hashedFile)
for scanner.Scan() {
entry, err := table.parseEntry(scanner.Text())
if err != nil {
return fmt.Errorf("provided geoip file is incorrectly formatted. Line is: %+q", scanner.Text())
}
if entry != nil {
table.Append(*entry)
}
}
if err := scanner.Err(); err != nil {
return err
}
sha1Hash := hex.EncodeToString(hash.Sum(nil))
log.Println("Using geoip file ", pathname, " with checksum", sha1Hash)
log.Println("Loaded ", table.Len(), " entries into table")
return nil
}
//Returns the country location of an IPv4 or IPv6 address, and a boolean value
//that indicates whether the IP address was present in the geoip database
func GetCountryByAddr(table GeoIPTable, ip net.IP) (string, bool) {
table.Lock()
defer table.Unlock()
//look IP up in database
index := sort.Search(table.Len(), func(i int) bool {
entry := table.ElementAt(i)
return (bytes.Compare(ip.To16(), entry.ipHigh.To16()) <= 0)
})
if index == table.Len() {
return "", false
}
// check to see if addr is in the range specified by the returned index
// search on IPs in invalid ranges (e.g., 127.0.0.0/8) will return the
//country code of the next highest range
entry := table.ElementAt(index)
if !(bytes.Compare(ip.To16(), entry.ipLow.To16()) >= 0 &&
bytes.Compare(ip.To16(), entry.ipHigh.To16()) <= 0) {
return "", false
}
return table.ElementAt(index).country, true
}
package main
import (
"bytes"
"errors"
"fmt"
"io"
"log"
"net/http"
"os"
"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/messages"
"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/util"
)
const (
readLimit = 100000 // Maximum number of bytes to be read from an HTTP request
)
// Implements the http.Handler interface
type SnowflakeHandler struct {
*IPC
handle func(*IPC, http.ResponseWriter, *http.Request)
}
func (sh SnowflakeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Headers", "Origin, X-Session-ID")
// Return early if it's CORS preflight.
if "OPTIONS" == r.Method {
return
}
sh.handle(sh.IPC, w, r)
}
// Implements the http.Handler interface
type MetricsHandler struct {
logFilename string
handle func(string, http.ResponseWriter, *http.Request)
}
func (mh MetricsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Headers", "Origin, X-Session-ID")
// Return early if it's CORS preflight.
if "OPTIONS" == r.Method {
return
}
mh.handle(mh.logFilename, w, r)
}
func robotsTxtHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
if _, err := w.Write([]byte("User-agent: *\nDisallow: /\n")); err != nil {
log.Printf("robotsTxtHandler unable to write, with this error: %v", err)
}
}
func metricsHandler(metricsFilename string, w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
if metricsFilename == "" {
http.NotFound(w, r)
return
}
metricsFile, err := os.OpenFile(metricsFilename, os.O_RDONLY, 0644)
if err != nil {
log.Println("Error opening metrics file for reading")
http.NotFound(w, r)
return
}
if _, err := io.Copy(w, metricsFile); err != nil {
log.Printf("copying metricsFile returned error: %v", err)
}
}
func debugHandler(i *IPC, w http.ResponseWriter, r *http.Request) {
var response string
err := i.Debug(new(interface{}), &response)
if err != nil {
log.Println(err)
w.WriteHeader(http.StatusInternalServerError)
return
}
if _, err := w.Write([]byte(response)); err != nil {
log.Printf("writing proxy information returned error: %v ", err)
}
}
/*
For snowflake proxies to request a client from the Broker.
*/
func proxyPolls(i *IPC, w http.ResponseWriter, r *http.Request) {
body, err := io.ReadAll(http.MaxBytesReader(w, r.Body, readLimit))
if err != nil {
log.Println("Invalid data.", err.Error())
w.WriteHeader(http.StatusBadRequest)
return
}
arg := messages.Arg{
Body: body,
RemoteAddr: util.GetClientIp(r),
}
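// ProxyPolls blocks until a client offer is matched to this proxy or the
// broker's proxy timeout expires; in the timeout case the IPC layer encodes
// an empty poll response rather than an error.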
var response []byte
err = i.ProxyPolls(arg, &response)
switch {
case err == nil:
case errors.Is(err, messages.ErrBadRequest):
w.WriteHeader(http.StatusBadRequest)
return
case errors.Is(err, messages.ErrInternal):
fallthrough
default:
log.Println(err)
w.WriteHeader(http.StatusInternalServerError)
return
}
if _, err := w.Write(response); err != nil {
log.Printf("proxyPolls unable to write offer with error: %v", err)
}
}
/*
Expects a WebRTC SDP offer in the Request to give to an assigned
snowflake proxy, which responds with the SDP answer to be sent in
the HTTP response back to the client.
*/
func clientOffers(i *IPC, w http.ResponseWriter, r *http.Request) {
body, err := io.ReadAll(http.MaxBytesReader(w, r.Body, readLimit))
if err != nil {
log.Printf("Error reading client request: %s", err.Error())
w.WriteHeader(http.StatusBadRequest)
return
}
// Handle the legacy version
//
// We support two client message formats. The legacy format is for backwards
// compatibility and relies heavily on HTTP headers and status codes to convey
// information.
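// In the legacy format the request body is the SDP offer itself (a JSON blob
// beginning with '{') and the NAT type arrives in the Snowflake-NAT-Type
// header, so we shim it into the newer encoding here.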
isLegacy := false
if len(body) > 0 && body[0] == '{' {
isLegacy = true
req := messages.ClientPollRequest{
Offer: string(body),
NAT: r.Header.Get("Snowflake-NAT-Type"),
}
body, err = req.EncodeClientPollRequest()
if err != nil {
log.Printf("Error shimming the legacy request: %s", err.Error())
w.WriteHeader(http.StatusInternalServerError)
return
}
}
arg := messages.Arg{
Body: body,
RemoteAddr: util.GetClientIp(r),
RendezvousMethod: messages.RendezvousHttp,
}
var response []byte
err = i.ClientOffers(arg, &response)
if err != nil {
log.Println(err)
w.WriteHeader(http.StatusInternalServerError)
return
}
if isLegacy {
resp, err := messages.DecodeClientPollResponse(response)
if err != nil {
log.Println(err)
w.WriteHeader(http.StatusInternalServerError)
return
}
switch resp.Error {
case "":
response = []byte(resp.Answer)
case messages.StrNoProxies:
w.WriteHeader(http.StatusServiceUnavailable)
return
case messages.StrTimedOut:
w.WriteHeader(http.StatusGatewayTimeout)
return
default:
panic("unknown error")
}
}
if _, err := w.Write(response); err != nil {
log.Printf("clientOffers unable to write answer with error: %v", err)
}
}
/*
Expects snowflake proxies which have previously successfully received
an offer from proxyHandler to respond with an answer in an HTTP POST,
which the broker will pass back to the original client.
*/
func proxyAnswers(i *IPC, w http.ResponseWriter, r *http.Request) {
body, err := io.ReadAll(http.MaxBytesReader(w, r.Body, readLimit))
if err != nil {
log.Println("Invalid data.", err.Error())
w.WriteHeader(http.StatusBadRequest)
return
}
err = validateSDP(body)
if err != nil {
log.Println("Error proxy SDP: ", err.Error())
w.WriteHeader(http.StatusBadRequest)
return
}
arg := messages.Arg{
Body: body,
RemoteAddr: util.GetClientIp(r),
}
var response []byte
err = i.ProxyAnswers(arg, &response)
switch {
case err == nil:
case errors.Is(err, messages.ErrBadRequest):
w.WriteHeader(http.StatusBadRequest)
return
case errors.Is(err, messages.ErrInternal):
fallthrough
default:
log.Println(err)
w.WriteHeader(http.StatusInternalServerError)
return
}
if _, err := w.Write(response); err != nil {
log.Printf("proxyAnswers unable to write answer response with error: %v", err)
}
}
func validateSDP(SDP []byte) error {
// TODO: more validation likely needed
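// For now we only require at least one ICE candidate attribute, i.e. a line
// roughly like "a=candidate:0 1 udp 2122252543 198.51.100.7 49203 typ host"
// (illustrative values); an answer without any candidates cannot be used to
// establish a WebRTC connection.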
if !bytes.Contains(SDP, []byte("a=candidate")) {
return fmt.Errorf("SDP contains no candidate")
}
return nil
}
package main
import (
"container/heap"
"encoding/hex"
"fmt"
"log"
"time"
"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/bridgefingerprint"
"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/constants"
"github.com/prometheus/client_golang/prometheus"
"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/messages"
)
const (
ClientTimeout = constants.BrokerClientTimeout
ProxyTimeout = 10
NATUnknown = "unknown"
NATRestricted = "restricted"
NATUnrestricted = "unrestricted"
)
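// Both timeout constants are interpreted as a number of seconds: ClientOffers
// below waits for time.Second * ClientTimeout, and ProxyTimeout is presumably
// applied the same way where proxy polls wait for a matching client.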
type IPC struct {
ctx *BrokerContext
}
func (i *IPC) Debug(_ interface{}, response *string) error {
var unknowns int
var natRestricted, natUnrestricted, natUnknown int
proxyTypes := make(map[string]int)
i.ctx.snowflakeLock.Lock()
s := fmt.Sprintf("current snowflakes available: %d\n", len(i.ctx.idToSnowflake))
for _, snowflake := range i.ctx.idToSnowflake {
if messages.KnownProxyTypes[snowflake.proxyType] {
proxyTypes[snowflake.proxyType]++
} else {
unknowns++
}
switch snowflake.natType {
case NATRestricted:
natRestricted++
case NATUnrestricted:
natUnrestricted++
default:
natUnknown++
}
}
i.ctx.snowflakeLock.Unlock()
for pType, num := range proxyTypes {
s += fmt.Sprintf("\t%s proxies: %d\n", pType, num)
}
s += fmt.Sprintf("\tunknown proxies: %d", unknowns)
s += fmt.Sprintf("\nNAT Types available:")
s += fmt.Sprintf("\n\trestricted: %d", natRestricted)
s += fmt.Sprintf("\n\tunrestricted: %d", natUnrestricted)
s += fmt.Sprintf("\n\tunknown: %d", natUnknown)
*response = s
return nil
}
func (i *IPC) ProxyPolls(arg messages.Arg, response *[]byte) error {
sid, proxyType, natType, clients, relayPattern, relayPatternSupported, err := messages.DecodeProxyPollRequestWithRelayPrefix(arg.Body)
if err != nil {
return messages.ErrBadRequest
}
if !relayPatternSupported {
i.ctx.metrics.lock.Lock()
i.ctx.metrics.proxyPollWithoutRelayURLExtension++
i.ctx.metrics.promMetrics.ProxyPollWithoutRelayURLExtensionTotal.With(prometheus.Labels{"nat": natType, "type": proxyType}).Inc()
i.ctx.metrics.lock.Unlock()
} else {
i.ctx.metrics.lock.Lock()
i.ctx.metrics.proxyPollWithRelayURLExtension++
i.ctx.metrics.promMetrics.ProxyPollWithRelayURLExtensionTotal.With(prometheus.Labels{"nat": natType, "type": proxyType}).Inc()
i.ctx.metrics.lock.Unlock()
}
if !i.ctx.CheckProxyRelayPattern(relayPattern, !relayPatternSupported) {
i.ctx.metrics.lock.Lock()
i.ctx.metrics.proxyPollRejectedWithRelayURLExtension++
i.ctx.metrics.promMetrics.ProxyPollRejectedForRelayURLExtensionTotal.With(prometheus.Labels{"nat": natType, "type": proxyType}).Inc()
i.ctx.metrics.lock.Unlock()
log.Printf("bad request: rejected relay pattern from proxy = %v", messages.ErrBadRequest)
b, err := messages.EncodePollResponseWithRelayURL("", false, "", "", "incorrect relay pattern")
*response = b
if err != nil {
return messages.ErrInternal
}
return nil
}
// Log geoip stats for the proxy's IP address.
remoteIP := arg.RemoteAddr
i.ctx.metrics.lock.Lock()
i.ctx.metrics.UpdateCountryStats(remoteIP, proxyType, natType)
i.ctx.metrics.lock.Unlock()
var b []byte
// Wait for a client to make an offer available to this snowflake; RequestOffer returns nil on timeout.
offer := i.ctx.RequestOffer(sid, proxyType, natType, clients)
if offer == nil {
i.ctx.metrics.lock.Lock()
i.ctx.metrics.proxyIdleCount++
i.ctx.metrics.promMetrics.ProxyPollTotal.With(prometheus.Labels{"nat": natType, "status": "idle"}).Inc()
i.ctx.metrics.lock.Unlock()
b, err = messages.EncodePollResponse("", false, "")
if err != nil {
return messages.ErrInternal
}
*response = b
return nil
}
i.ctx.metrics.promMetrics.ProxyPollTotal.With(prometheus.Labels{"nat": natType, "status": "matched"}).Inc()
var relayURL string
bridgeFingerprint, err := bridgefingerprint.FingerprintFromBytes(offer.fingerprint)
if err != nil {
return messages.ErrBadRequest
}
if info, err := i.ctx.bridgeList.GetBridgeInfo(bridgeFingerprint); err != nil {
return err
} else {
relayURL = info.WebSocketAddress
}
b, err = messages.EncodePollResponseWithRelayURL(string(offer.sdp), true, offer.natType, relayURL, "")
if err != nil {
return messages.ErrInternal
}
*response = b
return nil
}
func sendClientResponse(resp *messages.ClientPollResponse, response *[]byte) error {
data, err := resp.EncodePollResponse()
if err != nil {
log.Printf("error encoding answer")
return messages.ErrInternal
} else {
*response = []byte(data)
return nil
}
}
func (i *IPC) ClientOffers(arg messages.Arg, response *[]byte) error {
startTime := time.Now()
req, err := messages.DecodeClientPollRequest(arg.Body)
if err != nil {
return sendClientResponse(&messages.ClientPollResponse{Error: err.Error()}, response)
}
offer := &ClientOffer{
natType: req.NAT,
sdp: []byte(req.Offer),
}
fingerprint, err := hex.DecodeString(req.Fingerprint)
if err != nil {
return sendClientResponse(&messages.ClientPollResponse{Error: err.Error()}, response)
}
BridgeFingerprint, err := bridgefingerprint.FingerprintFromBytes(fingerprint)
if err != nil {
return sendClientResponse(&messages.ClientPollResponse{Error: err.Error()}, response)
}
if _, err := i.ctx.GetBridgeInfo(BridgeFingerprint); err != nil {
return sendClientResponse(
&messages.ClientPollResponse{Error: err.Error()},
response,
)
}
offer.fingerprint = BridgeFingerprint.ToBytes()
snowflake := i.matchSnowflake(offer.natType)
if snowflake != nil {
snowflake.offerChannel <- offer
} else {
i.ctx.metrics.lock.Lock()
i.ctx.metrics.UpdateRendezvousStats(arg.RemoteAddr, arg.RendezvousMethod, offer.natType, "denied")
i.ctx.metrics.lock.Unlock()
resp := &messages.ClientPollResponse{Error: messages.StrNoProxies}
return sendClientResponse(resp, response)
}
// Wait for the answer to be returned on the channel or timeout.
select {
case answer := <-snowflake.answerChannel:
i.ctx.metrics.lock.Lock()
i.ctx.metrics.UpdateRendezvousStats(arg.RemoteAddr, arg.RendezvousMethod, offer.natType, "matched")
i.ctx.metrics.lock.Unlock()
resp := &messages.ClientPollResponse{Answer: answer}
err = sendClientResponse(resp, response)
// Initial tracking of elapsed time.
i.ctx.metrics.lock.Lock()
i.ctx.metrics.clientRoundtripEstimate = time.Since(startTime) / time.Millisecond
i.ctx.metrics.lock.Unlock()
case <-time.After(time.Second * ClientTimeout):
i.ctx.metrics.lock.Lock()
i.ctx.metrics.UpdateRendezvousStats(arg.RemoteAddr, arg.RendezvousMethod, offer.natType, "timeout")
i.ctx.metrics.lock.Unlock()
resp := &messages.ClientPollResponse{Error: messages.StrTimedOut}
err = sendClientResponse(resp, response)
}
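// Whether an answer arrived or the wait timed out, the matched proxy is
// single-use: remove it from the pool, so a late answer from this proxy will
// be reported as unsuccessful by ProxyAnswers.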
i.ctx.snowflakeLock.Lock()
i.ctx.metrics.promMetrics.AvailableProxies.With(prometheus.Labels{"nat": snowflake.natType, "type": snowflake.proxyType}).Dec()
delete(i.ctx.idToSnowflake, snowflake.id)
i.ctx.snowflakeLock.Unlock()
return err
}
func (i *IPC) matchSnowflake(natType string) *Snowflake {
i.ctx.snowflakeLock.Lock()
defer i.ctx.snowflakeLock.Unlock()
// Prioritize known restricted snowflakes for unrestricted clients, since
// restricted proxies can only be matched with unrestricted clients; this
// saves unrestricted proxies for clients behind restrictive NATs.
if natType == NATUnrestricted && i.ctx.restrictedSnowflakes.Len() > 0 {
return heap.Pop(i.ctx.restrictedSnowflakes).(*Snowflake)
}
if i.ctx.snowflakes.Len() > 0 {
return heap.Pop(i.ctx.snowflakes).(*Snowflake)
}
return nil
}
func (i *IPC) ProxyAnswers(arg messages.Arg, response *[]byte) error {
answer, id, err := messages.DecodeAnswerRequest(arg.Body)
if err != nil || answer == "" {
return messages.ErrBadRequest
}
var success = true
i.ctx.snowflakeLock.Lock()
snowflake, ok := i.ctx.idToSnowflake[id]
i.ctx.snowflakeLock.Unlock()
if !ok || snowflake == nil {
// Either the snowflake proxy took too long to respond with an answer and its
// client gave up, or the Broker no longer recognizes this proxy.
success = false
}
b, err := messages.EncodeAnswerResponse(success)
if err != nil {
log.Printf("Error encoding answer: %s", err.Error())
return messages.ErrInternal
}
*response = b
if success {
snowflake.answerChannel <- answer
}
return nil
}