Cecylia Bocovich / probetest · Commits

Commit c3f2b566, authored Dec 06, 2019 by Cecylia Bocovich
Add ability to measure snowflake connection phases
parent efec8974
Changes: 4 files
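Taken together, the four files form a small measurement pipeline: snowflake-stage.lua labels packets in a capture with the current connection phase, snowflaketest runs the probes and feeds each probe's capture through tshark into a per-probe log, snowflake-stage.py flattens those logs into a CSV, and snowflake-stage.R summarizes and plots the result. The sketch below glues the steps together by hand; it is only an illustration (the commit's own driver is snowflaketest), and the file names, the na/cn site directories, and the <site>/<runid>/<probe>.log layout are assumptions read off the scripts rather than part of the commit.

#!/usr/bin/env python
# Hypothetical glue script; file names and directory layout are illustrative.
import glob
import subprocess

# 1. Annotate one probe's packet capture with connection phases and append
#    the tshark output to that probe's log (snowflaketest does this per probe).
with open("na/20191206-0000/snowflake-probe-0.log", "a") as log:
    subprocess.call(["tshark", "-q", "-Xlua_script:snowflake-stage.lua",
                     "-r", "snowflake-probe-0.pcap"], stdout=log)

# 2. Flatten every per-probe log into one CSV of
#    (timestamp, site, runid, ip, percent) rows.
paths = sorted(glob.glob("na/*/*.log") + glob.glob("cn/*/*.log"))
with open("stages.csv", "w") as csv_out:
    p = subprocess.Popen(["./snowflake-stage.py"],
                         stdin=subprocess.PIPE, stdout=csv_out)
    p.communicate("\n".join(paths).encode())

# 3. Summarize and plot: prints the wiki table to stdout and writes the
#    faceted stage-over-time plot to Rplots.pdf (R's default pdf() file).
subprocess.call(["Rscript", "snowflake-stage.R", "stages.csv"])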
snowflake-stage.R (new file, mode 100644)
library(data.table)
library(ggplot2)

args <- commandArgs(trailingOnly=T)

# Read the per-probe stage CSVs named on the command line into one table.
x <- data.table()
for (filename in args) {
    x <- rbind(x, fread(filename))
}
x$timestamp <- as.POSIXct(x$timestamp, tz="UTC")

# Filter out the times the cn VPN wasn't working
# (otherwise it looks like timeouts; i.e. blocking)

# Highest bootstrap percentage reached by each probe (site, runid, ip).
x.max <- x[, .SD[which.max(percent)], by=.(site, runid, ip)]
setkey(x.max, site, runid, ip)

# Emit a Trac wiki table of average bootstrap percentage per bridge,
# with cells shaded by value.
cat("
{{{#!html
<table class=\"wiki\">
<tr><th>bridge</th><th>CA average bootstrap %</th><th>CN average bootstrap %</th></tr>
")
ramp <- colorRamp(c("#d6756b", "#f7fbff"))
summ <- x.max[, .(.N, avg.percent=mean(percent)), by=.(site, ip)]
for (nick in unique(x$ip)) {
    na <- summ[site=="na" & ip==nick]
    cn <- summ[site=="cn" & ip==nick]
    cat(sprintf("<tr><td>%s</td><td align=right style=\"background: %s\">%.2f%%</td><td align=right style=\"background: %s\">%.2f%%</td></tr>\n",
        nick,
        rgb(ramp(na$avg.percent/100)/255), na$avg.percent,
        rgb(ramp(cn$avg.percent/100)/255), cn$avg.percent))
}
cat("</table>
}}}
")

pdf(width=8.5, height=14)

# runids <- unique(x$runid)
# runids <- runids[order(runids)]
# p <- ggplot(x[x$runid %in% runids[(length(runids)-2):(length(runids)-1)], ])
# p <- p + geom_step(aes(timestamp, percent, group=sprintf("%s-%s", runid, ip), color=ip))
# p <- p + scale_y_continuous(limits=c(0, 100), breaks=seq(0, 100, 10))
# p <- p + theme_bw()
# p

# p <- ggplot(x.max)
# p <- p + geom_point(aes(ip, percent, color=site), alpha=0.4, size=0.7, position=position_jitter(width=0.3, height=0))
# p <- p + scale_y_continuous(limits=c(0, 100))
# p <- p + coord_flip()
# p <- p + theme_bw()
# p <- p + guides(color=guide_legend(override.aes=list(alpha=1, size=2)))
# p

# Scatter plot of the stage reached over time, one facet per proxy IP.
tmp <- x.max
tmp$site <- factor(tmp$site, levels=c("na", "cn"), labels=c("CA", "CN"))
p <- ggplot(tmp)
p <- p + geom_point(aes(timestamp, percent, color=site, shape=site, size=site), alpha=0.4)
p <- p + facet_grid(ip ~ .)
p <- p + scale_y_continuous(limits=c(0, 105), breaks=c(20, 40, 60, 80, 100), labels=c("Gathering", "Signaling", "Connecting", "Data", "Done"))
p <- p + scale_color_brewer(palette="Set1")
p <- p + scale_shape_manual(values=c(CA=4, CN=16))
p <- p + scale_size_manual(values=c(CA=1.0, CN=1.0))
p <- p + theme_bw()
p <- p + theme(strip.text.y=element_text(angle=0))
p <- p + theme(legend.position="top")
p <- p + guides(color=guide_legend(override.aes=list(alpha=1, size=2.5)))
p
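For reference, the CSV that snowflake-stage.R consumes has one row per stage transition observed in a probe log; the column names and the percent encoding come from snowflake-stage.py further down. A purely illustrative sample row (the IP address and run id are invented values):

# Illustrative row of the stage CSV read by snowflake-stage.R.
sample_row = {
    "timestamp": "2019-12-06 00:00:00.000000",  # parsed with as.POSIXct(tz="UTC")
    "site": "na",                 # "na" is plotted as "CA", "cn" as "CN"
    "runid": "20191206-0000",     # %Y%m%d-%H%M, the probe run's start time
    "ip": "192.0.2.1",            # snowflake proxy address (example value)
    "percent": 60,                # 20/40/60/80/100 = Gathering/Signaling/Connecting/Data/Done
}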
snowflake-stage.lua (new file, mode 100644)
-- Tracks the stages of a snowflake client connection (Gathering, Signaling,
-- Connecting, Data, Done) in a packet capture and prints each stage transition.
-- Usage: tshark -q <other opts> -Xlua_script:snowflake-stage.lua -r <packet capture>
do
    -- Extractor definitions
    ip_addr_extractor = Field.new("ip.addr")
    ipv6_addr_extractor = Field.new("ipv6.addr")

    -- STUN fields
    stun_extractor = Field.new("stun")
    stun_username_extractor = Field.new("stun.att.username")
    stun_len_extractor = Field.new("stun.length")
    stun_response_extractor = Field.new("stun.att.port-xord")
    stun_type_extractor = Field.new("stun.type")

    -- DNS
    dns_extractor = Field.new("dns")
    dns_ipv4_response_extractor = Field.new("dns.a") -- IPv4
    dns_ipv6_response_extractor = Field.new("dns.aaaa") -- IPv6
    dns_response_name_extractor = Field.new("dns.resp.name")

    -- TLS
    tls_extractor = Field.new("ssl")
    tls_success_extractor = Field.new("ssl.app_data")
    dtls_extractor = Field.new("dtls")
    dtls_success_extractor = Field.new("dtls.app_data")

    -- Constants
    STUN_RESPONSE = "0x00000101"

    -- Global variables
    stage = "Gathering"
    peer_candidates = {}
    print("stage:" .. stage)

    local function main()
        local ipv4_tap = Listener.new("ip")
        local ipv6_tap = Listener.new("ipv6")

        function process_packet(pinfo, tvb)
            local ip_src, ip_dst = ip_addr_extractor()
            local stun = stun_extractor()
            if (stage == "Gathering") then
                gathering_phase()
            elseif (stage == "Signaling") then
                signaling_phase()
            elseif (stage == "Connecting") then
                connecting_phase()
            elseif (stage == "Data") then
                data_phase()
                connecting_phase() --still sending connect information
            end
        end

        function gathering_phase()
            --looking for connection to STUN server
            if (dns_extractor()) then
                local stun_server = dns_response_name_extractor()
                local stun_ipv4_addr = dns_ipv4_response_extractor()
                local stun_ipv6_addr = dns_ipv6_response_extractor()
                if (stun_ipv4_addr) then
                    stun_ipv4 = tostring(stun_ipv4_addr)
                    print("Received v4 address for STUN server " .. tostring(stun_server) .. ": " .. tostring(stun_ipv4_addr))
                end
                if (stun_ipv6_addr) then
                    stun_ipv6 = tostring(stun_ipv6_addr)
                    print("Received v6 address for STUN server " .. tostring(stun_server) .. ": " .. tostring(stun_ipv6_addr))
                end
            end
            if (stun_ipv4 or stun_ipv6) then
                --check if we've received a Binding success response
                if (stun_extractor() and stun_response_extractor()) then
                    local ip_src, _ = ip_addr_extractor()
                    if (not ip_src) then
                        ip_src = ipv6_addr_extractor()
                    end
                    if ((tostring(ip_src) == stun_ipv4) or (tostring(ip_src) == stun_ipv6)) then
                        print("Received STUN success response from " .. tostring(ip_src))
                        stage = "Signaling"
                        print("stage:" .. stage)
                    end
                end
            end
        end

        function signaling_phase()
            --looking for connection to domain-fronted snowflake broker
            if (dns_extractor()) then
                local broker = dns_response_name_extractor()
                local broker_ipv4_addr = dns_ipv4_response_extractor()
                local broker_ipv6_addr = dns_ipv6_response_extractor()
                if (broker_ipv4_addr) then
                    broker_ipv4 = tostring(broker_ipv4_addr)
                    print("Received v4 address for Broker front " .. tostring(broker) .. ": " .. tostring(broker_ipv4_addr))
                end
                if (broker_ipv6_addr) then
                    broker_ipv6 = tostring(broker_ipv6_addr)
                    print("Received v6 address for Broker front " .. tostring(broker) .. ": " .. tostring(broker_ipv6_addr))
                end
            end
            if (broker_ipv4 or broker_ipv6) then
                --look for a successful TLS handshake
                local ip_src, _ = ip_addr_extractor()
                if (not ip_src) then
                    ip_src = ipv6_addr_extractor()
                end
                if ((tostring(ip_src) == broker_ipv4) or (tostring(ip_src) == broker_ipv6)) then
                    isTLS = tls_extractor()
                    isTLSsuccess = tls_success_extractor()
                    if (isTLS and isTLSsuccess) then
                        print("Received signaling data from " .. tostring(ip_src))
                        stage = "Connecting"
                        print("stage:" .. stage)
                    end
                end
            end
        end

        function connecting_phase()
            --looking for connection to snowflake peer
            local isSTUN = stun_extractor()
            local stun_username = stun_username_extractor()
            local stun_type = stun_type_extractor()
            local _, ip_dst = ip_addr_extractor()
            if (not ip_dst) then
                ip_dst = ipv6_addr_extractor()
            end
            if (isSTUN and stun_username) then
                local names = string_split(tostring(stun_username), ":")
                if (not myname) then
                    myname = names[1]
                    peername = names[2]
                end
                if (names[1] == myname) then
                    if (not peer_candidates[tostring(ip_dst)]) then
                        print("Sent Binding request with username " .. tostring(stun_username) .. " to peer candidate " .. tostring(ip_dst))
                        peer_candidates[tostring(ip_dst)] = "sent"
                    end
                end
            end
            if (isSTUN and tostring(stun_type) == STUN_RESPONSE) then
                if (peer_candidates[tostring(ip_dst)] == "sent") then
                    print("Received Success Response from peer candidate " .. tostring(ip_dst))
                    peer_candidates[tostring(ip_dst)] = "success"
                    if (stage == "Connecting") then
                        stage = "Data"
                        print("stage:" .. stage)
                    end
                end
            end
        end

        function data_phase()
            --looking for dtls with snowflake peer
            local ip_src, _ = ip_addr_extractor()
            if (not ip_src) then
                ip_src = ipv6_addr_extractor()
            end
            if (peer_candidates[tostring(ip_src)] == "success") then
                isDTLS = dtls_extractor()
                if (isDTLS and dtls_success_extractor()) then
                    print("Successfully connected to snowflake " .. tostring(ip_src))
                    stage = "Done"
                    print("stage:" .. stage)
                end
            end
        end

        function string_split(inputstr, sep)
            if sep == nil then
                sep = "%s"
            end
            local t = {}
            for str in string.gmatch(inputstr, "([^" .. sep .. "]+)") do
                table.insert(t, str)
            end
            return t
        end

        -------------------
        ----- tap functions
        -------------------
        function ipv4_tap.packet(pinfo, tvb, ip)
            process_packet(pinfo, tvb)
        end

        function ipv6_tap.packet(pinfo, tvb, ipv6)
            process_packet(pinfo, tvb)
        end
    end

    main()
end
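The lines this script prints are what snowflake-stage.py (next file) parses: it keys on the "stage:" lines and on the final "Successfully connected to snowflake <ip>" line. A hypothetical excerpt of the tshark output, with invented hostnames, addresses, and username, might look like this:

# Hypothetical output of: tshark -q -Xlua_script:snowflake-stage.lua -r probe.pcap
sample_output = """\
stage:Gathering
Received v4 address for STUN server stun.example.com: 192.0.2.10
Received STUN success response from 192.0.2.10
stage:Signaling
Received v4 address for Broker front front.example.com: 192.0.2.20
Received signaling data from 192.0.2.20
stage:Connecting
Sent Binding request with username abcd:efgh to peer candidate 192.0.2.30
Received Success Response from peer candidate 192.0.2.30
stage:Data
Successfully connected to snowflake 192.0.2.30
stage:Done
"""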
snowflake-stage.py (new file, mode 100755)
#!/usr/bin/env python2

import csv
import datetime
import locale
import os.path
import re
import sys

# For strptime.
locale.setlocale(locale.LC_ALL, "C")

# Dec 01 20:57:53.000
stage_re = re.compile(r'^stage:(.*)')
date_re = re.compile(r'^(\w+ \d+ \d\d:\d\d:\d\d\.\d\d\d)')
ip_re = re.compile(r'^Successfully connected to snowflake (.*)')

csvW = csv.DictWriter(sys.stdout, fieldnames=("timestamp", "site", "runid", "ip", "percent"))
csvW.writeheader()

# Map each connection stage to a bootstrap-style percentage.
stages = {
    'Gathering': 20,
    'Signaling': 40,
    'Connecting': 60,
    'Data': 80,
    'Done': 100
}

# Rows are buffered until the proxy IP is known; it only appears near the end of the log.
rows = []

def process_log(f, site, runid, nickname):
    timestamp = datetime.datetime.strptime(runid, "%Y%m%d-%H%M")
    ip = None
    for line in f:
        m = ip_re.match(line)
        if m is not None:
            ip = m.group(1)
        m = stage_re.match(line)
        if m is not None:
            stage = m.group(1)
            percent = stages[stage]
            row = {
                "timestamp": timestamp.strftime("%Y-%m-%d %H:%M:%S.%f"),
                "site": site,
                "runid": runid,
                "ip": "",
                "percent": percent,
            }
            rows.append(row)
    for row in rows:
        row['ip'] = ip
        csvW.writerow(row)

# Expects log paths on stdin, laid out as <site>/<runid>/<nickname>.log.
for filename in sys.stdin:
    filename = filename.strip()
    nickname, ext = os.path.splitext(os.path.basename(filename))
    if ext != ".log":
        continue
    if nickname == "main":
        continue
    parent = os.path.dirname(filename)
    runid = os.path.basename(parent)
    parent = os.path.dirname(parent)
    site = os.path.basename(parent)
    with open(filename) as f:
        process_log(f, site, runid, nickname)
snowflaketest (modified)
...
@@ -17,30 +17,16 @@ BRIDGE_LINES = (
START_TOR_TIMEOUT = 3 * 60
CIRCUIT_BUILD_TIMEOUT = 3 * 60
OBFS4PROXY_PATH = "/usr/bin/obfs4proxy"
SNOWFLAKE_PATH = "/usr/bin/snowflake"

def makedirs(path):
    try:
        return os.makedirs(path)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

def get_address_from_bridge_line(bridge_line):
    host, port = bridge_line.split()[1].split(":", 1)
    port = int(port)
    return (host, port)

def start_tcpdump(basename):
# need to look at capture file to see which snowflake we got
def start_tcpdump(basename, interface):
    bpf = "tcp"
    # http://packetlife.net/blog/2010/mar/19/sniffing-wireshark-non-root-user/
    # groupadd tcpdump
    # usermod -a -G tcpdump user
    # chgrp tcpdump /usr/sbin/tcpdump
    # setcap cap_net_raw,cap_net_admin=eip /usr/sbin/tcpdump
    p = subprocess.Popen(["/usr/sbin/tcpdump", "-i", "lo", "-U", "-B", "4096", "-w", basename + ".pcap", bpf],
    p = subprocess.Popen(["/usr/sbin/tcpdump", "-i", interface, "-U", "-B", "4096", "-w", basename + ".pcap", bpf],
        stdout=open(basename + ".pcap", "w"),
        stderr=open(basename + ".tcpdump.err", "w"))
    return p
...
@@ -68,7 +54,6 @@ def start_tor(tor_config):
        "LearnCircuitBuildTimeout": "0",
        "CircuitBuildTimeout": str(CIRCUIT_BUILD_TIMEOUT),
        "FetchHidServDescriptors": "0",
        "ClientTransportPlugin": "obfs4 exec %s" % OBFS4PROXY_PATH,
        "LogTimeGranularity": "1",
        "Log": "notice stdout",
    }
...
@@ -120,14 +105,20 @@ logging.info("starting")
#Now do a set of probes of snowflake proxies
for x in range(0, 100):
    # Need to flush dns caches
    subprocess.run(["/usr/bin/systemd-resolve", "--flush-caches"])
    # Set up logs and datadir for probe
    nickname = "snowflake-probe-%(num)d" % {"num": x}
    datadir = tempfile.mkdtemp(prefix="datadir.", dir=".")
    output = open("%s.log" % nickname, "w")
    logging.info("created temporary DataDirectory %r", datadir)
    tcpdump_proc = None
    try:
        logging.info("starting tcpdump for bridge %r" % nickname)
        try:
            tcpdump_proc = start_tcpdump(nickname)
            tcpdump_lo_proc = start_tcpdump(nickname, lo)
            tcpdump_eth0_proc = start_tcpdump(nickname, eth0)
        except OSError as e:
            logging.info("failed to start tcpdump, stopping snowflake probe: %s", e)
            #these tests break if we can't find the proxy ip address
...
@@ -152,11 +143,21 @@ for x in range(0, 100):
        except OSError as err:
            logging.info("failed to start tor: %s" % err)
            continue
        finally:
            #give data to log file
            timestamp = datetime.utcnow().strftime("%b %d %H:%M:%S.%f")
            output.write(timestamp + "\n")
            subprocess.Popen(["/usr/bin/tshark", "-q", "-Xlua_script:../../../snowflake-stage.lua", "-r", "%s.pcap" % nickname], stdout=output).communicate()[0]
            logging.info("Probed snowflake proxy %d time(s)" % (x + 1))
    finally:
        output.close()
        #Extract the proxy ip
        logging.info("deleting temporary DataDirectory %r", datadir)
        shutil.rmtree(datadir)
        if tcpdump_proc is not None:
            tcpdump_proc.terminate()
        if tcpdump_lo_proc is not None:
            tcpdump_lo_proc.terminate()
        if tcpdump_eth0_proc is not None:
            tcpdump_eth0_proc.terminate()