Commit b23b3059 authored by Matt Traudt's avatar Matt Traudt Committed by Matt Traudt
Browse files

Mega commit that switches from PastlyLogger to std logging

It may still have some runtime bugs and needs a little more testing.
parent a49f9a48
# Logging configuration in logging.config.fileConfig format.
# Declares two loggers (root and sbws), one stdout handler, one formatter.
[loggers]
keys = root,sbws
[handlers]
keys = sbwsdefault
[formatters]
keys = sbwsdefault
# Root logger: NOTSET passes every record through to the default handler.
[logger_root]
level = NOTSET
handlers = sbwsdefault
propagate = 1
qualname=root
# Logger for the sbws package: DEBUG and above; propagate = 0 keeps records
# from also reaching the root logger (avoids double emission).
[logger_sbws]
level = DEBUG
handlers = sbwsdefault
propagate = 0
qualname=sbws
# Single handler: stream log records to stdout.
[handler_sbwsdefault]
class = StreamHandler
formatter = sbwsdefault
args = (sys.stdout,)
# Record layout: timestamp, logger name, level, then the message.
[formatter_sbwsdefault]
format = [%(asctime)s] [%(name)s] [%(levelname)s] %(message)s
......@@ -6,6 +6,9 @@ from glob import glob
import os
import gzip
import shutil
import logging
log = logging.getLogger(__name__)
def gen_parser(sub):
......@@ -64,7 +67,7 @@ def _remove_rotten_files(datadir, rotten_days, dry_run=True):
fnames = _get_older_files_than(datadir, rotten_days,
['.txt', '.txt.gz'])
for fname in fnames:
log.info('Deleting', fname)
log.info('Deleting %s', fname)
if not dry_run:
os.remove(fname)
......@@ -78,7 +81,7 @@ def _compress_stale_files(datadir, stale_days, dry_run=True):
with lock_directory(datadir):
fnames = _get_older_files_than(datadir, stale_days, ['.txt'])
for fname in fnames:
log.info('Compressing', fname)
log.info('Compressing %s', fname)
if dry_run:
continue
with open(fname, 'rt') as in_fd:
......@@ -88,22 +91,19 @@ def _compress_stale_files(datadir, stale_days, dry_run=True):
os.remove(fname)
def main(args, conf, log_):
def main(args, conf):
'''
Main entry point in to the cleanup command.
:param argparse.Namespace args: command line arguments
:param configparser.ConfigParser conf: parsed config files
:param sbws.lib.pastlylogger.PastlyLogger log_: logging class instance
'''
global log
log = log_
if not is_initted(args.directory):
fail_hard('Sbws isn\'t initialized. Try sbws init', log=log)
fail_hard('Sbws isn\'t initialized. Try sbws init')
datadir = conf['paths']['datadir']
if not os.path.isdir(datadir):
fail_hard(datadir, 'does not exist', log=log)
fail_hard(datadir, 'does not exist')
fresh_days = conf.getint('general', 'data_period')
stale_days = conf.getint('cleanup', 'stale_days')
......@@ -111,17 +111,17 @@ def main(args, conf, log_):
if stale_days - 2 < fresh_days:
fail_hard('For safetly, cleanup/stale_days ({}) must be at least 2 '
'days larger than general/data_period ({})'.format(
stale_days, fresh_days), log=log)
stale_days, fresh_days))
if rotten_days < stale_days:
fail_hard('cleanup/rotten_days ({}) must be the same or larger than '
'cleanup/stale_days ({})'.format(rotten_days, stale_days),
log=log)
'cleanup/stale_days ({})'.format(rotten_days, stale_days))
if stale_days / 2 < fresh_days:
log.warn('cleanup/stale_days ({}) is less than twice '
'general/data_period ({}). For ease of parsing older results '
'if necessary, it is recommended to make stale_days at least '
'twice the data_period.'.format(stale_days, fresh_days))
log.warning(
'cleanup/stale_days (%d) is less than twice '
'general/data_period (%d). For ease of parsing older results '
'if necessary, it is recommended to make stale_days at least '
'twice the data_period.', stale_days, fresh_days)
_remove_rotten_files(datadir, rotten_days, dry_run=args.dry_run)
_compress_stale_files(datadir, stale_days, dry_run=args.dry_run)
......@@ -23,10 +23,11 @@ import time
import os
import sys
import traceback
import logging
end_event = Event()
stream_building_lock = RLock()
log = logging.getLogger(__name__)
def make_socket(socks_host, socks_port):
......@@ -52,10 +53,10 @@ def socket_connect(s, addr, port):
IPv6. Works with IPv4 and hostnames '''
try:
s.connect((addr, port))
log.debug('Connected to', addr, port, 'via', s.fileno())
log.debug('Connected to %s:%d via %d', addr, port, s.fileno())
except (socket.timeout, socks.GeneralProxyError,
socks.ProxyConnectionError) as e:
log.warn(e)
log.warning(e)
return False
return True
......@@ -103,7 +104,7 @@ def measure_rtt_to_server(sock, conf):
for _ in range(0, conf.getint('client', 'num_rtts')):
start_time = time_now()
if not tell_server_amount(sock, MIN_REQ_BYTES):
log.info('Unable to ping server on', sock.fileno())
log.info('Unable to ping server on %d', sock.fileno())
return
try:
amount_read = len(sock.recv(1))
......@@ -112,7 +113,7 @@ def measure_rtt_to_server(sock, conf):
return
end_time = time_now()
if amount_read == 0:
log.info('No pong from server on', sock.fileno())
log.info('No pong from server on %d', sock.fileno())
return
rtts.append(end_time - start_time)
return rtts
......@@ -152,11 +153,11 @@ def measure_relay(args, conf, helpers, cb, rl, relay):
our_nick = conf['client']['nickname']
helper = helpers.next(blacklist=[relay.fingerprint])
if not helper:
log.warn('Unable to get helper to measure', relay.nickname)
log.warning('Unable to get helper to measure %s', relay.nickname)
return None
circ_id = cb.build_circuit([relay.fingerprint, helper.fingerprint])
if not circ_id:
log.debug('Could not build circuit involving', relay.nickname)
log.debug('Could not build circuit involving %s', relay.nickname)
return ResultErrorCircuit(
relay, [relay.fingerprint, helper.fingerprint], helper.server_host,
our_nick)
......@@ -164,7 +165,7 @@ def measure_relay(args, conf, helpers, cb, rl, relay):
# A function that attaches all streams that gets created on
# connect() to the given circuit
listener = stem_utils.attach_stream_to_circuit_listener(
cb.controller, circ_id, log_fn=log.debug)
cb.controller, circ_id)
with stream_building_lock:
# Tell stem about our listener so it can attach the stream to the
# circuit when we connect()
......@@ -175,14 +176,13 @@ def measure_relay(args, conf, helpers, cb, rl, relay):
# This call blocks until we are connected (or give up). We get attched
# to the right circuit in the background.
connected = socket_connect(s, helper.server_host, helper.server_port)
stem_utils.remove_event_listener(cb.controller, listener,
log_fn=log.info)
stem_utils.remove_event_listener(cb.controller, listener)
if not connected:
log.info('Unable to connect to', helper.server_host,
log.info('Unable to connect to %s:%d', helper.server_host,
helper.server_port)
cb.close_circuit(circ_id)
return
if not authenticate_to_server(s, helper.password, log.info):
if not authenticate_to_server(s, helper.password):
log.info('Unable to authenticate to the server')
res = ResultErrorAuth(
relay, circ_fps, helper.server_host, our_nick)
......@@ -209,10 +209,10 @@ def measure_relay(args, conf, helpers, cb, rl, relay):
}
while len(results) < num_downloads:
if expected_amount == MAX_REQ_BYTES:
log.warn('We are requesting the maximum number of bytes we are '
'allowed to ask for from a server in order to measure',
relay.nickname, 'via helper', helper.fingerprint[0:8],
'and we don\'t expect this to happen very often.')
log.warning('We are requesting the maximum number of bytes we are '
'allowed to ask for from a server in order to measure '
'%s via helper %s and we don\'t expect this to happen '
'very often', relay.nickname, helper.fingerprint[0:8])
# Tell the server to send us the current expected_amount.
if not tell_server_amount(s, expected_amount):
close_socket(s)
......@@ -274,7 +274,7 @@ def _should_keep_result(did_request_maximum, result_time, download_times):
result_time < download_times['max']:
return True
# In all other cases, return false
log.debug('Not keeping result time {:.2f}s.'.format(result_time),
log.debug('Not keeping result time %f.%s', result_time,
'' if not did_request_maximum else 'We requested the maximum '
'amount allowed')
return False
......@@ -309,8 +309,8 @@ def result_putter_error(target):
measurement -- and return that function so it can be used by someone else
'''
def closure(err):
log.warn('Unhandled exception caught while measuring {}: {} {}'.format(
target.nickname, type(err), err))
log.warning('Unhandled exception caught while measuring {}: {} {}'
.format(target.nickname, type(err), err))
return closure
......@@ -318,14 +318,14 @@ def run_speedtest(args, conf):
controller = None
controller, error_msg = stem_utils.init_controller_with_config(conf)
if not controller:
fail_hard(error_msg, log=log)
fail_hard(error_msg)
assert controller
cb = CB(args, conf, log, controller=controller)
rl = RelayList(args, conf, log, controller=controller)
rd = ResultDump(args, conf, log, end_event)
rp = RelayPrioritizer(args, conf, log, rl, rd)
cb = CB(args, conf, controller=controller)
rl = RelayList(args, conf, controller=controller)
rd = ResultDump(args, conf, end_event)
rp = RelayPrioritizer(args, conf, rl, rd)
helpers, error_msg = HelperRelayList.from_config(
args, conf, log, controller=controller)
args, conf, controller=controller)
if not helpers:
fail_hard(error_msg)
max_pending_results = conf.getint('client', 'measurement_threads')
......@@ -333,7 +333,7 @@ def run_speedtest(args, conf):
pending_results = []
while True:
for target in rp.best_priority():
log.debug('Measuring', target.nickname)
log.debug('Measuring %s', target.nickname)
callback = result_putter(rd)
callback_err = result_putter_error(target)
async_result = pool.apply_async(
......@@ -354,24 +354,21 @@ def gen_parser(sub):
description=d)
def main(args, conf, log_):
global log
log = log_
def main(args, conf):
if not is_initted(args.directory):
fail_hard('Sbws isn\'t initialized. Try sbws init', log=log)
fail_hard('Sbws isn\'t initialized. Try sbws init')
if conf.getint('client', 'measurement_threads') < 1:
fail_hard('Number of measurement threads must be larger than 1',
log=log)
fail_hard('Number of measurement threads must be larger than 1')
if conf['tor']['control_type'] not in ['port', 'socket']:
fail_hard('Must specify either control port or socket. '
'Not "{}"'.format(conf['tor']['control_type'], log=log))
'Not "{}"'.format(conf['tor']['control_type']))
if conf['tor']['control_type'] == 'port':
try:
conf.getint('tor', 'control_location')
except ValueError as e:
fail_hard('Couldn\'t read control port from config:', e, log=log)
fail_hard('Couldn\'t read control port from config:', e)
os.makedirs(conf['paths']['datadir'], exist_ok=True)
try:
......
......@@ -6,6 +6,9 @@ from sbws.lib.resultdump import group_results_by_relay
from argparse import ArgumentDefaultsHelpFormatter
from statistics import median
import os
import logging
log = logging.getLogger(__name__)
class V3BWLine:
......@@ -45,8 +48,8 @@ def warn_if_not_accurate_enough(lines, constant):
log.info('The generated lines are within {:.5}% of what they should '
'be'.format((1-accuracy_ratio)*100))
if accuracy_ratio < 1 - margin or accuracy_ratio > 1 + margin:
log.warn('There was {:.3f}% error and only +/- {:.3f}% is '
'allowed'.format((1-accuracy_ratio)*100, margin*100, 2))
log.warning('There was {:.3f}% error and only +/- {:.3f}% is '
'allowed'.format((1-accuracy_ratio)*100, margin*100, 2))
def scale_lines(args, v3bw_lines):
......@@ -95,24 +98,22 @@ def log_stats(data_lines):
log.info('Mean bandwidth per line: {:.2f} "KiB"'.format(bw_per_line))
def main(args, conf, log_):
global log
log = log_
def main(args, conf):
if not is_initted(args.directory):
fail_hard('Sbws isn\'t initialized. Try sbws init', log=log)
fail_hard('Sbws isn\'t initialized. Try sbws init')
datadir = conf['paths']['datadir']
if not os.path.isdir(datadir):
fail_hard(datadir, 'does not exist', log=log)
fail_hard('%s does not exist', datadir)
if args.scale_constant < 1:
fail_hard('--scale-constant must be positive', log=log)
fail_hard('--scale-constant must be positive')
fresh_days = conf.getint('general', 'data_period')
results = load_recent_results_in_datadir(
fresh_days, datadir, success_only=True, log_fn=log.debug)
fresh_days, datadir, success_only=True)
if len(results) < 1:
log.warn('No recent results, so not generating anything. (Have you '
'ran sbws client recently?)')
log.warning('No recent results, so not generating anything. (Have you '
'ran sbws client recently?)')
return
data = group_results_by_relay(results)
data_lines = [result_data_to_v3bw_line(data, fp) for fp in data]
......
......@@ -3,6 +3,9 @@ from sbws.util.config import get_user_example_config
from argparse import ArgumentDefaultsHelpFormatter
import os
import shutil
import logging
log = logging.getLogger(__name__)
def gen_parser(sub):
......@@ -13,33 +16,31 @@ def gen_parser(sub):
description=d)
def main(args, conf):
    '''
    Main entry point into the init command: create the sbws home directory,
    write a config.ini based on the example config, and copy the files listed
    in G_INIT_FILE_MAP into place.

    :param argparse.Namespace args: command line arguments
    :param configparser.ConfigParser conf: parsed config files
    '''
    if is_initted(args.directory):
        fail_hard('Directory already seems to be initted')
    if not os.path.isdir(args.directory):
        log.info('Creating %s', args.directory)
        # exist_ok=False: we just checked it doesn't exist; a race here
        # should be a hard error, not silently ignored.
        os.makedirs(args.directory, exist_ok=False)
    config_fname = os.path.join(args.directory, 'config.ini')
    c = get_user_example_config()
    c['paths']['sbws_home'] = args.directory
    log.info('Creating %s based on example config', config_fname)
    with open(config_fname, 'wt') as fd:
        c.write(fd)
    for src, dst, ftype in G_INIT_FILE_MAP:
        dst = os.path.join(args.directory, dst)
        if os.path.exists(dst):
            log.warning('%s already exists, not overwriting', dst)
            continue
        if ftype == 'file':
            log.info('Creating %s (%s)', dst, ftype)
            try:
                shutil.copy(src, dst)
            except PermissionError as e:
                log.warning('Unable to create %s: %s', dst, e)
        else:
            # fail_hard forwards *args to log.error, which uses lazy
            # %-formatting -- the message needs an explicit placeholder or
            # logging raises a formatting error. (Fixes a leftover from the
            # PastlyLogger -> std logging migration.)
            fail_hard('Cannot init ftype %s', ftype)
......@@ -9,6 +9,9 @@ import socket
import time
import random
import os
import logging
log = logging.getLogger(__name__)
def gen_parser(sub):
......@@ -21,7 +24,7 @@ def gen_parser(sub):
def close_socket(s):
try:
log.info('Closing fd', s.fileno())
log.info('Closing fd %d', s.fileno())
s.shutdown(socket.SHUT_RDWR)
s.close()
except OSError:
......@@ -29,7 +32,7 @@ def close_socket(s):
def get_send_amount(sock):
line = read_line(sock, max_len=16, log_fn=log.info)
line = read_line(sock, max_len=16)
if line is None:
return None
# if len(line) == 16, then it is much more likely we read garbage or not an
......@@ -70,8 +73,8 @@ def _generate_random_string(length):
# stop = time_now()
# _generate_random_string.acc += stop - start
# if stop >= 60 + _generate_random_string.last_log:
# log.notice('Spent', _generate_random_string.acc,
# 'seconds in the last minute generating "random" strings')
# log.info('Spent', _generate_random_string.acc,
# 'seconds in the last minute generating "random" strings')
# _generate_random_string.acc = 0
# _generate_random_string.last_log = stop
assert len(s) >= length
......@@ -87,7 +90,7 @@ _generate_random_string.alphabet = list('abcdefghijklmnopqrstuvwxyz'
def write_to_client(sock, conf, amount):
''' Returns True if successful; else False '''
log.info('Sending client no.', sock.fileno(), amount, 'bytes')
log.info('Sending client no. %d %d bytes', sock.fileno(), amount)
while amount > 0:
amount_this_time = min(conf.getint('server', 'max_send_per_write'),
amount)
......@@ -96,7 +99,7 @@ def write_to_client(sock, conf, amount):
sock.send(bytes(
_generate_random_string(amount_this_time), 'utf-8'))
except (socket.timeout, ConnectionResetError, BrokenPipeError) as e:
log.info('fd', sock.fileno(), ':', e)
log.info('fd %d: %s', sock.fileno(), e)
return False
return True
......@@ -104,45 +107,44 @@ def write_to_client(sock, conf, amount):
def new_thread(args, conf, sock):
    '''
    Build (but do not start) a Thread that serves one client connection:
    authenticate the client, then repeatedly read a requested byte amount and
    send that many random bytes back until the client goes away or misbehaves.

    :param argparse.Namespace args: command line arguments (unused here, kept
        for a uniform interface with the other per-connection helpers)
    :param configparser.ConfigParser conf: parsed config files
    :param socket.socket sock: the accepted, already-connected client socket
    :returns: an unstarted threading.Thread whose target handles the client
    '''
    def closure():
        client_name = authenticate_client(
            sock, conf['server.passwords'])
        if not client_name:
            log.info('Client did not provide valid auth')
            close_socket(sock)
            return
        log.info('%s authenticated on %d', client_name, sock.fileno())
        while True:
            send_amount = get_send_amount(sock)
            if send_amount is None:
                log.info('Couldn\'t get an amount to send to %d',
                         sock.fileno())
                break
            # Reject absurd requests so a client can't make us stream
            # unbounded data (or nothing at all).
            if send_amount < MIN_REQ_BYTES or send_amount > MAX_REQ_BYTES:
                log.warning('%s requested %d bytes, which is not valid',
                            client_name, send_amount)
                break
            write_to_client(sock, conf, send_amount)
        log.info('%s on %d went away', client_name, sock.fileno())
        close_socket(sock)
    thread = Thread(target=closure)
    return thread
def main(args, conf, log_):
global log
def main(args, conf):
global rng
log = log_
rng = random.SystemRandom()
if not is_initted(args.directory):
fail_hard('Sbws isn\'t initialized. Try sbws init', log=log)
fail_hard('Sbws isn\'t initialized. Try sbws init')
if len(conf['server.passwords']) < 1:
conf_fname = os.path.join(args.directory, 'config.ini')
fail_hard('Sbws server needs at least one password in the section'
' [server.passwords] in the config file in {}. See '
'DEPLOY.rst for more information.'
.format(conf_fname), log=log)
.format(conf_fname))
h = (conf['server']['bind_ip'], conf.getint('server', 'bind_port'))
log.notice('Binding to', h)
log.info('Binding to %s:%d', *h)
while True:
try:
# first try IPv4
......@@ -156,25 +158,26 @@ def main(args, conf, log_):
server = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
server.bind(h)
except OSError as e2:
log.warn(e1)
log.warn(e2)
log.warning('IPv4 bind error: %s', e1)
log.warning('IPv6 bind error: %s', e2)
time.sleep(5)
else:
break
else:
break
log.notice('Listening on', h)
log.info('Listening on %s:%d', h[0], h[1])
server.listen(5)
try:
while True:
sock, addr = server.accept()
sock.settimeout(SOCKET_TIMEOUT)
log.info('accepting connection from', addr, 'as', sock.fileno())
log.info('accepting connection from %s:%d as %d', addr[0], addr[1],
sock.fileno())
t = new_thread(args, conf, sock)
t.start()
except KeyboardInterrupt:
pass
finally:
log.info('Generate random string stats:',
log.info('Generate random string stats: %s',
_generate_random_string.cache_info())
close_socket(server)
......@@ -9,6 +9,9 @@ import os
from datetime import date
from datetime import timedelta
from statistics import mean
import logging
log = logging.getLogger(__name__)
def _print_stats_error_types(data):
......@@ -17,7 +20,7 @@ def _print_stats_error_types(data):
results = data[fp]
for result in results:
if result.type not in counts:
log.debug('Found a', result.type, 'for the first time')
log.debug('Found a %s for the first time', result.type)
counts[result.type] = 0
counts[result.type] += 1
counts['total'] += 1
......@@ -110,28 +113,25 @@ def gen_parser(sub):
help='Also print information about each error type')
def main(args, conf):
    '''
    Main entry point into the stats command: load recent results (including
    failures) from the data directory, group them by relay, and print stats.

    :param argparse.Namespace args: command line arguments
    :param configparser.ConfigParser conf: parsed config files
    '''
    if not is_initted(args.directory):
        fail_hard('Sbws isn\'t initialized. Try sbws init')
    datadir = conf['paths']['datadir']
    if not os.path.isdir(datadir):
        fail_hard('%s does not exist', datadir)
    fresh_days = conf.getint('general', 'data_period')
    # success_only=False: stats also reports on error results, unlike v3bw
    # generation which only wants successful measurements.
    results = load_recent_results_in_datadir(
        fresh_days, datadir, success_only=False)
    if len(results) < 1:
        log.warning('No fresh results')
        return
    data = group_results_by_relay(results)
    print_stats(args, data)
from sbws.lib.pastlylogger import PastlyLogger
import os
import time
import logging
from filelock import FileLock
log = logging.getLogger(__name__)
G_PKG_DIR = os.path.abspath(os.path.dirname(__file__))
G_INIT_FILE_MAP = [
......@@ -45,14 +46,10 @@ def is_initted(d):
return True
def fail_hard(*a, **kw):
    ''' Log an error via the module-level logger and then exit as fast as
    possible with status 1.

    All arguments are forwarded unchanged to log.error(), so callers should
    use lazy %-style formatting: ``fail_hard('bad value: %s', value)``. '''
    log.error(*a, **kw)
    exit(1)
......@@ -93,35 +90,6 @@ def _log_level_int_to_string(i):