test_generate.py 10.4 KB
Newer Older
1
import sbws.core.generate
2
3
4
5
from sbws.util.config import get_config
from sbws.lib.resultdump import load_recent_results_in_datadir
from sbws.lib.resultdump import ResultSuccess
from statistics import median
6
import logging
7

8
log = logging.getLogger(__name__)
9

10
11
# TODO: this should be parsed from the results
NUM_LINES_HEADER = 6
12

juga's avatar
juga committed
13

14
15
def test_generate_no_dotsbws(tmpdir, caplog, parser):
    """generate must exit(1) and hint at 'sbws init' when the .sbws
    directory does not exist yet.

    ``tmpdir`` is a fresh pytest temp dir, so no sbws state exists in it.
    """
    caplog.set_level(logging.DEBUG)
    dotsbws = tmpdir
    args = parser.parse_args(
        '-d {} --log-level DEBUG generate'.format(dotsbws).split())
    conf = get_config(args)
    try:
        sbws.core.generate.main(args, conf)
    except SystemExit as e:
        assert e.code == 1
    else:
        # assert False (not the falsy-but-odd `assert None`) so the
        # intent — "reaching here is a failure" — is explicit.
        assert False, 'Should have failed'
    assert 'Try sbws init' in caplog.records[-1].getMessage()
27
28


29
def test_generate_no_datadir(empty_dotsbws, caplog, parser):
    """generate must exit(1) when .sbws exists but its datadir does not.

    ``empty_dotsbws`` provides an initialized sbws home without a datadir.
    """
    dotsbws = empty_dotsbws
    args = parser.parse_args(
        '-d {} --log-level DEBUG generate --output /dev/stdout'
        .format(dotsbws.name).split())
    conf = get_config(args)
    try:
        sbws.core.generate.main(args, conf)
    except SystemExit as e:
        assert e.code == 1
    else:
        # assert False (not the falsy-but-odd `assert None`) so the
        # intent — "reaching here is a failure" — is explicit.
        assert False, 'Should have failed'
    dd = conf['paths']['datadir']
    assert '{} does not exist'.format(dd) in caplog.records[-1].getMessage()
43
44


45
def test_generate_bad_scale_constant(empty_dotsbws_datadir, caplog, parser):
    """A negative --scale-constant must make generate exit(1) with a
    clear error message."""
    dotsbws = empty_dotsbws_datadir
    args = parser.parse_args(
        '-d {} --log-level DEBUG generate --scale-constant -1 '
        '--output /dev/stdout'.format(dotsbws.name).split())
    conf = get_config(args)
    try:
        sbws.core.generate.main(args, conf)
    except SystemExit as e:
        assert e.code == 1
    else:
        # assert False (not the falsy-but-odd `assert None`) so the
        # intent — "reaching here is a failure" — is explicit.
        assert False, 'Should have failed'
    assert '--scale-constant must be positive' in \
        caplog.records[-1].getMessage()
59
60


61
def test_generate_empty_datadir(empty_dotsbws_datadir, caplog, parser):
    """An existing but empty datadir makes generate log 'No recent
    results' (and not crash)."""
    cmdline = '-d {} --log-level DEBUG generate --output /dev/stdout'.format(
        empty_dotsbws_datadir.name)
    args = parser.parse_args(cmdline.split())
    conf = get_config(args)
    sbws.core.generate.main(args, conf)
    assert 'No recent results' in caplog.records[-1].getMessage()
69
70


71
72
def test_generate_single_error(dotsbws_error_result, caplog, parser):
    """A datadir containing only an error result yields no usable lines:
    generate keeps 0/1 read lines and reports no recent results."""
    caplog.set_level(logging.DEBUG)
    dotsbws = dotsbws_error_result
    args = parser.parse_args(
        '-d {} --log-level DEBUG generate --output /dev/stdout'
        .format(dotsbws.name).split())
    conf = get_config(args)
    sbws.core.generate.main(args, conf)
    dd = conf['paths']['datadir']
    # any() over the log records replaces the original for/else scan.
    needle = 'Keeping 0/1 read lines from {}'.format(dd)
    assert any(needle in record.getMessage()
               for record in caplog.records), \
        'Unable to find log line indicating 0 success results in data file'
    assert 'No recent results' in caplog.records[-1].getMessage()
87
88


89
90
def test_generate_single_success_noscale(dotsbws_success_result, caplog,
                                         parser,  capfd):
    """One successful measurement, no scaling: the single v3bw body line
    carries the median measured speed (KiB/s) and rtt of that result."""
    dotsbws = dotsbws_success_result
    cmd = '-d {} --log-level DEBUG generate --output /dev/stdout'.format(
        dotsbws.name)
    args = parser.parse_args(cmd.split())
    conf = get_config(args)
    sbws.core.generate.main(args, conf)
    datadir = conf['paths']['datadir']
    # load_recent_results_in_datadir returns {fingerprint: [results]};
    # flatten it into a plain list.
    results = load_recent_results_in_datadir(1, datadir, success_only=False)
    assert isinstance(results, dict)
    flat = [r for per_fp in results.values() for r in per_fp]
    assert len(flat) == 1, 'There should be one result in the datadir'
    result = flat[0]
    assert isinstance(result, ResultSuccess), 'The one existing result '\
        'should be a success'
    stdout_lines = capfd.readouterr().out.strip().split('\n')
    # Exactly the header plus one bw line.
    assert len(stdout_lines) == 1 + NUM_LINES_HEADER
    speeds_kib = [dl['amount'] / dl['duration'] / 1024
                  for dl in result.downloads]
    bw = round(median(speeds_kib))
    rtt = median([round(r * 1000) for r in result.rtts])
    bw_line = 'node_id=${} bw={} nick={} rtt={} time={}'.format(
        result.fingerprint, bw, result.nickname, rtt, round(result.time))
    assert stdout_lines[NUM_LINES_HEADER] == bw_line
118
119


120
def test_generate_single_success_scale(dotsbws_success_result, parser,
                                       capfd):
    """One successful measurement with --scale: the bw value is the
    scaled constant (7500), not the measured median."""
    dotsbws = dotsbws_success_result
    cmd = '-d {} --log-level DEBUG generate --scale --output /dev/stdout'\
        .format(dotsbws.name)
    args = parser.parse_args(cmd.split())
    conf = get_config(args)
    sbws.core.generate.main(args, conf)
    datadir = conf['paths']['datadir']
    # Dict of {fingerprint: [results]} flattened to a list.
    results = load_recent_results_in_datadir(1, datadir, success_only=False)
    assert isinstance(results, dict)
    flat = [r for per_fp in results.values() for r in per_fp]
    assert len(flat) == 1, 'There should be one result in the datadir'
    result = flat[0]
    assert isinstance(result, ResultSuccess), 'The one existing result '\
        'should be a success'
    stdout_lines = capfd.readouterr().out.strip().split('\n')
    assert len(stdout_lines) == 1 + NUM_LINES_HEADER
    # With --scale the output bw is the fixed scaled amount.
    bw = 7500
    rtt = median([round(r * 1000) for r in result.rtts])
    bw_line = 'node_id=${} bw={} nick={} rtt={} time={}'.format(
        result.fingerprint, bw, result.nickname, rtt, round(result.time))
    assert stdout_lines[NUM_LINES_HEADER] == bw_line
148
149
150


def test_generate_single_relay_success_noscale(
        dotsbws_success_result_one_relay, parser, capfd):
    # Two successful results for the same relay, no scaling: the single
    # v3bw body line uses the median speed over the downloads of both
    # results.
    dotsbws = dotsbws_success_result_one_relay
    args = parser.parse_args(
        '-d {} --log-level DEBUG generate --output /dev/stdout'
        .format(dotsbws.name).split())
    conf = get_config(args)
    sbws.core.generate.main(args, conf)
    dd = conf['paths']['datadir']
    # Here results is a dict
    results = load_recent_results_in_datadir(1, dd, success_only=False)
    assert isinstance(results, dict)
    res_len = sum([len(results[fp]) for fp in results])
    assert res_len == 2, 'There should be two results in the datadir'
    # And here we change it to a list
    results = [r for fp in results for r in results[fp]]
    for result in results:
        assert isinstance(result, ResultSuccess), 'All existing results '\
            'should be a success'
    captured = capfd.readouterr()
    stdout_lines = captured.out.strip().split('\n')
    # Header plus exactly one bw line (one relay).
    assert len(stdout_lines) == 1 + NUM_LINES_HEADER
    # Median speed (KiB/s) across the downloads of *all* results.
    speeds = [dl['amount'] / dl['duration'] / 1024
              for r in results for dl in r.downloads]
    speed = round(median(speeds))
    # NOTE(review): `result` below is the leaked loop variable from the
    # isinstance loop, i.e. only the *last* result's rtts/time feed the
    # expected line — presumably matching generate's output; confirm.
    rtt = round(median([round(r * 1000) for r in result.rtts]))
    bw_line = 'node_id=${} bw={} nick={} rtt={} time={}'.format(
        result.fingerprint, speed, result.nickname, rtt, round(result.time))
    assert stdout_lines[NUM_LINES_HEADER] == bw_line
180
181
182


def test_generate_single_relay_success_scale(
        dotsbws_success_result_one_relay, parser, capfd):
    """Two successful results for one relay with --scale: the bw value is
    the scaled constant (7500); rtt/time come from the newest result."""
    dotsbws = dotsbws_success_result_one_relay
    cmd = '-d {} --log-level DEBUG generate --scale --output /dev/stdout'\
        .format(dotsbws.name)
    args = parser.parse_args(cmd.split())
    conf = get_config(args)
    sbws.core.generate.main(args, conf)
    datadir = conf['paths']['datadir']
    # {fingerprint: [results]} flattened to a list.
    results = load_recent_results_in_datadir(1, datadir, success_only=False)
    assert isinstance(results, dict)
    flat = [r for per_fp in results.values() for r in per_fp]
    assert len(flat) == 2, 'There should be two results in the datadir'
    for res in flat:
        assert isinstance(res, ResultSuccess), 'All existing results '\
            'should be a success'
    stdout_lines = capfd.readouterr().out.strip().split('\n')
    assert len(stdout_lines) == 1 + NUM_LINES_HEADER
    # The expected line is built from the last result only (the original
    # test used the leaked loop variable, which is flat[-1]).
    last = flat[-1]
    speed = 7500
    rtt = round(median([round(r * 1000) for r in last.rtts]))
    bw_line = 'node_id=${} bw={} nick={} rtt={} time={}'.format(
        last.fingerprint, speed, last.nickname, rtt, round(last.time))
    assert stdout_lines[NUM_LINES_HEADER] == bw_line
210
211
212


def test_generate_two_relays_success_noscale(
        dotsbws_success_result_two_relays, parser, capfd):
    """Two relays with two successful results each, no scaling: each relay
    gets one v3bw line with its median speed/rtt and its newest
    measurement time.

    The original test built the expected line for each relay with two
    copy-pasted stanzas; that is factored into one helper here.
    """
    dotsbws = dotsbws_success_result_two_relays
    args = parser.parse_args(
        '-d {} --log-level DEBUG generate --output /dev/stdout'
        .format(dotsbws.name).split())
    conf = get_config(args)
    sbws.core.generate.main(args, conf)
    dd = conf['paths']['datadir']
    # Here results is a dict of {fingerprint: [results]}
    results = load_recent_results_in_datadir(1, dd, success_only=False)
    assert isinstance(results, dict)
    res_len = sum(len(results[fp]) for fp in results)
    assert res_len == 4, 'There should be 4 results in the datadir'
    # And here we change it to a list
    results = [r for fp in results for r in results[fp]]
    for result in results:
        assert isinstance(result, ResultSuccess), 'All existing results '\
            'should be a success'
    captured = capfd.readouterr()
    stdout_lines = captured.out.strip().split('\n')
    # Header plus one bw line per relay.
    assert len(stdout_lines) == 2 + NUM_LINES_HEADER

    def expected_bw_line(fingerprint):
        # Build the expected v3bw line for one relay: median speed over
        # all its downloads, median rtt over all its rtts, and the time
        # of its newest result.
        relay_results = [r for r in results if r.fingerprint == fingerprint]
        time_ = round(max(r.time for r in relay_results))
        nick = relay_results[0].nickname
        speeds = [dl['amount'] / dl['duration'] / 1024
                  for r in relay_results for dl in r.downloads]
        speed = round(median(speeds))
        rtt = round(median([round(sample * 1000) for r in relay_results
                            for sample in r.rtts]))
        return 'node_id=${} bw={} nick={} rtt={} time={}'.format(
            fingerprint, speed, nick, rtt, time_)

    # NOTE(review): relay A's line is expected second and relay B's
    # first — presumably generate sorts its output lines; confirm the
    # ordering rule before relying on these indexes.
    assert stdout_lines[1 + NUM_LINES_HEADER] == expected_bw_line('A' * 40)
    assert stdout_lines[NUM_LINES_HEADER] == expected_bw_line('B' * 40)