v3bwfile.py 48.9 KB
Newer Older
juga  's avatar
juga committed
1
2
3
4
# -*- coding: utf-8 -*-
"""Classes and functions that create the bandwidth measurements document
(v3bw) used by bandwidth authorities."""

juga  's avatar
juga committed
5
import copy
juga  's avatar
juga committed
6
import logging
7
import math
juga  's avatar
juga committed
8
import os
9
from itertools import combinations
juga  's avatar
juga committed
10
from statistics import median, mean
11
from stem.descriptor import parse_file
12

13
from sbws import __version__
14
from sbws.globals import (SPEC_VERSION, BW_LINE_SIZE, SBWS_SCALE_CONSTANT,
15
16
                          TORFLOW_SCALING, SBWS_SCALING, TORFLOW_BW_MARGIN,
                          TORFLOW_OBS_LAST, TORFLOW_OBS_MEAN,
17
                          PROP276_ROUND_DIG, MIN_REPORT, MAX_BW_DIFF_PERC)
juga  's avatar
juga committed
18
from sbws.lib.resultdump import ResultSuccess, _ResultType
19
from sbws.util.filelock import DirectoryLock
juga  's avatar
juga committed
20
21
from sbws.util.timestamp import (now_isodt_str, unixts_to_isodt_str,
                                 now_unixts)
22
from sbws.util.state import State
juga  's avatar
juga committed
23
24
25

log = logging.getLogger(__name__)

LINE_SEP = '\n'
KEYVALUE_SEP_V1 = '='
KEYVALUE_SEP_V2 = ' '


# NOTE: in a future refactor, make all the KeyValues be a dictionary
# with their type, so that it's more similar to stem parser.

# Header KeyValues
# =================
# List of the extra KeyValues accepted by the class
EXTRA_ARG_KEYVALUES = ['software', 'software_version', 'file_created',
                       'earliest_bandwidth', 'generator_started',
                       'scanner_country', 'destinations_countries']
# number_eligible_relays is the number that ends in the bandwidth file
# ie, have not been excluded by one of the filters in 4. below
# They should be called recent_measurement_included_count to be congruent
# with the other KeyValues.
STATS_KEYVALUES = ['number_eligible_relays', 'minimum_number_eligible_relays',
                   'number_consensus_relays', 'percent_eligible_relays',
                   'minimum_percent_eligible_relays']

# Added in #29591
BW_HEADER_KEYVALUES_MONITOR = [
    # 1.1 header: the number of different consensuses, that sbws has seen,
    # since the last 5 days
    'recent_consensus_count',
    # 2.4 Number of times a priority list has been created
    'recent_priority_list_count',
    # 2.5 Number of relays that there were in a priority list
    # [50, number of relays in the network * 0.05]
    'recent_priority_relay_count',
    # 3.6 header: the number of times that sbws has tried to measure any relay,
    # since the last 5 days
    # This would be the number of times a relays were in a priority list
    'recent_measurement_attempt_count',
    # 3.7 header: the number of times that sbws has tried to measure any relay,
    # since the last 5 days, but it didn't work
    # This should be the number of attempts - number of ResultSuccess -
    # something else we don't know yet
    # So far is the number of ResultError
    'recent_measurement_failure_count',
    # The number of success results should be:
    # the number of attempts - the number of failures
    # 4.6 header: the number of successful results, created in the last 5 days,
    # that were excluded by a filter
    # This is the sum of the following 3 + not success results
    # 'recent_measurement_exclusion_count',
    'recent_measurement_exclusion_not_distanciated_count',
    'recent_measurement_exclusion_not_recent_count',
    'recent_measurement_exclusion_not_min_num_count',
]
BANDWIDTH_HEADER_KEY_VALUES_INIT = \
    ['earliest_bandwidth', 'generator_started',
     'scanner_country', 'destinations_countries']\
    + STATS_KEYVALUES \
    + BW_HEADER_KEYVALUES_MONITOR

KEYVALUES_INT = STATS_KEYVALUES + BW_HEADER_KEYVALUES_MONITOR
# List of all unordered KeyValues currently being used to generate the file
UNORDERED_KEYVALUES = EXTRA_ARG_KEYVALUES + STATS_KEYVALUES + \
                      ['latest_bandwidth'] + \
                      BW_HEADER_KEYVALUES_MONITOR
# List of all the KeyValues currently being used to generate the file
ALL_KEYVALUES = ['version'] + UNORDERED_KEYVALUES

TERMINATOR = '====='

# Bandwidth Lines KeyValues
# =========================
# Num header lines in v1.X.X using all the KeyValues
NUM_LINES_HEADER_V1 = len(ALL_KEYVALUES) + 2
LINE_TERMINATOR = TERMINATOR + LINE_SEP

# KeyValue separator in Bandwidth Lines
BW_KEYVALUE_SEP_V1 = ' '
# not including in the files the extra bws for now
BW_KEYVALUES_BASIC = ['node_id', 'bw']
BW_KEYVALUES_FILE = BW_KEYVALUES_BASIC + \
                    ['master_key_ed25519', 'nick', 'rtt', 'time',
                     'success', 'error_stream', 'error_circ', 'error_misc',
                     # Added in #292951
                     'error_second_relay', 'error_destination']
BW_KEYVALUES_EXTRA_BWS = ['bw_median', 'bw_mean', 'desc_bw_avg', 'desc_bw_bur',
                          'desc_bw_obs_last', 'desc_bw_obs_mean',
                          'consensus_bandwidth',
                          'consensus_bandwidth_is_unmeasured']

# Added in #292951
BANDWIDTH_LINE_KEY_VALUES_MONITOR = [
    # 1.2 relay: the number of different consensuses, that sbws has seen,
    # since the last 5 days, that have this relay
    'relay_in_recent_consensus_count',
    # 2.6 relay: the number of times a relay was "prioritized" to be measured
    # in the recent days (by default 5).
    'relay_recent_priority_list_count',
    # 3.8 relay:  the number of times that sbws has tried to measure
    # this relay, since the last 5 days
    # This would be the number of times a relay was in a priority list (2.6)
    # since once it gets measured, it either returns ResultError,
    # ResultSuccess or something else happened that we don't know yet
    'relay_recent_measurement_attempt_count',
    # 3.9 relay:  the number of times that sbws has tried to measure
    # this relay, since the last 5 days, but it didn't work
    # This should be the number of attempts - number of ResultSuccess -
    # something else we don't know yet
    # So far is the number of ResultError
    'relay_recent_measurement_failure_count',
    # The number of success results should be:
    # the number of attempts - the number of failures
    # 4.8 relay:  the number of successful results, created in the last 5 days,
    # that were excluded by a rule, for this relay
    # This would be the sum of the following 3 + the number of not success
    'relay_recent_measurement_exclusion_count',
    'relay_recent_measurement_exclusion_not_distanciated',
    'relay_recent_measurement_exclusion_not_recent_count',
    'relay_recent_measurement_exclusion_not_min_num_count',
]
BW_KEYVALUES_EXTRA = BW_KEYVALUES_FILE + BW_KEYVALUES_EXTRA_BWS \
               + BANDWIDTH_LINE_KEY_VALUES_MONITOR
BW_KEYVALUES_INT = ['bw', 'rtt', 'success', 'error_stream',
                    'error_circ', 'error_misc'] + BW_KEYVALUES_EXTRA_BWS \
                   + BANDWIDTH_LINE_KEY_VALUES_MONITOR
BW_KEYVALUES = BW_KEYVALUES_BASIC + BW_KEYVALUES_EXTRA
juga  's avatar
juga committed
148
149


150
def round_sig_dig(n, digits=PROP276_ROUND_DIG):
    """Round *n* to *digits* significant digits in front of the decimal
       point. Results less than or equal to 1 are rounded to 1.
       Returns an integer.

       digits must be greater than 0.
       n must be less than or equal to 2**73, to avoid floating point errors.
       """
    digits = int(digits)
    assert digits >= 1
    # everything at or below 1 collapses to 1 by definition
    if n <= 1:
        return 1
    total_digits = int(math.log10(n)) + 1
    # how many trailing digits to zero out (never negative)
    drop = max(total_digits - digits, 0)
    return int(round(n, -drop))


168
def kb_round_x_sig_dig(bw_bs, digits=PROP276_ROUND_DIG):
    """Convert *bw_bs* from bytes to kilobytes, and round the result to
       *digits* significant digits.
       Results less than or equal to 1 are rounded up to 1.
       Returns an integer.

       digits must be greater than 0.
       n must be less than or equal to 2**82, to avoid floating point errors.
       """
    # keep the division in floating-point so rounding happens only once
    return round_sig_dig(bw_bs / 1000.0, digits=digits)
180
181


juga  's avatar
juga committed
182
183
184
185
186
187
188
189
190
def num_results_of_type(results, type_str):
    """Count the results whose ``type`` equals *type_str*."""
    return sum(1 for r in results if r.type == type_str)


# Better way to use enums?
def result_type_to_key(type_str):
    """Turn a result-type string (e.g. 'error-circ') into a valid
    attribute/KeyValue name (e.g. 'error_circ')."""
    return type_str.replace('-', '_')


191
class V3BWHeader(object):
    """
    Create a bandwidth measurements (V3bw) header
    following bandwidth measurements document spec version 1.X.X.

    :param str timestamp: timestamp in Unix Epoch seconds of the most recent
        generator result.
    :param str version: the spec version
    :param str software: the name of the software that generates this
    :param str software_version: the version of the software
    :param dict kwargs: extra headers. Currently supported:

        - earliest_bandwidth: str, ISO 8601 timestamp in UTC time zone
          when the first bandwidth was obtained
        - generator_started: str, ISO 8601 timestamp in UTC time zone
          when the generator started
    """
    def __init__(self, timestamp, **kwargs):
        assert isinstance(timestamp, str)
        for v in kwargs.values():
            assert isinstance(v, str)
        self.timestamp = timestamp
        # KeyValues with default value when not given by kwargs
        self.version = kwargs.get('version', SPEC_VERSION)
        self.software = kwargs.get('software', 'sbws')
        self.software_version = kwargs.get('software_version', __version__)
        self.file_created = kwargs.get('file_created', now_isodt_str())
        # latest_bandwidth should not be in kwargs, since it MUST be the
        # same as timestamp
        self.latest_bandwidth = unixts_to_isodt_str(timestamp)
        # Plain loop instead of a side-effect list comprehension.
        for k, v in kwargs.items():
            if k in BANDWIDTH_HEADER_KEY_VALUES_INIT:
                setattr(self, k, v)

    def __str__(self):
        if self.version.startswith('1.'):
            return self.strv1
        return self.strv2

    @classmethod
    def from_results(cls, results, scanner_country=None,
                     destinations_countries=None, state_fpath=''):
        """Create a header from the results dictionary and the state file.

        :param dict results: {fingerprint: [Result, ...], ...}
        :param str scanner_country: country of the scanner, optional
        :param str destinations_countries: countries of the destinations,
            optional
        :param str state_fpath: path to the state file
        :returns: a V3BWHeader instance
        """
        kwargs = dict()
        latest_bandwidth = cls.latest_bandwidth_from_results(results)
        earliest_bandwidth = cls.earliest_bandwidth_from_results(results)
        # NOTE: Blocking, reads file
        generator_started = cls.generator_started_from_file(state_fpath)
        recent_consensus_count = cls.consensus_count_from_file(state_fpath)
        timestamp = str(latest_bandwidth)
        kwargs['latest_bandwidth'] = unixts_to_isodt_str(latest_bandwidth)
        kwargs['earliest_bandwidth'] = unixts_to_isodt_str(earliest_bandwidth)
        if generator_started is not None:
            kwargs['generator_started'] = generator_started
        # To be compatible with older bandwidth files, do not require it.
        if scanner_country is not None:
            kwargs['scanner_country'] = scanner_country
        if destinations_countries is not None:
            kwargs['destinations_countries'] = destinations_countries
        if recent_consensus_count is not None:
            kwargs['recent_consensus_count'] = str(recent_consensus_count)

        recent_measurement_attempt_count = \
            cls.recent_measurement_attempt_count_from_file(state_fpath)
        if recent_measurement_attempt_count is not None:
            kwargs['recent_measurement_attempt_count'] = \
                str(recent_measurement_attempt_count)

        # If it is a failure that is not a ResultError, then
        # failures = attempts - all measurements
        # Works only in the case that old measurements files already had
        # measurements count
        if recent_measurement_attempt_count is not None:
            all_measurements = 0
            for result_list in results.values():
                all_measurements += len(result_list)
            measurement_failures = (recent_measurement_attempt_count
                                    - all_measurements)
            kwargs['recent_measurement_failure_count'] = \
                str(measurement_failures)

        priority_lists = cls.recent_priority_list_count_from_file(state_fpath)
        if priority_lists is not None:
            kwargs['recent_priority_list_count'] = str(priority_lists)

        priority_relays = \
            cls.recent_priority_relay_count_from_file(state_fpath)
        if priority_relays is not None:
            kwargs['recent_priority_relay_count'] = str(priority_relays)

        h = cls(timestamp, **kwargs)
        return h

    @classmethod
    def from_lines_v1(cls, lines):
        """
        :param list lines: list of lines to parse
        :returns: tuple of V3BWHeader object and non-header lines, or
            ``None`` when the terminator is missing (not a v1 bw file)
        """
        assert isinstance(lines, list)
        try:
            index_terminator = lines.index(TERMINATOR)
        except ValueError:
            # is not a bw file or is v100
            # log.warn is deprecated since Python 3.3; use log.warning
            log.warning('Terminator is not in lines')
            return None
        ts = lines[0]
        # Only keep "key=value" lines whose key is a known header KeyValue;
        # the timestamp line has no separator, so it is filtered out here.
        kwargs = dict([line.split(KEYVALUE_SEP_V1)
                       for line in lines[:index_terminator]
                       if line.split(KEYVALUE_SEP_V1)[0] in ALL_KEYVALUES])
        h = cls(ts, **kwargs)
        # last line is new line
        return h, lines[index_terminator + 1:-1]

    @classmethod
    def from_text_v1(cls, text):
        """
        :param str text: text to parse
        :returns: tuple of V3BWHeader object and non-header lines
        """
        # First argument renamed from the misleading ``self``: this is a
        # classmethod, so it receives the class.
        assert isinstance(text, str)
        return cls.from_lines_v1(text.split(LINE_SEP))

    @classmethod
    def from_lines_v100(cls, lines):
        """
        :param list lines: list of lines to parse
        :returns: tuple of V3BWHeader object and non-header lines
        """
        assert isinstance(lines, list)
        h = cls(lines[0])
        # last line is new line
        return h, lines[1:-1]

    @staticmethod
    def generator_started_from_file(state_fpath):
        '''
        ISO formatted timestamp for the time when the scanner process most
        recently started.
        '''
        state = State(state_fpath)
        # State supports .get (used by the other *_from_file helpers), so
        # avoid the double lookup of ``in`` + indexing.
        return state.get('scanner_started', None)

    @staticmethod
    def consensus_count_from_file(state_fpath):
        """Number of different consensuses seen, from the state file."""
        state = State(state_fpath)
        return state.get('recent_consensus_count', None)

    # NOTE: in future refactor store state in the class
    @staticmethod
    def recent_measurement_attempt_count_from_file(state_fpath):
        """
        Returns the number of times any relay was queued to be measured
        in the recent (by default 5) days from the state file.
        """
        state = State(state_fpath)
        return state.get('recent_measurement_attempt_count', None)

    @staticmethod
    def recent_priority_list_count_from_file(state_fpath):
        """
        Returns the number of times
        :meth:`~sbws.lib.relayprioritizer.RelayPrioritizer.best_priority`
        was run
        in the recent (by default 5) days from the state file.
        """
        state = State(state_fpath)
        return state.get('recent_priority_list_count', None)

    @staticmethod
    def recent_priority_relay_count_from_file(state_fpath):
        """
        Returns the number of times any relay was "prioritized" to be measured
        in the recent (by default 5) days from the state file.
        """
        state = State(state_fpath)
        return state.get('recent_priority_relay_count', None)

    @staticmethod
    def latest_bandwidth_from_results(results):
        """Rounded Unix timestamp of the most recent result."""
        return round(max([r.time for fp in results for r in results[fp]]))

    @staticmethod
    def earliest_bandwidth_from_results(results):
        """Rounded Unix timestamp of the oldest result."""
        return round(min([r.time for fp in results for r in results[fp]]))

    @property
    def keyvalue_unordered_tuple_ls(self):
        """Return list of KeyValue tuples that do not have specific order."""
        # sort the list to generate deterministic headers
        keyvalue_tuple_ls = sorted([(k, v) for k, v in self.__dict__.items()
                                    if k in UNORDERED_KEYVALUES])
        return keyvalue_tuple_ls

    @property
    def keyvalue_tuple_ls(self):
        """Return list of all KeyValue tuples"""
        return [('version', self.version)] + self.keyvalue_unordered_tuple_ls

    @property
    def keyvalue_v1str_ls(self):
        """Return KeyValue list of strings following spec v1.X.X."""
        keyvalues = [self.timestamp] + [KEYVALUE_SEP_V1.join([k, v])
                                        for k, v in self.keyvalue_tuple_ls]
        return keyvalues

    @property
    def strv1(self):
        """Return header string following spec v1.X.X."""
        header_str = LINE_SEP.join(self.keyvalue_v1str_ls) + LINE_SEP + \
            LINE_TERMINATOR
        return header_str

    @property
    def keyvalue_v2_ls(self):
        """Return KeyValue list of strings following spec v2.X.X."""
        keyvalue = [self.timestamp] + [KEYVALUE_SEP_V2.join([k, v])
                                       for k, v in self.keyvalue_tuple_ls]
        return keyvalue

    @property
    def strv2(self):
        """Return header string following spec v2.X.X."""
        header_str = LINE_SEP.join(self.keyvalue_v2_ls) + LINE_SEP + \
            LINE_TERMINATOR
        return header_str

    @property
    def num_lines(self):
        """Number of lines in the rendered header."""
        return len(self.__str__().split(LINE_SEP))

    def add_stats(self, **kwargs):
        """Set the statistics KeyValues (stringified) on the header."""
        # Using kwargs because attributes might change.
        for k, v in kwargs.items():
            if k in STATS_KEYVALUES:
                setattr(self, k, str(v))

juga  's avatar
juga committed
431

juga  's avatar
juga committed
432
class V3BWLine(object):
    """
    Create a Bandwidth List line following the spec version 1.X.X.

    :param str node_id:
    :param int bw:
    :param dict kwargs: extra headers. Currently supported:

        - nickname, str
        - master_key_ed25519, str
        - rtt, int
        - time, str
        - success, int
        - error_stream, int
        - error_circ, int
        - error_misc, int
    """
    def __init__(self, node_id, bw, **kwargs):
        assert isinstance(node_id, str)
        assert isinstance(bw, int)
        assert node_id.startswith('$')
        self.node_id = node_id
        self.bw = bw
        # Plain loop instead of a side-effect list comprehension; only
        # accept the KeyValues the spec knows about.
        for k, v in kwargs.items():
            if k in BW_KEYVALUES_EXTRA:
                setattr(self, k, v)

    def __str__(self):
        return self.bw_strv1

    @classmethod
    def from_results(cls, results, secs_recent=None, secs_away=None,
                     min_num=0):
        """Convert sbws results to relays' Bandwidth Lines

        ``bs`` stands for Bytes/seconds
        ``bw_mean`` means the bw is obtained from the mean of the all the
        downloads' bandwidth.
        Downloads' bandwidth are calculated as the amount of data received
        divided by the time it took to receive it.
        bw = data (Bytes) / time (seconds)

        :returns: a V3BWLine, or ``None`` when the results do not pass the
            filters (no successes, not away from each other, not recent
            enough, or fewer than ``min_num``).
        """
        # log.debug("Len success_results %s", len(success_results))
        node_id = '$' + results[0].fingerprint
        kwargs = dict()
        kwargs['nick'] = results[0].nickname
        # Use a default so results without the attribute do not raise
        # AttributeError.
        if getattr(results[0], 'master_key_ed25519', None):
            kwargs['master_key_ed25519'] = results[0].master_key_ed25519
        kwargs['time'] = cls.last_time_from_results(results)
        kwargs.update(cls.result_types_from_results(results))

        consensuses_count = \
            [r.relay_in_recent_consensus_count for r in results
             if getattr(r, 'relay_in_recent_consensus_count', None)]
        if consensuses_count:
            consensus_count = max(consensuses_count)
            kwargs['relay_in_recent_consensus_count'] = str(consensus_count)

        measurements_attempts = \
            [r.relay_recent_measurement_attempt_count for r in results
             if getattr(r, 'relay_recent_measurement_attempt_count', None)]
        if measurements_attempts:
            kwargs['relay_recent_measurement_attempt_count'] = \
                str(max(measurements_attempts))

        relay_recent_priority_list_counts = \
            [r.relay_recent_priority_list_count for r in results
             if getattr(r, 'relay_recent_priority_list_count', None)]
        if relay_recent_priority_list_counts:
            kwargs['relay_recent_priority_list_count'] = \
                str(max(relay_recent_priority_list_counts))

        success_results = [r for r in results if isinstance(r, ResultSuccess)]
        if not success_results:
            return None
        results_away = \
            cls.results_away_each_other(success_results, secs_away)
        if not results_away:
            return None
        # log.debug("Results away from each other: %s",
        #           [unixts_to_isodt_str(r.time) for r in results_away])
        results_recent = cls.results_recent_than(results_away, secs_recent)
        if not results_recent:
            return None
        if len(results_recent) < min_num:
            # log.debug('The number of results is less than %s', min_num)
            return None
        rtt = cls.rtt_from_results(results_recent)
        if rtt:
            kwargs['rtt'] = rtt
        # ``bw`` and ``bw_median`` are the same value; compute the median
        # only once instead of twice.
        bw = cls.bw_median_from_results(results_recent)
        kwargs['bw_median'] = bw
        kwargs['bw_mean'] = cls.bw_mean_from_results(results_recent)
        kwargs['desc_bw_avg'] = \
            cls.desc_bw_avg_from_results(results_recent)
        kwargs['desc_bw_bur'] = \
            cls.desc_bw_bur_from_results(results_recent)
        kwargs['consensus_bandwidth'] = \
            cls.consensus_bandwidth_from_results(results_recent)
        kwargs['consensus_bandwidth_is_unmeasured'] = \
            cls.consensus_bandwidth_is_unmeasured_from_results(
                results_recent)
        kwargs['desc_bw_obs_last'] = \
            cls.desc_bw_obs_last_from_results(results_recent)
        kwargs['desc_bw_obs_mean'] = \
            cls.desc_bw_obs_mean_from_results(results_recent)
        bwl = cls(node_id, bw, **kwargs)
        return bwl

    @classmethod
    def from_data(cls, data, fingerprint):
        """Build a line for *fingerprint* from a {fp: results} dict."""
        assert fingerprint in data
        return cls.from_results(data[fingerprint])

    @classmethod
    def from_bw_line_v1(cls, line):
        """Parse one v1.X.X Bandwidth Line string into a V3BWLine."""
        assert isinstance(line, str)
        kwargs = dict([kv.split(KEYVALUE_SEP_V1)
                       for kv in line.split(BW_KEYVALUE_SEP_V1)
                       if kv.split(KEYVALUE_SEP_V1)[0] in BW_KEYVALUES])
        for k, v in kwargs.items():
            if k in BW_KEYVALUES_INT:
                kwargs[k] = int(v)
        # pop combines lookup and deletion
        node_id = kwargs.pop('node_id')
        bw = kwargs.pop('bw')
        bw_line = cls(node_id, bw, **kwargs)
        return bw_line

    @staticmethod
    def results_away_each_other(results, secs_away=None):
        """Return *results* when at least one pair is more than *secs_away*
        seconds apart, otherwise ``None``.
        """
        # log.debug("Checking whether results are away from each other in %s "
        #           "secs.", secs_away)
        if secs_away is None or len(results) < 2:
            return results
        for a, b in combinations(results, 2):
            if abs(a.time - b.time) > secs_away:
                return results
        # log.debug("Results are NOT away from each other in at least %ss: %s",
        #           secs_away, [unixts_to_isodt_str(r.time) for r in results])
        return None

    @staticmethod
    def results_recent_than(results, secs_recent=None):
        """Filter out results older than *secs_recent* seconds."""
        if secs_recent is None:
            return results
        results_recent = list(filter(
                            lambda x: (now_unixts() - x.time) < secs_recent,
                            results))
        # if not results_recent:
        #     log.debug("Results are NOT more recent than %ss: %s",
        #               secs_recent,
        #               [unixts_to_isodt_str(r.time) for r in results])
        return results_recent

    @staticmethod
    def bw_median_from_results(results):
        """Median of all download bandwidths (Bytes/s), at least 1."""
        return max(round(median([dl['amount'] / dl['duration']
                                 for r in results for dl in r.downloads])), 1)

    @staticmethod
    def bw_mean_from_results(results):
        """Mean of all download bandwidths (Bytes/s), at least 1."""
        return max(round(mean([dl['amount'] / dl['duration']
                               for r in results for dl in r.downloads])), 1)

    @staticmethod
    def last_time_from_results(results):
        """ISO timestamp of the most recent result."""
        return unixts_to_isodt_str(round(max([r.time for r in results])))

    @staticmethod
    def rtt_from_results(results):
        """Median RTT in milliseconds, or None when there are no RTTs."""
        # convert from seconds to milliseconds
        rtts = [(round(rtt * 1000)) for r in results for rtt in r.rtts]
        rtt = round(median(rtts)) if rtts else None
        return rtt

    @staticmethod
    def result_types_from_results(results):
        """Map each result-type key to the number of results of that type."""
        rt_dict = dict([(result_type_to_key(rt.value),
                         num_results_of_type(results, rt.value))
                        for rt in _ResultType])
        return rt_dict

    @staticmethod
    def desc_bw_avg_from_results(results):
        """Obtain the last descriptor bandwidth average from the results."""
        for r in reversed(results):
            if r.relay_average_bandwidth is not None:
                return r.relay_average_bandwidth
        return None

    @staticmethod
    def desc_bw_bur_from_results(results):
        """Obtain the last descriptor bandwidth burst from the results."""
        for r in reversed(results):
            if r.relay_burst_bandwidth is not None:
                return r.relay_burst_bandwidth
        return None

    @staticmethod
    def consensus_bandwidth_from_results(results):
        """Obtain the last consensus bandwidth from the results."""
        for r in reversed(results):
            if r.consensus_bandwidth is not None:
                return r.consensus_bandwidth
        return None

    @staticmethod
    def consensus_bandwidth_is_unmeasured_from_results(results):
        """Obtain the last consensus unmeasured flag from the results."""
        for r in reversed(results):
            if r.consensus_bandwidth_is_unmeasured is not None:
                return r.consensus_bandwidth_is_unmeasured
        return None

    @staticmethod
    def desc_bw_obs_mean_from_results(results):
        """Mean of the descriptor observed bandwidths, at least 1, or None."""
        desc_bw_obs_ls = [r.relay_observed_bandwidth for r in results
                          if r.relay_observed_bandwidth is not None]
        if desc_bw_obs_ls:
            return max(round(mean(desc_bw_obs_ls)), 1)
        return None

    @staticmethod
    def desc_bw_obs_last_from_results(results):
        """Last descriptor observed bandwidth, or None."""
        # the last is at the end of the list
        for r in reversed(results):
            if r.relay_observed_bandwidth is not None:
                return r.relay_observed_bandwidth
        return None

    @property
    def bw_keyvalue_tuple_ls(self):
        """Return list of KeyValue Bandwidth Line tuples."""
        # sort the list to generate deterministic lines
        keyvalue_tuple_ls = sorted([(k, v) for k, v in self.__dict__.items()
                                    if k in BW_KEYVALUES])
        return keyvalue_tuple_ls

    @property
    def bw_keyvalue_v1str_ls(self):
        """Return list of KeyValue Bandwidth Line strings following
        spec v1.X.X.
        """
        bw_keyvalue_str = [KEYVALUE_SEP_V1.join([k, str(v)])
                           for k, v in self.bw_keyvalue_tuple_ls]
        return bw_keyvalue_str

    @property
    def bw_strv1(self):
        """Return Bandwidth Line string following spec v1.X.X."""
        bw_line_str = BW_KEYVALUE_SEP_V1.join(
                        self.bw_keyvalue_v1str_ls) + LINE_SEP
        if len(bw_line_str) > BW_LINE_SIZE:
            # if this is the case, probably there are too many KeyValues,
            # or the limit needs to be changed in Tor
            # log.warn is deprecated since Python 3.3; use log.warning
            log.warning("The bandwidth line %s is longer than %s",
                        len(bw_line_str), BW_LINE_SIZE)
        return bw_line_str
juga  's avatar
juga committed
693

juga  's avatar
juga committed
694

695
class V3BWFile(object):
    """
    Create a Bandwidth List file following spec version 1.X.X

    :param V3BWHeader v3bwheader: header
    :param list v3bwlines: V3BWLines
    """
    def __init__(self, v3bwheader, v3bwlines):
        # The header object and the list of per-relay bandwidth lines.
        self.header = v3bwheader
        self.bw_lines = v3bwlines

    def __str__(self):
        # Serialize the header first, then every bandwidth line in order.
        parts = [str(self.header)]
        parts.extend(str(bw_line) or '' for bw_line in self.bw_lines)
        return ''.join(parts)

juga  's avatar
juga committed
710
    @classmethod
    def from_results(cls, results, scanner_country=None,
                     destinations_countries=None, state_fpath='',
                     scale_constant=SBWS_SCALE_CONSTANT,
                     scaling_method=TORFLOW_SCALING,
                     torflow_obs=TORFLOW_OBS_LAST,
                     torflow_cap=TORFLOW_BW_MARGIN,
                     round_digs=PROP276_ROUND_DIG,
                     secs_recent=None, secs_away=None, min_num=0,
                     consensus_path=None, max_bw_diff_perc=MAX_BW_DIFF_PERC,
                     reverse=False):
        """Create V3BWFile class from sbws Results.

        :param dict results: see below
        :param str state_fpath: path to the state file
        :param int scaling_method:
            Scaling method to obtain the bandwidth
            Possible values: {None, SBWS_SCALING, TORFLOW_SCALING} = {0, 1, 2}
        :param int scale_constant: sbws scaling constant
        :param int torflow_obs: method to choose descriptor observed bandwidth
        :param bool reverse: whether to sort the bw lines descending or not

        Results are in the form::

            {'relay_fp1': [Result1, Result2, ...],
             'relay_fp2': [Result1, Result2, ...]}

        """
        log.info('Processing results to generate a bandwidth list file.')
        # Build the header from the results and the state file.
        header = V3BWHeader.from_results(results, scanner_country,
                                         destinations_countries, state_fpath)
        bw_lines_raw = []
        # Used for the header's progress statistics; None when the cached
        # consensus file can not be read.
        number_consensus_relays = cls.read_number_consensus_relays(
            consensus_path)
        state = State(state_fpath)
        for fp, values in results.items():
            # log.debug("Relay fp %s", fp)
            # One bandwidth line per relay; None when the relay's results do
            # not pass the time/number restrictions.
            line = V3BWLine.from_results(values, secs_recent, secs_away,
                                         min_num)
            if line is not None:
                bw_lines_raw.append(line)
        if not bw_lines_raw:
            log.info("After applying restrictions to the raw results, "
                     "there is not any. Scaling can not be applied.")
            # NOTE(review): ``update_progress`` is defined as an instance
            # method but is called with ``cls`` passed explicitly as ``self``;
            # it only uses sibling static methods, so this works — confirm
            # intent.
            cls.update_progress(
                cls, bw_lines_raw, header, number_consensus_relays, state)
            return cls(header, [])
        if scaling_method == SBWS_SCALING:
            bw_lines = cls.bw_sbws_scale(bw_lines_raw, scale_constant)
            cls.warn_if_not_accurate_enough(bw_lines, scale_constant)
            # log.debug(bw_lines[-1])
            # NOTE(review): unlike the Torflow branch below, progress is not
            # updated for sbws scaling — confirm whether that is deliberate.
        elif scaling_method == TORFLOW_SCALING:
            bw_lines = cls.bw_torflow_scale(bw_lines_raw, torflow_obs,
                                            torflow_cap, round_digs)
            # log.debug(bw_lines[-1])
            cls.update_progress(
                cls, bw_lines, header, number_consensus_relays, state)
        else:
            # No scaling method: just convert the raw bandwidths to KB.
            bw_lines = cls.bw_kb(bw_lines_raw)
            # log.debug(bw_lines[-1])
        # Not using the result for now, just warning
        cls.is_max_bw_diff_perc_reached(bw_lines, max_bw_diff_perc)
        f = cls(header, bw_lines)
        return f

775
    @classmethod
    def from_v1_fpath(cls, fpath):
        """Parse a bandwidth file in v1.X.X format and return a V3BWFile."""
        log.info('Parsing bandwidth file %s', fpath)
        with open(fpath) as source:
            content = source.read()
        raw_lines = content.split(LINE_SEP)
        header, line_strs = V3BWHeader.from_lines_v1(raw_lines)
        parsed_lines = []
        for line_str in line_strs:
            parsed_lines.append(V3BWLine.from_bw_line_v1(line_str))
        return cls(header, parsed_lines)

785
786
787
788
789
790
791
    @classmethod
    def from_v100_fpath(cls, fpath):
        """Parse a bandwidth file in the old v1.0.0 format, sorting the
        bandwidth lines by bw value."""
        log.info('Parsing bandwidth file %s', fpath)
        with open(fpath) as source:
            content = source.read()
        raw_lines = content.split(LINE_SEP)
        header, line_strs = V3BWHeader.from_lines_v100(raw_lines)
        parsed_lines = [V3BWLine.from_bw_line_v1(line_str)
                        for line_str in line_strs]
        parsed_lines.sort(key=lambda line: line.bw)
        return cls(header, parsed_lines)

juga  's avatar
juga committed
796
797
798
799
800
801
802
    @staticmethod
    def bw_kb(bw_lines, reverse=False):
        bw_lines_scaled = copy.deepcopy(bw_lines)
        for l in bw_lines_scaled:
            l.bw = max(round(l.bw / 1000), 1)
        return sorted(bw_lines_scaled, key=lambda x: x.bw, reverse=reverse)

juga  's avatar
juga committed
803
    @staticmethod
    def bw_sbws_scale(bw_lines, scale_constant=SBWS_SCALE_CONSTANT,
                      reverse=False):
        """Return a new V3BwLine list scaled using sbws method.

        :param list bw_lines:
            bw lines to scale, not self.bw_lines,
            since this method will be before self.bw_lines have been
            initialized.
        :param int scale_constant:
            the constant to multiply by the ratio and
            the bandwidth to obtain the new bandwidth
        :returns list: V3BwLine list
        """
        log.debug('Scaling bandwidth using sbws method.')
        network_median = median([line.bw for line in bw_lines])
        scaled_lines = copy.deepcopy(bw_lines)
        for line in scaled_lines:
            # Scale relative to the network median, but cap the result at the
            # descriptor average-bandwidth (min) and keep it >= 1 (max).
            capped = min(line.desc_bw_avg,
                         line.bw * scale_constant / network_median)
            line.bw = max(round(capped / 1000), 1)
        return sorted(scaled_lines, key=lambda line: line.bw, reverse=reverse)

    @staticmethod
    def warn_if_not_accurate_enough(bw_lines,
                                    scale_constant=SBWS_SCALE_CONSTANT):
        """Log how far the median scaled bw is from the scale constant,
        warning when it deviates by more than 0.1%."""
        margin = 0.001
        accuracy_ratio = median(
            [line.bw for line in bw_lines]) / scale_constant
        error_perc = (1 - accuracy_ratio) * 100
        log.info('The generated lines are within {:.5}% of what they should '
                 'be'.format(error_perc))
        if not (1 - margin <= accuracy_ratio <= 1 + margin):
            log.warning('There was %f%% error and only +/- %f%% is '
                        'allowed', error_perc, margin * 100)

839
    @staticmethod
    def is_max_bw_diff_perc_reached(bw_lines,
                                    max_bw_diff_perc=MAX_BW_DIFF_PERC):
        """Return whether the difference between the total consensus
        bandwidth and the total measured bandwidth exceeds
        ``max_bw_diff_perc``.

        :param list bw_lines: the scaled bandwidth lines.
        :param int max_bw_diff_perc: maximum percentage difference allowed.
        :returns bool: True when the threshold is exceeded.
        """
        sum_consensus_bw = sum([l.desc_bw_obs_last for l in bw_lines])
        sum_bw = sum([l.bw for l in bw_lines])
        # NOTE(review): this is the min/max ratio (a similarity measure),
        # not an absolute difference, although the log message says
        # "difference" — confirm intent before changing the math.
        diff = min(sum_consensus_bw, sum_bw) / max(sum_consensus_bw, sum_bw)
        diff_perc = diff * 100
        log.info("The difference between the total consensus bandwidth "
                 "and the total measured bandwidth is %s%% percent",
                 diff_perc)
        # Fixed: compare against the ``max_bw_diff_perc`` argument, not the
        # module constant, so callers can pass a custom threshold.
        if diff_perc > max_bw_diff_perc:
            log.warning("It is more than %s%%", max_bw_diff_perc)
            return True
        return False

854
    @staticmethod
    def bw_torflow_scale(bw_lines, desc_bw_obs_type=TORFLOW_OBS_MEAN,
                         cap=TORFLOW_BW_MARGIN,
                         num_round_dig=PROP276_ROUND_DIG, reverse=False):
        """
        Obtain final bandwidth measurements applying Torflow's scaling
        method.

        From Torflow's README.spec.txt (section 2.2)::

            In this way, the resulting network status consensus bandwidth values  # NOQA
            are effectively re-weighted proportional to how much faster the node  # NOQA
            was as compared to the rest of the network.

        The variables and steps used in Torflow:

        **strm_bw**::

            The strm_bw field is the average (mean) of all the streams for the relay  # NOQA
            identified by the fingerprint field.
            strm_bw = sum(bw stream x)/|n stream|

        **filt_bw**::

            The filt_bw field is computed similarly, but only the streams equal to  # NOQA
            or greater than the strm_bw are counted in order to filter very slow  # NOQA
            streams due to slow node pairings.

        **filt_sbw and strm_sbw**::

            for rs in RouterStats.query.filter(stats_clause).\
                  options(eagerload_all('router.streams.circuit.routers')).all():  # NOQA
              tot_sbw = 0
              sbw_cnt = 0
              for s in rs.router.streams:
                if isinstance(s, ClosedStream):
                  skip = False
                  #for br in badrouters:
                  #  if br != rs:
                  #    if br.router in s.circuit.routers:
                  #      skip = True
                  if not skip:
                    # Throw out outliers < mean
                    # (too much variance for stddev to filter much)
                    if rs.strm_closed == 1 or s.bandwidth() >= rs.sbw:
                      tot_sbw += s.bandwidth()
                      sbw_cnt += 1

            if sbw_cnt: rs.filt_sbw = tot_sbw/sbw_cnt
            else: rs.filt_sbw = None

        **filt_avg, and strm_avg**::

            Once we have determined the most recent measurements for each node, we  # NOQA
            compute an average of the filt_bw fields over all nodes we have measured.  # NOQA

        ::

            filt_avg = sum(map(lambda n: n.filt_bw, nodes.itervalues()))/float(len(nodes))  # NOQA
            strm_avg = sum(map(lambda n: n.strm_bw, nodes.itervalues()))/float(len(nodes))  # NOQA

        **true_filt_avg and true_strm_avg**::

            for cl in ["Guard+Exit", "Guard", "Exit", "Middle"]:
                true_filt_avg[cl] = filt_avg
                true_strm_avg[cl] = strm_avg

        In the non-pid case, all types of nodes get the same avg

        **n.fbw_ratio and n.fsw_ratio**::

            for n in nodes.itervalues():
                n.fbw_ratio = n.filt_bw/true_filt_avg[n.node_class()]
                n.sbw_ratio = n.strm_bw/true_strm_avg[n.node_class()]

        **n.ratio**::

            These averages are used to produce ratios for each node by dividing the  # NOQA
            measured value for that node by the network average.

        ::

            # Choose the larger between sbw and fbw
              if n.sbw_ratio > n.fbw_ratio:
                n.ratio = n.sbw_ratio
              else:
                n.ratio = n.fbw_ratio

        **desc_bw**:

        It is the minimum of all the descriptor bandwidth values::

            bws = map(int, g)
            bw_observed = min(bws)

            return Router(ns.idhex, ns.nickname, bw_observed, dead, exitpolicy,
            ns.flags, ip, version, os, uptime, published, contact, rate_limited,  # NOQA
            ns.orhash, ns.bandwidth, extra_info_digest, ns.unmeasured)

            self.desc_bw = max(bw,1) # Avoid div by 0

        **new_bw**::

            These ratios are then multiplied by the most recent observed descriptor  # NOQA
            bandwidth we have available for each node, to produce a new value for  # NOQA
            the network status consensus process.

        ::

            n.new_bw = n.desc_bw*n.ratio

        The descriptor observed bandwidth is multiplied by the ratio.

        **Limit the bandwidth to a maximum**::

            NODE_CAP = 0.05

        ::

            if n.new_bw > tot_net_bw*NODE_CAP:
              plog("INFO", "Clipping extremely fast "+n.node_class()+" node "+n.idhex+"="+n.nick+  # NOQA
                   " at "+str(100*NODE_CAP)+"% of network capacity ("+
                   str(n.new_bw)+"->"+str(int(tot_net_bw*NODE_CAP))+") "+
                   " pid_error="+str(n.pid_error)+
                   " pid_error_sum="+str(n.pid_error_sum))
              n.new_bw = int(tot_net_bw*NODE_CAP)

        However, tot_net_bw does not seems to be updated when not using pid.
        This clipping would make faster relays to all have the same value.

        All of that can be expressed as:

        .. math::

           bwn_i =& min\\left(bwnew_i,
                      \\sum_{i=1}^{n}bwnew_i \\times 0.05\\right) \\

                 &= min\\left(
                      \\left(min\\left(bwobs_i, bwavg_i, bwbur_i \\right) \\times r_i\\right),
                        \\sum_{i=1}^{n}\\left(min\\left(bwobs_i, bwavg_i, bwbur_i \\right) \\times r_i\\right)
                        \\times 0.05\\right)\\

                 &= min\\left(
                      \\left(min\\left(bwobs_i, bwavg_i, bwbur_i \\right) \\times max\\left(rf_i, rs_i\\right)\\right),
                        \\sum_{i=1}^{n}\\left(min\\left(bwobs_i, bwavg_i, bwbur_i \\right) \\times
                          max\\left(rf_i, rs_i\\right)\\right) \\times 0.05\\right)\\

                 &= min\\left(
                      \\left(min\\left(bwobs_i, bwavg_i, bwbur_i \\right) \\times max\\left(\\frac{bwfilt_i}{bwfilt},
                          \\frac{bw_i}{bwstrm}\\right)\\right),
                        \\sum_{i=1}^{n}\\left(min\\left(bwobs_i, bwavg_i, bwbur_i \\right) \\times
                          max\\left(\\frac{bwfilt_i}{bwfilt},
                            \\frac{bw_i}{bwstrm}\\right)\\right) \\times 0.05\\right)

        """
        log.info("Calculating relays' bandwidth using Torflow method.")
        bw_lines_tf = copy.deepcopy(bw_lines)
        # mean (Torflow's strm_avg)
        mu = mean([l.bw_mean for l in bw_lines])
        # filtered mean (Torflow's filt_avg)
        muf = mean([max(l.bw_mean, mu) for l in bw_lines])
        # bw sum (Torflow's tot_net_bw or tot_sbw)
        sum_bw = sum([l.bw_mean for l in bw_lines])
        # Torflow's clipping limit.
        # Fixed: previously the module constant TORFLOW_BW_MARGIN was used
        # here even when the caller passed a different ``cap``; behavior is
        # unchanged for the default ``cap``.
        hlimit = sum_bw * (cap if cap is not None else TORFLOW_BW_MARGIN)
        log.debug('sum %s', sum_bw)
        log.debug('mu %s', mu)
        log.debug('muf %s', muf)
        log.debug('hlimit %s', hlimit)
        for l in bw_lines_tf:
            # NOTE(review): ``desc_bw_obs`` is only bound for the two known
            # observation types; any other value would raise NameError below.
            if desc_bw_obs_type == TORFLOW_OBS_LAST:
                desc_bw_obs = l.desc_bw_obs_last
            elif desc_bw_obs_type == TORFLOW_OBS_MEAN:
                desc_bw_obs = l.desc_bw_obs_mean
            # Excerpt from bandwidth-file-spec.txt section 2.3
            # A relay's MaxAdvertisedBandwidth limits the bandwidth-avg in its
            # descriptor.
            # Therefore generators MUST limit a relay's measured bandwidth to
            # its descriptor's bandwidth-avg.
            # Generators SHOULD NOT limit measured bandwidths based on
            # descriptors' bandwidth-observed, because that penalises new
            # relays.
            # See https://trac.torproject.org/projects/tor/ticket/8494
            if l.desc_bw_bur is not None:
                # Because in previous versions results were not storing
                # desc_bw_bur
                desc_bw = min(desc_bw_obs, l.desc_bw_bur, l.desc_bw_avg)
            else:
                desc_bw = min(desc_bw_obs, l.desc_bw_avg)
            # In previous versions results were not storing consensus_bandwidth
            if l.consensus_bandwidth_is_unmeasured \
                    or l.consensus_bandwidth is None:
                min_bandwidth = desc_bw
            # If the relay is measured, use the minimum between the descriptors
            # bandwidth and the consensus bandwidth, so that
            # MaxAdvertisedBandwidth limits the consensus weight
            # The consensus bandwidth in a measured relay has been obtained
            # doing the same calculation as here
            else:
                min_bandwidth = min(desc_bw, l.consensus_bandwidth)
            # Torflow's scaling
            ratio_stream = l.bw_mean / mu
            ratio_stream_filtered = max(l.bw_mean, mu) / muf
            ratio = max(ratio_stream, ratio_stream_filtered)
            bw_scaled = ratio * min_bandwidth
            # round and convert to KB
            bw_new = kb_round_x_sig_dig(bw_scaled, digits=num_round_dig)
            # Cap maximum bw
            if cap is not None:
                bw_new = min(hlimit, bw_new)
            # avoid 0
            l.bw = max(bw_new, 1)
        return sorted(bw_lines_tf, key=lambda x: x.bw, reverse=reverse)
1067

1068
    @staticmethod
    def read_number_consensus_relays(consensus_path):
        """Read the number of relays in the Network from the cached consensus
        file."""
        relay_count = None
        try:
            relay_count = sum(1 for _ in parse_file(consensus_path))
        except (FileNotFoundError, AttributeError):
            log.info("It is not possible to obtain statistics about the "
                     "percentage of measured relays because the cached "
                     "consensus file is not found.")
        log.debug("Number of relays in the network %s", relay_count)
        return relay_count

1082
    @staticmethod
    def measured_progress_stats(bw_lines, number_consensus_relays,
                                min_perc_reached_before):
        """ Statistics about measurements progress,
        to be included in the header.

        :param list bw_lines: the bw_lines after scaling and applying filters.
        :param str consensus_path: the path to the cached consensus file.
        :param str state_fpath: the path to the state file
        :returns dict, bool: Statistics about the progress made with
            measurements and whether the percentage of measured relays has been
            reached.

        """
        # cached-consensus should be updated every time that scanner get the
        # network status or descriptors?
        # It will not be updated to the last consensus, but the list of
        # measured relays is not either.
        assert isinstance(number_consensus_relays, int)
        assert isinstance(bw_lines, list)
        num_eligible = len(bw_lines)
        min_eligible = round(number_consensus_relays * MIN_REPORT / 100)
        statsd = {
            'number_eligible_relays': num_eligible,
            'number_consensus_relays': number_consensus_relays,
            'minimum_number_eligible_relays': min_eligible,
            'percent_eligible_relays': round(
                num_eligible * 100 / number_consensus_relays),
            'minimum_percent_eligible_relays': MIN_REPORT,
        }
        if num_eligible >= min_eligible:
            return statsd, True
        # if min percent was was reached before, warn
        # otherwise, debug
        if min_perc_reached_before is not None:
            log.warning('The percentage of the measured relays is less '
                        'than the %s%% of the relays in the network (%s).',
                        MIN_REPORT, statsd['number_consensus_relays'])
        else:
            log.info('The percentage of the measured relays is less '
                     'than the %s%% of the relays in the network (%s).',
                     MIN_REPORT, statsd['number_consensus_relays'])
        return statsd, False

    @property
    def is_min_perc(self):
juga  's avatar
juga committed
1127
1128
        if getattr(self.header, 'number_eligible_relays', 0) \
                < getattr(self.header, 'minimum_number_eligible_relays', 0):
1129
1130
1131
            return False
        return True

juga  's avatar
juga committed
1132
    @property
juga  's avatar
juga committed
1133
1134
    def sum_bw(self):
        return sum([l.bw for l in self.bw_lines])
juga  's avatar
juga committed
1135
1136

    @property
juga  's avatar
juga committed
1137
    def num(self):
juga  's avatar
juga committed
1138
1139
1140
        return len(self.bw_lines)

    @property
juga  's avatar
juga committed
1141
1142
1143
1144
1145
1146
    def mean_bw(self):
        return mean([l.bw for l in self.bw_lines])

    @property
    def median_bw(self):
        return median([l.bw for l in self.bw_lines])
juga  's avatar
juga committed
1147

juga  's avatar
juga committed
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
    @property
    def max_bw(self):
        return max([l.bw for l in self.bw_lines])

    @property
    def min_bw(self):
        return min([l.bw for l in self.bw_lines])

    @property
    def info_stats(self):
        if not self.bw_lines:
            return
        [log.info(': '.join([attr, str(getattr(self, attr))])) for attr in
         ['sum_bw', 'mean_bw', 'median_bw', 'num',
          'max_bw', 'min_bw']]

juga  's avatar
juga committed
1164
1165
    def update_progress(self, bw_lines, header, number_consensus_relays,
                        state):
1166
        min_perc_reached_before = state.get('min_perc_reached')
juga  's avatar
juga committed
1167
        if number_consensus_relays is not None:
1168
            statsd, success = self.measured_progress_stats(
juga  's avatar
juga committed
1169
                bw_lines, number_consensus_relays, min_perc_reached_before)
1170
1171
            # add statistics about progress always
            header.add_stats(**statsd)
1172
1173
1174
1175
1176
1177
1178
            if not success:
                bw_lines = []
                state['min_perc_reached'] = None
            else:
                state['min_perc_reached'] = now_isodt_str()
        return bw_lines

juga  's avatar
juga committed
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
    def bw_line_for_node_id(self, node_id):
        """Returns the bandwidth line for a given node fingerprint.

        Used to combine data when plotting.
        """
        bwl = [l for l in self.bw_lines if l.node_id == node_id]
        if bwl:
            return bwl[0]
        return None

1189
1190
1191
1192
1193
1194
1195
1196
1197
    def to_plt(self, attrs=['bw'], sorted_by=None):
        """Return bandwidth data in a format useful for matplotlib.

        Used from external tool to plot.
        """
        x = [i for i in range(0, self.num)]
        ys = [[getattr(l, k) for l in self.bw_lines] for k in attrs]
        return x, ys, attrs

juga  's avatar
juga committed
1198
1199
1200
1201
1202
    def write(self, output):
        """Write the bandwidth file to ``output`` and atomically update the
        ``latest.v3bw`` symlink in the same directory.

        :param str output: path of the archive file to write; writing to
            stdout is not supported.
        """
        if output == '/dev/stdout':
            log.info("Writing to stdout is not supported.")
            return
        log.info('Writing v3bw file to %s', output)
        # To avoid inconsistent reads, the bandwidth data is written to an
        # archive path, then atomically symlinked to 'latest.v3bw'
        out_dir = os.path.dirname(output)
        out_link = os.path.join(out_dir, 'latest.v3bw')
        out_link_tmp = out_link + '.tmp'
        # Hold the directory lock while writing and re-linking so readers
        # never see a partially written file behind the symlink.
        with DirectoryLock(out_dir):
            with open(output, 'wt') as fd:
                fd.write(str(self.header))
                for line in self.bw_lines:
                    fd.write(str(line))
            output_basename = os.path.basename(output)
            # To atomically symlink a file, we need to create a temporary link,
            # then rename it to the final link name. (POSIX guarantees that
            # rename is atomic.)
            log.debug('Creating symlink {} -> {}.'
                      .format(out_link_tmp, output_basename))
            os.symlink(output_basename, out_link_tmp)
            log.debug('Renaming symlink {} -> {} to {} -> {}.'
                      .format(out_link_tmp, output_basename,
                              out_link, output_basename))
            os.rename(out_link_tmp, out_link)