# -*- coding: utf-8 -*-
"""Classes and functions that create the bandwidth measurements document
(v3bw) used by bandwidth authorities."""

import copy
import logging
import math
import os
from itertools import combinations
from statistics import median, mean
from stem.descriptor import parse_file

from sbws import __version__
from sbws.globals import (SPEC_VERSION, BW_LINE_SIZE, SBWS_SCALE_CONSTANT,
                          TORFLOW_SCALING, SBWS_SCALING, TORFLOW_BW_MARGIN,
                          TORFLOW_OBS_LAST, TORFLOW_OBS_MEAN,
                          PROP276_ROUND_DIG, MIN_REPORT, MAX_BW_DIFF_PERC)
from sbws.lib.resultdump import ResultSuccess, _ResultType
from sbws.util.filelock import DirectoryLock
from sbws.util.timestamp import (now_isodt_str, unixts_to_isodt_str,
                                 now_unixts)
from sbws.util.state import State

log = logging.getLogger(__name__)

LINE_SEP = '\n'
KEYVALUE_SEP_V1 = '='
KEYVALUE_SEP_V2 = ' '

# NOTE: in a future refactor, make all the KeyValues a dictionary with their
# type, so that it's more similar to the stem parser.

# Header KeyValues
# =================
# List of the extra KeyValues accepted by the class
EXTRA_ARG_KEYVALUES = ['software', 'software_version', 'file_created',
                       'earliest_bandwidth', 'generator_started',
                       'scanner_country', 'destinations_countries']
# number_eligible_relays is the number of relays that end up in the bandwidth
# file, ie, that have not been excluded by one of the filters in section 4
# below.
# It should be called recent_measurement_included_count to be congruent
# with the other KeyValues.
STATS_KEYVALUES = ['number_eligible_relays', 'minimum_number_eligible_relays',
                   'number_consensus_relays', 'percent_eligible_relays',
                   'minimum_percent_eligible_relays']
# Added in #29591
BW_HEADER_KEYVALUES_MONITOR = [
    # 1.1 header: the number of different consensuses that sbws has seen
    # in the last 5 days
    'recent_consensus_count',
    # 2.4 Number of times a priority list has been created
    'recent_priority_list_count',
    # 2.5 Number of relays that were in a priority list
    # [50, number of relays in the network * 0.05]
    'recent_priority_relay_count',
    # 3.6 header: the number of times that sbws has tried to measure any relay
    # in the last 5 days.
    # This would be the number of times relays were in a priority list.
    'recent_measurement_attempt_count',
    # 3.7 header: the number of times that sbws has tried to measure any relay
    # in the last 5 days, but the measurement did not work.
    # This should be the number of attempts - number of ResultSuccess -
    # something else we don't know yet.
    # So far it is the number of ResultError.
    'recent_measurement_failure_count',
    # The number of success results should be
    # the number of attempts - the number of failures.
    # 4.6 header: the number of successful results, created in the last 5 days,
    # that were excluded by a filter.
    # This is the sum of the following 3 + the not-success results.
    # 'recent_measurement_exclusion_count',
    'recent_measurement_exclusion_not_distanciated_count',
    'recent_measurement_exclusion_not_recent_count',
    'recent_measurement_exclusion_not_min_num_count',
]
BANDWIDTH_HEADER_KEY_VALUES_INIT = \
    ['earliest_bandwidth', 'generator_started',
     'scanner_country', 'destinations_countries']\
    + STATS_KEYVALUES \
    + BW_HEADER_KEYVALUES_MONITOR

KEYVALUES_INT = STATS_KEYVALUES + BW_HEADER_KEYVALUES_MONITOR
# List of all unordered KeyValues currently being used to generate the file
UNORDERED_KEYVALUES = EXTRA_ARG_KEYVALUES + STATS_KEYVALUES + \
                      ['latest_bandwidth'] + \
                      BW_HEADER_KEYVALUES_MONITOR
# List of all the KeyValues currently being used to generate the file
ALL_KEYVALUES = ['version'] + UNORDERED_KEYVALUES

TERMINATOR = '====='

# Bandwidth Lines KeyValues
# =========================
# Num header lines in v1.X.X using all the KeyValues
NUM_LINES_HEADER_V1 = len(ALL_KEYVALUES) + 2
LINE_TERMINATOR = TERMINATOR + LINE_SEP

# KeyValue separator in Bandwidth Lines
BW_KEYVALUE_SEP_V1 = ' '
# not including the extra bws in the files for now
BW_KEYVALUES_BASIC = ['node_id', 'bw']
BW_KEYVALUES_FILE = BW_KEYVALUES_BASIC + \
                    ['master_key_ed25519', 'nick', 'rtt', 'time',
                     'success', 'error_stream', 'error_circ', 'error_misc',
                     # Added in #292951
                     'error_second_relay', 'error_destination']
BW_KEYVALUES_EXTRA_BWS = ['bw_median', 'bw_mean', 'desc_bw_avg', 'desc_bw_bur',
                          'desc_bw_obs_last', 'desc_bw_obs_mean',
                          'consensus_bandwidth',
                          'consensus_bandwidth_is_unmeasured']
# Added in #292951
BANDWIDTH_LINE_KEY_VALUES_MONITOR = [
    # 1.2 relay: the number of different consensuses that sbws has seen
    # in the last 5 days that include this relay
    'relay_in_recent_consensus_count',
    # 3.8 relay: the number of times that sbws has tried to measure
    # this relay in the last 5 days.
    # This would be the number of times a relay was in a priority list (2.6),
    # since once it gets measured, it either returns ResultError,
    # ResultSuccess or something else happened that we don't know yet.
    'relay_recent_measurement_attempt_count',
    # 3.9 relay: the number of times that sbws has tried to measure
    # this relay in the last 5 days, but the measurement did not work.
    # This should be the number of attempts - number of ResultSuccess -
    # something else we don't know yet.
    # So far it is the number of ResultError.
    'relay_recent_measurement_failure_count',
    # The number of success results should be
    # the number of attempts - the number of failures.
    # 4.8 relay: the number of successful results, created in the last 5 days,
    # that were excluded by a rule, for this relay.
    # This would be the sum of the following 3 + the number of not-success
    # results.
    'relay_recent_measurement_exclusion_count',
    'relay_recent_measurement_exclusion_not_distanciated',
    'relay_recent_measurement_exclusion_not_recent_count',
    'relay_recent_measurement_exclusion_not_min_num_count',
]
BW_KEYVALUES_EXTRA = BW_KEYVALUES_FILE + BW_KEYVALUES_EXTRA_BWS \
               + BANDWIDTH_LINE_KEY_VALUES_MONITOR
BW_KEYVALUES_INT = ['bw', 'rtt', 'success', 'error_stream',
                    'error_circ', 'error_misc'] + BW_KEYVALUES_EXTRA_BWS \
                   + BANDWIDTH_LINE_KEY_VALUES_MONITOR
BW_KEYVALUES = BW_KEYVALUES_BASIC + BW_KEYVALUES_EXTRA
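
# Illustrative sketch of the generated format (not part of the original
# source): a v1.X.X document produced with the separators above looks
# roughly like
#
#   <unix_timestamp>
#   version=1.X.X
#   software=sbws
#   ... more header KeyValues joined with KEYVALUE_SEP_V1 ('=') ...
#   =====
#   node_id=$<40-hex-fingerprint> bw=<int> nick=<nickname> ...
#
# where '=====' is TERMINATOR and Bandwidth Line KeyValues are separated
# from each other by BW_KEYVALUE_SEP_V1 (' ').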


def round_sig_dig(n, digits=PROP276_ROUND_DIG):
    """Round n to 'digits' significant digits in front of the decimal point.
       Results less than or equal to 1 are rounded to 1.
       Returns an integer.

       digits must be greater than 0.
       n must be less than or equal to 2**73, to avoid floating point errors.
       """
    digits = int(digits)
    assert digits >= 1
    if n <= 1:
        return 1
    digits_in_n = int(math.log10(n)) + 1
    round_digits = max(digits_in_n - digits, 0)
    rounded_n = round(n, -round_digits)
    return int(rounded_n)

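# A minimal illustration of round_sig_dig (not from the original source):
# with digits=3, 54321 -> 54300 and 123456 -> 123000; values <= 1 return 1.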

def kb_round_x_sig_dig(bw_bs, digits=PROP276_ROUND_DIG):
    """Convert bw_bs from bytes to kilobytes, and round the result to
       'digits' significant digits.
       Results less than or equal to 1 are rounded up to 1.
       Returns an integer.

       digits must be greater than 0.
       bw_bs must be less than or equal to 2**82, to avoid floating point errors.
       """
    # avoid double-rounding by using floating-point
    bw_kb = bw_bs / 1000.0
    return round_sig_dig(bw_kb, digits=digits)
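
# A minimal illustration of kb_round_x_sig_dig (not from the original source):
# with digits=3, 1234567 bytes -> 1234.567 KB -> 1230, and 500 bytes -> 1.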


def num_results_of_type(results, type_str):
    return len([r for r in results if r.type == type_str])


# Better way to use enums?
def result_type_to_key(type_str):
    return type_str.replace('-', '_')


class V3BWHeader(object):
    """
    Create a bandwidth measurements (V3bw) header
    following bandwidth measurements document spec version 1.X.X.

    :param str timestamp: timestamp in Unix Epoch seconds of the most recent
        generator result.
    :param str version: the spec version
    :param str software: the name of the software that generates this
    :param str software_version: the version of the software
    :param dict kwargs: extra headers. Currently supported:

        - earliest_bandwidth: str, ISO 8601 timestamp in UTC time zone
          when the first bandwidth was obtained
        - generator_started: str, ISO 8601 timestamp in UTC time zone
          when the generator started
    """
    def __init__(self, timestamp, **kwargs):
        assert isinstance(timestamp, str)
        for v in kwargs.values():
            assert isinstance(v, str)
        self.timestamp = timestamp
        # KeyValues with default value when not given by kwargs
        self.version = kwargs.get('version', SPEC_VERSION)
        self.software = kwargs.get('software', 'sbws')
        self.software_version = kwargs.get('software_version', __version__)
        self.file_created = kwargs.get('file_created', now_isodt_str())
        # latest_bandwidth should not be in kwargs, since it MUST be the
        # same as timestamp
        self.latest_bandwidth = unixts_to_isodt_str(timestamp)
        [setattr(self, k, v) for k, v in kwargs.items()
         if k in BANDWIDTH_HEADER_KEY_VALUES_INIT]

    def __str__(self):
        if self.version.startswith('1.'):
            return self.strv1
        return self.strv2

    @classmethod
    def from_results(cls, results, scanner_country=None,
                     destinations_countries=None, state_fpath=''):
        kwargs = dict()
        latest_bandwidth = cls.latest_bandwidth_from_results(results)
        earliest_bandwidth = cls.earliest_bandwidth_from_results(results)
        # NOTE: Blocking, reads file
        generator_started = cls.generator_started_from_file(state_fpath)
        recent_consensus_count = cls.consensus_count_from_file(state_fpath)
        timestamp = str(latest_bandwidth)
        kwargs['latest_bandwidth'] = unixts_to_isodt_str(latest_bandwidth)
        kwargs['earliest_bandwidth'] = unixts_to_isodt_str(earliest_bandwidth)
        if generator_started is not None:
            kwargs['generator_started'] = generator_started
        # To be compatible with older bandwidth files, do not require it.
        if scanner_country is not None:
            kwargs['scanner_country'] = scanner_country
        if destinations_countries is not None:
            kwargs['destinations_countries'] = destinations_countries
        if recent_consensus_count is not None:
            kwargs['recent_consensus_count'] = str(recent_consensus_count)
        h = cls(timestamp, **kwargs)
        return h

    @classmethod
    def from_lines_v1(cls, lines):
        """
        :param list lines: list of lines to parse
        :returns: tuple of V3BWHeader object and non-header lines
        """
        assert isinstance(lines, list)
        try:
            index_terminator = lines.index(TERMINATOR)
        except ValueError:
            # it is not a bw file or it is a v100 bw file
            log.warning('Terminator is not in lines')
            return None
        ts = lines[0]
        kwargs = dict([l.split(KEYVALUE_SEP_V1)
                       for l in lines[:index_terminator]
                       if l.split(KEYVALUE_SEP_V1)[0] in ALL_KEYVALUES])
        h = cls(ts, **kwargs)
        # the last line is an empty line due to the trailing newline
        return h, lines[index_terminator + 1:-1]

    @classmethod
    def from_text_v1(cls, text):
        """
        :param str text: text to parse
        :returns: tuple of V3BWHeader object and non-header lines
        """
        assert isinstance(text, str)
        return cls.from_lines_v1(text.split(LINE_SEP))

    @classmethod
    def from_lines_v100(cls, lines):
        """
        :param list lines: list of lines to parse
        :returns: tuple of V3BWHeader object and non-header lines
        """
        assert isinstance(lines, list)
        h = cls(lines[0])
        # the last line is an empty line due to the trailing newline
        return h, lines[1:-1]

    @staticmethod
    def generator_started_from_file(state_fpath):
        '''
        ISO formatted timestamp for the time when the scanner process most
        recently started.
        '''
        state = State(state_fpath)
        if 'scanner_started' in state:
            return state['scanner_started']
        else:
            return None

    @staticmethod
    def consensus_count_from_file(state_fpath):
        state = State(state_fpath)
        if 'recent_consensus_count' in state:
            return state['recent_consensus_count']
        else:
            return None

    @staticmethod
    def latest_bandwidth_from_results(results):
        return round(max([r.time for fp in results for r in results[fp]]))

    @staticmethod
    def earliest_bandwidth_from_results(results):
        return round(min([r.time for fp in results for r in results[fp]]))

    @property
    def keyvalue_unordered_tuple_ls(self):
        """Return list of KeyValue tuples that do not have specific order."""
        # sort the list to generate deterministic headers
        keyvalue_tuple_ls = sorted([(k, v) for k, v in self.__dict__.items()
                                    if k in UNORDERED_KEYVALUES])
        return keyvalue_tuple_ls

    @property
    def keyvalue_tuple_ls(self):
        """Return list of all KeyValue tuples"""
        return [('version', self.version)] + self.keyvalue_unordered_tuple_ls

    @property
    def keyvalue_v1str_ls(self):
        """Return KeyValue list of strings following spec v1.X.X."""
        keyvalues = [self.timestamp] + [KEYVALUE_SEP_V1.join([k, v])
                                        for k, v in self.keyvalue_tuple_ls]
        return keyvalues

    @property
    def strv1(self):
        """Return header string following spec v1.X.X."""
        header_str = LINE_SEP.join(self.keyvalue_v1str_ls) + LINE_SEP + \
            LINE_TERMINATOR
        return header_str

    @property
    def keyvalue_v2_ls(self):
        """Return KeyValue list of strings following spec v2.X.X."""
        keyvalue = [self.timestamp] + [KEYVALUE_SEP_V2.join([k, v])
                                       for k, v in self.keyvalue_tuple_ls]
        return keyvalue

    @property
    def strv2(self):
        """Return header string following spec v2.X.X."""
        header_str = LINE_SEP.join(self.keyvalue_v2_ls) + LINE_SEP + \
            LINE_TERMINATOR
        return header_str

    @property
    def num_lines(self):
        return len(self.__str__().split(LINE_SEP))

    def add_stats(self, **kwargs):
        # Using kwargs because attributes might change.
        [setattr(self, k, str(v)) for k, v in kwargs.items()
         if k in STATS_KEYVALUES]


class V3BWLine(object):
    """
    Create a Bandwidth List line following the spec version 1.X.X.

    :param str node_id:
    :param int bw:
    :param dict kwargs: extra headers. Currently supported:

        - nickname, str
        - master_key_ed25519, str
        - rtt, int
        - time, str
        - success, int
        - error_stream, int
        - error_circ, int
        - error_misc, int
    """
    def __init__(self, node_id, bw, **kwargs):
        assert isinstance(node_id, str)
        assert isinstance(bw, int)
        assert node_id.startswith('$')
        self.node_id = node_id
        self.bw = bw
        [setattr(self, k, v) for k, v in kwargs.items()
         if k in BW_KEYVALUES_EXTRA]

    def __str__(self):
        return self.bw_strv1

    @classmethod
    def from_results(cls, results, secs_recent=None, secs_away=None,
                     min_num=0):
        """Convert sbws results to relays' Bandwidth Lines

        ``bs`` stands for Bytes/seconds
        ``bw_mean`` means the bw is obtained from the mean of all the
        downloads' bandwidth.
        Downloads' bandwidth is calculated as the amount of data received
        divided by the time it took to receive it:
        bw = data (Bytes) / time (seconds)
        """
        # log.debug("Len success_results %s", len(success_results))
        node_id = '$' + results[0].fingerprint
        kwargs = dict()
        kwargs['nick'] = results[0].nickname
        if getattr(results[0], 'master_key_ed25519'):
            kwargs['master_key_ed25519'] = results[0].master_key_ed25519
        kwargs['time'] = cls.last_time_from_results(results)
        kwargs.update(cls.result_types_from_results(results))
        consensuses_count = \
            [r.relay_in_recent_consensus_count for r in results
             if getattr(r, 'relay_in_recent_consensus_count', None)]
        if consensuses_count:
            consensus_count = max(consensuses_count)
            kwargs['relay_in_recent_consensus_count'] = consensus_count
        success_results = [r for r in results if isinstance(r, ResultSuccess)]
        if not success_results:
            return None
        results_away = \
            cls.results_away_each_other(success_results, secs_away)
        if not results_away:
            return None
        # log.debug("Results away from each other: %s",
        #           [unixts_to_isodt_str(r.time) for r in results_away])
        results_recent = cls.results_recent_than(results_away, secs_recent)
        if not results_recent:
            return None
        if not len(results_recent) >= min_num:
            # log.debug('The number of results is less than %s', min_num)
            return None
        rtt = cls.rtt_from_results(results_recent)
        if rtt:
            kwargs['rtt'] = rtt
        bw = cls.bw_median_from_results(results_recent)
        kwargs['bw_mean'] = cls.bw_mean_from_results(results_recent)
        kwargs['bw_median'] = cls.bw_median_from_results(
            results_recent)
        kwargs['desc_bw_avg'] = \
            cls.desc_bw_avg_from_results(results_recent)
        kwargs['desc_bw_bur'] = \
            cls.desc_bw_bur_from_results(results_recent)
        kwargs['consensus_bandwidth'] = \
            cls.consensus_bandwidth_from_results(results_recent)
        kwargs['consensus_bandwidth_is_unmeasured'] = \
            cls.consensus_bandwidth_is_unmeasured_from_results(
                results_recent)
        kwargs['desc_bw_obs_last'] = \
            cls.desc_bw_obs_last_from_results(results_recent)
        kwargs['desc_bw_obs_mean'] = \
            cls.desc_bw_obs_mean_from_results(results_recent)
        bwl = cls(node_id, bw, **kwargs)
        return bwl

    @classmethod
    def from_data(cls, data, fingerprint):
        assert fingerprint in data
        return cls.from_results(data[fingerprint])

    @classmethod
    def from_bw_line_v1(cls, line):
        assert isinstance(line, str)
        kwargs = dict([kv.split(KEYVALUE_SEP_V1)
                       for kv in line.split(BW_KEYVALUE_SEP_V1)
                       if kv.split(KEYVALUE_SEP_V1)[0] in BW_KEYVALUES])
        for k, v in kwargs.items():
            if k in BW_KEYVALUES_INT:
                kwargs[k] = int(v)
        node_id = kwargs['node_id']
        bw = kwargs['bw']
        del kwargs['node_id']
        del kwargs['bw']
        bw_line = cls(node_id, bw, **kwargs)
        return bw_line

    @staticmethod
    def results_away_each_other(results, secs_away=None):
        # log.debug("Checking whether results are away from each other in %s "
        #           "secs.", secs_away)
        if secs_away is None or len(results) < 2:
            return results
        for a, b in combinations(results, 2):
            if abs(a.time - b.time) > secs_away:
                return results
        # log.debug("Results are NOT away from each other in at least %ss: %s",
        #           secs_away, [unixts_to_isodt_str(r.time) for r in results])
        return None

    @staticmethod
    def results_recent_than(results, secs_recent=None):
        if secs_recent is None:
            return results
        results_recent = list(filter(
                            lambda x: (now_unixts() - x.time) < secs_recent,
                            results))
        # if not results_recent:
        #     log.debug("Results are NOT more recent than %ss: %s",
        #               secs_recent,
        #               [unixts_to_isodt_str(r.time) for r in results])
        return results_recent

    @staticmethod
    def bw_median_from_results(results):
        return max(round(median([dl['amount'] / dl['duration']
                                 for r in results for dl in r.downloads])), 1)

    @staticmethod
    def bw_mean_from_results(results):
        return max(round(mean([dl['amount'] / dl['duration']
                               for r in results for dl in r.downloads])), 1)

    @staticmethod
    def last_time_from_results(results):
        return unixts_to_isodt_str(round(max([r.time for r in results])))

    @staticmethod
    def rtt_from_results(results):
        # convert from seconds to milliseconds
        rtts = [(round(rtt * 1000)) for r in results for rtt in r.rtts]
        rtt = round(median(rtts)) if rtts else None
        return rtt

    @staticmethod
    def result_types_from_results(results):
        rt_dict = dict([(result_type_to_key(rt.value),
                         num_results_of_type(results, rt.value))
                        for rt in _ResultType])
        return rt_dict

    @staticmethod
    def desc_bw_avg_from_results(results):
        """Obtain the last descriptor bandwidth average from the results."""
        for r in reversed(results):
            if r.relay_average_bandwidth is not None:
                return r.relay_average_bandwidth
        return None

    @staticmethod
    def desc_bw_bur_from_results(results):
        """Obtain the last descriptor bandwidth burst from the results."""
        for r in reversed(results):
            if r.relay_burst_bandwidth is not None:
                return r.relay_burst_bandwidth
        return None

    @staticmethod
    def consensus_bandwidth_from_results(results):
        """Obtain the last consensus bandwidth from the results."""
        for r in reversed(results):
            if r.consensus_bandwidth is not None:
                return r.consensus_bandwidth
        return None

    @staticmethod
    def consensus_bandwidth_is_unmeasured_from_results(results):
        """Obtain the last consensus unmeasured flag from the results."""
        for r in reversed(results):
            if r.consensus_bandwidth_is_unmeasured is not None:
                return r.consensus_bandwidth_is_unmeasured
        return None

    @staticmethod
    def desc_bw_obs_mean_from_results(results):
        desc_bw_obs_ls = []
        for r in results:
            if r.relay_observed_bandwidth is not None:
                desc_bw_obs_ls.append(r.relay_observed_bandwidth)
        if desc_bw_obs_ls:
            return max(round(mean(desc_bw_obs_ls)), 1)
        return None

    @staticmethod
    def desc_bw_obs_last_from_results(results):
        # the last is at the end of the list
        for r in reversed(results):
            if r.relay_observed_bandwidth is not None:
                return r.relay_observed_bandwidth
        return None

    @property
    def bw_keyvalue_tuple_ls(self):
        """Return list of KeyValue Bandwidth Line tuples."""
        # sort the list to generate deterministic lines
        keyvalue_tuple_ls = sorted([(k, v) for k, v in self.__dict__.items()
                                    if k in BW_KEYVALUES])
        return keyvalue_tuple_ls

    @property
    def bw_keyvalue_v1str_ls(self):
        """Return list of KeyValue Bandwidth Line strings following
        spec v1.X.X.
        """
        bw_keyvalue_str = [KEYVALUE_SEP_V1.join([k, str(v)])
                           for k, v in self.bw_keyvalue_tuple_ls]
        return bw_keyvalue_str

    @property
    def bw_strv1(self):
        """Return Bandwidth Line string following spec v1.X.X."""
        bw_line_str = BW_KEYVALUE_SEP_V1.join(
                        self.bw_keyvalue_v1str_ls) + LINE_SEP
        if len(bw_line_str) > BW_LINE_SIZE:
            # if this is the case, probably there are too many KeyValues,
            # or the limit needs to be changed in Tor
            log.warn("The bandwidth line %s is longer than %s",
                     len(bw_line_str), BW_LINE_SIZE)
        return bw_line_str


class V3BWFile(object):
    """
    Create a Bandwidth List file following spec version 1.X.X

    :param V3BWHeader v3bwheader: header
    :param list v3bwlines: V3BWLines
    """
    def __init__(self, v3bwheader, v3bwlines):
        self.header = v3bwheader
        self.bw_lines = v3bwlines

    def __str__(self):
        return str(self.header) + ''.join([str(bw_line) or ''
                                           for bw_line in self.bw_lines])

    @classmethod
    def from_results(cls, results, scanner_country=None,
                     destinations_countries=None, state_fpath='',
                     scale_constant=SBWS_SCALE_CONSTANT,
                     scaling_method=TORFLOW_SCALING,
                     torflow_obs=TORFLOW_OBS_LAST,
                     torflow_cap=TORFLOW_BW_MARGIN,
                     round_digs=PROP276_ROUND_DIG,
                     secs_recent=None, secs_away=None, min_num=0,
                     consensus_path=None, max_bw_diff_perc=MAX_BW_DIFF_PERC,
                     reverse=False):
        """Create V3BWFile class from sbws Results.

        :param dict results: see below
        :param str state_fpath: path to the state file
        :param int scaling_method:
            Scaling method to obtain the bandwidth
            Possible values: {None, SBWS_SCALING, TORFLOW_SCALING} = {0, 1, 2}
        :param int scale_constant: sbws scaling constant
        :param int torflow_obs: method to choose descriptor observed bandwidth
        :param bool reverse: whether to sort the bw lines descending or not

        Results are in the form::

            {'relay_fp1': [Result1, Result2, ...],
             'relay_fp2': [Result1, Result2, ...]}

        """
        log.info('Processing results to generate a bandwidth list file.')
        header = V3BWHeader.from_results(results, scanner_country,
                                         destinations_countries, state_fpath)
        bw_lines_raw = []
        number_consensus_relays = cls.read_number_consensus_relays(
            consensus_path)
        state = State(state_fpath)
        for fp, values in results.items():
            # log.debug("Relay fp %s", fp)
            line = V3BWLine.from_results(values, secs_recent, secs_away,
                                         min_num)
            if line is not None:
                bw_lines_raw.append(line)
        if not bw_lines_raw:
            log.info("After applying restrictions to the raw results, "
                     "no results are left. Scaling cannot be applied.")
            cls.update_progress(
                cls, bw_lines_raw, header, number_consensus_relays, state)
            return cls(header, [])
        if scaling_method == SBWS_SCALING:
            bw_lines = cls.bw_sbws_scale(bw_lines_raw, scale_constant)
            cls.warn_if_not_accurate_enough(bw_lines, scale_constant)
            # log.debug(bw_lines[-1])
        elif scaling_method == TORFLOW_SCALING:
            bw_lines = cls.bw_torflow_scale(bw_lines_raw, torflow_obs,
                                            torflow_cap, round_digs)
            # log.debug(bw_lines[-1])
            cls.update_progress(
                cls, bw_lines, header, number_consensus_relays, state)
        else:
            bw_lines = cls.bw_kb(bw_lines_raw)
            # log.debug(bw_lines[-1])
        # Not using the result for now, just warning
        cls.is_max_bw_diff_perc_reached(bw_lines, max_bw_diff_perc)
        f = cls(header, bw_lines)
        return f
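    # Usage sketch (illustrative paths and arguments, not part of the original
    # source):
    #   bw_file = V3BWFile.from_results(results, state_fpath='state.dat',
    #                                   secs_recent=5 * 24 * 3600)
    #   bw_file.write('/path/to/archive/20190101_120000.v3bw')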

    @classmethod
    def from_v1_fpath(cls, fpath):
        log.info('Parsing bandwidth file %s', fpath)
        with open(fpath) as fd:
            text = fd.read()
        all_lines = text.split(LINE_SEP)
        header, lines = V3BWHeader.from_lines_v1(all_lines)
        bw_lines = [V3BWLine.from_bw_line_v1(line) for line in lines]
        return cls(header, bw_lines)

    @classmethod
    def from_v100_fpath(cls, fpath):
        log.info('Parsing bandwidth file %s', fpath)
        with open(fpath) as fd:
            text = fd.read()
        all_lines = text.split(LINE_SEP)
        header, lines = V3BWHeader.from_lines_v100(all_lines)
        bw_lines = sorted([V3BWLine.from_bw_line_v1(l) for l in lines],
                          key=lambda l: l.bw)
        return cls(header, bw_lines)

    @staticmethod
    def bw_kb(bw_lines, reverse=False):
        bw_lines_scaled = copy.deepcopy(bw_lines)
        for l in bw_lines_scaled:
            l.bw = max(round(l.bw / 1000), 1)
        return sorted(bw_lines_scaled, key=lambda x: x.bw, reverse=reverse)

    @staticmethod
    def bw_sbws_scale(bw_lines, scale_constant=SBWS_SCALE_CONSTANT,
                      reverse=False):
        """Return a new V3BwLine list scaled using sbws method.

        :param list bw_lines:
            bw lines to scale, not self.bw_lines,
            since this method is called before self.bw_lines has been
            initialized.
        :param int scale_constant:
            the constant to multiply by the ratio and
            the bandwidth to obtain the new bandwidth
        :returns list: V3BwLine list
        """
        log.debug('Scaling bandwidth using sbws method.')
        m = median([l.bw for l in bw_lines])
        bw_lines_scaled = copy.deepcopy(bw_lines)
        for l in bw_lines_scaled:
            # min is to limit the bw to descriptor average-bandwidth
            # max to avoid bandwidth with 0 value
            l.bw = max(round(min(l.desc_bw_avg,
                                 l.bw * scale_constant / m)
                             / 1000), 1)
        return sorted(bw_lines_scaled, key=lambda x: x.bw, reverse=reverse)

    @staticmethod
    def warn_if_not_accurate_enough(bw_lines,
                                    scale_constant=SBWS_SCALE_CONSTANT):
        margin = 0.001
        accuracy_ratio = median([l.bw for l in bw_lines]) / scale_constant
        log.info('The generated lines are within {:.5}% of what they should '
                 'be'.format((1 - accuracy_ratio) * 100))
        if accuracy_ratio < 1 - margin or accuracy_ratio > 1 + margin:
            log.warning('There was %f%% error and only +/- %f%% is '
                        'allowed', (1 - accuracy_ratio) * 100, margin * 100)

    @staticmethod
    def is_max_bw_diff_perc_reached(bw_lines,
                                    max_bw_diff_perc=MAX_BW_DIFF_PERC):
        sum_consensus_bw = sum([l.desc_bw_obs_last for l in bw_lines])
        sum_bw = sum([l.bw for l in bw_lines])
        diff = min(sum_consensus_bw, sum_bw) / max(sum_consensus_bw, sum_bw)
        diff_perc = diff * 100
        log.info("The difference between the total consensus bandwidth "
                 "and the total measured bandwidth is %s%%",
                 diff_perc)
        if diff_perc > max_bw_diff_perc:
            log.warning("It is more than %s%%", max_bw_diff_perc)
            return True
        return False

    @staticmethod
    def bw_torflow_scale(bw_lines, desc_bw_obs_type=TORFLOW_OBS_MEAN,
                         cap=TORFLOW_BW_MARGIN,
                         num_round_dig=PROP276_ROUND_DIG, reverse=False):
        """
        Obtain final bandwidth measurements applying Torflow's scaling
        method.

        From Torflow's README.spec.txt (section 2.2)::

            In this way, the resulting network status consensus bandwidth values  # NOQA
            are effectively re-weighted proportional to how much faster the node  # NOQA
            was as compared to the rest of the network.

        The variables and steps used in Torflow:

        **strm_bw**::

            The strm_bw field is the average (mean) of all the streams for the relay  # NOQA
            identified by the fingerprint field.
            strm_bw = sum(bw stream x)/|n stream|

        **filt_bw**::

            The filt_bw field is computed similarly, but only the streams equal to  # NOQA
            or greater than the strm_bw are counted in order to filter very slow  # NOQA
            streams due to slow node pairings.

        **filt_sbw and strm_sbw**::

            for rs in RouterStats.query.filter(stats_clause).\
                  options(eagerload_all('router.streams.circuit.routers')).all():  # NOQA
              tot_sbw = 0
              sbw_cnt = 0
              for s in rs.router.streams:
                if isinstance(s, ClosedStream):
                  skip = False
                  #for br in badrouters:
                  #  if br != rs:
                  #    if br.router in s.circuit.routers:
                  #      skip = True
                  if not skip:
                    # Throw out outliers < mean
                    # (too much variance for stddev to filter much)
                    if rs.strm_closed == 1 or s.bandwidth() >= rs.sbw:
                      tot_sbw += s.bandwidth()
                      sbw_cnt += 1

            if sbw_cnt: rs.filt_sbw = tot_sbw/sbw_cnt
            else: rs.filt_sbw = None

        **filt_avg, and strm_avg**::

            Once we have determined the most recent measurements for each node, we  # NOQA
            compute an average of the filt_bw fields over all nodes we have measured.  # NOQA

        ::

            filt_avg = sum(map(lambda n: n.filt_bw, nodes.itervalues()))/float(len(nodes))  # NOQA
            strm_avg = sum(map(lambda n: n.strm_bw, nodes.itervalues()))/float(len(nodes))  # NOQA

        **true_filt_avg and true_strm_avg**::

            for cl in ["Guard+Exit", "Guard", "Exit", "Middle"]:
                true_filt_avg[cl] = filt_avg
                true_strm_avg[cl] = strm_avg

        In the non-pid case, all types of nodes get the same avg

        **n.fbw_ratio and n.fsw_ratio**::

            for n in nodes.itervalues():
                n.fbw_ratio = n.filt_bw/true_filt_avg[n.node_class()]
                n.sbw_ratio = n.strm_bw/true_strm_avg[n.node_class()]

        **n.ratio**::

            These averages are used to produce ratios for each node by dividing the  # NOQA
            measured value for that node by the network average.

        ::

            # Choose the larger between sbw and fbw
              if n.sbw_ratio > n.fbw_ratio:
                n.ratio = n.sbw_ratio
              else:
                n.ratio = n.fbw_ratio

        **desc_bw**:

        It is the minimum of all the descriptor bandwidth values::

            bws = map(int, g)
            bw_observed = min(bws)

            return Router(ns.idhex, ns.nickname, bw_observed, dead, exitpolicy,
            ns.flags, ip, version, os, uptime, published, contact, rate_limited,  # NOQA
            ns.orhash, ns.bandwidth, extra_info_digest, ns.unmeasured)

            self.desc_bw = max(bw,1) # Avoid div by 0

        **new_bw**::

            These ratios are then multiplied by the most recent observed descriptor  # NOQA
            bandwidth we have available for each node, to produce a new value for  # NOQA
            the network status consensus process.

        ::

            n.new_bw = n.desc_bw*n.ratio

        The descriptor observed bandwidth is multiplied by the ratio.

        **Limit the bandwidth to a maximum**::

            NODE_CAP = 0.05

        ::

            if n.new_bw > tot_net_bw*NODE_CAP:
              plog("INFO", "Clipping extremely fast "+n.node_class()+" node "+n.idhex+"="+n.nick+  # NOQA
                   " at "+str(100*NODE_CAP)+"% of network capacity ("+
                   str(n.new_bw)+"->"+str(int(tot_net_bw*NODE_CAP))+") "+
                   " pid_error="+str(n.pid_error)+
                   " pid_error_sum="+str(n.pid_error_sum))
              n.new_bw = int(tot_net_bw*NODE_CAP)

        However, tot_net_bw does not seems to be updated when not using pid.
        This clipping would make faster relays to all have the same value.

        All of that can be expressed as:

        .. math::

           bwn_i =& min\\left(bwnew_i,
                      \\sum_{i=1}^{n}bwnew_i \\times 0.05\\right) \\

                 &= min\\left(
                      \\left(min\\left(bwobs_i, bwavg_i, bwbur_i \\right) \\times r_i\\right),
                        \\sum_{i=1}^{n}\\left(min\\left(bwobs_i, bwavg_i, bwbur_i \\right) \\times r_i\\right)
                        \\times 0.05\\right)\\

                 &= min\\left(
                      \\left(min\\left(bwobs_i, bwavg_i, bwbur_i \\right) \\times max\\left(rf_i, rs_i\\right)\\right),
                        \\sum_{i=1}^{n}\\left(min\\left(bwobs_i, bwavg_i, bwbur_i \\right) \\times
                          max\\left(rf_i, rs_i\\right)\\right) \\times 0.05\\right)\\

                 &= min\\left(
                      \\left(min\\left(bwobs_i, bwavg_i, bwbur_i \\right) \\times max\\left(\\frac{bwfilt_i}{bwfilt},
                          \\frac{bw_i}{bwstrm}\\right)\\right),
                        \\sum_{i=1}^{n}\\left(min\\left(bwobs_i, bwavg_i, bwbur_i \\right) \\times
                          max\\left(\\frac{bwfilt_i}{bwfilt},
                            \\frac{bw_i}{bwstrm}\\right)\\right) \\times 0.05\\right)

        """
        log.info("Calculating relays' bandwidth using Torflow method.")
        bw_lines_tf = copy.deepcopy(bw_lines)
        # mean (Torflow's strm_avg)
        mu = mean([l.bw_mean for l in bw_lines])
        # filtered mean (Torflow's filt_avg)
        muf = mean([max(l.bw_mean, mu) for l in bw_lines])
        # bw sum (Torflow's tot_net_bw or tot_sbw)
        sum_bw = sum([l.bw_mean for l in bw_lines])
        # Torflow's clipping
        hlimit = sum_bw * TORFLOW_BW_MARGIN
        log.debug('sum %s', sum_bw)
        log.debug('mu %s', mu)
        log.debug('muf %s', muf)
        log.debug('hlimit %s', hlimit)
        for l in bw_lines_tf:
            if desc_bw_obs_type == TORFLOW_OBS_LAST:
                desc_bw_obs = l.desc_bw_obs_last
            elif desc_bw_obs_type == TORFLOW_OBS_MEAN:
                desc_bw_obs = l.desc_bw_obs_mean
            # Excerpt from bandwidth-file-spec.txt section 2.3
            # A relay's MaxAdvertisedBandwidth limits the bandwidth-avg in its
            # descriptor.
            # Therefore generators MUST limit a relay's measured bandwidth to
            # its descriptor's bandwidth-avg.
            # Generators SHOULD NOT limit measured bandwidths based on
            # descriptors' bandwidth-observed, because that penalises new
            # relays.
            # See https://trac.torproject.org/projects/tor/ticket/8494
            if l.desc_bw_bur is not None:
                # Because in previous versions results were not storing
                # desc_bw_bur
                desc_bw = min(desc_bw_obs, l.desc_bw_bur, l.desc_bw_avg)
            else:
                desc_bw = min(desc_bw_obs, l.desc_bw_avg)
            # In previous versions results were not storing consensus_bandwidth
            if l.consensus_bandwidth_is_unmeasured \
                    or l.consensus_bandwidth is None:
                min_bandwidth = desc_bw
            # If the relay is measured, use the minimum between the
            # descriptor's bandwidth and the consensus bandwidth, so that
            # MaxAdvertisedBandwidth limits the consensus weight
            # The consensus bandwidth in a measured relay has been obtained
            # doing the same calculation as here
            else:
                min_bandwidth = min(desc_bw, l.consensus_bandwidth)
            # Torflow's scaling
            ratio_stream = l.bw_mean / mu
            ratio_stream_filtered = max(l.bw_mean, mu) / muf
            ratio = max(ratio_stream, ratio_stream_filtered)
            bw_scaled = ratio * min_bandwidth
            # round and convert to KB
            bw_new = kb_round_x_sig_dig(bw_scaled, digits=num_round_dig)
            # Cap maximum bw
            if cap is not None:
                bw_new = min(hlimit, bw_new)
            # avoid 0
            l.bw = max(bw_new, 1)
        return sorted(bw_lines_tf, key=lambda x: x.bw, reverse=reverse)

    @staticmethod
    def read_number_consensus_relays(consensus_path):
        """Read the number of relays in the Network from the cached consensus
        file."""
        num = None
        try:
            num = len(list(parse_file(consensus_path)))
        except (FileNotFoundError, AttributeError):
            log.info("It is not possible to obtain statistics about the "
                     "percentage of measured relays because the cached "
                     "consensus file is not found.")
        log.debug("Number of relays in the network %s", num)
        return num

    @staticmethod
    def measured_progress_stats(bw_lines, number_consensus_relays,
                                min_perc_reached_before):
        """ Statistics about measurements progress,
        to be included in the header.

        :param list bw_lines: the bw_lines after scaling and applying filters.
        :param int number_consensus_relays: the number of relays in the cached
            consensus file.
        :param min_perc_reached_before: the ISO timestamp of the last time the
            minimum percentage of measured relays was reached, or None.
        :returns dict, bool: Statistics about the progress made with
            measurements and whether the percentage of measured relays has been
            reached.

        """
        # cached-consensus should be updated every time that the scanner gets
        # the network status or descriptors?
        # It will not be updated to the last consensus, but the list of
        # measured relays is not either.
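        # Worked example (illustrative numbers; MIN_REPORT is defined in
        # sbws.globals and its value is not shown here): with 6000 relays in
        # the consensus and 3000 eligible bw lines, percent_eligible_relays is
        # round(3000 * 100 / 6000) = 50, and the function returns False
        # whenever 3000 < round(6000 * MIN_REPORT / 100).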
        assert isinstance(number_consensus_relays, int)
        assert isinstance(bw_lines, list)
        statsd = {}
        statsd['number_eligible_relays'] = len(bw_lines)
        statsd['number_consensus_relays'] = number_consensus_relays
        statsd['minimum_number_eligible_relays'] = round(
            statsd['number_consensus_relays'] * MIN_REPORT / 100)
        statsd['percent_eligible_relays'] = round(
            len(bw_lines) * 100 / statsd['number_consensus_relays'])
        statsd['minimum_percent_eligible_relays'] = MIN_REPORT
        if statsd['number_eligible_relays'] < \
                statsd['minimum_number_eligible_relays']:
            # if the minimum percent was reached before, warn;
            # otherwise, log at info level
            if min_perc_reached_before is not None:
                log.warning('The percentage of the measured relays is less '
                            'than the %s%% of the relays in the network (%s).',
                            MIN_REPORT, statsd['number_consensus_relays'])
            else:
                log.info('The percentage of the measured relays is less '
                         'than the %s%% of the relays in the network (%s).',
                         MIN_REPORT, statsd['number_consensus_relays'])
            return statsd, False
        return statsd, True

    @property
    def is_min_perc(self):
        if getattr(self.header, 'number_eligible_relays', 0) \
                < getattr(self.header, 'minimum_number_eligible_relays', 0):
            return False
        return True

    @property
    def sum_bw(self):
        return sum([l.bw for l in self.bw_lines])

    @property
    def num(self):
        return len(self.bw_lines)

    @property
    def mean_bw(self):
        return mean([l.bw for l in self.bw_lines])

    @property
    def median_bw(self):
        return median([l.bw for l in self.bw_lines])

    @property
    def max_bw(self):
        return max([l.bw for l in self.bw_lines])

    @property
    def min_bw(self):
        return min([l.bw for l in self.bw_lines])

    @property
    def info_stats(self):
        if not self.bw_lines:
            return
        [log.info(': '.join([attr, str(getattr(self, attr))])) for attr in
         ['sum_bw', 'mean_bw', 'median_bw', 'num',
          'max_bw', 'min_bw']]

    def update_progress(self, bw_lines, header, number_consensus_relays,
                        state):
        min_perc_reached_before = state.get('min_perc_reached')
        if number_consensus_relays is not None:
            statsd, success = self.measured_progress_stats(
                bw_lines, number_consensus_relays, min_perc_reached_before)
            # add statistics about progress always
            header.add_stats(**statsd)
            if not success:
                bw_lines = []
                state['min_perc_reached'] = None
            else:
                state['min_perc_reached'] = now_isodt_str()
        return bw_lines

    def bw_line_for_node_id(self, node_id):
        """Returns the bandwidth line for a given node fingerprint.

        Used to combine data when plotting.
        """
        bwl = [l for l in self.bw_lines if l.node_id == node_id]
        if bwl:
            return bwl[0]
        return None

    def to_plt(self, attrs=['bw'], sorted_by=None):
        """Return bandwidth data in a format useful for matplotlib.

        Used from external tool to plot.
        """
        x = [i for i in range(0, self.num)]
        ys = [[getattr(l, k) for l in self.bw_lines] for k in attrs]
        return x, ys, attrs

    def write(self, output):
        if output == '/dev/stdout':
            log.info("Writing to stdout is not supported.")
            return
        log.info('Writing v3bw file to %s', output)
        # To avoid inconsistent reads, the bandwidth data is written to an
        # archive path, then atomically symlinked to 'latest.v3bw'
        out_dir = os.path.dirname(output)
        out_link = os.path.join(out_dir, 'latest.v3bw')
        out_link_tmp = out_link + '.tmp'
        with DirectoryLock(out_dir):
            with open(output, 'wt') as fd:
                fd.write(str(self.header))
                for line in self.bw_lines:
                    fd.write(str(line))
            output_basename = os.path.basename(output)
            # To atomically symlink a file, we need to create a temporary link,
            # then rename it to the final link name. (POSIX guarantees that
            # rename is atomic.)
            log.debug('Creating symlink {} -> {}.'
                      .format(out_link_tmp, output_basename))
            os.symlink(output_basename, out_link_tmp)
            log.debug('Renaming symlink {} -> {} to {} -> {}.'
                      .format(out_link_tmp, output_basename,
                              out_link, output_basename))
            os.rename(out_link_tmp, out_link)