v3bwfile.py 60.8 KB
Newer Older
juga  's avatar
juga committed
1
2
3
4
# -*- coding: utf-8 -*-
"""Classes and functions that create the bandwidth measurements document
(v3bw) used by bandwidth authorities."""

juga  's avatar
juga committed
5
import copy
juga  's avatar
juga committed
6
import logging
7
import math
juga  's avatar
juga committed
8
import os
9
from itertools import combinations
juga  's avatar
juga committed
10
from statistics import median, mean
11
from stem.descriptor import parse_file
juga  's avatar
juga committed
12

13
from sbws import __version__
14
from sbws.globals import (SPEC_VERSION, BW_LINE_SIZE, SBWS_SCALE_CONSTANT,
15
16
                          TORFLOW_SCALING, SBWS_SCALING, TORFLOW_BW_MARGIN,
                          TORFLOW_OBS_LAST, TORFLOW_OBS_MEAN,
17
                          PROP276_ROUND_DIG, MIN_REPORT, MAX_BW_DIFF_PERC)
juga  's avatar
juga committed
18
from sbws.lib.resultdump import ResultSuccess, _ResultType
19
from sbws.util.filelock import DirectoryLock
juga  's avatar
juga committed
20
from sbws.util.timestamp import (now_isodt_str, unixts_to_isodt_str,
21
                                 now_unixts, isostr_to_dt_obj)
22
from sbws.util.state import State
juga  's avatar
juga committed
23
24
25

log = logging.getLogger(__name__)

26
LINE_SEP = '\n'
27
28
KEYVALUE_SEP_V1 = '='
KEYVALUE_SEP_V2 = ' '
29
30
31
32
33
34

# NOTE: in a future refactor make make all the KeyValues be a dictionary
# with their type, so that it's more similar to stem parser.

# Header KeyValues
# =================
35
# List of the extra KeyValues accepted by the class
36
HEADER_KEYS_V1X = ['software', 'software_version', 'file_created',
37
                       'earliest_bandwidth', 'generator_started',
38
                       'scanner_country', 'destinations_countries']
39
40
41
42
# number_eligible_relays is the number that ends in the bandwidth file
# ie, have not been excluded by one of the filters in 4. below
# They should be call recent_measurement_included_count to be congruent
# with the other KeyValues.
43
HEADER_KEYS_V1_2 = ['number_eligible_relays', 'minimum_number_eligible_relays',
juga  's avatar
juga committed
44
45
                   'number_consensus_relays', 'percent_eligible_relays',
                   'minimum_percent_eligible_relays']
46
47
48
49

# KeyValues that count the number of relays that are in the bandwidth file,
# but ignored by Tor when voting, because they do not have a
# measured bandwidth.
50
HEADER_RECENT_MEASUREMENTS_EXCLUDED_KEYS = [
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
    # Number of relays that were measured but all the measurements failed
    # because of network failures or it was
    # not found a suitable helper relay
    'recent_measurements_excluded_error_count',
    # Number of relays that have successful measurements but the measurements
    # were not away from each other in X time (by default 1 day).
    'recent_measurements_excluded_near_count',
    # Number of relays that have successful measurements and they are away from
    # each other but they are not X time recent.
    # By default this is 5 days, which is the same time the older
    # the measurements can be by default.
    'recent_measurements_excluded_old_count',
    # Number of relays that have successful measurements and they are away from
    # each other and recent
    # but the number of measurements are less than X (by default 2).
    'recent_measurements_excluded_few_count',
]
68
# Added in #29591
69
70
71
72
# NOTE: recent_consensus_count, recent_priority_list_count,
# recent_measurement_attempt_count and recent_priority_relay_count
# are not reset when the scanner is stop.
# They will accumulate the values since the scanner was ever started.
73
HEADER_KEYS_V1_4 = [
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
    # 1.1 header: the number of different consensuses, that sbws has seen,
    # since the last 5 days
    'recent_consensus_count',
    # 2.4 Number of times a priority list has been created
    'recent_priority_list_count',
    # 2.5 Number of relays that there were in a priority list
    # [50, number of relays in the network * 0.05]
    'recent_priority_relay_count',
    # 3.6 header: the number of times that sbws has tried to measure any relay,
    # since the last 5 days
    # This would be the number of times a relays were in a priority list
    'recent_measurement_attempt_count',
    # 3.7 header: the number of times that sbws has tried to measure any relay,
    # since the last 5 days, but it didn't work
    # This should be the number of attempts - number of ResultSuccess -
    # something else we don't know yet
    # So far is the number of ResultError
    'recent_measurement_failure_count',
92
93
    # The time it took to report about half of the network.
    'time_to_report_half_network',
94
95
] + HEADER_RECENT_MEASUREMENTS_EXCLUDED_KEYS
HEADER_INIT_KEYS = \
96
97
    ['earliest_bandwidth', 'generator_started',
     'scanner_country', 'destinations_countries']\
98
99
    + HEADER_KEYS_V1_2 \
    + HEADER_KEYS_V1_4
100

101
HEADER_INT_KEYS = HEADER_KEYS_V1_2 + HEADER_KEYS_V1_4
102
# List of all unordered KeyValues currently being used to generate the file
103
HEADER_UNORDERED_KEYS = HEADER_KEYS_V1X + HEADER_KEYS_V1_2 + \
104
                      ['latest_bandwidth'] + \
105
                      HEADER_KEYS_V1_4
106
# List of all the KeyValues currently being used to generate the file
107
HEADER_ALL_KEYS = ['version'] + HEADER_UNORDERED_KEYS
108

109
TERMINATOR = '====='
110
111
112

# Bandwidth Lines KeyValues
# =========================
113
# Num header lines in v1.X.X using all the KeyValues
114
NUM_LINES_HEADER_V1 = len(HEADER_ALL_KEYS) + 2
115
116
LINE_TERMINATOR = TERMINATOR + LINE_SEP

juga  's avatar
juga committed
117
# KeyValue separator in Bandwidth Lines
118
BWLINE_KEYVALUES_SEP_V1 = ' '
119
# not inclding in the files the extra bws for now
120
121
BWLINE_KEYS_V0 = ['node_id', 'bw']
BWLINE_KEYS_V1_1 = BWLINE_KEYS_V0 + \
122
                    ['master_key_ed25519', 'nick', 'rtt', 'time',
123
                     'success', 'error_stream', 'error_circ', 'error_misc',
124
125
126
127
128
129
130
131
132
                     # `vote=0` is used for the relays that were excluded to
                     # be reported in the bandwidth file and now they are
                     # reported.
                     # It tells Tor to do not vote on the relay.
                     # `unmeasured=1` is used for the same relays and it is
                     # added in case Tor would vote on them in future versions.
                     # Maybe these keys should not be included for the relays
                     # in which vote=1 and unmeasured=0.
                     'vote', 'unmeasured',
133
134
135
136
                     # When there not enough eligible relays (not excluded)
                     # under_min_report is 1, `vote` is 0.
                     # Added in #29853.
                     'under_min_report',
137
138
                     # Added in #292951
                     'error_second_relay', 'error_destination']
139
BWLINE_KEYS_V1_2 = ['bw_median', 'bw_mean', 'desc_bw_avg', 'desc_bw_bur',
140
141
142
                          'desc_bw_obs_last', 'desc_bw_obs_mean',
                          'consensus_bandwidth',
                          'consensus_bandwidth_is_unmeasured']
143
144

# Added in #292951
145
BWLINE_KEYS_V1_4 = [
146
147
148
    # 1.2 relay: the number of different consensuses, that sbws has seen,
    # since the last 5 days, that have this relay
    'relay_in_recent_consensus_count',
149
150
151
    # 2.6 relay: the number of times a relay was "prioritized" to be measured
    # in the recent days (by default 5).
    'relay_recent_priority_list_count',
152
153
154
155
156
157
158
159
160
161
162
163
    # 3.8 relay:  the number of times that sbws has tried to measure
    # this relay, since the last 5 days
    # This would be the number of times a relay was in a priority list (2.6)
    # since once it gets measured, it either returns ResultError,
    # ResultSuccess or something else happened that we don't know yet
    'relay_recent_measurement_attempt_count',
    # 3.9 relay:  the number of times that sbws has tried to measure
    # this relay, since the last 5 days, but it didn't work
    # This should be the number of attempts - number of ResultSuccess -
    # something else we don't know yet
    # So far is the number of ResultError
    'relay_recent_measurement_failure_count',
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
    # Number of error results created in the last 5 days that are excluded.
    # This is the sum of all the errors.
    'relay_recent_measurements_excluded_error_count',
    # The number of successful results, created in the last 5 days,
    # that were excluded by a rule, for this relay.
    # 'relay_recent_measurements_excluded_error_count' would be the
    # sum of the following 3 + the number of error results.

    # The number of successful measurements that are not X time away
    # from each other (by default 1 day).
    'relay_recent_measurements_excluded_near_count',
    # The number of successful measurements that are away from each other
    # but not X time recent (by default 5 days).
    'relay_recent_measurements_excluded_old_count',
    # The number of measurements excluded because they are not at least X
    # (by default 2).
    'relay_recent_measurements_excluded_few_count',
181
]
182
183
BWLINE_KEYS_V1 = BWLINE_KEYS_V1_1 + BWLINE_KEYS_V1_2 \
               + BWLINE_KEYS_V1_4
184
185
# NOTE: tech-debt: assign boolean type to vote and unmeasured,
# when the attributes are defined with a type, as stem does.
186
BWLINE_INT_KEYS = ['bw', 'rtt', 'success', 'error_stream',
187
188
                    'error_circ', 'error_misc', 'vote', 'unmeasured',
                    'under_min_report'] \
189
190
191
                   + BWLINE_KEYS_V1_2 \
                   + BWLINE_KEYS_V1_4
BWLINE_ALL_KEYS = BWLINE_KEYS_V0 + BWLINE_KEYS_V1
juga  's avatar
juga committed
192
193


194
def round_sig_dig(n, digits=PROP276_ROUND_DIG):
195
196
197
198
199
200
201
    """Round n to 'digits' significant digits in front of the decimal point.
       Results less than or equal to 1 are rounded to 1.
       Returns an integer.

       digits must be greater than 0.
       n must be less than or equal to 2**73, to avoid floating point errors.
       """
202
    digits = int(digits)
203
204
205
206
207
208
209
210
211
    assert digits >= 1
    if n <= 1:
        return 1
    digits_in_n = int(math.log10(n)) + 1
    round_digits = max(digits_in_n - digits, 0)
    rounded_n = round(n, -round_digits)
    return int(rounded_n)


212
def kb_round_x_sig_dig(bw_bs, digits=PROP276_ROUND_DIG):
213
214
215
216
217
218
219
220
221
222
223
    """Convert bw_bs from bytes to kilobytes, and round the result to
       'digits' significant digits.
       Results less than or equal to 1 are rounded up to 1.
       Returns an integer.

       digits must be greater than 0.
       n must be less than or equal to 2**82, to avoid floating point errors.
       """
    # avoid double-rounding by using floating-point
    bw_kb = bw_bs / 1000.0
    return round_sig_dig(bw_kb, digits=digits)
224
225


juga  's avatar
juga committed
226
227
228
229
230
231
232
233
234
def num_results_of_type(results, type_str):
    return len([r for r in results if r.type == type_str])


# Better way to use enums?
def result_type_to_key(type_str):
    return type_str.replace('-', '_')


235
class V3BWHeader(object):
juga  's avatar
juga committed
236
237
    """
    Create a bandwidth measurements (V3bw) header
238
    following bandwidth measurements document spec version 1.X.X.
juga  's avatar
juga committed
239

240
    :param str timestamp: timestamp in Unix Epoch seconds of the most recent
241
        generator result.
juga  's avatar
juga committed
242
243
244
    :param str version: the spec version
    :param str software: the name of the software that generates this
    :param str software_version: the version of the software
245
    :param dict kwargs: extra headers. Currently supported:
juga  's avatar
juga committed
246

247
248
249
250
        - earliest_bandwidth: str, ISO 8601 timestamp in UTC time zone
          when the first bandwidth was obtained
        - generator_started: str, ISO 8601 timestamp in UTC time zone
          when the generator started
juga  's avatar
juga committed
251
    """
252
    def __init__(self, timestamp, **kwargs):
juga  's avatar
juga committed
253
254
255
256
        assert isinstance(timestamp, str)
        for v in kwargs.values():
            assert isinstance(v, str)
        self.timestamp = timestamp
257
258
259
260
        # KeyValues with default value when not given by kwargs
        self.version = kwargs.get('version', SPEC_VERSION)
        self.software = kwargs.get('software', 'sbws')
        self.software_version = kwargs.get('software_version', __version__)
261
        self.file_created = kwargs.get('file_created', now_isodt_str())
juga  's avatar
juga committed
262
        # latest_bandwidth should not be in kwargs, since it MUST be the
263
        # same as timestamp
juga  's avatar
juga committed
264
        self.latest_bandwidth = unixts_to_isodt_str(timestamp)
265
        [setattr(self, k, v) for k, v in kwargs.items()
266
         if k in HEADER_INIT_KEYS]
267

268
    def __str__(self):
269
        if self.version.startswith('1.'):
270
271
            return self.strv1
        return self.strv2
272

juga  's avatar
juga committed
273
    @classmethod
274
275
    def from_results(cls, results, scanner_country=None,
                     destinations_countries=None, state_fpath=''):
juga  's avatar
juga committed
276
277
278
        kwargs = dict()
        latest_bandwidth = cls.latest_bandwidth_from_results(results)
        earliest_bandwidth = cls.earliest_bandwidth_from_results(results)
279
        # NOTE: Blocking, reads file
280
        generator_started = cls.generator_started_from_file(state_fpath)
281
        recent_consensus_count = cls.consensus_count_from_file(state_fpath)
juga  's avatar
juga committed
282
283
284
285
286
        timestamp = str(latest_bandwidth)
        kwargs['latest_bandwidth'] = unixts_to_isodt_str(latest_bandwidth)
        kwargs['earliest_bandwidth'] = unixts_to_isodt_str(earliest_bandwidth)
        if generator_started is not None:
            kwargs['generator_started'] = generator_started
287
288
289
        # To be compatible with older bandwidth files, do not require it.
        if scanner_country is not None:
            kwargs['scanner_country'] = scanner_country
290
291
        if destinations_countries is not None:
            kwargs['destinations_countries'] = destinations_countries
292
293
        if recent_consensus_count is not None:
            kwargs['recent_consensus_count'] = str(recent_consensus_count)
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322

        recent_measurement_attempt_count = \
            cls.recent_measurement_attempt_count_from_file(state_fpath)
        if recent_measurement_attempt_count is not None:
            kwargs['recent_measurement_attempt_count'] = \
                str(recent_measurement_attempt_count)

        # If it is a failure that is not a ResultError, then
        # failures = attempts - all mesaurements
        # Works only in the case that old measurements files already had
        # measurements count
        if recent_measurement_attempt_count is not None:
            all_measurements = 0
            for result_list in results.values():
                all_measurements += len(result_list)
            measurement_failures = (recent_measurement_attempt_count
                                    - all_measurements)
            kwargs['recent_measurement_failure_count'] = \
                str(measurement_failures)

        priority_lists = cls.recent_priority_list_count_from_file(state_fpath)
        if priority_lists is not None:
            kwargs['recent_priority_list_count'] = str(priority_lists)

        priority_relays = \
            cls.recent_priority_relay_count_from_file(state_fpath)
        if priority_relays is not None:
            kwargs['recent_priority_relay_count'] = str(priority_relays)

juga  's avatar
juga committed
323
324
325
        h = cls(timestamp, **kwargs)
        return h

326
    @classmethod
327
    def from_lines_v1(cls, lines):
328
329
        """
        :param list lines: list of lines to parse
330
        :returns: tuple of V3BWHeader object and non-header lines
331
332
333
334
        """
        assert isinstance(lines, list)
        try:
            index_terminator = lines.index(TERMINATOR)
Matt Traudt's avatar
Matt Traudt committed
335
        except ValueError:
336
337
338
            # is not a bw file or is v100
            log.warn('Terminator is not in lines')
            return None
339
        ts = lines[0]
340
        kwargs = dict([l.split(KEYVALUE_SEP_V1)
341
                       for l in lines[:index_terminator]
342
                       if l.split(KEYVALUE_SEP_V1)[0] in HEADER_ALL_KEYS])
343
        h = cls(ts, **kwargs)
juga  's avatar
juga committed
344
        # last line is new line
345
        return h, lines[index_terminator + 1:-1]
346
347

    @classmethod
348
    def from_text_v1(self, text):
349
        """
350
        :param str text: text to parse
351
        :returns: tuple of V3BWHeader object and non-header lines
352
353
        """
        assert isinstance(text, str)
354
        return self.from_lines_v1(text.split(LINE_SEP))
juga  's avatar
juga committed
355

juga  's avatar
juga committed
356
357
358
359
360
361
362
363
364
365
366
    @classmethod
    def from_lines_v100(cls, lines):
        """
        :param list lines: list of lines to parse
        :returns: tuple of V3BWHeader object and non-header lines
        """
        assert isinstance(lines, list)
        h = cls(lines[0])
        # last line is new line
        return h, lines[1:-1]

juga  's avatar
juga committed
367
    @staticmethod
368
    def generator_started_from_file(state_fpath):
369
370
371
372
        '''
        ISO formatted timestamp for the time when the scanner process most
        recently started.
        '''
373
        state = State(state_fpath)
374
375
376
377
        if 'scanner_started' in state:
            return state['scanner_started']
        else:
            return None
juga  's avatar
juga committed
378

379
380
381
    @staticmethod
    def consensus_count_from_file(state_fpath):
        state = State(state_fpath)
382
383
        if 'recent_consensus_count' in state:
            return state['recent_consensus_count']
384
385
386
        else:
            return None

387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
    # NOTE: in future refactor store state in the class
    @staticmethod
    def recent_measurement_attempt_count_from_file(state_fpath):
        """
        Returns the number of times any relay was queued to be measured
        in the recent (by default 5) days from the state file.
        """
        state = State(state_fpath)
        return state.get('recent_measurement_attempt_count', None)

    @staticmethod
    def recent_priority_list_count_from_file(state_fpath):
        """
        Returns the number of times
        :meth:`~sbws.lib.relayprioritizer.RelayPrioritizer.best_priority`
        was run
        in the recent (by default 5) days from the state file.
        """
        state = State(state_fpath)
        return state.get('recent_priority_list_count', None)

    @staticmethod
    def recent_priority_relay_count_from_file(state_fpath):
        """
        Returns the number of times any relay was "prioritized" to be measured
        in the recent (by default 5) days from the state file.
        """
        state = State(state_fpath)
        return state.get('recent_priority_relay_count', None)

juga  's avatar
juga committed
417
    @staticmethod
juga  's avatar
juga committed
418
    def latest_bandwidth_from_results(results):
419
        return round(max([r.time for fp in results for r in results[fp]]))
juga  's avatar
juga committed
420
421
422

    @staticmethod
    def earliest_bandwidth_from_results(results):
423
        return round(min([r.time for fp in results for r in results[fp]]))
juga  's avatar
juga committed
424

juga  's avatar
juga committed
425
426
427
428
429
    @property
    def keyvalue_unordered_tuple_ls(self):
        """Return list of KeyValue tuples that do not have specific order."""
        # sort the list to generate determinist headers
        keyvalue_tuple_ls = sorted([(k, v) for k, v in self.__dict__.items()
430
                                    if k in HEADER_UNORDERED_KEYS])
juga  's avatar
juga committed
431
432
433
434
435
436
437
438
        return keyvalue_tuple_ls

    @property
    def keyvalue_tuple_ls(self):
        """Return list of all KeyValue tuples"""
        return [('version', self.version)] + self.keyvalue_unordered_tuple_ls

    @property
439
440
441
    def keyvalue_v1str_ls(self):
        """Return KeyValue list of strings following spec v1.X.X."""
        keyvalues = [self.timestamp] + [KEYVALUE_SEP_V1.join([k, v])
juga  's avatar
juga committed
442
443
444
445
                                        for k, v in self.keyvalue_tuple_ls]
        return keyvalues

    @property
446
447
448
    def strv1(self):
        """Return header string following spec v1.X.X."""
        header_str = LINE_SEP.join(self.keyvalue_v1str_ls) + LINE_SEP + \
juga  's avatar
juga committed
449
450
451
452
            LINE_TERMINATOR
        return header_str

    @property
453
454
455
    def keyvalue_v2_ls(self):
        """Return KeyValue list of strings following spec v2.X.X."""
        keyvalue = [self.timestamp] + [KEYVALUE_SEP_V2.join([k, v])
juga  's avatar
juga committed
456
457
458
459
                                       for k, v in self.keyvalue_tuple_ls]
        return keyvalue

    @property
460
461
462
    def strv2(self):
        """Return header string following spec v2.X.X."""
        header_str = LINE_SEP.join(self.keyvalue_v2_ls) + LINE_SEP + \
juga  's avatar
juga committed
463
464
465
466
467
468
            LINE_TERMINATOR
        return header_str

    @property
    def num_lines(self):
        return len(self.__str__().split(LINE_SEP))
juga  's avatar
juga committed
469

juga  's avatar
juga committed
470
471
472
    def add_stats(self, **kwargs):
        # Using kwargs because attributes might chage.
        [setattr(self, k, str(v)) for k, v in kwargs.items()
473
         if k in HEADER_KEYS_V1_2]
juga  's avatar
juga committed
474

475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
    def add_time_report_half_network(self):
        """Add to the header the time it took to measure half of the network.

        It is not the time the scanner actually takes on measuring all the
        network, but the ``number_eligible_relays`` that are reported in the
        bandwidth file and directory authorities will vote on.

        This is calculated for half of the network, so that failed or not
        reported relays do not affect too much.

        For instance, if there are 6500 relays in the network, half of the
        network would be 3250. And if there were 4000 eligible relays
        measured in an interval of 3 days, the time to measure half of the
        network would be 3 days * 3250 / 4000.

        Since the elapsed time is calculated from the earliest and the
        latest measurement and a relay might have more than 2 measurements,
        this would give an estimate on how long it would take to measure
        the network including all the valid measurements.

        Log also an estimated on how long it would take with the current
        number of relays included in the bandwidth file.
        """
        # NOTE: in future refactor do not convert attributes to str until
        # writing to the file, so that they do not need to be converted back
        # to do some calculations.
        elapsed_time = (
            (isostr_to_dt_obj(self.latest_bandwidth)
             - isostr_to_dt_obj(self.earliest_bandwidth))
            .total_seconds())

        # This attributes were added later and some tests that
        # do not initialize them would fail.
        eligible_relays = int(getattr(self, 'number_eligible_relays', 0))
        consensus_relays = int(getattr(self, 'number_consensus_relays', 0))
        if not(eligible_relays and consensus_relays):
            return

        half_network = consensus_relays / 2
        # Calculate the time it would take to measure half of the network
        if eligible_relays >= half_network:
            time_half_network = round(
                elapsed_time * half_network / eligible_relays
            )
            self.time_to_report_half_network = str(time_half_network)

        # In any case log an estimated on the time to measure all the network.
        estimated_time = round(
            elapsed_time * consensus_relays / eligible_relays
        )
        log.info("Estimated time to measure the network: %s hours.",
                 round(estimated_time / 60 / 60))

528
529
530
531
532
533
534
535
536
    def add_relays_excluded_counters(self, exclusion_dict):
        """
        Add the monitoring KeyValues to the header about the number of
        relays not included because they were not ``eligible``.
        """
        log.debug("Adding relays excluded counters.")
        for k, v in exclusion_dict.items():
            setattr(self, k, str(v))

juga  's avatar
juga committed
537

juga  's avatar
juga committed
538
class V3BWLine(object):
juga  's avatar
juga committed
539
    """
540
    Create a Bandwidth List line following the spec version 1.X.X.
juga  's avatar
juga committed
541

542
543
544
545
    :param str node_id: the relay fingerprint
    :param int bw: the bandwidth value that directory authorities will include
        in their votes.
    :param dict kwargs: extra headers.
juga  's avatar
juga committed
546

547
548
    .. note:: tech-debt: move node_id and bw to kwargs and just ensure that
       the required values are in **kwargs
juga  's avatar
juga committed
549
    """
juga  's avatar
juga committed
550
551
    def __init__(self, node_id, bw, **kwargs):
        assert isinstance(node_id, str)
552
        assert node_id.startswith('$')
juga  's avatar
juga committed
553
554
555
        self.node_id = node_id
        self.bw = bw
        [setattr(self, k, v) for k, v in kwargs.items()
556
         if k in BWLINE_KEYS_V1]
juga  's avatar
juga committed
557

juga  's avatar
juga committed
558
    def __str__(self):
559
        return self.bw_strv1
juga  's avatar
juga committed
560

juga  's avatar
juga committed
561
    @classmethod
juga  's avatar
juga committed
562
563
    def from_results(cls, results, secs_recent=None, secs_away=None,
                     min_num=0):
564
565
566
        """Convert sbws results to relays' Bandwidth Lines

        ``bs`` stands for Bytes/seconds
567
        ``bw_mean`` means the bw is obtained from the mean of the all the
568
569
570
571
572
        downloads' bandwidth.
        Downloads' bandwidth are calculated as the amount of data received
        divided by the the time it took to received.
        bw = data (Bytes) / time (seconds)
        """
573
        # log.debug("Len success_results %s", len(success_results))
juga  's avatar
juga committed
574
575
576
577
578
579
580
        node_id = '$' + results[0].fingerprint
        kwargs = dict()
        kwargs['nick'] = results[0].nickname
        if getattr(results[0], 'master_key_ed25519'):
            kwargs['master_key_ed25519'] = results[0].master_key_ed25519
        kwargs['time'] = cls.last_time_from_results(results)
        kwargs.update(cls.result_types_from_results(results))
581
582
583
        consensuses_count = \
            [r.relay_in_recent_consensus_count for r in results
             if getattr(r, 'relay_in_recent_consensus_count', None)]
584
585
        if consensuses_count:
            consensus_count = max(consensuses_count)
586
            kwargs['relay_in_recent_consensus_count'] = str(consensus_count)
587

588
589
590
591
592
593
594
595
596
597
598
599
600
601
        measurements_attempts = \
            [r.relay_recent_measurement_attempt_count for r in results
             if getattr(r, 'relay_recent_measurement_attempt_count', None)]
        if measurements_attempts:
            kwargs['relay_recent_measurement_attempt_count'] = \
                str(max(measurements_attempts))

        relay_recent_priority_list_counts = \
            [r.relay_recent_priority_list_count for r in results
             if getattr(r, 'relay_recent_priority_list_count', None)]
        if relay_recent_priority_list_counts:
            kwargs['relay_recent_priority_list_count'] = \
                str(max(relay_recent_priority_list_counts))

602
        success_results = [r for r in results if isinstance(r, ResultSuccess)]
603
604
605
606

        # NOTE: The following 4 conditions exclude relays from the bandwidth
        # file when the measurements does not satisfy some rules, what makes
        # the relay non-`eligible`.
607
608
        # In BWLINE_KEYS_V1_4 it is explained what they mean.
        # In HEADER_RECENT_MEASUREMENTS_EXCLUDED_KEYS it is also
609
610
611
612
613
614
        # explained the what it means the strings returned.
        # They rules were introduced in #28061 and #27338
        # In #28565 we introduce the KeyValues to know why they're excluded.
        # In #28563 we report these relays, but make Tor ignore them.
        # This might confirm #28042.

615
616
617
618
619
620
621
622
623
624
625
626
627
628
        # If the relay is non-`eligible`:
        # Create a bandwidth line with the relay, but set ``vote=0`` so that
        # Tor versions with patch #29806 does not vote on the relay.
        # Set ``bw=1`` so that Tor versions without the patch,
        # will give the relay low bandwidth.
        # Include ``unmeasured=1`` in case Tor would vote on unmeasured relays
        # in future versions.
        # And return because there are not bandwidth values.
        # NOTE: the bandwidth values could still be obtained if:
        # 1. ``ResultError`` will store them
        # 2. assign ``results_recent = results`` when there is a ``exclusion
        # reason.
        # This could be done in a better way as part of a refactor #28684.

juga  's avatar
juga committed
629
630
        kwargs['vote'] = 0
        kwargs['unmeasured'] = 1
631
632

        exclusion_reason = None
633

634
635
636
637
638
        number_excluded_error = len(results) - len(success_results)
        if number_excluded_error > 0:
            # then the number of error results is the number of results
            kwargs['relay_recent_measurements_excluded_error_count'] = \
                number_excluded_error
639
        if not success_results:
640
            exclusion_reason = 'recent_measurements_excluded_error_count'
641
            return (cls(node_id, 1, **kwargs), exclusion_reason)
642

643
644
        results_away = \
            cls.results_away_each_other(success_results, secs_away)
645
646
647
        number_excluded_near = len(success_results) - len(results_away)
        if number_excluded_near > 0:
            kwargs['relay_recent_measurements_excluded_near_count'] = \
648
                number_excluded_near
649
        if not results_away:
650
            exclusion_reason = \
651
                'recent_measurements_excluded_near_count'
652
            return (cls(node_id, 1, **kwargs), exclusion_reason)
653
654
        # log.debug("Results away from each other: %s",
        #           [unixts_to_isodt_str(r.time) for r in results_away])
655

656
        results_recent = cls.results_recent_than(results_away, secs_recent)
657
658
659
660
        number_excluded_old = len(results_away) - len(results_recent)
        if number_excluded_old > 0:
            kwargs['relay_recent_measurements_excluded_old_count'] = \
                number_excluded_old
661
        if not results_recent:
662
            exclusion_reason = \
663
                'recent_measurements_excluded_old_count'
664
            return (cls(node_id, 1, **kwargs), exclusion_reason)
665

666
        if not len(results_recent) >= min_num:
667
668
            kwargs['relay_recent_measurements_excluded_few_count'] = \
                len(results_recent)
669
            # log.debug('The number of results is less than %s', min_num)
670
            exclusion_reason = \
671
                'recent_measurements_excluded_few_count'
672
673
674
675
676
677
            return (cls(node_id, 1, **kwargs), exclusion_reason)

        # For any line not excluded, do not include vote and unmeasured
        # KeyValues
        del kwargs['vote']
        del kwargs['unmeasured']
678

679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
        rtt = cls.rtt_from_results(results_recent)
        if rtt:
            kwargs['rtt'] = rtt
        bw = cls.bw_median_from_results(results_recent)
        kwargs['bw_mean'] = cls.bw_mean_from_results(results_recent)
        kwargs['bw_median'] = cls.bw_median_from_results(
            results_recent)
        kwargs['desc_bw_avg'] = \
            cls.desc_bw_avg_from_results(results_recent)
        kwargs['desc_bw_bur'] = \
            cls.desc_bw_bur_from_results(results_recent)
        kwargs['consensus_bandwidth'] = \
            cls.consensus_bandwidth_from_results(results_recent)
        kwargs['consensus_bandwidth_is_unmeasured'] = \
            cls.consensus_bandwidth_is_unmeasured_from_results(
694
                results_recent)
695
696
697
698
699
        kwargs['desc_bw_obs_last'] = \
            cls.desc_bw_obs_last_from_results(results_recent)
        kwargs['desc_bw_obs_mean'] = \
            cls.desc_bw_obs_mean_from_results(results_recent)
        bwl = cls(node_id, bw, **kwargs)
700
        return bwl, None
juga  's avatar
juga committed
701
702
703
704
705
706

    @classmethod
    def from_data(cls, data, fingerprint):
        assert fingerprint in data
        return cls.from_results(data[fingerprint])

juga  's avatar
juga committed
707
    @classmethod
708
    def from_bw_line_v1(cls, line):
juga  's avatar
juga committed
709
        assert isinstance(line, str)
710
        kwargs = dict([kv.split(KEYVALUE_SEP_V1)
711
712
                       for kv in line.split(BWLINE_KEYVALUES_SEP_V1)
                       if kv.split(KEYVALUE_SEP_V1)[0] in BWLINE_ALL_KEYS])
juga  's avatar
juga committed
713
        for k, v in kwargs.items():
714
            if k in BWLINE_INT_KEYS:
juga  's avatar
juga committed
715
                kwargs[k] = int(v)
716
717
718
719
720
        node_id = kwargs['node_id']
        bw = kwargs['bw']
        del kwargs['node_id']
        del kwargs['bw']
        bw_line = cls(node_id, bw, **kwargs)
juga  's avatar
juga committed
721
722
        return bw_line

juga  's avatar
juga committed
723
724
    @staticmethod
    def results_away_each_other(results, secs_away=None):
juga  's avatar
juga committed
725
726
        # log.debug("Checking whether results are away from each other in %s "
        #           "secs.", secs_away)
juga  's avatar
juga committed
727
728
        if secs_away is None or len(results) < 2:
            return results
729
730
731
732
733
        for a, b in combinations(results, 2):
            if abs(a.time - b.time) > secs_away:
                return results
        # log.debug("Results are NOT away from each other in at least %ss: %s",
        #           secs_away, [unixts_to_isodt_str(r.time) for r in results])
734
        return []
juga  's avatar
juga committed
735
736
737
738
739

    @staticmethod
    def results_recent_than(results, secs_recent=None):
        if secs_recent is None:
            return results
740
        results_recent = list(filter(
juga  's avatar
juga committed
741
                            lambda x: (now_unixts() - x.time) < secs_recent,
742
743
744
745
746
747
                            results))
        # if not results_recent:
        #     log.debug("Results are NOT more recent than %ss: %s",
        #               secs_recent,
        #               [unixts_to_isodt_str(r.time) for r in results])
        return results_recent
juga  's avatar
juga committed
748

749
    @staticmethod
750
    def bw_median_from_results(results):
751
752
753
754
        return max(round(median([dl['amount'] / dl['duration']
                                 for r in results for dl in r.downloads])), 1)

    @staticmethod
755
    def bw_mean_from_results(results):
756
757
758
        return max(round(mean([dl['amount'] / dl['duration']
                               for r in results for dl in r.downloads])), 1)

juga  's avatar
juga committed
759
760
761
762
763
764
765
766
    @staticmethod
    def last_time_from_results(results):
        return unixts_to_isodt_str(round(max([r.time for r in results])))

    @staticmethod
    def rtt_from_results(results):
        # convert from miliseconds to seconds
        rtts = [(round(rtt * 1000)) for r in results for rtt in r.rtts]
juga  's avatar
juga committed
767
        rtt = round(median(rtts)) if rtts else None
juga  's avatar
juga committed
768
769
770
771
772
773
774
775
776
        return rtt

    @staticmethod
    def result_types_from_results(results):
        rt_dict = dict([(result_type_to_key(rt.value),
                         num_results_of_type(results, rt.value))
                        for rt in _ResultType])
        return rt_dict

777
778
779
780
781
782
783
784
    @staticmethod
    def desc_bw_avg_from_results(results):
        """Obtain the last descriptor bandwidth average from the results."""
        for r in reversed(results):
            if r.relay_average_bandwidth is not None:
                return r.relay_average_bandwidth
        return None

785
786
787
788
789
790
791
792
    @staticmethod
    def desc_bw_bur_from_results(results):
        """Obtain the last descriptor bandwidth burst from the results."""
        for r in reversed(results):
            if r.relay_burst_bandwidth is not None:
                return r.relay_burst_bandwidth
        return None

793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
    @staticmethod
    def consensus_bandwidth_from_results(results):
        """Obtain the last consensus bandwidth from the results."""
        for r in reversed(results):
            if r.consensus_bandwidth is not None:
                return r.consensus_bandwidth
        return None

    @staticmethod
    def consensus_bandwidth_is_unmeasured_from_results(results):
        """Obtain the last consensus unmeasured flag from the results."""
        for r in reversed(results):
            if r.consensus_bandwidth_is_unmeasured is not None:
                return r.consensus_bandwidth_is_unmeasured
        return None

809
    @staticmethod
810
811
    def desc_bw_obs_mean_from_results(results):
        desc_bw_obs_ls = []
812
813
        for r in results:
            if r.relay_observed_bandwidth is not None:
814
815
816
                desc_bw_obs_ls.append(r.relay_observed_bandwidth)
        if desc_bw_obs_ls:
            return max(round(mean(desc_bw_obs_ls)), 1)
817
818
819
        return None

    @staticmethod
820
    def desc_bw_obs_last_from_results(results):
821
822
823
824
825
826
        # the last is at the end of the list
        for r in reversed(results):
            if r.relay_observed_bandwidth is not None:
                return r.relay_observed_bandwidth
        return None

juga  's avatar
juga committed
827
828
829
830
831
    @property
    def bw_keyvalue_tuple_ls(self):
        """Return list of KeyValue Bandwidth Line tuples."""
        # sort the list to generate determinist headers
        keyvalue_tuple_ls = sorted([(k, v) for k, v in self.__dict__.items()
832
                                    if k in BWLINE_ALL_KEYS])
juga  's avatar
juga committed
833
        return keyvalue_tuple_ls
juga  's avatar
juga committed
834

juga  's avatar
juga committed
835
    @property
836
    def bw_keyvalue_v1str_ls(self):
juga  's avatar
juga committed
837
        """Return list of KeyValue Bandwidth Line strings following
838
        spec v1.X.X.
juga  's avatar
juga committed
839
        """
840
        bw_keyvalue_str = [KEYVALUE_SEP_V1 .join([k, str(v)])
juga  's avatar
juga committed
841
842
                           for k, v in self.bw_keyvalue_tuple_ls]
        return bw_keyvalue_str
juga  's avatar
juga committed
843

juga  's avatar
juga committed
844
    @property
845
846
    def bw_strv1(self):
        """Return Bandwidth Line string following spec v1.X.X."""
847
        bw_line_str = BWLINE_KEYVALUES_SEP_V1.join(
848
                        self.bw_keyvalue_v1str_ls) + LINE_SEP
juga  's avatar
juga committed
849
850
851
852
853
854
        if len(bw_line_str) > BW_LINE_SIZE:
            # if this is the case, probably there are too many KeyValues,
            # or the limit needs to be changed in Tor
            log.warn("The bandwidth line %s is longer than %s",
                     len(bw_line_str), BW_LINE_SIZE)
        return bw_line_str
juga  's avatar
juga committed
855

juga  's avatar
juga committed
856

857
class V3BWFile(object):
juga  's avatar
juga committed
858
    """
859
    Create a Bandwidth List file following spec version 1.X.X
juga  's avatar
juga committed
860
861
862
863

    :param V3BWHeader v3bwheader: header
    :param list v3bwlines: V3BWLines
    """
juga  's avatar
juga committed
864
865
866
867
868
    def __init__(self, v3bwheader, v3bwlines):
        self.header = v3bwheader
        self.bw_lines = v3bwlines

    def __str__(self):
869
        return str(self.header) + ''.join([str(bw_line) or ''
juga  's avatar
juga committed
870
871
                                           for bw_line in self.bw_lines])

juga  's avatar
juga committed
872
    @classmethod
873
874
    def from_results(cls, results, scanner_country=None,
                     destinations_countries=None, state_fpath='',
875
                     scale_constant=SBWS_SCALE_CONSTANT,
876
877
                     scaling_method=TORFLOW_SCALING,
                     torflow_obs=TORFLOW_OBS_LAST,
juga  's avatar
juga committed
878
                     torflow_cap=TORFLOW_BW_MARGIN,
879
                     round_digs=PROP276_ROUND_DIG,
880
                     secs_recent=None, secs_away=None, min_num=0,
881
882
                     consensus_path=None, max_bw_diff_perc=MAX_BW_DIFF_PERC,
                     reverse=False):
883
884
885
886
887
888
        """Create V3BWFile class from sbws Results.

        :param dict results: see below
        :param str state_fpath: path to the state file
        :param int scaling_method:
            Scaling method to obtain the bandwidth
889
            Possible values: {None, SBWS_SCALING, TORFLOW_SCALING} = {0, 1, 2}
890
891
892
893
894
895
896
897
898
899
900
        :param int scale_constant: sbws scaling constant
        :param int torflow_obs: method to choose descriptor observed bandwidth
        :param bool reverse: whether to sort the bw lines descending or not

        Results are in the form::

            {'relay_fp1': [Result1, Result2, ...],
             'relay_fp2': [Result1, Result2, ...]}

        """
        log.info('Processing results to generate a bandwidth list file.')
901
902
        header = V3BWHeader.from_results(results, scanner_country,
                                         destinations_countries, state_fpath)
903
        bw_lines_raw = []
904
        bw_lines_excluded = []
juga  's avatar
juga committed
905
906
        number_consensus_relays = cls.read_number_consensus_relays(
            consensus_path)
907
        state = State(state_fpath)
908
909
910

        # Create a dictionary with the number of relays excluded by any of the
        # of the filtering rules that makes relays non-`eligible`.
911
        # NOTE: In HEADER_RECENT_MEASUREMENTS_EXCLUDED_KEYS it is
912
913
914
        # explained what are the KeyValues.
        # See also the comments in `from_results`.
        exclusion_dict = dict(
915
            [(k, 0) for k in HEADER_RECENT_MEASUREMENTS_EXCLUDED_KEYS]
916
            )
juga  's avatar
juga committed
917
        for fp, values in results.items():
918
            # log.debug("Relay fp %s", fp)
919
920
            line, reason = V3BWLine.from_results(values, secs_recent,
                                                 secs_away, min_num)
921
922
            # If there is no reason it means the line will not be excluded.
            if not reason:
juga  's avatar
juga committed
923
                bw_lines_raw.append(line)
924
            else:
925
926
927
                # Store the excluded lines to include them in the bandwidth
                # file.
                bw_lines_excluded.append(line)
928
929
930
931
                exclusion_dict[reason] = exclusion_dict.get(reason, 0) + 1
        # Add the headers with the number of excluded relays by reason
        header.add_relays_excluded_counters(exclusion_dict)

932
        if not bw_lines_raw:
933
934
            # It could be possible to scale the lines that were successful
            # even if excluded, but is not done here.
935
936
            log.info("After applying restrictions to the raw results, "
                     "there is not any. Scaling can not be applied.")
937
            # Update the header and log the progress.
juga  's avatar
juga committed
938
            cls.update_progress(
939
940
941
942
943
                cls, 0, header, number_consensus_relays, state)
            # Set the lines that would be excluded anyway (`vote=0`) with
            # `under_min_report=1`
            cls.set_under_min_report(bw_lines_excluded)
            # Create the bandwidth file with the lines that would be excluded.
944
            return cls(header, bw_lines_excluded)
945
946
        if scaling_method == SBWS_SCALING:
            bw_lines = cls.bw_sbws_scale(bw_lines_raw, scale_constant)
947
            cls.warn_if_not_accurate_enough(bw_lines, scale_constant)
948
949
            # log.debug(bw_lines[-1])
        elif scaling_method == TORFLOW_SCALING:
juga  's avatar
juga committed
950
            bw_lines = cls.bw_torflow_scale(bw_lines_raw, torflow_obs,
951
                                            torflow_cap, round_digs)
952
            # log.debug(bw_lines[-1])
953
954
955
956
957
958
959
960
            # Update the header and log the progress.
            min_perc = cls.update_progress(
                cls, len(bw_lines), header, number_consensus_relays, state
                )
            # If after scaling the number of lines is less than the percentage
            # of lines to report, set them with `under_min_report`.
            if not min_perc:
                cls.set_under_min_report(bw_lines)
juga  's avatar
juga committed
961
        else:
962
963
            bw_lines = cls.bw_kb(bw_lines_raw)
            # log.debug(bw_lines[-1])
964
965
        # Not using the result for now, just warning
        cls.is_max_bw_diff_perc_reached(bw_lines, max_bw_diff_perc)
966
        header.add_time_report_half_network()
967
        f = cls(header, bw_lines + bw_lines_excluded)
juga  's avatar
juga committed
968
969
        return f

970
    @classmethod
971
    def from_v1_fpath(cls, fpath):
972
973
974
975
        log.info('Parsing bandwidth file %s', fpath)
        with open(fpath) as fd:
            text = fd.read()
        all_lines = text.split(LINE_SEP)
976
977
        header, lines = V3BWHeader.from_lines_v1(all_lines)
        bw_lines = [V3BWLine.from_bw_line_v1(line) for line in lines]
978
979
        return cls(header, bw_lines)

980
981
982
983
984
985
986
    @classmethod
    def from_v100_fpath(cls, fpath):
        log.info('Parsing bandwidth file %s', fpath)
        with open(fpath) as fd:
            text = fd.read()
        all_lines = text.split(LINE_SEP)
        header, lines = V3BWHeader.from_lines_v100(all_lines)
987
        bw_lines = sorted([V3BWLine.from_bw_line_v1(l) for l in lines],
988
989
990
                          key=lambda l: l.bw)
        return cls(header, bw_lines)

991
992
993
994
995
996
997
998
999
1000
1001
    @staticmethod
    def set_under_min_report(bw_lines):
        """
        Mondify the Bandwidth Lines adding the KeyValue `under_min_report`,
        `vote`.
        """
        log.debug("Setting `under_min_report` to %s lines.", len(bw_lines))
        for l in bw_lines:
            l.under_min_report = 1
            l.vote = 0

juga  's avatar
juga committed
1002
1003
1004
1005
1006
1007
1008
    @staticmethod
    def bw_kb(bw_lines, reverse=False):
        bw_lines_scaled = copy.deepcopy(bw_lines)
        for l in bw_lines_scaled:
            l.bw = max(round(l.bw / 1000), 1)
        return sorted(bw_lines_scaled, key=lambda x: x.bw, reverse=reverse)

juga  's avatar
juga committed
1009
    @staticmethod
1010
    def bw_sbws_scale(bw_lines, scale_constant=SBWS_SCALE_CONSTANT,
juga  's avatar
juga committed
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
                      reverse=False):
        """Return a new V3BwLine list scaled using sbws method.

        :param list bw_lines:
            bw lines to scale, not self.bw_lines,
            since this method will be before self.bw_lines have been
            initialized.
        :param int scale_constant:
            the constant to multiply by the ratio and
            the bandwidth to obtain the new bandwidth
        :returns list: V3BwLine list
        """
        log.debug('Scaling bandwidth using sbws method.')
        m = median([l.bw for l in bw_lines])
        bw_lines_scaled = copy.deepcopy(bw_lines)
        for l in bw_lines_scaled:
            # min is to limit the bw to descriptor average-bandwidth
            # max to avoid bandwidth with 0 value
1029
            l.bw = max(round(min(l.desc_bw_avg,
1030
                                 l.bw * scale_constant / m)
juga  's avatar
juga committed
1031
1032
1033
1034
1035
                             / 1000), 1)
        return sorted(bw_lines_scaled, key=lambda x: x.bw, reverse=reverse)

    @staticmethod
    def warn_if_not_accurate_enough(bw_lines,
1036
                                    scale_constant=SBWS_SCALE_CONSTANT):
juga  's avatar
juga committed
1037
1038
1039
1040
1041
1042
1043
1044
        margin = 0.001
        accuracy_ratio = median([l.bw for l in bw_lines]) / scale_constant
        log.info('The generated lines are within {:.5}% of what they should '
                 'be'.format((1 - accuracy_ratio) * 100))
        if accuracy_ratio < 1 - margin or accuracy_ratio > 1 + margin:
            log.warning('There was %f%% error and only +/- %f%% is '
                        'allowed', (1 - accuracy_ratio) * 100, margin * 100)

1045
    @staticmethod
1046
1047
    def is_max_bw_diff_perc_reached(bw_lines,
                                    max_bw_diff_perc=MAX_BW_DIFF_PERC):
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
        # Since old versions were not storing consensus bandwidth, use getattr.
        sum_consensus_bw = sum([l.consensus_bandwidth for l in bw_lines
                                if getattr(l, 'consensus_bandwidth', None)])
        # Because the scaled bandwidth is in KB, but not the stored consensus
        # bandwidth, multiply by 1000.
        # Do not count 1 bandwidths for the relays that were excluded
        # and exclude also the bw of the relays that did not stored consensus,
        # since they are not included either in the sum of the consensus.
        sum_bw = sum([l.bw for l in bw_lines
                      if getattr(l, 'consensus_bandwidth', None)
                      and getattr(l, 'unmeasured', 0) == 0]) * 1000
        # Percentage difference
        diff_perc = (
            abs(sum_consensus_bw - sum_bw)
            / ((sum_consensus_bw + sum_bw) / 2)
            ) * 100
        log.info("The difference between the total consensus bandwidth (%s)"
                 "and the total measured bandwidth (%s) is %s%%.",
                 sum_consensus_bw, sum_bw, round(diff_perc))
1067
1068
1069
1070
1071
        if diff_perc > MAX_BW_DIFF_PERC:
            log.warning("It is more than %s%%", max_bw_diff_perc)
            return True
        return False

1072
    @staticmethod
1073
    def bw_torflow_scale(bw_lines, desc_bw_obs_type=TORFLOW_OBS_MEAN,
1074
                         cap=TORFLOW_BW_MARGIN,
1075
                         num_round_dig=PROP276_ROUND_DIG, reverse=False):
juga  's avatar
juga committed
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
        """
        Obtain final bandwidth measurements applying Torflow's scaling
        method.

        From Torflow's README.spec.txt (section 2.2)::

            In this way, the resulting network status consensus bandwidth values  # NOQA
            are effectively re-weighted proportional to how much faster the node  # NOQA
            was as compared to the rest of the network.

        The variables and steps used in Torflow:

        **strm_bw**::

            The strm_bw field is the average (mean) of all the streams for the relay  # NOQA
            identified by the fingerprint field.
            strm_bw = sum(bw stream x)/|n stream|

        **filt_bw**::

            The filt_bw field is computed similarly, but only the streams equal to  # NOQA
            or greater than the strm_bw are counted in order to filter very slow  # NOQA
            streams due to slow node pairings.

        **filt_sbw and strm_sbw**::

            for rs in RouterStats.query.filter(stats_clause).\
                  options(eagerload_all('router.streams.circuit.routers')).all():  # NOQA
              tot_sbw = 0
              sbw_cnt = 0
              for s in rs.router.streams:
                if isinstance(s, ClosedStream):
                  skip = False
                  #for br in badrouters:
                  #  if br != rs:
                  #    if br.router in s.circuit.routers:
                  #      skip = True
                  if not skip:
                    # Throw out outliers < mean
                    # (too much variance for stddev to filter much)
                    if rs.strm_closed == 1 or s.bandwidth() >= rs.sbw:
                      tot_sbw += s.bandwidth()
                      sbw_cnt += 1

            if sbw_cnt: rs.filt_sbw = tot_sbw/sbw_cnt
            else: rs.filt_sbw = None

        **filt_avg, and strm_avg**::

            Once we have determined the most recent measurements for each node, we  # NOQA
            compute an average of the filt_bw fields over all nodes we have measured.  # NOQA

        ::

            filt_avg = sum(map(lambda n: n.filt_bw, nodes.itervalues()))/float(len(nodes))  # NOQA
            strm_avg = sum(map(lambda n: n.strm_bw, nodes.itervalues()))/float(len(nodes))  # NOQA

        **true_filt_avg and true_strm_avg**::

            for cl in ["Guard+Exit", "Guard", "Exit", "Middle"]:
                true_filt_avg[cl] = filt_avg
                true_strm_avg[cl] = strm_avg

        In the non-pid case, all types of nodes get the same avg

        **n.fbw_ratio and n.fsw_ratio**::

            for n in nodes.itervalues():
                n.fbw_ratio = n.filt_bw/true_filt_avg[n.node_class()]
                n.sbw_ratio = n.strm_bw/true_strm_avg[n.node_class()]

        **n.ratio**::

            These averages are used to produce ratios for each node by dividing the  # NOQA
            measured value for that node by the network average.

        ::

            # Choose the larger between sbw and fbw
              if n.sbw_ratio > n.fbw_ratio:
                n.ratio = n.sbw_ratio
              else:
                n.ratio = n.fbw_ratio

        **desc_bw**:

juga  's avatar
juga committed
1162
1163
1164
1165
        It is the minimum of all the descriptor bandwidth values::

            bws = map(int, g)
            bw_observed = min(bws)
juga  's avatar
juga committed
1166