# -*- coding: utf-8 -*-
"""Classes and functions that create the bandwidth measurements document
(v3bw) used by bandwidth authorities."""

import copy
import logging
import math
import os
from itertools import combinations
from statistics import median, mean
from stem.descriptor import parse_file

from sbws import __version__
from sbws.globals import (SPEC_VERSION, BW_LINE_SIZE, SBWS_SCALE_CONSTANT,
                          TORFLOW_SCALING, SBWS_SCALING, TORFLOW_BW_MARGIN,
                          TORFLOW_OBS_LAST, TORFLOW_OBS_MEAN,
                          PROP276_ROUND_DIG, MIN_REPORT, MAX_BW_DIFF_PERC)
from sbws.lib import scaling
from sbws.lib.resultdump import ResultSuccess, _ResultType
from sbws.util.filelock import DirectoryLock
from sbws.util.timestamp import (now_isodt_str, unixts_to_isodt_str,
                                 now_unixts, isostr_to_dt_obj,
                                 dt_obj_to_isodt_str)
from sbws.util.state import State

log = logging.getLogger(__name__)

LINE_SEP = '\n'
KEYVALUE_SEP_V1 = '='
KEYVALUE_SEP_V2 = ' '

# NOTE: in a future refactor, make all the KeyValues a dictionary with their
# type, so that it is more similar to the stem parser.

# Header KeyValues
# =================
# KeyValues that need to be in a specific order in the Bandwidth File.
HEADER_KEYS_V1_1_ORDERED = ['version']
# KeyValues that are not initialized from the state file nor the measurements.
# They can also be passed as arguments to `Header` to overwrite the default
# values, which is done in unit tests.
# `latest_bandwidth` is special because it gets its value from the timestamp,
# which is not a KeyValue, but it is always passed as an argument.
# It could be separated into another list, but so far there is no need,
# because:
# 1. when it is passed to the Header to initialize it, it is just ignored.
# 2. when the file is created, it is taken into account.
HEADER_KEYS_V1_1_SELF_INITIALIZED = [
    "software",
    "software_version",
    "file_created",
    "latest_bandwidth",
]
# KeyValues that are initialized from arguments.
HEADER_KEYS_V1_1_TO_INIT = [
    "earliest_bandwidth",
    "generator_started",
]

# number_eligible_relays is the number of relays that end up in the bandwidth
# file, ie, that have not been excluded by one of the filters in 4. below.
# It should be called recent_measurement_included_count to be congruent
# with the other KeyValues.
HEADER_KEYS_V1_2 = [
    "number_eligible_relays",
    "minimum_number_eligible_relays",
    "number_consensus_relays",
    "percent_eligible_relays",
    "minimum_percent_eligible_relays",
]

# KeyValues added in the Bandwidth File v1.3.0
HEADER_KEYS_V1_3 = [
    "scanner_country",
    "destinations_countries",
]

# KeyValues that count the number of relays that are in the bandwidth file,
# but ignored by Tor when voting, because they do not have a
# measured bandwidth.
HEADER_RECENT_MEASUREMENTS_EXCLUDED_KEYS = [
    # Number of relays that were measured but all the measurements failed
    # because of network failures or because a suitable helper relay
    # could not be found.
    'recent_measurements_excluded_error_count',
    # Number of relays that have successful measurements, but the measurements
    # are not at least X time apart from each other (by default 1 day).
    'recent_measurements_excluded_near_count',
    # Number of relays that have successful measurements that are far enough
    # apart from each other, but are not X time recent.
    # By default this is 5 days, which is also the maximum age the
    # measurements can have by default.
    'recent_measurements_excluded_old_count',
    # Number of relays that have successful measurements that are far enough
    # apart from each other and recent enough, but there are fewer
    # measurements than X (by default 2).
    'recent_measurements_excluded_few_count',
]
# Added in #29591
# NOTE: recent_consensus_count, recent_priority_list_count,
# recent_measurement_attempt_count and recent_priority_relay_count
# are not reset when the scanner is stopped.
# They accumulate values since the scanner was first started.
HEADER_KEYS_V1_4 = [
    # 1.1 header: the number of different consensuses that sbws has seen
    # in the last 5 days
    'recent_consensus_count',
    # 2.4 Number of times a priority list has been created
    'recent_priority_list_count',
    # 2.5 Number of relays that were in a priority list
    # [50, number of relays in the network * 0.05]
    'recent_priority_relay_count',
    # 3.6 header: the number of times that sbws has tried to measure any relay
    # in the last 5 days.
    # This would be the number of times relays were in a priority list.
    'recent_measurement_attempt_count',
    # 3.7 header: the number of times that sbws has tried to measure any relay
    # in the last 5 days, but the measurement did not work.
    # This should be the number of attempts - number of ResultSuccess -
    # something else we don't know yet.
    # So far it is the number of ResultError.
    'recent_measurement_failure_count',
    # The time it took to report about half of the network.
    'time_to_report_half_network',
] + HEADER_RECENT_MEASUREMENTS_EXCLUDED_KEYS

# KeyValues added in the Bandwidth File v1.5.0
# XXX: Change SPEC_VERSION when all the v1.5.0 keys are added, before a new
# sbws release.
# Tor version will be obtained from the state file, so it won't be passed as an
# argument, but will be self-initialized.
HEADER_KEYS_V1_5_TO_INIT = ['tor_version']
HEADER_KEYS_V1_5 = HEADER_KEYS_V1_5_TO_INIT

# KeyValues that are initialized from arguments, not self-initialized.
HEADER_INIT_KEYS = (
    HEADER_KEYS_V1_1_TO_INIT
    + HEADER_KEYS_V1_3
    + HEADER_KEYS_V1_2
    + HEADER_KEYS_V1_4
    + HEADER_KEYS_V1_5_TO_INIT
)

HEADER_INT_KEYS = HEADER_KEYS_V1_2 + HEADER_KEYS_V1_4
# List of all unordered KeyValues currently being used to generate the file
HEADER_UNORDERED_KEYS = (
    HEADER_KEYS_V1_1_SELF_INITIALIZED
    + HEADER_KEYS_V1_1_TO_INIT
    + HEADER_KEYS_V1_3
    + HEADER_KEYS_V1_2
    + HEADER_KEYS_V1_4
    + HEADER_KEYS_V1_5
)
# List of all the KeyValues currently being used to generate the file
HEADER_ALL_KEYS = HEADER_KEYS_V1_1_ORDERED + HEADER_UNORDERED_KEYS

TERMINATOR = '====='

# Bandwidth Lines KeyValues
# =========================
# Num header lines in v1.X.X using all the KeyValues
NUM_LINES_HEADER_V1 = len(HEADER_ALL_KEYS) + 2
LINE_TERMINATOR = TERMINATOR + LINE_SEP
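
# The following is an illustrative sketch (values are made up, abridged with
# "...") of the v1.X.X layout this module produces: a Unix timestamp line,
# the header KeyValues joined with KEYVALUE_SEP_V1, the TERMINATOR, and then
# one Bandwidth Line per relay:
#
#   1523911758
#   version=1.4.0
#   software=sbws
#   software_version=1.1.0
#   =====
#   node_id=$68A483E05A2ABDCA6DA5A3EF8DB5177638A27F80 bw=760 nick=foo ...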

# KeyValue separator in Bandwidth Lines
BWLINE_KEYVALUES_SEP_V1 = ' '
# Not including the extra bws in the files for now.
BWLINE_KEYS_V0 = ['node_id', 'bw']
BWLINE_KEYS_V1_1 = [
    "master_key_ed25519",
    "nick",
    "rtt",
    "time",
    "success",
    "error_stream",
    "error_circ",
    "error_misc",
    # Added in #292951
    "error_second_relay",
    "error_destination",
]
BWLINE_KEYS_V1_2 = [
    "bw_median",
    "bw_mean",
    "desc_bw_avg",
    "desc_bw_bur",
    "desc_bw_obs_last",
    "desc_bw_obs_mean",
    "consensus_bandwidth",
    "consensus_bandwidth_is_unmeasured",
]

# There were no Bandwidth Line KeyValues added in specification version 1.3.

# Added in #292951
BWLINE_KEYS_V1_4 = [
    # 1.2 relay: the number of different consensuses that sbws has seen
    # in the last 5 days that include this relay
    'relay_in_recent_consensus_count',
    # 2.6 relay: the number of times a relay was "prioritized" to be measured
    # in the recent days (by default 5).
    'relay_recent_priority_list_count',
    # 3.8 relay: the number of times that sbws has tried to measure
    # this relay in the last 5 days.
    # This would be the number of times a relay was in a priority list (2.6),
    # since once it gets measured, it either returns ResultError,
    # ResultSuccess, or something else happened that we don't know yet.
    'relay_recent_measurement_attempt_count',
    # 3.9 relay: the number of times that sbws has tried to measure
    # this relay in the last 5 days, but the measurement did not work.
    # This should be the number of attempts - number of ResultSuccess -
    # something else we don't know yet.
    # So far it is the number of ResultError.
    'relay_recent_measurement_failure_count',
    # Number of error results created in the last 5 days that are excluded.
    # This is the sum of all the errors.
    'relay_recent_measurements_excluded_error_count',
    # The number of successful results, created in the last 5 days,
    # that were excluded by a rule, for this relay.
    # 'relay_recent_measurements_excluded_error_count' would be the
    # sum of the following 3 + the number of error results.

    # The number of successful measurements that are not at least X time
    # apart from each other (by default 1 day).
    'relay_recent_measurements_excluded_near_count',
    # The number of successful measurements that are far enough apart from
    # each other, but not X time recent (by default 5 days).
    'relay_recent_measurements_excluded_old_count',
    # The number of measurements excluded because they are not at least X
    # (by default 2).
    'relay_recent_measurements_excluded_few_count',
    # `vote=0` is used for the relays that used to be excluded from the
    # bandwidth file and are now reported.
    # It tells Tor not to vote on the relay.
    # `unmeasured=1` is used for the same relays and is added in case Tor
    # would vote on them in future versions.
    # Maybe these keys should not be included for the relays
    # in which vote=1 and unmeasured=0.
    'vote', 'unmeasured',
    # When there are not enough eligible (not excluded) relays,
    # under_min_report is 1 and `vote` is 0.
    # Added in #29853.
    'under_min_report',
]
BWLINE_KEYS_V1 = BWLINE_KEYS_V0 + BWLINE_KEYS_V1_1 + BWLINE_KEYS_V1_2 \
               + BWLINE_KEYS_V1_4
# NOTE: tech-debt: assign boolean type to vote and unmeasured,
# when the attributes are defined with a type, as stem does.
BWLINE_INT_KEYS = (
    [
        "bw",
        "rtt",
        "success",
        "error_stream",
        "error_circ",
        "error_misc",
    ]
    + BWLINE_KEYS_V1_2
    + BWLINE_KEYS_V1_4
)


def round_sig_dig(n, digits=PROP276_ROUND_DIG):
    """Round n to 'digits' significant digits in front of the decimal point.
       Results less than or equal to 1 are rounded to 1.
       Returns an integer.

       digits must be greater than 0.
       n must be less than or equal to 2**73, to avoid floating point errors.
       """
    digits = int(digits)
    assert digits >= 1
    if n <= 1:
        return 1
    digits_in_n = int(math.log10(n)) + 1
    round_digits = max(digits_in_n - digits, 0)
    rounded_n = round(n, -round_digits)
    return int(rounded_n)
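
# Illustrative examples, assuming the default PROP276_ROUND_DIG is 3
# significant digits: round_sig_dig(123456) == 123000,
# round_sig_dig(1222) == 1220 and round_sig_dig(0) == 1.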


def kb_round_x_sig_dig(bw_bs, digits=PROP276_ROUND_DIG):
    """Convert bw_bs from bytes to kilobytes, and round the result to
       'digits' significant digits.
       Results less than or equal to 1 are rounded up to 1.
       Returns an integer.

       digits must be greater than 0.
       n must be less than or equal to 2**82, to avoid floating point errors.
       """
    # avoid double-rounding by using floating-point
    bw_kb = bw_bs / 1000.0
    return round_sig_dig(bw_kb, digits=digits)
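
# Illustrative example, again assuming 3 significant digits: a measurement of
# 5432100 bytes/s is converted to 5432.1 KB/s and rounded, so
# kb_round_x_sig_dig(5432100) == 5430.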


def num_results_of_type(results, type_str):
    return len([r for r in results if r.type == type_str])


# Better way to use enums?
def result_type_to_key(type_str):
    return type_str.replace('-', '_')
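
# For example, result_type_to_key('error-circ') returns 'error_circ', the
# form used for the KeyValues in the Bandwidth Lines.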


class V3BWHeader(object):
    """
    Create a bandwidth measurements (V3bw) header
    following bandwidth measurements document spec version 1.X.X.

    :param str timestamp: timestamp in Unix Epoch seconds of the most recent
        generator result.
    :param str version: the spec version
    :param str software: the name of the software that generates this
    :param str software_version: the version of the software
    :param dict kwargs: extra headers. Currently supported:

        - earliest_bandwidth: str, ISO 8601 timestamp in UTC time zone
          when the first bandwidth was obtained
        - generator_started: str, ISO 8601 timestamp in UTC time zone
          when the generator started
    """
    def __init__(self, timestamp, **kwargs):
        assert isinstance(timestamp, str)
        for v in kwargs.values():
            assert isinstance(v, str)
        self.timestamp = timestamp
        # KeyValues with default value when not given by kwargs
        self.version = kwargs.get('version', SPEC_VERSION)
        self.software = kwargs.get('software', 'sbws')
        self.software_version = kwargs.get('software_version', __version__)
        self.file_created = kwargs.get('file_created', now_isodt_str())
        # latest_bandwidth should not be in kwargs, since it MUST be the
        # same as timestamp
        self.latest_bandwidth = unixts_to_isodt_str(timestamp)
        [setattr(self, k, v) for k, v in kwargs.items()
         if k in HEADER_INIT_KEYS]
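
    # Illustrative usage (values are made up): the timestamp and any extra
    # KeyValues are passed as strings, e.g.
    # V3BWHeader('1523911758', earliest_bandwidth='2018-04-16T14:09:07').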

    def __str__(self):
        if self.version.startswith('1.'):
            return self.strv1
        return self.strv2

    @classmethod
    def from_results(cls, results, scanner_country=None,
                     destinations_countries=None, state_fpath=''):
        kwargs = dict()
        latest_bandwidth = cls.latest_bandwidth_from_results(results)
        earliest_bandwidth = cls.earliest_bandwidth_from_results(results)
        # NOTE: Blocking, reads file
        generator_started = cls.generator_started_from_file(state_fpath)
        recent_consensus_count = cls.consensus_count_from_file(state_fpath)
        timestamp = str(latest_bandwidth)

        # XXX: tech-debt: obtain the other values from the state file using
        # this state variable.
        # Store the state as an attribute of the object?
        state = State(state_fpath)
        tor_version = state.get('tor_version', None)
        if tor_version:
            kwargs['tor_version'] = tor_version

        kwargs['latest_bandwidth'] = unixts_to_isodt_str(latest_bandwidth)
        kwargs['earliest_bandwidth'] = unixts_to_isodt_str(earliest_bandwidth)
        if generator_started is not None:
            kwargs['generator_started'] = generator_started
        # To be compatible with older bandwidth files, do not require it.
        if scanner_country is not None:
            kwargs['scanner_country'] = scanner_country
        if destinations_countries is not None:
            kwargs['destinations_countries'] = destinations_countries
        if recent_consensus_count is not None:
            kwargs['recent_consensus_count'] = recent_consensus_count

        recent_measurement_attempt_count = \
            cls.recent_measurement_attempt_count_from_file(state_fpath)
        if recent_measurement_attempt_count is not None:
            kwargs['recent_measurement_attempt_count'] = \
                str(recent_measurement_attempt_count)

        # If it is a failure that is not a ResultError, then
        # failures = attempts - all measurements.
        # This works only when old measurement files already had a
        # measurements count.
        # If this is None or 0, the failures can't be calculated
        if recent_measurement_attempt_count:
            all_measurements = 0
            for result_list in results.values():
                all_measurements += len(result_list)
            measurement_failures = (recent_measurement_attempt_count
                                    - all_measurements)
            kwargs['recent_measurement_failure_count'] = \
                str(measurement_failures)

        priority_lists = cls.recent_priority_list_count_from_file(state_fpath)
        if priority_lists is not None:
            kwargs['recent_priority_list_count'] = str(priority_lists)

        priority_relays = \
            cls.recent_priority_relay_count_from_file(state_fpath)
        if priority_relays is not None:
            kwargs['recent_priority_relay_count'] = str(priority_relays)

        h = cls(timestamp, **kwargs)
        return h

    @classmethod
    def from_lines_v1(cls, lines):
        """
        :param list lines: list of lines to parse
        :returns: tuple of V3BWHeader object and non-header lines
        """
        assert isinstance(lines, list)
        try:
            index_terminator = lines.index(TERMINATOR)
        except ValueError:
            # is not a bw file or is v100
            log.warn('Terminator is not in lines')
            return None
        ts = lines[0]
        kwargs = dict([l.split(KEYVALUE_SEP_V1)
                       for l in lines[:index_terminator]
                       if l.split(KEYVALUE_SEP_V1)[0] in HEADER_ALL_KEYS])
        h = cls(ts, **kwargs)
        # last line is new line
        return h, lines[index_terminator + 1:-1]

    @classmethod
    def from_text_v1(self, text):
        """
        :param str text: text to parse
        :returns: tuple of V3BWHeader object and non-header lines
        """
        assert isinstance(text, str)
        return self.from_lines_v1(text.split(LINE_SEP))

    @classmethod
    def from_lines_v100(cls, lines):
        """
        :param list lines: list of lines to parse
        :returns: tuple of V3BWHeader object and non-header lines
        """
        assert isinstance(lines, list)
        h = cls(lines[0])
        # last line is new line
        return h, lines[1:-1]

    @staticmethod
    def generator_started_from_file(state_fpath):
        '''
        ISO formatted timestamp for the time when the scanner process most
        recently started.
        '''
        state = State(state_fpath)
        if 'scanner_started' in state:
            # From v1.1.0-dev `state` is capable of converting strs to datetime
            return dt_obj_to_isodt_str(state['scanner_started'])
        else:
            return None

    @staticmethod
    def consensus_count_from_file(state_fpath):
        state = State(state_fpath)
        count = state.count("recent_consensus")
        if count:
            return str(count)
        return None

    # NOTE: in future refactor store state in the class
    @staticmethod
    def recent_measurement_attempt_count_from_file(state_fpath):
        """
        Returns the number of times any relay was queued to be measured
        in the recent (by default 5) days from the state file.
        """
        state = State(state_fpath)
        return state.count('recent_measurement_attempt')

    @staticmethod
    def recent_priority_list_count_from_file(state_fpath):
        """
        Returns the number of times
        :meth:`~sbws.lib.relayprioritizer.RelayPrioritizer.best_priority`
        was run
        in the recent (by default 5) days from the state file.
        """
        state = State(state_fpath)
        return state.count('recent_priority_list')

    @staticmethod
    def recent_priority_relay_count_from_file(state_fpath):
        """
        Returns the number of times any relay was "prioritized" to be measured
        in the recent (by default 5) days from the state file.
        """
        state = State(state_fpath)
        return state.count('recent_priority_relay')

    @staticmethod
    def latest_bandwidth_from_results(results):
        return round(max([r.time for fp in results for r in results[fp]]))

    @staticmethod
    def earliest_bandwidth_from_results(results):
        return round(min([r.time for fp in results for r in results[fp]]))

    @property
    def keyvalue_unordered_tuple_ls(self):
        """Return list of KeyValue tuples that do not have specific order."""
        # sort the list to generate deterministic headers
        keyvalue_tuple_ls = sorted([(k, v) for k, v in self.__dict__.items()
                                    if k in HEADER_UNORDERED_KEYS])
        return keyvalue_tuple_ls

    @property
    def keyvalue_tuple_ls(self):
        """Return list of all KeyValue tuples"""
        return [('version', self.version)] + self.keyvalue_unordered_tuple_ls

    @property
    def keyvalue_v1str_ls(self):
        """Return KeyValue list of strings following spec v1.X.X."""
        keyvalues = [self.timestamp] + [KEYVALUE_SEP_V1.join([k, v])
                                        for k, v in self.keyvalue_tuple_ls]
        return keyvalues

    @property
    def strv1(self):
        """Return header string following spec v1.X.X."""
        header_str = LINE_SEP.join(self.keyvalue_v1str_ls) + LINE_SEP + \
            LINE_TERMINATOR
        return header_str

    @property
    def keyvalue_v2_ls(self):
        """Return KeyValue list of strings following spec v2.X.X."""
        keyvalue = [self.timestamp] + [KEYVALUE_SEP_V2.join([k, v])
                                       for k, v in self.keyvalue_tuple_ls]
        return keyvalue

    @property
    def strv2(self):
        """Return header string following spec v2.X.X."""
        header_str = LINE_SEP.join(self.keyvalue_v2_ls) + LINE_SEP + \
            LINE_TERMINATOR
        return header_str

    @property
    def num_lines(self):
        return len(self.__str__().split(LINE_SEP))

    def add_stats(self, **kwargs):
        # Using kwargs because attributes might change.
        [setattr(self, k, str(v)) for k, v in kwargs.items()
         if k in HEADER_KEYS_V1_2]

    def add_time_report_half_network(self):
        """Add to the header the time it took to measure half of the network.

        It is not the time the scanner actually takes on measuring all the
        network, but the ``number_eligible_relays`` that are reported in the
        bandwidth file and directory authorities will vote on.

        This is calculated for half of the network, so that failed or not
        reported relays do not affect too much.

        For instance, if there are 6500 relays in the network, half of the
        network would be 3250. And if there were 4000 eligible relays
        measured in an interval of 3 days, the time to measure half of the
        network would be 3 days * 3250 / 4000.

        Since the elapsed time is calculated from the earliest and the
        latest measurement and a relay might have more than 2 measurements,
        this would give an estimate on how long it would take to measure
        the network including all the valid measurements.

        Also log an estimate of how long it would take with the current
        number of relays included in the bandwidth file.
        """
        # NOTE: in future refactor do not convert attributes to str until
        # writing to the file, so that they do not need to be converted back
        # to do some calculations.
        elapsed_time = (
            (isostr_to_dt_obj(self.latest_bandwidth)
             - isostr_to_dt_obj(self.earliest_bandwidth))
            .total_seconds())

        # These attributes were added later and some tests that
        # do not initialize them would fail.
        eligible_relays = int(getattr(self, 'number_eligible_relays', 0))
        consensus_relays = int(getattr(self, 'number_consensus_relays', 0))
        if not(eligible_relays and consensus_relays):
            return

        half_network = consensus_relays / 2
        # Calculate the time it would take to measure half of the network
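        # With the numbers from the docstring example, that is about
        # 3 days * 3250 / 4000, or roughly 2.44 days, stored below in seconds.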
        if eligible_relays >= half_network:
            time_half_network = round(
                elapsed_time * half_network / eligible_relays
            )
            self.time_to_report_half_network = str(time_half_network)

        # In any case, log an estimate of the time to measure the whole network.
        estimated_time = round(
            elapsed_time * consensus_relays / eligible_relays
        )
        log.info("Estimated time to measure the network: %s hours.",
                 round(estimated_time / 60 / 60))

    def add_relays_excluded_counters(self, exclusion_dict):
        """
        Add the monitoring KeyValues to the header about the number of
        relays not included because they were not ``eligible``.
        """
        log.debug("Adding relays excluded counters.")
        for k, v in exclusion_dict.items():
            setattr(self, k, str(v))


class V3BWLine(object):
    """
    Create a Bandwidth List line following the spec version 1.X.X.

    :param str node_id: the relay fingerprint
    :param int bw: the bandwidth value that directory authorities will include
        in their votes.
    :param dict kwargs: extra headers.

    .. note:: tech-debt: move node_id and bw to kwargs and just ensure that
       the required values are in **kwargs
    """
    def __init__(self, node_id, bw, **kwargs):
        assert isinstance(node_id, str)
        assert node_id.startswith('$')
        self.node_id = node_id
        self.bw = bw
        # For now, we do not want to add ``bw_filt`` to the bandwidth file,
        # therefore it is set here but not added to ``BWLINE_KEYS_V1``.
        [setattr(self, k, v) for k, v in kwargs.items()
         if k in BWLINE_KEYS_V1 + ["bw_filt"]]

    def __str__(self):
        return self.bw_strv1

    @classmethod
    def from_results(cls, results, secs_recent=None, secs_away=None,
                     min_num=0, router_statuses_d=None):
        """Convert sbws results to relays' Bandwidth Lines

        ``bs`` stands for Bytes/seconds
        ``bw_mean`` means the bw is obtained from the mean of all the
        downloads' bandwidth.
        Each download's bandwidth is calculated as the amount of data received
        divided by the time it took to receive it:
        bw = data (Bytes) / time (seconds)
        """
        # log.debug("Len success_results %s", len(success_results))
        node_id = '$' + results[0].fingerprint
        kwargs = dict()
        kwargs['nick'] = results[0].nickname
        if getattr(results[0], 'master_key_ed25519'):
            kwargs['master_key_ed25519'] = results[0].master_key_ed25519
        kwargs['time'] = cls.last_time_from_results(results)
        kwargs.update(cls.result_types_from_results(results))

        # If it does not have the attribute, return a list to be able to call
        # len. If it has the attribute but it is None, also return a list.
        kwargs['relay_in_recent_consensus_count'] = str(
            max([
                len(getattr(r, 'relay_in_recent_consensus', []) or [])
                for r in results
            ])
        )

        kwargs['relay_recent_priority_list_count'] = str(
            max([
                len(getattr(r, 'relay_recent_priority_list', []) or [])
                for r in results
            ])
        )

        kwargs['relay_recent_measurement_attempt_count'] = str(
            max([
                len(getattr(r, 'relay_recent_measurement_attempt', []) or [])
                for r in results
            ])
        )

        success_results = [r for r in results if isinstance(r, ResultSuccess)]

        # NOTE: The following 4 conditions exclude relays from the bandwidth
        # file when the measurements do not satisfy some rules, which makes
        # the relay non-`eligible`.
        # In BWLINE_KEYS_V1_4 it is explained what they mean.
        # In HEADER_RECENT_MEASUREMENTS_EXCLUDED_KEYS it is also
        # explained what the returned strings mean.
        # These rules were introduced in #28061 and #27338.
        # In #28565 we introduced the KeyValues to know why they're excluded.
        # In #28563 we report these relays, but make Tor ignore them.
        # This might confirm #28042.

        # If the relay is non-`eligible`:
        # Create a bandwidth line with the relay, but set ``vote=0`` so that
        # Tor versions with patch #29806 do not vote on the relay.
        # Set ``bw=1`` so that Tor versions without the patch
        # will give the relay low bandwidth.
        # Include ``unmeasured=1`` in case Tor would vote on unmeasured relays
        # in future versions.
        # And return, because there are no bandwidth values.
        # NOTE: the bandwidth values could still be obtained if:
        # 1. ``ResultError`` stored them, or
        # 2. ``results_recent = results`` were assigned when there is an
        # exclusion reason.
        # This could be done in a better way as part of refactor #28684.

        kwargs['vote'] = 0
        kwargs['unmeasured'] = 1

        exclusion_reason = None

        number_excluded_error = len(results) - len(success_results)
        if number_excluded_error > 0:
            # then the number of error results is the number of results
            kwargs['relay_recent_measurements_excluded_error_count'] = \
                number_excluded_error
        if not success_results:
            exclusion_reason = 'recent_measurements_excluded_error_count'
            return (cls(node_id, 1, **kwargs), exclusion_reason)

        results_away = \
            cls.results_away_each_other(success_results, secs_away)
        number_excluded_near = len(success_results) - len(results_away)
        if number_excluded_near > 0:
            kwargs['relay_recent_measurements_excluded_near_count'] = \
                number_excluded_near
        if not results_away:
            exclusion_reason = \
                'recent_measurements_excluded_near_count'
            return (cls(node_id, 1, **kwargs), exclusion_reason)
        # log.debug("Results away from each other: %s",
        #           [unixts_to_isodt_str(r.time) for r in results_away])

        results_recent = cls.results_recent_than(results_away, secs_recent)
        number_excluded_old = len(results_away) - len(results_recent)
        if number_excluded_old > 0:
            kwargs['relay_recent_measurements_excluded_old_count'] = \
                number_excluded_old
        if not results_recent:
            exclusion_reason = \
                'recent_measurements_excluded_old_count'
            return (cls(node_id, 1, **kwargs), exclusion_reason)

        if len(results_recent) < min_num:
            kwargs['relay_recent_measurements_excluded_few_count'] = \
                len(results_recent)
            # log.debug('The number of results is less than %s', min_num)
            exclusion_reason = \
                'recent_measurements_excluded_few_count'
            return (cls(node_id, 1, **kwargs), exclusion_reason)

        # Use the last consensus if available, since the results' consensus
        # values come from the moment the measurement was made.
        if router_statuses_d and node_id in router_statuses_d:
            consensus_bandwidth = \
                router_statuses_d[node_id].bandwidth * 1000
            consensus_bandwidth_is_unmeasured = \
                router_statuses_d[node_id].is_unmeasured
        else:
            consensus_bandwidth = \
                cls.consensus_bandwidth_from_results(results_recent)
            consensus_bandwidth_is_unmeasured = \
                cls.consensus_bandwidth_is_unmeasured_from_results(
                    results_recent)
        # If there is no last observed bandwidth, there won't be mean either.
        desc_bw_obs_last = \
            cls.desc_bw_obs_last_from_results(results_recent)

        # For any line not excluded, do not include vote and unmeasured
        # KeyValues
        del kwargs['vote']
        del kwargs['unmeasured']

        rtt = cls.rtt_from_results(results_recent)
        if rtt:
            kwargs['rtt'] = rtt
        bw = cls.bw_median_from_results(results_recent)
        # XXX: all the class functions could use the bw_measurements instead of
        # obtaining them each time or use a class Measurements.
        bw_measurements = scaling.bw_measurements_from_results(results_recent)
        kwargs['bw_mean'] = cls.bw_mean_from_results(results_recent)
        kwargs['bw_filt'] = scaling.bw_filt(bw_measurements)
        kwargs['bw_median'] = cls.bw_median_from_results(
            results_recent)
        kwargs['desc_bw_avg'] = \
            cls.desc_bw_avg_from_results(results_recent)
        kwargs['desc_bw_bur'] = \
            cls.desc_bw_bur_from_results(results_recent)
        kwargs['consensus_bandwidth'] = consensus_bandwidth
        kwargs['consensus_bandwidth_is_unmeasured'] = \
            consensus_bandwidth_is_unmeasured
        kwargs['desc_bw_obs_last'] = desc_bw_obs_last
        kwargs['desc_bw_obs_mean'] = \
            cls.desc_bw_obs_mean_from_results(results_recent)

        bwl = cls(node_id, bw, **kwargs)
        return bwl, None

    @classmethod
    def from_data(cls, data, fingerprint):
        assert fingerprint in data
        return cls.from_results(data[fingerprint])

    @classmethod
    def from_bw_line_v1(cls, line):
        assert isinstance(line, str)
        kwargs = dict([kv.split(KEYVALUE_SEP_V1)
                       for kv in line.split(BWLINE_KEYVALUES_SEP_V1)
                       if kv.split(KEYVALUE_SEP_V1)[0] in BWLINE_KEYS_V1])
        for k, v in kwargs.items():
            if k in BWLINE_INT_KEYS:
                kwargs[k] = int(v)
        node_id = kwargs['node_id']
        bw = kwargs['bw']
        del kwargs['node_id']
        del kwargs['bw']
        bw_line = cls(node_id, bw, **kwargs)
        return bw_line

    @staticmethod
    def results_away_each_other(results, secs_away=None):
        # log.debug("Checking whether results are away from each other in %s "
        #           "secs.", secs_away)
        if secs_away is None or len(results) < 2:
            return results
        for a, b in combinations(results, 2):
            if abs(a.time - b.time) > secs_away:
                return results
        # log.debug("Results are NOT away from each other in at least %ss: %s",
        #           secs_away, [unixts_to_isodt_str(r.time) for r in results])
        return []
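
    # Illustrative example: with secs_away=86400 (1 day), two results taken
    # 90000 seconds apart satisfy the condition and all the results are
    # returned; if every pair of results is closer than 86400 seconds, an
    # empty list is returned and the relay is counted as excluded "near".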

    @staticmethod
    def results_recent_than(results, secs_recent=None):
        if secs_recent is None:
            return results
        results_recent = list(filter(
                            lambda x: (now_unixts() - x.time) < secs_recent,
                            results))
        # if not results_recent:
        #     log.debug("Results are NOT more recent than %ss: %s",
        #               secs_recent,
        #               [unixts_to_isodt_str(r.time) for r in results])
        return results_recent

    @staticmethod
    def bw_median_from_results(results):
        return max(round(median([dl['amount'] / dl['duration']
                                 for r in results for dl in r.downloads])), 1)

    @staticmethod
    def bw_mean_from_results(results):
        return max(round(mean([dl['amount'] / dl['duration']
                               for r in results for dl in r.downloads])), 1)
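
    # Illustrative example: downloads of 5 MB in 2 s and 4 MB in 2 s give
    # 2500000 and 2000000 bytes/s, so both bw_median_from_results and
    # bw_mean_from_results return 2250000 here (the median of two values
    # equals their mean); values below 1 are clamped to 1.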

    @staticmethod
    def last_time_from_results(results):
        return unixts_to_isodt_str(round(max([r.time for r in results])))

    @staticmethod
    def rtt_from_results(results):
        # rtts are in seconds; convert them to milliseconds
        rtts = [(round(rtt * 1000)) for r in results for rtt in r.rtts]
        rtt = round(median(rtts)) if rtts else None
        return rtt

    @staticmethod
    def result_types_from_results(results):
        rt_dict = dict([(result_type_to_key(rt.value),
                         num_results_of_type(results, rt.value))
                        for rt in _ResultType])
        return rt_dict

    @staticmethod
    def desc_bw_avg_from_results(results):
        """Obtain the last descriptor bandwidth average from the results."""
        for r in reversed(results):
            if r.relay_average_bandwidth is not None:
                return r.relay_average_bandwidth
        log.warning("Descriptor average bandwidth is None.")
        return None

    @staticmethod
    def desc_bw_bur_from_results(results):
        """Obtain the last descriptor bandwidth burst from the results."""
        for r in reversed(results):
            if r.relay_burst_bandwidth is not None:
                return r.relay_burst_bandwidth
        log.warning("Descriptor burst bandwidth is None.")
        return None

    @staticmethod
    def consensus_bandwidth_from_results(results):
        """Obtain the last consensus bandwidth from the results."""
        for r in reversed(results):
            if r.consensus_bandwidth is not None:
                return r.consensus_bandwidth
        log.warning("Consensus bandwidth is None.")
        return None

    @staticmethod
    def consensus_bandwidth_is_unmeasured_from_results(results):
        """Obtain the last consensus unmeasured flag from the results."""
        for r in reversed(results):
            if r.consensus_bandwidth_is_unmeasured is not None:
                return r.consensus_bandwidth_is_unmeasured
        log.warning("Consensus bandwidth is unmeasured is None.")
        return None

    @staticmethod
    def desc_bw_obs_mean_from_results(results):
        desc_bw_obs_ls = []
        for r in results:
            if r.relay_observed_bandwidth is not None:
                desc_bw_obs_ls.append(r.relay_observed_bandwidth)
        if desc_bw_obs_ls:
            return round(mean(desc_bw_obs_ls))
        log.warning("Descriptor observed bandwidth is None.")
        return None

    @staticmethod
    def desc_bw_obs_last_from_results(results):
        # the last is at the end of the list
        for r in reversed(results):
            if r.relay_observed_bandwidth is not None:
                return r.relay_observed_bandwidth
        log.warning("Descriptor observed bandwidth is None.")
        return None

    @property
    def bw_keyvalue_tuple_ls(self):
        """Return list of KeyValue Bandwidth Line tuples."""
        # sort the list to generate deterministic lines
        keyvalue_tuple_ls = sorted([(k, v) for k, v in self.__dict__.items()
                                    if k in BWLINE_KEYS_V1])
        return keyvalue_tuple_ls

    @property
    def bw_keyvalue_v1str_ls(self):
        """Return list of KeyValue Bandwidth Line strings following
        spec v1.X.X.
        """
        bw_keyvalue_str = [KEYVALUE_SEP_V1.join([k, str(v)])
                           for k, v in self.bw_keyvalue_tuple_ls]
        return bw_keyvalue_str

    @property
    def bw_strv1(self):
        """Return Bandwidth Line string following spec v1.X.X."""
        bw_line_str = BWLINE_KEYVALUES_SEP_V1.join(
                        self.bw_keyvalue_v1str_ls) + LINE_SEP
        if len(bw_line_str) > BW_LINE_SIZE:
            # if this is the case, probably there are too many KeyValues,
            # or the limit needs to be changed in Tor
            log.warn("The bandwidth line %s is longer than %s",
                     len(bw_line_str), BW_LINE_SIZE)
        return bw_line_str


class V3BWFile(object):
    """
    Create a Bandwidth List file following spec version 1.X.X

    :param V3BWHeader v3bwheader: header
    :param list v3bwlines: V3BWLines
    """
    def __init__(self, v3bwheader, v3bwlines):
        self.header = v3bwheader
        self.bw_lines = v3bwlines

    def __str__(self):
        return str(self.header) + ''.join([str(bw_line) or ''
                                           for bw_line in self.bw_lines])

juga  's avatar
983 984
    def from_results(cls, results, scanner_country=None,
                     destinations_countries=None, state_fpath='',
985
                     scale_constant=SBWS_SCALE_CONSTANT,
986 987
                     scaling_method=TORFLOW_SCALING,
                     torflow_obs=TORFLOW_OBS_LAST,
juga  's avatar
989
                     round_digs=PROP276_ROUND_DIG,
990
                     secs_recent=None, secs_away=None, min_num=0,
991 992
                     consensus_path=None, max_bw_diff_perc=MAX_BW_DIFF_PERC,
                     reverse=False):
993 994 995 996 997 998
        """Create V3BWFile class from sbws Results.

        :param dict results: see below
        :param str state_fpath: path to the state file
        :param int scaling_method:
            Scaling method to obtain the bandwidth
999
            Possible values: {None, SBWS_SCALING, TORFLOW_SCALING} = {0, 1, 2}
1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010
        :param int scale_constant: sbws scaling constant
        :param int torflow_obs: method to choose descriptor observed bandwidth
        :param bool reverse: whether to sort the bw lines descending or not

        Results are in the form::

            {'relay_fp1': [Result1, Result2, ...],
             'relay_fp2': [Result1, Result2, ...]}

        """
        log.info('Processing results to generate a bandwidth list file.')
        header = V3BWHeader.from_results(results, scanner_country,
                                         destinations_countries, state_fpath)
        bw_lines_raw = []
        bw_lines_excluded = []
        router_statuses_d = cls.read_router_statuses(consensus_path)
        # XXX: Use router_statuses_d to not parse again the file.
        number_consensus_relays = \
            cls.read_number_consensus_relays(consensus_path)
        state = State(state_fpath)

        # Create a dictionary with the number of relays excluded by any of
        # the filtering rules that make relays non-`eligible`.
        # NOTE: In HEADER_RECENT_MEASUREMENTS_EXCLUDED_KEYS it is
        # explained what the KeyValues are.
        # See also the comments in `from_results`.
        exclusion_dict = dict(
            [(k, 0) for k in HEADER_RECENT_MEASUREMENTS_EXCLUDED_KEYS]
            )
        for fp, values in results.items():
            # log.debug("Relay fp %s", fp)
            line, reason = V3BWLine.from_results(values, secs_recent,
                                                 secs_away, min_num,
                                                 router_statuses_d)
            # If there is no reason it means the line will not be excluded.
            if not reason:
                bw_lines_raw.append(line)
            else:
                # Store the excluded lines to include them in the bandwidth
                # file.
                bw_lines_excluded.append(line)
                exclusion_dict[reason] = exclusion_dict.get(reason, 0) + 1
        # Add the headers with the number of excluded relays by reason
        header.add_relays_excluded_counters(exclusion_dict)

        if not bw_lines_raw:
            # It could be possible to scale the lines that were successful
            # even if excluded, but is not done here.
            log.info("After applying restrictions to the raw results, "
                     "there are none left. Scaling can not be applied.")
            # Update the header and log the progress.
            cls.update_progress(
                cls, 0, header, number_consensus_relays, state)
            # Set the lines that would be excluded anyway (`vote=0`) with
            # `under_min_report=1`
            cls.set_under_min_report(bw_lines_excluded)
            # Create the bandwidth file with the lines that would be excluded.
            return cls(header, bw_lines_excluded)
        if scaling_method == SBWS_SCALING:
            bw_lines = cls.bw_sbws_scale(bw_lines_raw, scale_constant)
            cls.warn_if_not_accurate_enough(bw_lines, scale_constant)
            # log.debug(bw_lines[-1])
        elif scaling_method == TORFLOW_SCALING:
            bw_lines = cls.bw_torflow_scale(bw_lines_raw, torflow_obs,
                                            torflow_cap, round_digs)
            # log.debug(bw_lines[-1])
            # Update the header and log the progress.
            min_perc = cls.update_progress(
                cls, len(bw_lines), header, number_consensus_relays, state
                )
            # If after scaling the number of lines is less than the percentage
            # of lines to report, set them with `under_min_report`.
            if not min_perc:
                cls.set_under_min_report(bw_lines)
        else:
            bw_lines = cls.bw_kb(bw_lines_raw)
            # log.debug(bw_lines[-1])
        # Not using the result for now, just warning
        cls.is_max_bw_diff_perc_reached(bw_lines, max_bw_diff_perc)
        header.add_time_report_half_network()
        f = cls(header, bw_lines + bw_lines_excluded)