/* Copyright (c) 2001 Matej Pfajfar.
 * Copyright (c) 2001-2004, Roger Dingledine.
 * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
 * Copyright (c) 2007-2016, The Tor Project, Inc. */
/* See LICENSE for licensing information */

/**
 * \file buffers.c
 * \brief Implements a generic buffer interface.
 *
 * A buf_t is a (fairly) opaque byte-oriented FIFO that can read to or flush
 * from memory, sockets, file descriptors, TLS connections, or another buf_t.
 * Buffers are implemented as linked lists of memory chunks.
 *
 * All socket-backed and TLS-based connection_t objects have a pair of
 * buffers: one for incoming data, and one for outgoing data.  These are fed
 * and drained by functions in connection.c, triggered by events that are
 * monitored in main.c.
 *
 * This module has basic support for reading and writing on buf_t objects. It
 * also contains specialized functions for handling particular protocols
 * on a buf_t backend, including SOCKS (used in connection_edge.c), Tor cells
 * (used in connection_or.c and channeltls.c), HTTP (used in directory.c), and
 * line-oriented communication (used in control.c).
 **/
#define BUFFERS_PRIVATE

#include "or.h"
#include "addressmap.h"
#include "buffers.h"
#include "config.h"
#include "connection_edge.h"
#include "connection_or.h"
#include "control.h"
#include "reasons.h"
#include "ext_orport.h"
#include "util.h"
#include "torlog.h"

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

//#define PARANOIA

#ifdef PARANOIA
/** Helper: If PARANOIA is defined, assert that the buffer in local variable
 * <b>buf</b> is well-formed. */
#define check() STMT_BEGIN assert_buf_ok(buf); STMT_END
#else
#define check() STMT_NIL
#endif

/* Implementation notes:
 *
 * After flirting with memmove, and dallying with ring-buffers, we're finally
 * getting up to speed with the 1970s and implementing buffers as a linked
 * list of small chunks.  Each buffer has such a list; data is removed from
 * the head of the list, and added at the tail.  The list is singly linked,
 * and the buffer keeps a pointer to the head and the tail.
 *
 * Every chunk, except the tail, contains at least one byte of data.  Data in
 * each chunk is contiguous.
 *
 * When you need to treat the first N characters on a buffer as a contiguous
 * string, use the buf_pullup function to make them so.  Don't do this more
 * than necessary.
 *
 * The major free Unix kernels have handled buffers like this since, like,
 * forever.
 */
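
/* A minimal usage sketch of the generic interface (illustration only, not
 * part of this module):
 *
 *   buf_t *buf = buf_new();
 *   write_to_buf("hello", 5, buf);      (appends to the tail chunk)
 *   char out[5];
 *   fetch_from_buf(out, 5, buf);        (copies 5 bytes, drains the head)
 *   buf_free(buf);
 */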

static void socks_request_set_socks5_error(socks_request_t *req,
                              socks5_reply_status_t reason);

static int parse_socks(const char *data, size_t datalen, socks_request_t *req,
                       int log_sockstype, int safe_socks, ssize_t *drain_out,
                       size_t *want_length_out);
static int parse_socks_client(const uint8_t *data, size_t datalen,
                              int state, char **reason,
                              ssize_t *drain_out);

/* Chunk manipulation functions */

#define CHUNK_HEADER_LEN STRUCT_OFFSET(chunk_t, mem[0])

/** Return the number of bytes needed to allocate a chunk to hold
 * <b>memlen</b> bytes. */
#define CHUNK_ALLOC_SIZE(memlen) (CHUNK_HEADER_LEN + (memlen))
/** Return the number of usable bytes in a chunk allocated with
 * malloc(<b>memlen</b>). */
#define CHUNK_SIZE_WITH_ALLOC(memlen) ((memlen) - CHUNK_HEADER_LEN)
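
/* Note on chunk memory layout: each chunk is one malloc()ed region holding a
 * chunk_t header followed by chunk->memlen bytes of storage in chunk->mem[].
 * chunk->data points at the first stored byte inside mem, and chunk->datalen
 * says how many bytes are stored there; the macros above convert between the
 * malloc()ed size and the usable payload size. */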

/** Return the next character in <b>chunk</b> onto which data can be appended.
 * If the chunk is full, this might be off the end of chunk->mem. */
static inline char *
CHUNK_WRITE_PTR(chunk_t *chunk)
{
  return chunk->data + chunk->datalen;
}

/** Return the number of bytes that can be written onto <b>chunk</b> without
 * running out of space. */
static inline size_t
CHUNK_REMAINING_CAPACITY(const chunk_t *chunk)
{
  return (chunk->mem + chunk->memlen) - (chunk->data + chunk->datalen);
}

/** Move all bytes stored in <b>chunk</b> to the front of <b>chunk</b>->mem,
 * to free up space at the end. */
static inline void
chunk_repack(chunk_t *chunk)
{
  if (chunk->datalen && chunk->data != &chunk->mem[0]) {
    memmove(chunk->mem, chunk->data, chunk->datalen);
  }
  chunk->data = &chunk->mem[0];
}

/** Keep track of total size of allocated chunks for consistency asserts */
static size_t total_bytes_allocated_in_chunks = 0;
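/** Helper: release the storage held by <b>chunk</b> (if it is non-NULL), and
 * deduct its allocation from total_bytes_allocated_in_chunks. */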
static void
buf_chunk_free_unchecked(chunk_t *chunk)
{
  if (!chunk)
    return;
#ifdef DEBUG_CHUNK_ALLOC
  tor_assert(CHUNK_ALLOC_SIZE(chunk->memlen) == chunk->DBG_alloc);
#endif
  tor_assert(total_bytes_allocated_in_chunks >=
             CHUNK_ALLOC_SIZE(chunk->memlen));
  total_bytes_allocated_in_chunks -= CHUNK_ALLOC_SIZE(chunk->memlen);
  tor_free(chunk);
}
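/** Helper: allocate a new chunk whose total allocation size (header plus
 * usable space) is <b>alloc</b> bytes, and add that amount to
 * total_bytes_allocated_in_chunks. */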
static inline chunk_t *
chunk_new_with_alloc_size(size_t alloc)
{
  chunk_t *ch;
  ch = tor_malloc(alloc);
  ch->next = NULL;
  ch->datalen = 0;
#ifdef DEBUG_CHUNK_ALLOC
  ch->DBG_alloc = alloc;
#endif
  ch->memlen = CHUNK_SIZE_WITH_ALLOC(alloc);
  total_bytes_allocated_in_chunks += alloc;
  ch->data = &ch->mem[0];
  return ch;
}

/** Expand <b>chunk</b> until it can hold <b>sz</b> bytes, and return a
 * new pointer to <b>chunk</b>.  Old pointers are no longer valid. */
static inline chunk_t *
chunk_grow(chunk_t *chunk, size_t sz)
{
  off_t offset;
  size_t memlen_orig = chunk->memlen;
  tor_assert(sz > chunk->memlen);
  offset = chunk->data - chunk->mem;
  chunk = tor_realloc(chunk, CHUNK_ALLOC_SIZE(sz));
  chunk->memlen = sz;
  chunk->data = chunk->mem + offset;
#ifdef DEBUG_CHUNK_ALLOC
  tor_assert(chunk->DBG_alloc == CHUNK_ALLOC_SIZE(memlen_orig));
  chunk->DBG_alloc = CHUNK_ALLOC_SIZE(sz);
#endif
  total_bytes_allocated_in_chunks +=
    CHUNK_ALLOC_SIZE(sz) - CHUNK_ALLOC_SIZE(memlen_orig);
  return chunk;
}

/** If a read onto the end of a chunk would be smaller than this number, then
 * just start a new chunk. */
#define MIN_READ_LEN 8
/** Every chunk should take up at least this many bytes. */
#define MIN_CHUNK_ALLOC 256
/** No chunk should take up more than this many bytes. */
#define MAX_CHUNK_ALLOC 65536
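
/* These bounds are applied by preferred_chunk_size() below.  As a sketch
 * (assuming CHUNK_HEADER_LEN is only a few dozen bytes): a target of 3000
 * usable bytes is rounded up through 256, 512, ... to a 4096-byte
 * allocation, the smallest power of two whose usable size fits the target. */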

/** Return the allocation size we'd like to use to hold <b>target</b>
 * bytes. */
STATIC size_t
preferred_chunk_size(size_t target)
{
  tor_assert(target <= SIZE_T_CEILING - CHUNK_HEADER_LEN);
  if (CHUNK_ALLOC_SIZE(target) >= MAX_CHUNK_ALLOC)
    return CHUNK_ALLOC_SIZE(target);
  size_t sz = MIN_CHUNK_ALLOC;
  while (CHUNK_SIZE_WITH_ALLOC(sz) < target) {
    sz <<= 1;
  }
  return sz;
}

/** Collapse data from the first N chunks from <b>buf</b> into buf->head,
 * growing it as necessary, until buf->head has the first <b>bytes</b> bytes
 * of data from the buffer, or until buf->head has all the data in <b>buf</b>.
 */
STATIC void
buf_pullup(buf_t *buf, size_t bytes)
{
  chunk_t *dest, *src;
  size_t capacity;
  if (!buf->head)
    return;

  check();
  if (buf->datalen < bytes)
    bytes = buf->datalen;

  capacity = bytes;
  if (buf->head->datalen >= bytes)
    return;

  if (buf->head->memlen >= capacity) {
    /* We don't need to grow the first chunk, but we might need to repack it.*/
    size_t needed = capacity - buf->head->datalen;
    if (CHUNK_REMAINING_CAPACITY(buf->head) < needed)
      chunk_repack(buf->head);
    tor_assert(CHUNK_REMAINING_CAPACITY(buf->head) >= needed);
  } else {
    chunk_t *newhead;
    size_t newsize;
    /* We need to grow the chunk. */
    chunk_repack(buf->head);
    newsize = CHUNK_SIZE_WITH_ALLOC(preferred_chunk_size(capacity));
    newhead = chunk_grow(buf->head, newsize);
    tor_assert(newhead->memlen >= capacity);
    if (newhead != buf->head) {
      if (buf->tail == buf->head)
        buf->tail = newhead;
      buf->head = newhead;
    }
  }

  dest = buf->head;
  while (dest->datalen < bytes) {
    size_t n = bytes - dest->datalen;
    src = dest->next;
    tor_assert(src);
    if (n >= src->datalen) {
      memcpy(CHUNK_WRITE_PTR(dest), src->data, src->datalen);
      dest->datalen += src->datalen;
      dest->next = src->next;
      if (buf->tail == src)
        buf->tail = dest;
      buf_chunk_free_unchecked(src);
    } else {
      memcpy(CHUNK_WRITE_PTR(dest), src->data, n);
      dest->datalen += n;
      src->data += n;
      src->datalen -= n;
      tor_assert(dest->datalen == bytes);
    }
  }

  check();
}

#ifdef TOR_UNIT_TESTS
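/** Testing helper: expose the data pointer and length of the first chunk of
 * <b>buf</b> via <b>cp</b> and <b>sz</b>.  For an empty or NULL buffer, set
 * *<b>cp</b> to NULL and *<b>sz</b> to 0. */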
void
buf_get_first_chunk_data(const buf_t *buf, const char **cp, size_t *sz)
{
  if (!buf || !buf->head) {
    *cp = NULL;
    *sz = 0;
  } else {
    *cp = buf->head->data;
    *sz = buf->head->datalen;
  }
}
#endif

/** Remove the first <b>n</b> bytes from buf. */
static inline void
buf_remove_from_front(buf_t *buf, size_t n)
{
  tor_assert(buf->datalen >= n);
  while (n) {
    tor_assert(buf->head);
    if (buf->head->datalen > n) {
      buf->head->datalen -= n;
      buf->head->data += n;
      buf->datalen -= n;
      return;
    } else {
      chunk_t *victim = buf->head;
      n -= victim->datalen;
      buf->datalen -= victim->datalen;
      buf->head = victim->next;
      if (buf->tail == victim)
        buf->tail = NULL;
      buf_chunk_free_unchecked(victim);
    }
  }
  check();
}

/** Create and return a new buf with default chunk capacity <b>size</b>.
 */
buf_t *
buf_new_with_capacity(size_t size)
{
  buf_t *b = buf_new();
  b->default_chunk_size = preferred_chunk_size(size);
  return b;
}

/** Allocate and return a new buffer with default capacity. */
buf_t *
buf_new(void)
{
  buf_t *buf = tor_malloc_zero(sizeof(buf_t));
  buf->magic = BUFFER_MAGIC;
  buf->default_chunk_size = 4096;
  return buf;
}

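/** Return the default chunk size that <b>buf</b> uses when it needs to
 * grow. */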
size_t
buf_get_default_chunk_size(const buf_t *buf)
{
  return buf->default_chunk_size;
}

/** Remove all data from <b>buf</b>. */
void
buf_clear(buf_t *buf)
{
  chunk_t *chunk, *next;
  buf->datalen = 0;
  for (chunk = buf->head; chunk; chunk = next) {
    next = chunk->next;
    buf_chunk_free_unchecked(chunk);
  }
  buf->head = buf->tail = NULL;
}

/** Return the number of bytes stored in <b>buf</b> */
MOCK_IMPL(size_t,
buf_datalen, (const buf_t *buf))
{
  return buf->datalen;
}

/** Return the total length of all chunks used in <b>buf</b>. */
size_t
buf_allocation(const buf_t *buf)
{
  size_t total = 0;
  const chunk_t *chunk;
  for (chunk = buf->head; chunk; chunk = chunk->next) {
    total += CHUNK_ALLOC_SIZE(chunk->memlen);
  }
  return total;
}

/** Return the number of bytes that can be added to <b>buf</b> without
 * performing any additional allocation. */
size_t
buf_slack(const buf_t *buf)
{
  if (!buf->tail)
    return 0;
  else
    return CHUNK_REMAINING_CAPACITY(buf->tail);
}

/** Release storage held by <b>buf</b>. */
void
buf_free(buf_t *buf)
{
  if (!buf)
    return;

  buf_clear(buf);
  buf->magic = 0xdeadbeef;
  tor_free(buf);
}

/** Return a new copy of <b>in_chunk</b> */
static chunk_t *
chunk_copy(const chunk_t *in_chunk)
{
  chunk_t *newch = tor_memdup(in_chunk, CHUNK_ALLOC_SIZE(in_chunk->memlen));
  total_bytes_allocated_in_chunks += CHUNK_ALLOC_SIZE(in_chunk->memlen);
#ifdef DEBUG_CHUNK_ALLOC
  newch->DBG_alloc = CHUNK_ALLOC_SIZE(in_chunk->memlen);
#endif
  newch->next = NULL;
  if (in_chunk->data) {
    off_t offset = in_chunk->data - in_chunk->mem;
    newch->data = newch->mem + offset;
  }
  return newch;
}

/** Return a new copy of <b>buf</b> */
buf_t *
buf_copy(const buf_t *buf)
{
  chunk_t *ch;
  buf_t *out = buf_new();
  out->default_chunk_size = buf->default_chunk_size;
  for (ch = buf->head; ch; ch = ch->next) {
    chunk_t *newch = chunk_copy(ch);
    if (out->tail) {
      out->tail->next = newch;
      out->tail = newch;
    } else {
      out->head = out->tail = newch;
    }
  }
  out->datalen = buf->datalen;
  return out;
}

/** Append a new chunk with enough capacity to hold <b>capacity</b> bytes to
 * the tail of <b>buf</b>.  If <b>capped</b>, don't allocate a chunk bigger
 * than MAX_CHUNK_ALLOC. */
static chunk_t *
buf_add_chunk_with_capacity(buf_t *buf, size_t capacity, int capped)
{
  chunk_t *chunk;

  if (CHUNK_ALLOC_SIZE(capacity) < buf->default_chunk_size) {
    chunk = chunk_new_with_alloc_size(buf->default_chunk_size);
  } else if (capped && CHUNK_ALLOC_SIZE(capacity) > MAX_CHUNK_ALLOC) {
    chunk = chunk_new_with_alloc_size(MAX_CHUNK_ALLOC);
  } else {
    chunk = chunk_new_with_alloc_size(preferred_chunk_size(capacity));
  }

  chunk->inserted_time = (uint32_t)monotime_coarse_absolute_msec();

  if (buf->tail) {
    tor_assert(buf->head);
    buf->tail->next = chunk;
    buf->tail = chunk;
  } else {
    tor_assert(!buf->head);
    buf->head = buf->tail = chunk;
  }
  check();
  return chunk;
}

/** Return the age of the oldest chunk in the buffer <b>buf</b>, in
 * milliseconds.  Requires the current monotonic time, in truncated msec,
 * as its input <b>now</b>.
 */
uint32_t
buf_get_oldest_chunk_timestamp(const buf_t *buf, uint32_t now)
{
  if (buf->head) {
    return now - buf->head->inserted_time;
  } else {
    return 0;
  }
}

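/** Return the total number of bytes currently allocated for chunk storage,
 * as tracked by total_bytes_allocated_in_chunks. */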
size_t
buf_get_total_allocation(void)
{
  return total_bytes_allocated_in_chunks;
}

/** Read up to <b>at_most</b> bytes from the socket <b>fd</b> into
 * <b>chunk</b> (which must be on <b>buf</b>). If we get an EOF, set
 * *<b>reached_eof</b> to 1.  Return -1 on error, 0 on eof or blocking,
 * and the number of bytes read otherwise. */
static inline int
read_to_chunk(buf_t *buf, chunk_t *chunk, tor_socket_t fd, size_t at_most,
              int *reached_eof, int *socket_error)
{
  ssize_t read_result;
  if (at_most > CHUNK_REMAINING_CAPACITY(chunk))
    at_most = CHUNK_REMAINING_CAPACITY(chunk);
  read_result = tor_socket_recv(fd, CHUNK_WRITE_PTR(chunk), at_most, 0);

  if (read_result < 0) {
    int e = tor_socket_errno(fd);
    if (!ERRNO_IS_EAGAIN(e)) { /* it's a real error */
#ifdef _WIN32
      if (e == WSAENOBUFS)
        log_warn(LD_NET,"recv() failed: WSAENOBUFS. Not enough ram?");
#endif
      *socket_error = e;
      return -1;
    }
    return 0; /* would block. */
  } else if (read_result == 0) {
    log_debug(LD_NET,"Encountered eof on fd %d", (int)fd);
    *reached_eof = 1;
    return 0;
  } else { /* actually got bytes. */
    buf->datalen += read_result;
    chunk->datalen += read_result;
    log_debug(LD_NET,"Read %ld bytes. %d on inbuf.", (long)read_result,
              (int)buf->datalen);
    tor_assert(read_result < INT_MAX);
    return (int)read_result;
  }
}

/** As read_to_chunk(), but return (negative) error code on error, blocking,
 * or TLS, and the number of bytes read otherwise. */
static inline int
read_to_chunk_tls(buf_t *buf, chunk_t *chunk, tor_tls_t *tls,
                  size_t at_most)
{
  int read_result;

  tor_assert(CHUNK_REMAINING_CAPACITY(chunk) >= at_most);
  read_result = tor_tls_read(tls, CHUNK_WRITE_PTR(chunk), at_most);
  if (read_result < 0)
    return read_result;
  buf->datalen += read_result;
  chunk->datalen += read_result;
  return read_result;
}

/** Read from socket <b>s</b>, writing onto end of <b>buf</b>.  Read at most
 * <b>at_most</b> bytes, growing the buffer as necessary.  If recv() returns 0
 * (because of EOF), set *<b>reached_eof</b> to 1 and return 0. Return -1 on
 * error; else return the number of bytes read.
 */
/* XXXX indicate "read blocked" somehow? */
int
read_to_buf(tor_socket_t s, size_t at_most, buf_t *buf, int *reached_eof,
            int *socket_error)
{
  /* XXXX It's stupid to overload the return values for these functions:
   * "error status" and "number of bytes read" are not mutually exclusive.
   */
  int r = 0;
  size_t total_read = 0;

  check();
  tor_assert(reached_eof);
  tor_assert(SOCKET_OK(s));

  while (at_most > total_read) {
    size_t readlen = at_most - total_read;
    chunk_t *chunk;
    if (!buf->tail || CHUNK_REMAINING_CAPACITY(buf->tail) < MIN_READ_LEN) {
      chunk = buf_add_chunk_with_capacity(buf, at_most, 1);
      if (readlen > chunk->memlen)
        readlen = chunk->memlen;
    } else {
      size_t cap = CHUNK_REMAINING_CAPACITY(buf->tail);
      chunk = buf->tail;
      if (cap < readlen)
        readlen = cap;
    }

    r = read_to_chunk(buf, chunk, s, readlen, reached_eof, socket_error);
    check();
    if (r < 0)
      return r; /* Error */
    tor_assert(total_read+r < INT_MAX);
    total_read += r;
    if ((size_t)r < readlen) { /* eof, block, or no more to read. */
      break;
    }
  }
  return (int)total_read;
}

/** As read_to_buf, but reads from a TLS connection, and returns a TLS
 * status value rather than the number of bytes read.
 *
 * Using TLS on OR connections complicates matters in two ways.
 *
 * First, a TLS stream has its own read buffer independent of the
 * connection's read buffer.  (TLS needs to read an entire frame from
 * the network before it can decrypt any data.  Thus, trying to read 1
 * byte from TLS can require that several KB be read from the network
 * and decrypted.  The extra data is stored in TLS's decrypt buffer.)
 * Because the data hasn't been read by Tor (it's still inside the TLS),
 * this means that sometimes a connection "has stuff to read" even when
 * poll() didn't return POLLIN. The tor_tls_get_pending_bytes function is
 * used in connection.c to detect TLS objects with non-empty internal
 * buffers and read from them again.
 *
 * Second, the TLS stream's events do not correspond directly to network
 * events: sometimes, before a TLS stream can read, the network must be
 * ready to write -- or vice versa.
 */
int
read_to_buf_tls(tor_tls_t *tls, size_t at_most, buf_t *buf)
{
  int r = 0;
  size_t total_read = 0;

  check_no_tls_errors();

  check();

  while (at_most > total_read) {
    size_t readlen = at_most - total_read;
    chunk_t *chunk;
    if (!buf->tail || CHUNK_REMAINING_CAPACITY(buf->tail) < MIN_READ_LEN) {
      chunk = buf_add_chunk_with_capacity(buf, at_most, 1);
      if (readlen > chunk->memlen)
        readlen = chunk->memlen;
    } else {
      size_t cap = CHUNK_REMAINING_CAPACITY(buf->tail);
      chunk = buf->tail;
      if (cap < readlen)
        readlen = cap;
    }

    r = read_to_chunk_tls(buf, chunk, tls, readlen);
    check();
    if (r < 0)
      return r; /* Error */
    tor_assert(total_read+r < INT_MAX);
    total_read += r;
    if ((size_t)r < readlen) /* eof, block, or no more to read. */
      break;
  }
  return (int)total_read;
}

/** Helper for flush_buf(): try to write <b>sz</b> bytes from chunk
 * <b>chunk</b> of buffer <b>buf</b> onto socket <b>s</b>.  On success, deduct
 * the bytes written from *<b>buf_flushlen</b>.  Return the number of bytes
 * written on success, 0 on blocking, -1 on failure.
 */
static inline int
flush_chunk(tor_socket_t s, buf_t *buf, chunk_t *chunk, size_t sz,
            size_t *buf_flushlen)
{
  ssize_t write_result;

  if (sz > chunk->datalen)
    sz = chunk->datalen;
  write_result = tor_socket_send(s, chunk->data, sz, 0);

  if (write_result < 0) {
    int e = tor_socket_errno(s);
    if (!ERRNO_IS_EAGAIN(e)) { /* it's a real error */
#ifdef _WIN32
      if (e == WSAENOBUFS)
        log_warn(LD_NET,"write() failed: WSAENOBUFS. Not enough ram?");
#endif
      return -1;
    }
    log_debug(LD_NET,"write() would block, returning.");
    return 0;
  } else {
    *buf_flushlen -= write_result;
    buf_remove_from_front(buf, write_result);
    tor_assert(write_result < INT_MAX);
    return (int)write_result;
  }
}

/** Helper for flush_buf_tls(): try to write <b>sz</b> bytes from chunk
 * <b>chunk</b> of buffer <b>buf</b> onto TLS object <b>tls</b>.  (Tries to
 * write more if there is a forced pending write size.)  On success, deduct
 * the bytes written from *<b>buf_flushlen</b>.  Return the number of bytes
 * written on success, and a TOR_TLS error code on failure or blocking.
 */
static inline int
flush_chunk_tls(tor_tls_t *tls, buf_t *buf, chunk_t *chunk,
                size_t sz, size_t *buf_flushlen)
{
  int r;
  size_t forced;
  char *data;

  forced = tor_tls_get_forced_write_size(tls);
  if (forced > sz)
    sz = forced;
  if (chunk) {
    data = chunk->data;
    tor_assert(sz <= chunk->datalen);
  } else {
    data = NULL;
    tor_assert(sz == 0);
  }
  r = tor_tls_write(tls, data, sz);
  if (r < 0)
    return r;
  if (*buf_flushlen > (size_t)r)
    *buf_flushlen -= r;
  else
    *buf_flushlen = 0;
  buf_remove_from_front(buf, r);
  log_debug(LD_NET,"flushed %d bytes, %d ready to flush, %d remain.",
            r,(int)*buf_flushlen,(int)buf->datalen);
  return r;
}

/** Write data from <b>buf</b> to the socket <b>s</b>.  Write at most
 * <b>sz</b> bytes, decrement *<b>buf_flushlen</b> by
 * the number of bytes actually written, and remove the written bytes
 * from the buffer.  Return the number of bytes written on success,
 * -1 on failure.  Return 0 if write() would block.
 */
int
flush_buf(tor_socket_t s, buf_t *buf, size_t sz, size_t *buf_flushlen)
{
  /* XXXX It's stupid to overload the return values for these functions:
   * "error status" and "number of bytes flushed" are not mutually exclusive.
   */
  int r;
  size_t flushed = 0;
  tor_assert(buf_flushlen);
  tor_assert(SOCKET_OK(s));
  tor_assert(*buf_flushlen <= buf->datalen);
  tor_assert(sz <= *buf_flushlen);

  check();
  while (sz) {
    size_t flushlen0;
    tor_assert(buf->head);
    if (buf->head->datalen >= sz)
      flushlen0 = sz;
    else
      flushlen0 = buf->head->datalen;

    r = flush_chunk(s, buf, buf->head, flushlen0, buf_flushlen);
    check();
    if (r < 0)
      return r;
    flushed += r;
    sz -= r;
    if (r == 0 || (size_t)r < flushlen0) /* can't flush any more now. */
      break;
  }
  tor_assert(flushed < INT_MAX);
  return (int)flushed;
}

/** As flush_buf(), but writes data to a TLS connection.  Can write more than
 * <b>flushlen</b> bytes.
 */
int
flush_buf_tls(tor_tls_t *tls, buf_t *buf, size_t flushlen,
              size_t *buf_flushlen)
{
  int r;
  size_t flushed = 0;
  ssize_t sz;
  tor_assert(buf_flushlen);
  tor_assert(*buf_flushlen <= buf->datalen);
  tor_assert(flushlen <= *buf_flushlen);
  sz = (ssize_t) flushlen;

  /* we want to let tls write even if flushlen is zero, because it might
   * have a partial record pending */
  check_no_tls_errors();

  check();
  do {
    size_t flushlen0;
    if (buf->head) {
      if ((ssize_t)buf->head->datalen >= sz)
        flushlen0 = sz;
      else
        flushlen0 = buf->head->datalen;
    } else {
      flushlen0 = 0;
    }

    r = flush_chunk_tls(tls, buf, buf->head, flushlen0, buf_flushlen);
    check();
    if (r < 0)
      return r;
    flushed += r;
    sz -= r;
    if (r == 0) /* Can't flush any more now. */
      break;
  } while (sz > 0);
  tor_assert(flushed < INT_MAX);
  return (int)flushed;
}

/** Append <b>string_len</b> bytes from <b>string</b> to the end of
 * <b>buf</b>.
 *
 * Return the new length of the buffer on success, -1 on failure.
 */
int
write_to_buf(const char *string, size_t string_len, buf_t *buf)
{
  if (!string_len)
    return (int)buf->datalen;
  check();

  while (string_len) {
    size_t copy;
    if (!buf->tail || !CHUNK_REMAINING_CAPACITY(buf->tail))
      buf_add_chunk_with_capacity(buf, string_len, 1);

    copy = CHUNK_REMAINING_CAPACITY(buf->tail);
    if (copy > string_len)
      copy = string_len;
    memcpy(CHUNK_WRITE_PTR(buf->tail), string, copy);
    string_len -= copy;
    string += copy;
    buf->datalen += copy;
    buf->tail->datalen += copy;
  }

  check();
  tor_assert(buf->datalen < INT_MAX);
  return (int)buf->datalen;
}

/** Helper: copy the first <b>string_len</b> bytes from <b>buf</b>
 * onto <b>string</b>.
 */
static inline void
peek_from_buf(char *string, size_t string_len, const buf_t *buf)
{
  chunk_t *chunk;

  tor_assert(string);
  /* make sure we don't ask for too much */
  tor_assert(string_len <= buf->datalen);
  /* assert_buf_ok(buf); */

  chunk = buf->head;
  while (string_len) {
    size_t copy = string_len;
    tor_assert(chunk);
    if (chunk->datalen < copy)
      copy = chunk->datalen;
    memcpy(string, chunk->data, copy);
    string_len -= copy;
    string += copy;
    chunk = chunk->next;
  }
}

/** Remove <b>string_len</b> bytes from the front of <b>buf</b>, and store
 * them into <b>string</b>.  Return the new buffer size.  <b>string_len</b>
 * must be \<= the number of bytes on the buffer.
 */
int
fetch_from_buf(char *string, size_t string_len, buf_t *buf)
{
  /* There must be string_len bytes in buf; write them onto string,
   * then memmove buf back (that is, remove them from buf).
   *
   * Return the number of bytes still on the buffer. */

  check();
  peek_from_buf(string, string_len, buf);
  buf_remove_from_front(buf, string_len);
  check();
  tor_assert(buf->datalen < INT_MAX);
  return (int)buf->datalen;
}

/** True iff the cell command <b>command</b> is one that implies a
 * variable-length cell in Tor link protocol <b>linkproto</b>. */
static inline int
cell_command_is_var_length(uint8_t command, int linkproto)
{
  /* If linkproto is v2 (2), CELL_VERSIONS is the only variable-length cell
   * that works as implemented here. If it's 1, there are no variable-length
   * cells.  Tor does not support other versions right now, and so can't
   * negotiate them.
   */
  switch (linkproto) {
  case 1:
    /* Link protocol version 1 has no variable-length cells. */
    return 0;
  case 2:
    /* In link protocol version 2, VERSIONS is the only variable-length cell */
    return command == CELL_VERSIONS;
  case 0:
  case 3:
  default:
    /* In link protocol version 3 and later, and in version "unknown",
     * commands 128 and higher indicate variable-length. VERSIONS is
     * grandfathered in. */
    return command == CELL_VERSIONS || command >= 128;
  }
}
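
/* A sketch of the variable-length cell framing parsed below (see tor-spec):
 * a circuit ID (2 bytes, or 4 bytes with wide circuit IDs), a one-byte
 * command, a two-byte big-endian payload length, and then the payload
 * itself. */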

/** Check <b>buf</b> for a variable-length cell according to the rules of link
 * protocol version <b>linkproto</b>.  If one is found, pull it off the buffer
 * and assign a newly allocated var_cell_t to *<b>out</b>, and return 1.
 * Return 0 if whatever is on the start of buf_t is not a variable-length
 * cell.  Return 1 and set *<b>out</b> to NULL if there seems to be the start
 * of a variable-length cell on <b>buf</b>, but the whole thing isn't there
 * yet. */
int
fetch_var_cell_from_buf(buf_t *buf, var_cell_t **out, int linkproto)
{
  char hdr[VAR_CELL_MAX_HEADER_SIZE];
  var_cell_t *result;
  uint8_t command;
  uint16_t length;
  const int wide_circ_ids = linkproto >= MIN_LINK_PROTO_FOR_WIDE_CIRC_IDS;
  const int circ_id_len = get_circ_id_size(wide_circ_ids);
  const unsigned header_len = get_var_cell_header_size(wide_circ_ids);
  check();
  *out = NULL;
  if (buf->datalen < header_len)
    return 0;
  peek_from_buf(hdr, header_len, buf);

  command = get_uint8(hdr + circ_id_len);
  if (!(cell_command_is_var_length(command, linkproto)))
    return 0;

  length = ntohs(get_uint16(hdr + circ_id_len + 1));
  if (buf->datalen < (size_t)(header_len+length))
    return 1;
  result = var_cell_new(length);
  result->command = command;
  if (wide_circ_ids)
    result->circ_id = ntohl(get_uint32(hdr));
  else
    result->circ_id = ntohs(get_uint16(hdr));

  buf_remove_from_front(buf, header_len);
  peek_from_buf((char*) result->payload, length, buf);
  buf_remove_from_front(buf, length);
  check();

  *out = result;
  return 1;
}

/** Move up to *<b>buf_flushlen</b> bytes from <b>buf_in</b> to
 * <b>buf_out</b>, and modify *<b>buf_flushlen</b> appropriately.
 * Return the number of bytes actually copied.
 */
int
move_buf_to_buf(buf_t *buf_out, buf_t *buf_in, size_t *buf_flushlen)
{
  /* We can do way better here, but this doesn't turn up in any profiles. */
  char b[4096];
  size_t cp, len;
  len = *buf_flushlen;
  if (len > buf_in->datalen)
    len = buf_in->datalen;

  cp = len; /* Remember the number of bytes we intend to copy. */
  tor_assert(cp < INT_MAX);
  while (len) {
    /* This isn't the most efficient implementation one could imagine, since
     * it does two copies instead of 1, but I kinda doubt that this will be
     * critical path. */
    size_t n = len > sizeof(b) ? sizeof(b) : len;
    fetch_from_buf(b, n, buf_in);
    write_to_buf(b, n, buf_out);
    len -= n;
  }
  *buf_flushlen -= cp;
  return (int)cp;
}

/** Internal structure: represents a position in a buffer. */
typedef struct buf_pos_t {
  const chunk_t *chunk; /**< Which chunk are we pointing to? */
  int pos;/**< Which character inside the chunk's data are we pointing to? */
  size_t chunk_pos; /**< Total length of all previous chunks. */
} buf_pos_t;

/** Initialize <b>out</b> to point to the first character of <b>buf</b>.*/
static void