diff --git a/changes/bug16861 b/changes/bug16861
new file mode 100644
index 0000000000000000000000000000000000000000..7fc8d0574061061b07e8b468470f662b48cffe3c
--- /dev/null
+++ b/changes/bug16861
@@ -0,0 +1,16 @@
+ o Major features (traffic analysis)
+   - Relays and clients will now send a padding cell on idle OR
+     connections every 1.5 to 9.5 seconds (tunable via consensus
+     parameters). Directory connections and inter-relay connections
+     are not padded. Padding is negotiated using Tor's link protocol,
+     so both relays and clients must upgrade for this to take effect.
+     Clients may force padding to be sent regardless of relay support by
+     setting ConnectionPadding 1 in torrc, and may disable padding by
+     setting ConnectionPadding 0 in torrc. Padding may be minimized
+     for mobile users with the torrc option ReducedConnectionPadding.
+     Implements Proposal 251 and Section 2 of Proposal 254; closes ticket
+     #16861.
+   - Relays will publish 24-hour totals of padding and non-padding cell
+     counts to their extra-info descriptors, unless PaddingStatistics 0
+     is set in torrc. These 24-hour totals are also rounded to multiples
+     of 10000.
diff --git a/doc/tor.1.txt b/doc/tor.1.txt
index de2e2b4a04dc4c2df33876de49c3db07dab12359..eb4e02ad6c57b7539e0852e2d9de5afe763f9204 100644
--- a/doc/tor.1.txt
+++ b/doc/tor.1.txt
@@ -832,6 +832,22 @@ The following options are useful only for clients (that is, if
     and fast enough. The current behavior is simply that Tor is a client
     unless ORPort, ExtORPort, or DirPort are configured.) (Default: 0)
 
+[[ConnectionPadding]] **ConnectionPadding** **0**|**1**|**auto**::
+    This option governs Tor's use of padding to defend against some forms of
+    traffic analysis. If it is set to 'auto', Tor will send padding only
+    if both the client and the relay support it. If it is set to 0, Tor will
+    not send any padding cells. If it is set to 1, Tor will still send padding
+    for client connections regardless of relay support. Only clients may set
+    this option. This option should be offered via the UI to mobile users
+    for use where bandwidth may be expensive.
+    (Default: auto)
+
+[[ReducedConnectionPadding]] **ReducedConnectionPadding** **0**|**1**::
+    If set to 1, Tor will not hold OR connections open for very long,
+    and will send less padding on these connections. Only clients may set
+    this option. This option should be offered via the UI to mobile users
+    for use where bandwidth may be expensive. (Default: 0)
+
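The three values of ConnectionPadding combine with relay support roughly as in the sketch below. This is an illustration only, not part of the patch; the helper name is hypothetical and mirrors the checks that channel_do_open_actions() and channelpadding_decide_to_pad_channel() perform later in this diff.

```c
/* Illustration only -- not part of this patch. ConnectionPadding is an
 * AUTOBOOL, stored as -1 for "auto", 0 for off, and 1 for forced on. */
static int
example_client_will_pad(int connection_padding, int relay_supports_padding)
{
  if (connection_padding == 0)
    return 0;                    /* padding explicitly disabled */
  if (connection_padding == 1)
    return 1;                    /* forced on, even without relay support */
  return relay_supports_padding; /* 'auto': pad only when negotiated */
}
```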
 [[ExcludeNodes]] **ExcludeNodes** __node__,__node__,__...__::
     A list of identity fingerprints, country codes, and address
     patterns of nodes to avoid when building a circuit. Country codes are
@@ -2031,6 +2047,14 @@ is non-zero):
     If ExtraInfoStatistics is enabled, it will published as part of
     extra-info document. (Default: 0)
 
+[[PaddingStatistics]] **PaddingStatistics** **0**|**1**::
+    Relays only.
+    When this option is enabled, Tor collects statistics for padding cells
+    sent and received by this relay, in addition to total cell counts.
+    These statistics are rounded, and omitted if traffic is low. This
+    information is important for load balancing decisions related to padding.
+    (Default: 1)
+
 [[DirReqStatistics]] **DirReqStatistics** **0**|**1**::
     Relays and bridges only.
     When this option is enabled, a Tor directory writes statistics on the
diff --git a/src/or/Makefile.nmake b/src/or/Makefile.nmake
index 2ac98cd37271f870be9c710091cb2d0448904de2..429ae67858af72bd505cbe6e938555a29930f96a 100644
--- a/src/or/Makefile.nmake
+++ b/src/or/Makefile.nmake
@@ -14,6 +14,7 @@ LIBTOR_OBJECTS = \
   addressmap.obj \
   buffers.obj \
   channel.obj \
+  channelpadding.obj \
   channeltls.obj \
   circpathbias.obj \
   circuitbuild.obj \
diff --git a/src/or/channel.c b/src/or/channel.c
index a1ccb2c8e338738732c62cf35f5eb473c78712b1..74793436da53a1419afc1abd266474f3936896e9 100644
--- a/src/or/channel.c
+++ b/src/or/channel.c
@@ -49,6 +49,7 @@
 #include "or.h"
 #include "channel.h"
 #include "channeltls.h"
+#include "channelpadding.h"
 #include "circuitbuild.h"
 #include "circuitlist.h"
 #include "circuitstats.h"
@@ -63,6 +64,7 @@
 #include "router.h"
 #include "routerlist.h"
 #include "scheduler.h"
+#include "compat_time.h"
 
 /* Global lists of channels */
 
@@ -105,6 +107,8 @@ HT_GENERATE2(channel_gid_map, channel_s, gidmap_node,
              channel_id_hash, channel_id_eq,
              0.6, tor_reallocarray_, tor_free_);
 
+HANDLE_IMPL(channel, channel_s,);
+
 /* Counter for ID numbers */
 static uint64_t n_channels_allocated = 0;
 /*
@@ -922,6 +926,11 @@ channel_free(channel_t *chan)
     circuitmux_set_policy(chan->cmux, NULL);
   }
 
+  /* Remove all timers and associated handle entries now */
+  timer_free(chan->padding_timer);
+  channel_handle_free(chan->timer_handle);
+  channel_handles_clear(chan);
+
   /* Call a free method if there is one */
   if (chan->free_fn) chan->free_fn(chan);
 
@@ -1000,6 +1009,11 @@ channel_force_free(channel_t *chan)
     circuitmux_set_policy(chan->cmux, NULL);
   }
 
+  /* Remove all timers and associated handle entries now */
+  timer_free(chan->padding_timer);
+  channel_handle_free(chan->timer_handle);
+  channel_handles_clear(chan);
+
   /* Call a free method if there is one */
   if (chan->free_fn) chan->free_fn(chan);
 
@@ -2619,6 +2633,19 @@ channel_do_open_actions(channel_t *chan)
     }
   }
 
+  /* Disable or reduce padding according to user prefs. */
+  if (chan->padding_enabled || get_options()->ConnectionPadding == 1) {
+    if (!get_options()->ConnectionPadding) {
+      channelpadding_disable_padding_on_channel(chan);
+    }
+
+    /* Padding can be forced and/or reduced by clients, regardless of
+     * whether the channel supports it. */
+    if (get_options()->ReducedConnectionPadding) {
+      channelpadding_reduce_padding_on_channel(chan);
+    }
+  }
+
   circuit_n_chan_done(chan, 1, close_origin_circuits);
 }
 
@@ -4215,8 +4242,12 @@ channel_timestamp_active(channel_t *chan)
   time_t now = time(NULL);
 
   tor_assert(chan);
+  chan->timestamp_xfer_ms = monotime_coarse_absolute_msec();
 
   chan->timestamp_active = now;
+
+  /* Clear any potential netflow padding timer. We're active */
+  chan->next_padding_time_ms = 0;
 }
 
 /**
@@ -4299,11 +4330,14 @@ void
 channel_timestamp_recv(channel_t *chan)
 {
   time_t now = time(NULL);
-
   tor_assert(chan);
+  chan->timestamp_xfer_ms = monotime_coarse_absolute_msec();
 
   chan->timestamp_active = now;
   chan->timestamp_recv = now;
+
+  /* Clear any potential netflow padding timer. We're active */
+  chan->next_padding_time_ms = 0;
 }
 
 /**
@@ -4316,11 +4350,15 @@ void
 channel_timestamp_xmit(channel_t *chan)
 {
   time_t now = time(NULL);
-
   tor_assert(chan);
 
+  chan->timestamp_xfer_ms = monotime_coarse_absolute_msec();
+
   chan->timestamp_active = now;
   chan->timestamp_xmit = now;
+
+  /* Clear any potential netflow padding timer. We're active */
+  chan->next_padding_time_ms = 0;
 }
 
 /***************************************************************
diff --git a/src/or/channel.h b/src/or/channel.h
index 0b8f599895ffef27ee47c2a97753d0c4b437b8ed..21211dc8bd5ed19988afdec4381c168fc2cff21c 100644
--- a/src/or/channel.h
+++ b/src/or/channel.h
@@ -11,6 +11,8 @@
 
 #include "or.h"
 #include "circuitmux.h"
+#include "timers.h"
+#include "handles.h"
 
 /* Channel handler function pointer typedefs */
 typedef void (*channel_listener_fn_ptr)(channel_listener_t *, channel_t *);
@@ -21,6 +23,17 @@ struct cell_queue_entry_s;
 TOR_SIMPLEQ_HEAD(chan_cell_queue, cell_queue_entry_s);
 typedef struct chan_cell_queue chan_cell_queue_t;
 
+/**
+ * This enum is used by channelpadding to decide when to pad channels.
+ * Don't add values to it without updating the checks in
+ * channelpadding_decide_to_pad_channel().
+ */
+typedef enum {
+    CHANNEL_USED_NOT_USED_FOR_FULL_CIRCS = 0,
+    CHANNEL_USED_FOR_FULL_CIRCS,
+    CHANNEL_USED_FOR_USER_TRAFFIC,
+} channel_usage_info_t;
+
 /**
  * Channel struct; see the channel_t typedef in or.h.  A channel is an
  * abstract interface for the OR-to-OR connection, similar to connection_or_t,
@@ -37,6 +50,9 @@ struct channel_s {
   /** List entry for hashtable for global-identifier lookup. */
   HT_ENTRY(channel_s) gidmap_node;
 
+  /** Handle entry for handle-based lookup */
+  HANDLE_ENTRY(channel, channel_s);
+
   /** Current channel state */
   channel_state_t state;
 
@@ -51,6 +67,58 @@ struct channel_s {
   /** has this channel ever been open? */
   unsigned int has_been_open:1;
 
+  /**
+   * This field indicates if the other side has enabled or disabled
+   * padding via either the link protocol version or
+   * channelpadding_negotiate cells.
+   *
+   * Clients can override this with ConnectionPadding in torrc to
+   * disable or force padding to relays, but relays cannot override the
+   * client's request.
+   */
+  unsigned int padding_enabled:1;
+
+  /** Cached value of our decision to pad (to avoid expensive
+   * checks during critical path statistics counting). */
+  unsigned int currently_padding:1;
+
+  /** Is there a pending netflow padding callback? */
+  unsigned int pending_padding_callback:1;
+
+  /** Has this channel ever been used for non-directory traffic?
+   * Used to decide what channels to pad, and when. */
+  channel_usage_info_t channel_usage;
+
+  /** When should we send a cell for netflow padding, in absolute
+   *  milliseconds since monotime system start. 0 means no padding
+   *  is scheduled. */
+  uint64_t next_padding_time_ms;
+
+  /** The callback pointer for the padding callbacks */
+  tor_timer_t *padding_timer;
+  /** The handle to this channel (to free on canceled timers) */
+  struct channel_handle_t *timer_handle;
+
+  /**
+   * These two fields specify the minimum and maximum negotiated timeout
+   * values for inactivity (send or receive) before we decide to pad a
+   * channel. These fields can be set either via a PADDING_NEGOTIATE cell,
+   * or the torrc option ReducedConnectionPadding. The consensus parameters
+   * nf_ito_low and nf_ito_high are used to ensure that padding can only be
+   * negotiated to be less frequent than what is specified in the consensus.
+   * (This is done to prevent wingnut clients from requesting excessive
+   * padding).
+   *
+   * The actual timeout value is randomly chosen between these two values
+   * as per the table in channelpadding_get_netflow_inactive_timeout_ms(),
+   * after ensuring that these values do not specify lower timeouts than
+   * the consensus parameters.
+   *
+   * If these are 0, we have not negotiated or specified custom padding
+   * times, and instead use consensus defaults. */
+  uint16_t padding_timeout_low_ms;
+  uint16_t padding_timeout_high_ms;
+
   /** Why did we close?
    */
   enum {
@@ -90,6 +158,18 @@ struct channel_s {
   time_t timestamp_created; /* Channel created */
   time_t timestamp_active; /* Any activity */
 
+  /**
+   * This is a high-resolution monotonic timestamp that marks when we
+   * believe the channel has actually sent or received data to/from
+   * the wire. Right now, it is used to determine when we should send
+   * a padding cell for channelpadding.
+   *
+   * XXX: Are we setting timestamp_xfer_ms in the right places to
+   * accurately reflect actual network data transfer? Or might this be
+   * very wrong wrt when bytes actually go on the wire?
+   */
+  uint64_t timestamp_xfer_ms;
+
   /* Methods implemented by the lower layer */
 
   /** Free a channel */
@@ -633,5 +713,8 @@ int packed_cell_is_destroy(channel_t *chan,
                            const packed_cell_t *packed_cell,
                            circid_t *circid_out);
 
+/* Declare the handle helpers */
+HANDLE_DECL(channel, channel_s,);
+
 #endif
 
diff --git a/src/or/channelpadding.c b/src/or/channelpadding.c
new file mode 100644
index 0000000000000000000000000000000000000000..3976424faf21e5bdb7af81ec598946cce06adf86
--- /dev/null
+++ b/src/or/channelpadding.c
@@ -0,0 +1,574 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2015, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/* TOR_CHANNEL_INTERNAL_ define needed for an O(1) implementation of
+ * channelpadding_channel_to_channelinfo() */
+#define TOR_CHANNEL_INTERNAL_
+
+#include "or.h"
+#include "channel.h"
+#include "channelpadding.h"
+#include "channeltls.h"
+#include "config.h"
+#include "networkstatus.h"
+#include "connection.h"
+#include "connection_or.h"
+#include "main.h"
+#include "rephist.h"
+#include "router.h"
+#include "compat_time.h"
+#include <event.h>
+
+STATIC int channelpadding_get_netflow_inactive_timeout_ms(const channel_t *);
+STATIC int channelpadding_send_disable_command(channel_t *);
+STATIC int64_t channelpadding_compute_time_until_pad_for_netflow(channel_t *);
+
+/** The total number of pending channelpadding timers */
+static uint64_t total_timers_pending;
+
+/**
+ * Get a random netflow inactive timeout keepalive period in milliseconds,
+ * the range for which is determined by consensus parameters, negotiation,
+ * configuration, or default values. The consensus parameters enforce the
+ * minimum possible value, to avoid excessively frequent padding.
+ *
+ * The ranges for this value were chosen to be low enough to ensure that
+ * routers do not emit a new netflow record for a connection due to it
+ * being idle.
+ *
+ * Specific timeout values for major routers are listed in Proposal 251.
+ * No major router appeared capable of setting an inactive timeout below 10
+ * seconds, so we set the defaults below that value, since we can always
+ * scale back if it ends up being too much padding.
+ *
+ * Returns the next timeout period (in milliseconds) after which we should
+ * send a padding packet, or 0 if padding is disabled.
+ */
+#define DFLT_NETFLOW_INACTIVE_KEEPALIVE_LOW 1500
+#define DFLT_NETFLOW_INACTIVE_KEEPALIVE_HIGH 9500
+#define DFLT_NETFLOW_INACTIVE_KEEPALIVE_MIN 0
+#define DFLT_NETFLOW_INACTIVE_KEEPALIVE_MAX 60000
+STATIC int
+channelpadding_get_netflow_inactive_timeout_ms(const channel_t *chan)
+{
+  int low_timeout = networkstatus_get_param(NULL, "nf_ito_low",
+      DFLT_NETFLOW_INACTIVE_KEEPALIVE_LOW,
+      DFLT_NETFLOW_INACTIVE_KEEPALIVE_MIN,
+      DFLT_NETFLOW_INACTIVE_KEEPALIVE_MAX);
+  int high_timeout = networkstatus_get_param(NULL, "nf_ito_high",
+      DFLT_NETFLOW_INACTIVE_KEEPALIVE_HIGH,
+      low_timeout,
+      DFLT_NETFLOW_INACTIVE_KEEPALIVE_MAX);
+  int X1, X2;
+
+  if (low_timeout == 0 && low_timeout == high_timeout)
+    return 0; // No padding
+
+  /* If we have negotiated different timeout values, use those, but
+   * don't allow them to be lower than the consensus ones */
+  if (chan->padding_timeout_low_ms && chan->padding_timeout_high_ms) {
+    low_timeout = MAX(low_timeout, chan->padding_timeout_low_ms);
+    high_timeout = MAX(high_timeout, chan->padding_timeout_high_ms);
+  }
+
+  if (low_timeout == high_timeout)
+    return low_timeout; // No randomization
+
+  /*
+   * This MAX() hack is here because we apply the timeout on both the client
+   * and the server. This creates the situation where the total time before
+   * sending a packet in either direction is actually
+   * min(client_timeout,server_timeout).
+   *
+   * If X1 and X2 are independent random variables uniform from 0..R-1
+   * (where R=high-low), then Y=max(X1,X2) has
+   * Prob(Y == i) = (2.0*i + 1)/(R*R).
+   *
+   * If we create a third random variable Z=min(Y1,Y2) from two independent
+   * copies of Y, then it turns out that Exp[Z] ~= Exp[X]. Here's a table:
+   *
+   *    R     Exp[X]    Exp[Z]    Exp[min(X1,X2)] Exp[max(X1,X2)]
+   *  2000     999.5    1066        666.2           1332.8
+   *  3000    1499.5    1599.5      999.5           1999.5
+   *  5000    2499.5    2666       1666.2           3332.8
+   *  6000    2999.5    3199.5     1999.5           3999.5
+   *  7000    3499.5    3732.8     2332.8           4666.2
+   *  8000    3999.5    4266.2     2666.2           5332.8
+   *  10000   4999.5    5328       3332.8           6666.2
+   *  15000   7499.5    7995       4999.5           9999.5
+   *  20000   9999.5    10661      6666.2           13332.8
+   *
+   * In other words, this hack makes it so that when both the client and
+   * the guard are sending this padding, then the averages work out closer
+   * to the midpoint of the range, making the overhead easier to tune.
+   * If only one endpoint is padding (for example: if the relay does not
+   * support padding, but the client has set ConnectionPadding 1; or
+   * if the relay does support padding, but the client has set
+   * ReducedConnectionPadding 1), then the defense will still prevent
+   * record splitting, but with less overhead than the midpoint
+   * (as seen by the Exp[max(X1,X2)] column).
+   *
+   * To calculate the average padding packet interval (and thus overhead),
+   * index into the table by picking a row based on R = high-low. Then,
+   * use the appropriate column (Exp[Z] for two-sided padding, and
+   * Exp[max(X1,X2)] for one-sided padding). Finally, take this value
+   * and add it to the low timeout value. The result is the average
+   * interval at which padding packets will be sent.
+   */
+
+  X1 = crypto_rand_int(high_timeout - low_timeout);
+  X2 = crypto_rand_int(high_timeout - low_timeout);
+  return low_timeout + MAX(X1, X2);
+}
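A quick standalone check of the table in the comment above (not part of the patch): the Exp[max(X1,X2)] and Exp[min(X1,X2)] columns can be reproduced by simulation for any row's R.

```c
/* Standalone sketch: estimate Exp[max(X1,X2)] and Exp[min(X1,X2)] by
 * simulation for a chosen R = high-low, to check the table above. */
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
  const int R = 8000;           /* pick a table row, e.g. R = 8000 */
  const long trials = 1000000;
  double sum_max = 0, sum_min = 0;
  srand(42);
  for (long i = 0; i < trials; i++) {
    int x1 = rand() % R, x2 = rand() % R;
    sum_max += (x1 > x2) ? x1 : x2;
    sum_min += (x1 < x2) ? x1 : x2;
  }
  /* For R = 8000, expect roughly 5332.8 and 2666.2, matching the table. */
  printf("Exp[max] ~= %.1f, Exp[min] ~= %.1f\n",
         sum_max / trials, sum_min / trials);
  return 0;
}
```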
+
+/**
+ * Update this channel's padding settings based on the PADDING_NEGOTIATE
+ * contents.
+ *
+ * Returns -1 on error; 1 on success.
+ */
+int
+channelpadding_update_padding_for_channel(channel_t *chan,
+                const channelpadding_negotiate_t *pad_vars)
+{
+  if (pad_vars->version != 0) {
+    static ratelim_t version_limit = RATELIM_INIT(600);
+
+    log_fn_ratelim(&version_limit,LOG_PROTOCOL_WARN,LD_PROTOCOL,
+           "Got a PADDING_NEGOTIATE cell with an unknown version. Ignoring.");
+    return -1;
+  }
+
+  // We should not allow malicious relays to disable or reduce padding for
+  // us as clients. In fact, we should only accept this cell at all if we're
+  // operating as a relay. Bridges should not accept it from relays, either
+  // (only from their clients).
+  if ((get_options()->BridgeRelay &&
+        connection_or_digest_is_known_relay(chan->identity_digest)) ||
+      !get_options()->ORPort_set) {
+    static ratelim_t relay_limit = RATELIM_INIT(600);
+
+    log_fn_ratelim(&relay_limit,LOG_PROTOCOL_WARN,LD_PROTOCOL,
+           "Got a PADDING_NEGOTIATE from relay at %s (%s). "
+           "This should not happen.",
+           chan->get_remote_descr(chan, 0),
+           hex_str(chan->identity_digest, DIGEST_LEN));
+    return -1;
+  }
+
+  chan->padding_enabled = (pad_vars->command == CHANNELPADDING_COMMAND_START);
+
+  /* Min must not be lower than the current consensus parameter
+     nf_ito_low. */
+  chan->padding_timeout_low_ms = MAX(networkstatus_get_param(NULL,
+              "nf_ito_low",
+              DFLT_NETFLOW_INACTIVE_KEEPALIVE_LOW,
+              DFLT_NETFLOW_INACTIVE_KEEPALIVE_MIN,
+              DFLT_NETFLOW_INACTIVE_KEEPALIVE_MAX),
+          pad_vars->ito_low_ms);
+
+  /* Max must not be lower than ito_low_ms */
+  chan->padding_timeout_high_ms = MAX(chan->padding_timeout_low_ms,
+                                   pad_vars->ito_high_ms);
+
+  log_fn(LOG_INFO,LD_OR,
+         "Negotiated padding=%d, lo=%d, hi=%d on "U64_FORMAT,
+         chan->padding_enabled, chan->padding_timeout_low_ms,
+         chan->padding_timeout_high_ms,
+         U64_PRINTF_ARG(chan->global_identifier));
+
+  return 1;
+}
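For context, the channelpadding_negotiate_t consumed here is generated by trunnel from the cell definition in Section 2 of Proposal 254. The sketch below is only an approximation of the payload fields and their on-the-wire sizes, not a claim about the layout of the generated struct.

```c
/* Sketch only: approximate PADDING_NEGOTIATE payload, per Section 2 of
 * Proposal 254. The real type and its accessors (channelpadding_negotiate_t,
 * channelpadding_negotiate_parse(), etc.) are trunnel-generated. */
#include <stdint.h>

typedef struct example_padding_negotiate_body {
  uint8_t  version;      /* must be 0; other values are rejected above */
  uint8_t  command;      /* CHANNELPADDING_COMMAND_START or _STOP */
  uint16_t ito_low_ms;   /* requested low inactivity timeout, in ms */
  uint16_t ito_high_ms;  /* requested high inactivity timeout, in ms */
} example_padding_negotiate_body_t;
```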
+
+/**
+ * Sends a CELL_PADDING_NEGOTIATE on the channel to tell the other side not
+ * to send padding.
+ *
+ * Returns -1 on error, 0 on success.
+ */
+STATIC int
+channelpadding_send_disable_command(channel_t *chan)
+{
+  channelpadding_negotiate_t disable;
+  cell_t cell;
+
+  tor_assert(BASE_CHAN_TO_TLS(chan)->conn->link_proto >=
+             MIN_LINK_PROTO_FOR_CHANNEL_PADDING);
+
+  memset(&cell, 0, sizeof(cell_t));
+  memset(&disable, 0, sizeof(channelpadding_negotiate_t));
+  cell.command = CELL_PADDING_NEGOTIATE;
+
+  channelpadding_negotiate_set_command(&disable, CHANNELPADDING_COMMAND_STOP);
+
+  if (channelpadding_negotiate_encode(cell.payload, CELL_PAYLOAD_SIZE,
+        &disable) < 0)
+    return -1;
+
+  if (chan->write_cell(chan, &cell) == 1)
+    return 0;
+  else
+    return -1;
+}
+
+/**
+ * Sends a CELL_PADDING_NEGOTIATE on the channel to tell the other side to
+ * resume sending padding at some rate.
+ *
+ * Returns -1 on error, 0 on success.
+ */
+int
+channelpadding_send_enable_command(channel_t *chan, uint16_t low_timeout,
+                                   uint16_t high_timeout)
+{
+  channelpadding_negotiate_t enable;
+  cell_t cell;
+
+  tor_assert(BASE_CHAN_TO_TLS(chan)->conn->link_proto >=
+             MIN_LINK_PROTO_FOR_CHANNEL_PADDING);
+
+  memset(&cell, 0, sizeof(cell_t));
+  memset(&enable, 0, sizeof(channelpadding_negotiate_t));
+  cell.command = CELL_PADDING_NEGOTIATE;
+
+  channelpadding_negotiate_set_command(&enable, CHANNELPADDING_COMMAND_START);
+  channelpadding_negotiate_set_ito_low_ms(&enable, low_timeout);
+  channelpadding_negotiate_set_ito_high_ms(&enable, high_timeout);
+
+  if (channelpadding_negotiate_encode(cell.payload, CELL_PAYLOAD_SIZE,
+        &enable) < 0)
+    return -1;
+
+  if (chan->write_cell(chan, &cell) == 1)
+    return 0;
+  else
+    return -1;
+}
+
+/**
+ * Sends a CELL_PADDING cell on a channel if it has been idle since
+ * our callback was scheduled.
+ *
+ * This function also clears the pending padding timer and the callback
+ * flags.
+ */
+static void
+channelpadding_send_padding_cell_for_callback(channel_t *chan)
+{
+  cell_t cell;
+
+  /* Check that the channel is still valid and open */
+  if (!chan || chan->state != CHANNEL_STATE_OPEN) {
+    if (chan) chan->pending_padding_callback = 0;
+    log_fn(LOG_INFO,LD_OR,
+           "Scheduled a netflow padding cell, but connection already closed.");
+    return;
+  }
+
+  /* We should have a pending callback flag set. */
+  if (BUG(chan->pending_padding_callback == 0))
+    return;
+
+  chan->pending_padding_callback = 0;
+
+  if (!chan->next_padding_time_ms ||
+      chan->has_queued_writes(chan)) {
+    /* We must have been active before the timer fired */
+    chan->next_padding_time_ms = 0;
+    return;
+  }
+
+  {
+    uint64_t now = monotime_coarse_absolute_msec();
+
+    log_fn(LOG_INFO,LD_OR,
+        "Sending netflow keepalive on "U64_FORMAT" to %s (%s) after "
+        I64_FORMAT" ms. Delta "I64_FORMAT"ms",
+        U64_PRINTF_ARG(chan->global_identifier),
+        safe_str_client(chan->get_remote_descr(chan, 0)),
+        safe_str_client(hex_str(chan->identity_digest, DIGEST_LEN)),
+        U64_PRINTF_ARG(now - chan->timestamp_xfer_ms),
+        U64_PRINTF_ARG(now - chan->next_padding_time_ms));
+  }
+
+  /* Clear the timer */
+  chan->next_padding_time_ms = 0;
+
+  /* Send the padding cell. This will cause the channel to get a
+   * fresh timestamp_active */
+  memset(&cell, 0, sizeof(cell));
+  cell.command = CELL_PADDING;
+  chan->write_cell(chan, &cell);
+}
+
+/**
+ * tor_timer callback function for us to send padding on an idle channel.
+ *
+ * This function just obtains the channel from the callback handle, ensures
+ * it is still valid, and then hands it off to
+ * channelpadding_send_padding_cell_for_callback(), which checks if
+ * the channel is still idle before sending padding.
+ */
+static void
+channelpadding_send_padding_callback(tor_timer_t *timer, void *args,
+                                     const struct monotime_t *time)
+{
+  channel_t *chan = channel_handle_get((struct channel_handle_t*)args);
+  (void)timer; (void)time;
+
+  if (chan && CHANNEL_CAN_HANDLE_CELLS(chan)) {
+    /* Hrmm.. It might be nice to have an equivalent to assert_connection_ok
+     * for channels. Then we could get rid of the channeltls dependency */
+    tor_assert(BASE_CHAN_TO_TLS(chan)->conn->base_.magic ==
+               OR_CONNECTION_MAGIC);
+    assert_connection_ok(&BASE_CHAN_TO_TLS(chan)->conn->base_, approx_time());
+
+    channelpadding_send_padding_cell_for_callback(chan);
+  } else {
+     log_fn(LOG_INFO,LD_OR,
+            "Channel closed while waiting for timer.");
+  }
+
+  total_timers_pending--;
+}
+
+/**
+ * Schedules a callback to send padding on a channel in_ms milliseconds from
+ * now.
+ *
+ * Returns CHANNELPADDING_WONTPAD on error, CHANNELPADDING_PADDING_SENT if we
+ * sent the packet immediately without a timer, and
+ * CHANNELPADDING_PADDING_SCHEDULED if we decided to schedule a timer.
+ */
+static channelpadding_decision_t
+channelpadding_schedule_padding(channel_t *chan, int in_ms)
+{
+  struct timeval timeout;
+  tor_assert(!chan->pending_padding_callback);
+
+  if (in_ms <= 0) {
+    chan->pending_padding_callback = 1;
+    channelpadding_send_padding_cell_for_callback(chan);
+    return CHANNELPADDING_PADDING_SENT;
+  }
+
+  timeout.tv_sec = in_ms/1000;
+  timeout.tv_usec = (in_ms%1000)*1000;
+
+  if (!chan->timer_handle) {
+    chan->timer_handle = channel_handle_new(chan);
+  }
+
+  if (chan->padding_timer) {
+    timer_set_cb(chan->padding_timer,
+                 channelpadding_send_padding_callback,
+                 chan->timer_handle);
+  } else {
+    chan->padding_timer = timer_new(channelpadding_send_padding_callback,
+                                    chan->timer_handle);
+  }
+  timer_schedule(chan->padding_timer, &timeout);
+
+  rep_hist_padding_count_timers(++total_timers_pending);
+
+  chan->pending_padding_callback = 1;
+  return CHANNELPADDING_PADDING_SCHEDULED;
+}
+
+/**
+ * Calculates the number of milliseconds from now to schedule a padding cell.
+ *
+ * Returns the number of milliseconds from now (relative) to schedule the
+ * padding callback. If the padding timer is more than 1.1 seconds in the
+ * future, we return -1, to avoid scheduling excessive callbacks. If padding
+ * is disabled in the consensus, we return -2.
+ *
+ * Side-effects: Updates chan->next_padding_time_ms, storing an (absolute, not
+ * relative) millisecond representation of when we should send padding, unless
+ * other activity happens first. This side-effect allows us to avoid
+ * scheduling a libevent callback until we're within 1.1 seconds of the padding
+ * time.
+ */
+#define CHANNELPADDING_TIME_LATER -1
+#define CHANNELPADDING_TIME_DISABLED -2
+STATIC int64_t
+channelpadding_compute_time_until_pad_for_netflow(channel_t *chan)
+{
+  uint64_t long_now = monotime_coarse_absolute_msec();
+
+  if (!chan->next_padding_time_ms) {
+    int64_t padding_timeout =
+        channelpadding_get_netflow_inactive_timeout_ms(chan);
+
+    if (!padding_timeout)
+      return CHANNELPADDING_TIME_DISABLED;
+
+    chan->next_padding_time_ms = padding_timeout
+        + chan->timestamp_xfer_ms;
+  }
+
+  /* If the next padding time is beyond the maximum possible consensus value,
+   * then this indicates a clock jump, so just send padding now. This is
+   * better than using monotonic time because we want to avoid the situation
+   * where we wait around forever for monotonic time to move forward after
+   * a clock jump far into the past.
+   */
+  if (chan->next_padding_time_ms > long_now +
+      DFLT_NETFLOW_INACTIVE_KEEPALIVE_MAX) {
+    tor_fragile_assert();
+    log_warn(LD_BUG,
+        "Channel padding timeout scheduled "I64_FORMAT"ms in the future. "
+        "Did the monotonic clock just jump?",
+        I64_PRINTF_ARG(chan->next_padding_time_ms - long_now));
+    return 0; /* Clock jumped: Send padding now */
+  }
+
+  /* If the timeout will expire before the next time we're called (1000ms
+     from now, plus some slack), then calculate the number of milliseconds
+     from now at which we should send padding, so we can schedule a callback
+     then.
+   */
+  if (long_now + 1100 >= chan->next_padding_time_ms) {
+    int64_t ms_until_pad_for_netflow = chan->next_padding_time_ms -
+                                       long_now;
+    if (ms_until_pad_for_netflow < 0) {
+      log_warn(LD_BUG,
+              "Channel padding timeout scheduled "I64_FORMAT"ms in the past. "
+              "Did the monotonic clock just jump?",
+              I64_PRINTF_ARG(-ms_until_pad_for_netflow));
+      return 0; /* Clock jumped: Send padding now */
+    }
+
+    return ms_until_pad_for_netflow;
+  }
+  return CHANNELPADDING_TIME_LATER;
+}
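A toy walk-through of the return values above, with invented numbers (last transfer 7 seconds ago, randomized timeout of 7.8 seconds):

```c
/* Toy example only -- invented numbers, not part of the patch. */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
  const int64_t long_now = 100000;                    /* arbitrary "now", ms */
  const int64_t timestamp_xfer_ms = long_now - 7000;  /* last send/recv */
  const int64_t padding_timeout = 7800;               /* randomized timeout */
  const int64_t next_padding_time_ms = timestamp_xfer_ms + padding_timeout;

  if (long_now + 1100 >= next_padding_time_ms) {
    /* Within the 1.1 second window: schedule a timer 800 ms from now. */
    printf("pad in %lld ms\n", (long long)(next_padding_time_ms - long_now));
  } else {
    /* Otherwise wait for the next once-per-second housekeeping pass. */
    printf("CHANNELPADDING_TIME_LATER\n");
  }
  return 0;
}
```

Had the randomized timeout come out above 8.1 seconds, next_padding_time_ms would fall outside the 1.1 second window and the caller would simply check again on the next housekeeping pass.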
+
+/**
+ * Calling this function on a channel causes it to tell the other side
+ * not to send padding, and disables sending padding from this side as well.
+ */
+void
+channelpadding_disable_padding_on_channel(channel_t *chan)
+{
+  chan->padding_enabled = 0;
+
+  // Send cell to disable padding on the other end
+  channelpadding_send_disable_command(chan);
+}
+
+/**
+ * Calling this function on a channel causes it to tell the other side
+ * not to send padding, and reduces the rate that padding is sent from
+ * this side.
+ */
+void
+channelpadding_reduce_padding_on_channel(channel_t *chan)
+{
+  /* Padding can be forced and reduced by clients, regardless of whether
+   * the channel supports it. So we check for support here before
+   * sending any commands. */
+  if (chan->padding_enabled) {
+    channelpadding_send_disable_command(chan);
+  }
+
+#define DFLT_NETFLOW_REDUCED_KEEPALIVE_LOW 9000
+#define DFLT_NETFLOW_REDUCED_KEEPALIVE_HIGH 14000
+#define DFLT_NETFLOW_REDUCED_KEEPALIVE_MIN 0
+#define DFLT_NETFLOW_REDUCED_KEEPALIVE_MAX 60000
+  chan->padding_timeout_low_ms =
+    networkstatus_get_param(NULL, "nf_ito_low_reduced",
+        DFLT_NETFLOW_REDUCED_KEEPALIVE_LOW,
+        DFLT_NETFLOW_REDUCED_KEEPALIVE_MIN,
+        DFLT_NETFLOW_REDUCED_KEEPALIVE_MAX);
+
+  chan->padding_timeout_high_ms =
+    networkstatus_get_param(NULL, "nf_ito_high_reduced",
+        DFLT_NETFLOW_REDUCED_KEEPALIVE_HIGH,
+        chan->padding_timeout_low_ms,
+        DFLT_NETFLOW_REDUCED_KEEPALIVE_MAX);
+
+  log_fn(LOG_INFO,LD_OR,
+         "Reduced padding on channel "U64_FORMAT": lo=%d, hi=%d",
+         U64_PRINTF_ARG(chan->global_identifier),
+         chan->padding_timeout_low_ms, chan->padding_timeout_high_ms);
+}
+
+/**
+ * This function is called once per second by run_connection_housekeeping(),
+ * but only if the channel is still open, valid, and non-wedged.
+ *
+ * It decides if and when we should send a padding cell, and if needed,
+ * schedules a callback to send that cell at the appropriate time.
+ *
+ * Returns an enum that represents the current padding decision state.
+ * Return value is currently used only by unit tests.
+ */
+channelpadding_decision_t
+channelpadding_decide_to_pad_channel(channel_t *chan)
+{
+  const or_options_t *options = get_options();
+
+  /* Only pad open channels */
+  if (chan->state != CHANNEL_STATE_OPEN)
+    return CHANNELPADDING_WONTPAD;
+
+  if (chan->channel_usage == CHANNEL_USED_FOR_FULL_CIRCS) {
+    if (!networkstatus_get_param(NULL, "nf_pad_before_usage", 1, 0, 1))
+      return CHANNELPADDING_WONTPAD;
+  } else if (chan->channel_usage != CHANNEL_USED_FOR_USER_TRAFFIC) {
+    return CHANNELPADDING_WONTPAD;
+  }
+
+  if (chan->pending_padding_callback)
+    return CHANNELPADDING_PADDING_ALREADY_SCHEDULED;
+
+  /* Don't pad the channel if we didn't negotiate it, but still
+   * allow clients to force padding if options->ConnectionPadding is
+   * explicitly set to 1.
+   */
+  if (!chan->padding_enabled && options->ConnectionPadding != 1) {
+    return CHANNELPADDING_WONTPAD;
+  }
+
+  if (!chan->has_queued_writes(chan)) {
+    int is_client_channel = 0;
+
+    if (!public_server_mode(options) || chan->is_client ||
+            !connection_or_digest_is_known_relay(chan->identity_digest)) {
+       is_client_channel = 1;
+    }
+
+    /* If nf_pad_relays=1 is set in the consensus, we pad
+     * on *all* idle connections, relay-relay or relay-client.
+     * Otherwise pad only for client and bridge connections. */
+    if (is_client_channel ||
+        networkstatus_get_param(NULL, "nf_pad_relays", 0, 0, 1)) {
+      int64_t pad_time_ms =
+          channelpadding_compute_time_until_pad_for_netflow(chan);
+
+      if (pad_time_ms == CHANNELPADDING_TIME_DISABLED) {
+        return CHANNELPADDING_WONTPAD;
+      } else if (pad_time_ms == CHANNELPADDING_TIME_LATER) {
+        chan->currently_padding = 1;
+        return CHANNELPADDING_PADLATER;
+      } else {
+       /* We have to schedule a callback because we're called exactly once per
+        * second, but we don't want padding packets to go out exactly on an
+        * integer multiple of seconds. This callback will only be scheduled
+        * if we're within 1.1 seconds of the padding time.
+        */
+        chan->currently_padding = 1;
+        return channelpadding_schedule_padding(chan, pad_time_ms);
+      }
+    } else {
+      chan->currently_padding = 0;
+      return CHANNELPADDING_WONTPAD;
+    }
+  } else {
+    return CHANNELPADDING_PADLATER;
+  }
+}
+
diff --git a/src/or/channelpadding.h b/src/or/channelpadding.h
new file mode 100644
index 0000000000000000000000000000000000000000..07af7a6b4633e4800ea006a3e12c78d2539ab8b8
--- /dev/null
+++ b/src/or/channelpadding.h
@@ -0,0 +1,38 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2015, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file channelpadding.h
+ * \brief Header file for channelpadding.c.
+ **/
+#ifndef TOR_CHANNELPADDING_H
+#define TOR_CHANNELPADDING_H
+
+#include "channelpadding_negotiation.h"
+
+typedef enum {
+  CHANNELPADDING_WONTPAD,
+  CHANNELPADDING_PADLATER,
+  CHANNELPADDING_PADDING_SCHEDULED,
+  CHANNELPADDING_PADDING_ALREADY_SCHEDULED,
+  CHANNELPADDING_PADDING_SENT,
+} channelpadding_decision_t;
+
+channelpadding_decision_t channelpadding_decide_to_pad_channel(channel_t
+                                                               *chan);
+int channelpadding_update_padding_for_channel(channel_t *,
+    const channelpadding_negotiate_t *);
+
+void channelpadding_disable_padding_on_channel(channel_t *chan);
+void channelpadding_reduce_padding_on_channel(channel_t *chan);
+int channelpadding_send_enable_command(channel_t *chan, uint16_t low_timeout,
+                                       uint16_t high_timeout);
+
+int channelpadding_get_circuits_available_timeout(void);
+unsigned int channelpadding_get_channel_idle_timeout(const channel_t *, int);
+
+#endif
+
diff --git a/src/or/channeltls.c b/src/or/channeltls.c
index dbed95fb43babea7b0c0c9dabc08ddfcf052c38c..155684c0cef8c2830aa283295c46e4140a70b3ce 100644
--- a/src/or/channeltls.c
+++ b/src/or/channeltls.c
@@ -57,6 +57,9 @@
 #include "routerlist.h"
 #include "scheduler.h"
 #include "torcert.h"
+#include "networkstatus.h"
+#include "channelpadding_negotiation.h"
+#include "channelpadding.h"
 
 /** How many CELL_PADDING cells have we received, ever? */
 uint64_t stats_n_padding_cells_processed = 0;
@@ -122,6 +125,8 @@ static void channel_tls_process_netinfo_cell(cell_t *cell,
 static int command_allowed_before_handshake(uint8_t command);
 static int enter_v3_handshake_with_cell(var_cell_t *cell,
                                         channel_tls_t *tlschan);
+static void channel_tls_process_padding_negotiate_cell(cell_t *cell,
+                                                       channel_tls_t *chan);
 
 /**
  * Do parts of channel_tls_t initialization common to channel_tls_connect()
@@ -1098,9 +1103,16 @@ channel_tls_handle_cell(cell_t *cell, or_connection_t *conn)
   /* We note that we're on the internet whenever we read a cell. This is
    * a fast operation. */
   entry_guards_note_internet_connectivity(get_guard_selection_info());
+  rep_hist_padding_count_read(PADDING_TYPE_TOTAL);
+
+  if (chan->base_.currently_padding)
+    rep_hist_padding_count_read(PADDING_TYPE_ENABLED_TOTAL);
 
   switch (cell->command) {
     case CELL_PADDING:
+      rep_hist_padding_count_read(PADDING_TYPE_CELL);
+      if (chan->base_.currently_padding)
+        rep_hist_padding_count_read(PADDING_TYPE_ENABLED_CELL);
       ++stats_n_padding_cells_processed;
       /* do nothing */
       break;
@@ -1111,6 +1123,10 @@ channel_tls_handle_cell(cell_t *cell, or_connection_t *conn)
       ++stats_n_netinfo_cells_processed;
       PROCESS_CELL(netinfo, cell, chan);
       break;
+    case CELL_PADDING_NEGOTIATE:
+      ++stats_n_netinfo_cells_processed;
+      PROCESS_CELL(padding_negotiate, cell, chan);
+      break;
     case CELL_CREATE:
     case CELL_CREATE_FAST:
     case CELL_CREATED:
@@ -1570,6 +1586,9 @@ channel_tls_process_versions_cell(var_cell_t *cell, channel_tls_t *chan)
       chan->conn->link_proto >= MIN_LINK_PROTO_FOR_WIDE_CIRC_IDS;
     chan->conn->wide_circ_ids = chan->base_.wide_circ_ids;
 
+    chan->base_.padding_enabled =
+      chan->conn->link_proto >= MIN_LINK_PROTO_FOR_CHANNEL_PADDING;
+
     if (send_certs) {
       if (connection_or_send_certs_cell(chan->conn) < 0) {
         log_warn(LD_OR, "Couldn't send certs cell");
@@ -1594,6 +1613,43 @@ channel_tls_process_versions_cell(var_cell_t *cell, channel_tls_t *chan)
   }
 }
 
+/**
+ * Process a 'padding_negotiate' cell
+ *
+ * This function is called to handle an incoming PADDING_NEGOTIATE cell;
+ * it enables or disables padding accordingly, and applies the timeout
+ * values the cell contains.
+ */
+static void
+channel_tls_process_padding_negotiate_cell(cell_t *cell, channel_tls_t *chan)
+{
+  channelpadding_negotiate_t *negotiation;
+  tor_assert(cell);
+  tor_assert(chan);
+  tor_assert(chan->conn);
+
+  if (chan->conn->link_proto < MIN_LINK_PROTO_FOR_CHANNEL_PADDING) {
+    log_fn(LOG_PROTOCOL_WARN, LD_OR,
+           "Received a PADDING_NEGOTIATE cell on v%d connection; dropping.",
+           chan->conn->link_proto);
+    return;
+  }
+
+  if (channelpadding_negotiate_parse(&negotiation, cell->payload,
+                                     CELL_PAYLOAD_SIZE) < 0) {
+    log_fn(LOG_PROTOCOL_WARN, LD_OR,
+          "Received malformed PADDING_NEGOTIATE cell on v%d connection; "
+          "dropping.", chan->conn->link_proto);
+
+    return;
+  }
+
+  channelpadding_update_padding_for_channel(TLS_CHAN_TO_BASE(chan),
+                                            negotiation);
+
+  channelpadding_negotiate_free(negotiation);
+}
+
 /**
  * Process a 'netinfo' cell
  *
diff --git a/src/or/circuitbuild.c b/src/or/circuitbuild.c
index cd0003439528a562caf90e32166b1771474582cb..24b7c7f4d9235733d91178e8d1570fa0523e374a 100644
--- a/src/or/circuitbuild.c
+++ b/src/or/circuitbuild.c
@@ -919,9 +919,18 @@ circuit_send_next_onion_skin(origin_circuit_t *circ)
     memset(&cc, 0, sizeof(cc));
     if (circ->build_state->onehop_tunnel)
       control_event_bootstrap(BOOTSTRAP_STATUS_ONEHOP_CREATE, 0);
-    else
+    else {
       control_event_bootstrap(BOOTSTRAP_STATUS_CIRCUIT_CREATE, 0);
 
+      /* If this is not a one-hop tunnel, the channel is being used
+       * for traffic that wants anonymity and protection from traffic
+       * analysis (such as netflow record retention). That means we want
+       * to pad it.
+       */
+      if (circ->base_.n_chan->channel_usage < CHANNEL_USED_FOR_FULL_CIRCS)
+        circ->base_.n_chan->channel_usage = CHANNEL_USED_FOR_FULL_CIRCS;
+    }
+
     node = node_get_by_id(circ->base_.n_chan->identity_digest);
     fast = should_use_create_fast_for_circuit(circ);
     if (!fast) {
diff --git a/src/or/command.c b/src/or/command.c
index 5866c386e467b51d321ff8ae3c2d00f254bae26e..cebb5bfc7abeea4915df1cd6fbba2459d702496b 100644
--- a/src/or/command.c
+++ b/src/or/command.c
@@ -328,8 +328,16 @@ command_process_create_cell(cell_t *cell, channel_t *chan)
 
   if (create_cell->handshake_type != ONION_HANDSHAKE_TYPE_FAST) {
     /* hand it off to the cpuworkers, and then return. */
-    if (connection_or_digest_is_known_relay(chan->identity_digest))
+    if (connection_or_digest_is_known_relay(chan->identity_digest)) {
       rep_hist_note_circuit_handshake_requested(create_cell->handshake_type);
+      // Needed for chutney: Sometimes relays aren't in the consensus yet, and
+      // get marked as clients. This resets their channels once they appear.
+      // Probably useful for normal operation wrt relay flapping, too.
+      chan->is_client = 0;
+    } else {
+      channel_mark_client(chan);
+    }
+
     if (assign_onionskin_to_cpuworker(circ, create_cell) < 0) {
       log_debug(LD_GENERAL,"Failed to hand off onionskin. Closing.");
       circuit_mark_for_close(TO_CIRCUIT(circ), END_CIRC_REASON_RESOURCELIMIT);
@@ -344,9 +352,15 @@ command_process_create_cell(cell_t *cell, channel_t *chan)
     int len;
     created_cell_t created_cell;
 
-    /* Make sure we never try to use the OR connection on which we
-     * received this cell to satisfy an EXTEND request,  */
-    channel_mark_client(chan);
+    /* If this is a create_fast, this might be a client. Let's check. */
+    if (connection_or_digest_is_known_relay(chan->identity_digest)) {
+      // Needed for chutney: Sometimes relays aren't in the consensus yet, and
+      // get marked as clients. This resets their channels once they appear.
+      // Probably useful for normal operation wrt relay flapping, too.
+      chan->is_client = 0;
+    } else {
+      channel_mark_client(chan);
+    }
 
     memset(&created_cell, 0, sizeof(created_cell));
     len = onion_skin_server_handshake(ONION_HANDSHAKE_TYPE_FAST,
diff --git a/src/or/config.c b/src/or/config.c
index a527571cb024a82cae16288c90d9fd852dcd8e13..d7489c0a462457fbeae361b0efaa2d55fbf7b87f 100644
--- a/src/or/config.c
+++ b/src/or/config.c
@@ -242,6 +242,7 @@ static config_var_t option_vars_[] = {
   V(BridgeRecordUsageByCountry,  BOOL,     "1"),
   V(BridgeRelay,                 BOOL,     "0"),
   V(CellStatistics,              BOOL,     "0"),
+  V(PaddingStatistics,           BOOL,     "1"),
   V(LearnCircuitBuildTimeout,    BOOL,     "1"),
   V(CircuitBuildTimeout,         INTERVAL, "0"),
   V(CircuitIdleTimeout,          INTERVAL, "1 hour"),
@@ -458,6 +459,8 @@ static config_var_t option_vars_[] = {
   V(RecommendedClientVersions,   LINELIST, NULL),
   V(RecommendedServerVersions,   LINELIST, NULL),
   V(RecommendedPackages,         LINELIST, NULL),
+  V(ReducedConnectionPadding,    BOOL,     "0"),
+  V(ConnectionPadding,           AUTOBOOL, "auto"),
   V(RefuseUnknownExits,          AUTOBOOL, "auto"),
   V(RejectPlaintextPorts,        CSV,      ""),
   V(RelayBandwidthBurst,         MEMUNIT,  "0"),
@@ -3429,6 +3432,14 @@ options_validate(or_options_t *old_options, or_options_t *options,
     options->DirPort_set = 0;
   }
 
+  if (server_mode(options) && options->ConnectionPadding != -1) {
+    REJECT("Relays must use 'auto' for the ConnectionPadding setting.");
+  }
+
+  if (server_mode(options) && options->ReducedConnectionPadding != 0) {
+    REJECT("Relays cannot set ReducedConnectionPadding. ");
+  }
+
   if (options->MinUptimeHidServDirectoryV2 < 0) {
     log_warn(LD_CONFIG, "MinUptimeHidServDirectoryV2 option must be at "
                         "least 0 seconds. Changing to 0.");
diff --git a/src/or/connection_or.c b/src/or/connection_or.c
index cefe42c4db234ad2e270362d49ac202e69ece72e..40c28e60e52736bd9d221b7d1a726cb0210910e0 100644
--- a/src/or/connection_or.c
+++ b/src/or/connection_or.c
@@ -55,6 +55,7 @@
 #include "ext_orport.h"
 #include "scheduler.h"
 #include "torcert.h"
+#include "channelpadding.h"
 
 static int connection_tls_finish_handshake(or_connection_t *conn);
 static int connection_or_launch_v3_or_handshake(or_connection_t *conn);
@@ -1983,12 +1984,23 @@ connection_or_write_cell_to_buf(const cell_t *cell, or_connection_t *conn)
 
   cell_pack(&networkcell, cell, conn->wide_circ_ids);
 
+  rep_hist_padding_count_write(PADDING_TYPE_TOTAL);
+  if (cell->command == CELL_PADDING)
+    rep_hist_padding_count_write(PADDING_TYPE_CELL);
+
   connection_write_to_buf(networkcell.body, cell_network_size, TO_CONN(conn));
 
   /* Touch the channel's active timestamp if there is one */
-  if (conn->chan)
+  if (conn->chan) {
     channel_timestamp_active(TLS_CHAN_TO_BASE(conn->chan));
 
+    if (conn->chan->base_.currently_padding) {
+      rep_hist_padding_count_write(PADDING_TYPE_ENABLED_TOTAL);
+      if (cell->command == CELL_PADDING)
+        rep_hist_padding_count_write(PADDING_TYPE_ENABLED_CELL);
+    }
+  }
+
   if (conn->base_.state == OR_CONN_STATE_OR_HANDSHAKING_V3)
     or_handshake_state_record_cell(conn, conn->handshake_state, cell, 0);
 }
@@ -2094,7 +2106,7 @@ connection_or_process_cells_from_inbuf(or_connection_t *conn)
 }
 
 /** Array of recognized link protocol versions. */
-static const uint16_t or_protocol_versions[] = { 1, 2, 3, 4 };
+static const uint16_t or_protocol_versions[] = { 1, 2, 3, 4, 5 };
 /** Number of versions in <b>or_protocol_versions</b>. */
 static const int n_or_protocol_versions =
   (int)( sizeof(or_protocol_versions)/sizeof(uint16_t) );
diff --git a/src/or/connection_or.h b/src/or/connection_or.h
index 80a5bddb143b2f4fa155013f11cf1ee978cd4e95..5d089e67897784318715ecc2cd3ad0bc7d36b8bf 100644
--- a/src/or/connection_or.h
+++ b/src/or/connection_or.h
@@ -109,6 +109,8 @@ void var_cell_free(var_cell_t *cell);
 
 /* DOCDOC */
 #define MIN_LINK_PROTO_FOR_WIDE_CIRC_IDS 4
+#define MIN_LINK_PROTO_FOR_CHANNEL_PADDING 5
+#define MAX_LINK_PROTO MIN_LINK_PROTO_FOR_CHANNEL_PADDING
 
 void connection_or_group_set_badness_(smartlist_t *group, int force);
 
diff --git a/src/or/include.am b/src/or/include.am
index 4e54deca557ea703a2cffd06e373d15237a51304..c52fdf1fb0dc8516aa0ecc1e7ee2817167afb6ef 100644
--- a/src/or/include.am
+++ b/src/or/include.am
@@ -22,6 +22,7 @@ LIBTOR_A_SOURCES = \
 	src/or/bridges.c				\
 	src/or/buffers.c				\
 	src/or/channel.c				\
+	src/or/channelpadding.c				\
 	src/or/channeltls.c				\
 	src/or/circpathbias.c				\
 	src/or/circuitbuild.c				\
@@ -137,6 +138,7 @@ ORHEADERS = \
 	src/or/bridges.h				\
 	src/or/buffers.h				\
 	src/or/channel.h				\
+	src/or/channelpadding.h				\
 	src/or/channeltls.h				\
 	src/or/circpathbias.h				\
 	src/or/circuitbuild.h				\
diff --git a/src/or/main.c b/src/or/main.c
index 5549f97998779e660f1a5e0c010a4fd128f71902..ef4a1ffdd0eddcbb1f434dc3f76aa96ccd44976f 100644
--- a/src/or/main.c
+++ b/src/or/main.c
@@ -54,6 +54,7 @@
 #include "buffers.h"
 #include "channel.h"
 #include "channeltls.h"
+#include "channelpadding.h"
 #include "circuitbuild.h"
 #include "circuitlist.h"
 #include "circuituse.h"
@@ -176,7 +177,7 @@ static int signewnym_is_pending = 0;
 static unsigned newnym_epoch = 0;
 
 /** Smartlist of all open connections. */
-static smartlist_t *connection_array = NULL;
+STATIC smartlist_t *connection_array = NULL;
 /** List of connections that have been marked for close and need to be freed
  * and removed from connection_array. */
 static smartlist_t *closeable_connection_lst = NULL;
@@ -1119,6 +1120,8 @@ run_connection_housekeeping(int i, time_t now)
     memset(&cell,0,sizeof(cell_t));
     cell.command = CELL_PADDING;
     connection_or_write_cell_to_buf(&cell, or_conn);
+  } else {
+    channelpadding_decide_to_pad_channel(chan);
   }
 }
 
@@ -1184,6 +1187,7 @@ CALLBACK(check_dns_honesty);
 CALLBACK(write_bridge_ns);
 CALLBACK(check_fw_helper_app);
 CALLBACK(heartbeat);
+CALLBACK(reset_padding_counts);
 
 #undef CALLBACK
 
@@ -1215,6 +1219,7 @@ static periodic_event_item_t periodic_events[] = {
   CALLBACK(write_bridge_ns),
   CALLBACK(check_fw_helper_app),
   CALLBACK(heartbeat),
+  CALLBACK(reset_padding_counts),
   END_OF_PERIODIC_EVENTS
 };
 #undef CALLBACK
@@ -1723,6 +1728,17 @@ write_stats_file_callback(time_t now, const or_options_t *options)
 /**
  * Periodic callback: Write bridge statistics to disk if appropriate.
  */
+static int
+reset_padding_counts_callback(time_t now, const or_options_t *options)
+{
+  if (options->PaddingStatistics) {
+    rep_hist_prep_published_padding_counts(now);
+  }
+
+  rep_hist_reset_padding_counts();
+  return REPHIST_CELL_PADDING_COUNTS_INTERVAL;
+}
+
 static int
 record_bridge_stats_callback(time_t now, const or_options_t *options)
 {
@@ -2336,6 +2352,8 @@ do_main_loop(void)
   }
 
   handle_signals(1);
+  monotime_init();
+  timers_initialize();
 
   /* load the private keys, if we're supposed to have them, and set up the
    * TLS context. */
@@ -3196,6 +3214,9 @@ tor_cleanup(void)
       rep_hist_record_mtbf_data(now, 0);
     keypin_close_journal();
   }
+
+  timers_shutdown();
+
 #ifdef USE_DMALLOC
   dmalloc_log_stats();
 #endif
diff --git a/src/or/networkstatus.c b/src/or/networkstatus.c
index 508cf6c5b6ac784e77b44cedbcece622cc53e4b3..76b968a8c88f38667cff7bc6f9ad8a1ac8adb549 100644
--- a/src/or/networkstatus.c
+++ b/src/or/networkstatus.c
@@ -72,11 +72,11 @@ static strmap_t *unnamed_server_map = NULL;
 
 /** Most recently received and validated v3 "ns"-flavored consensus network
  * status. */
-static networkstatus_t *current_ns_consensus = NULL;
+STATIC networkstatus_t *current_ns_consensus = NULL;
 
 /** Most recently received and validated v3 "microdec"-flavored consensus
  * network status. */
-static networkstatus_t *current_md_consensus = NULL;
+STATIC networkstatus_t *current_md_consensus = NULL;
 
 /** A v3 consensus networkstatus that we've received, but which we don't
  * have enough certificates to be happy about. */
diff --git a/src/or/or.h b/src/or/or.h
index 0db9f23604469d044a52d736b519ef580090794b..02c43a5eac0283cce6b46e4938ebadac9364f1cf 100644
--- a/src/or/or.h
+++ b/src/or/or.h
@@ -875,6 +875,7 @@ typedef enum {
 #define CELL_RELAY_EARLY 9
 #define CELL_CREATE2 10
 #define CELL_CREATED2 11
+#define CELL_PADDING_NEGOTIATE 12
 
 #define CELL_VPADDING 128
 #define CELL_CERTS 129
@@ -3756,6 +3757,15 @@ typedef struct {
   int AvoidDiskWrites; /**< Boolean: should we never cache things to disk?
                         * Not used yet. */
   int ClientOnly; /**< Boolean: should we never evolve into a server role? */
+
+  int ReducedConnectionPadding; /**< Boolean: Should we try to keep
+                                  connections open for less time, and send
+                                  less of the padding used to defend against
+                                  connection-level traffic analysis? */
+  /** Autobool: if auto, then connection padding will be negotiated by client
+   * and server. If 0, it will be fully disabled. If 1, the client will still
+   * pad to the server regardless of server support. */
+  int ConnectionPadding;
+
   /** To what authority types do we publish our descriptor? Choices are
    * "v1", "v2", "v3", "bridge", or "". */
   smartlist_t *PublishServerDescriptor;
@@ -4163,6 +4173,9 @@ typedef struct {
   /** If true, the user wants us to collect cell statistics. */
   int CellStatistics;
 
+  /** If true, the user wants us to collect padding statistics. */
+  int PaddingStatistics;
+
   /** If true, the user wants us to collect statistics as entry node. */
   int EntryStatistics;
 
diff --git a/src/or/relay.c b/src/or/relay.c
index 2e76a8ec36556136cfde6990476bb0765c6953a5..18b68fff50f142333397ae5dbe7e6daad903b534 100644
--- a/src/or/relay.c
+++ b/src/or/relay.c
@@ -74,6 +74,7 @@
 #include "routerlist.h"
 #include "routerparse.h"
 #include "scheduler.h"
+#include "rephist.h"
 
 static edge_connection_t *relay_lookup_conn(circuit_t *circ, cell_t *cell,
                                             cell_direction_t cell_direction,
@@ -196,6 +197,82 @@ relay_crypt_one_payload(crypto_cipher_t *cipher, uint8_t *in,
   return 0;
 }
 
+/**
+ * Update channel usage state based on the type of relay cell and
+ * circuit properties.
+ *
+ * This is needed to determine if a client channel is being
+ * used for application traffic, and if a relay channel is being
+ * used for multihop circuits and application traffic. The decision
+ * to pad in channelpadding.c depends upon this info (as well as
+ * consensus parameters) to decide what channels to pad.
+ */
+static void
+circuit_update_channel_usage(circuit_t *circ, cell_t *cell)
+{
+  if (CIRCUIT_IS_ORIGIN(circ)) {
+    /*
+     * The client state was first set much earlier in
+     * circuit_send_next_onion_skin(), so we can start padding as early as
+     * possible.
+     *
+     * However, if padding turns out to be expensive, we may want to not do
+     * it until actual application traffic starts flowing (which is controlled
+     * via consensus param nf_pad_before_usage).
+     *
+     * So: If we're an origin circuit and we've created a full length circuit,
+     * then any CELL_RELAY cell means application data. Increase the usage
+     * state of the channel to indicate this.
+     *
+     * We want to wait for CELL_RELAY specifically here, so we know that
+     * the channel was definitely being used for data and not for extends.
+     * By default, we pad as soon as a channel has been used for *any*
+     * circuits, so this state is irrelevant to the padding decision in
+     * the default case. However, if padding turns out to be expensive,
+     * we would like the ability to avoid padding until we're absolutely
+     * sure that a channel is used for enough application data to be worth
+     * padding.
+     *
+     * (So it does not matter that CELL_RELAY_EARLY can actually contain
+     * application data. This is only a load reducing option and that edge
+     * case does not matter if we're desperately trying to reduce overhead
+     * anyway. See also consensus parameter nf_pad_before_usage).
+     */
+    if (BUG(!circ->n_chan))
+      return;
+
+    if (circ->n_chan->channel_usage == CHANNEL_USED_FOR_FULL_CIRCS &&
+        cell->command == CELL_RELAY) {
+      circ->n_chan->channel_usage = CHANNEL_USED_FOR_USER_TRAFFIC;
+    }
+  } else {
+    /* If we're a relay circuit, the question is more complicated. Basically:
+     * we only want to pad connections that carry multihop (anonymous)
+     * circuits.
+     *
+     * We assume we're more than one hop if either the previous hop
+     * is not a client, or if the previous hop is a client and there's
+     * a next hop. Then, circuit traffic starts at RELAY_EARLY, and
+     * user application traffic starts when we see RELAY cells.
+     */
+    or_circuit_t *or_circ = TO_OR_CIRCUIT(circ);
+
+    if (BUG(!or_circ->p_chan))
+      return;
+
+    if (!or_circ->p_chan->is_client ||
+        (or_circ->p_chan->is_client && circ->n_chan)) {
+      if (cell->command == CELL_RELAY_EARLY) {
+        if (or_circ->p_chan->channel_usage < CHANNEL_USED_FOR_FULL_CIRCS) {
+          or_circ->p_chan->channel_usage = CHANNEL_USED_FOR_FULL_CIRCS;
+        }
+      } else if (cell->command == CELL_RELAY) {
+        or_circ->p_chan->channel_usage = CHANNEL_USED_FOR_USER_TRAFFIC;
+      }
+    }
+  }
+}
+
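A simplified sketch (not part of the patch) of the relay-side transitions applied above, for a circuit believed to be multihop; on the origin side the channel is bumped to CHANNEL_USED_FOR_FULL_CIRCS earlier, in circuit_send_next_onion_skin().

```c
/* Sketch only: next usage state for the previous-hop channel, given the
 * current state and the incoming cell command. Mirrors the relay branch
 * of circuit_update_channel_usage() above. */
#include "or.h"      /* CELL_RELAY, CELL_RELAY_EARLY */
#include "channel.h" /* channel_usage_info_t */

static channel_usage_info_t
example_next_usage(channel_usage_info_t cur, uint8_t cell_command)
{
  if (cell_command == CELL_RELAY)
    return CHANNEL_USED_FOR_USER_TRAFFIC;   /* application data seen */
  if (cell_command == CELL_RELAY_EARLY &&
      cur < CHANNEL_USED_FOR_FULL_CIRCS)
    return CHANNEL_USED_FOR_FULL_CIRCS;     /* circuit building seen */
  return cur;                               /* no change */
}
```

channelpadding_decide_to_pad_channel() pads CHANNEL_USED_FOR_USER_TRAFFIC channels (subject to its other checks), and CHANNEL_USED_FOR_FULL_CIRCS channels only when the consensus parameter nf_pad_before_usage is 1, which is the default.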
 /** Receive a relay cell:
  *  - Crypt it (encrypt if headed toward the origin or if we <b>are</b> the
  *    origin; decrypt if we're headed toward the exit).
@@ -229,6 +306,8 @@ circuit_receive_relay_cell(cell_t *cell, circuit_t *circ,
     return -END_CIRC_REASON_INTERNAL;
   }
 
+  circuit_update_channel_usage(circ, cell);
+
   if (recognized) {
     edge_connection_t *conn = NULL;
 
@@ -632,6 +711,9 @@ relay_send_command_from_edge_,(streamid_t stream_id, circuit_t *circ,
   log_debug(LD_OR,"delivering %d cell %s.", relay_command,
             cell_direction == CELL_DIRECTION_OUT ? "forward" : "backward");
 
+  if (relay_command == RELAY_COMMAND_DROP)
+    rep_hist_padding_count_write(PADDING_TYPE_DROP);
+
   /* If we are sending an END cell and this circuit is used for a tunneled
    * directory request, advance its state. */
   if (relay_command == RELAY_COMMAND_END && circ->dirreq_id)
@@ -1513,6 +1595,7 @@ connection_edge_process_relay_cell(cell_t *cell, circuit_t *circ,
 
   switch (rh.command) {
     case RELAY_COMMAND_DROP:
+      rep_hist_padding_count_read(PADDING_TYPE_DROP);
 //      log_info(domain,"Got a relay-level padding cell. Dropping.");
       return 0;
     case RELAY_COMMAND_BEGIN:
diff --git a/src/or/rephist.c b/src/or/rephist.c
index 8bcd7396aaf30f833bbbca8fb2cd3698b506411f..a87ae9ef17976b973b54ac52ba985195fcdb697e 100644
--- a/src/or/rephist.c
+++ b/src/or/rephist.c
@@ -85,6 +85,9 @@
 #include "routerlist.h"
 #include "ht.h"
 
+#include "channelpadding.h"
+#include "connection_or.h"
+
 static void bw_arrays_init(void);
 static void predicted_ports_init(void);
 
@@ -165,6 +168,44 @@ typedef struct or_history_t {
   digestmap_t *link_history_map;
 } or_history_t;
 
+/**
+ * This structure holds accounting needed to calculate the padding overhead.
+ */
+typedef struct padding_counts_t {
+  /** Total number of cells we have received, including padding */
+  uint64_t read_cell_count;
+  /** Total number of cells we have sent, including padding */
+  uint64_t write_cell_count;
+  /** Total number of CELL_PADDING cells we have received */
+  uint64_t read_pad_cell_count;
+  /** Total number of CELL_PADDING cells we have sent */
+  uint64_t write_pad_cell_count;
+  /** Total number of read cells on padding-enabled conns */
+  uint64_t enabled_read_cell_count;
+  /** Total number of sent cells on padding-enabled conns */
+  uint64_t enabled_write_cell_count;
+  /** Total number of read CELL_PADDING cells on padding-enabled conns */
+  uint64_t enabled_read_pad_cell_count;
+  /** Total number of sent CELL_PADDING cells on padding-enabled conns */
+  uint64_t enabled_write_pad_cell_count;
+  /** Total number of RELAY_DROP cells we have received */
+  uint64_t read_drop_cell_count;
+  /** Total number of RELAY_DROP cells we have sent */
+  uint64_t write_drop_cell_count;
+  /** The maximum number of padding timers we've seen in 24 hours */
+  uint64_t maximum_chanpad_timers;
+  /** When did we first copy padding_current into padding_published? */
+  char first_published_at[ISO_TIME_LEN+1];
+} padding_counts_t;
+
+/** Holds the current values of our padding statistics.
+ * It is not published until it is transferred to padding_published. */
+static padding_counts_t padding_current;
+
+/** Remains fixed for a 24 hour period, and then is replaced
+ * by a redacted copy of padding_current */
+static padding_counts_t padding_published;
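+/* Lifecycle sketch (as implemented below): counters accumulate in
+ * padding_current; once every 24 hours,
+ * rep_hist_prep_published_padding_counts() copies them into
+ * padding_published with rounding and redaction applied, and
+ * rep_hist_reset_padding_counts() clears padding_current for the next
+ * interval. rep_hist_get_padding_count_lines() then formats
+ * padding_published for our extra-info descriptor. */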
+
 /** When did we last multiply all routers' weighted_run_length and
  * total_run_weights by STABILITY_ALPHA? */
 static time_t stability_last_downrated = 0;
@@ -1828,6 +1869,7 @@ rep_hist_get_predicted_ports(time_t now)
   int predicted_circs_relevance_time;
   smartlist_t *out = smartlist_new();
   tor_assert(predicted_ports_list);
+  // XXX: Change this if ReducedConnectionPadding is set.
   predicted_circs_relevance_time = get_options()->PredictedPortsRelevanceTime;
 
   /* clean out obsolete entries */
@@ -3210,8 +3252,7 @@ rep_hist_hs_stats_write(time_t now)
   return start_of_hs_stats_interval + WRITE_STATS_INTERVAL;
 }
 
-#define MAX_LINK_PROTO_TO_LOG 4
-static uint64_t link_proto_count[MAX_LINK_PROTO_TO_LOG+1][2];
+static uint64_t link_proto_count[MAX_LINK_PROTO+1][2];
 
 /** Note that we negotiated link protocol version <b>link_proto</b>, on
  * a connection that started here iff <b>started_here</b> is true.
@@ -3220,7 +3261,7 @@ void
 rep_hist_note_negotiated_link_proto(unsigned link_proto, int started_here)
 {
   started_here = !!started_here; /* force to 0 or 1 */
-  if (link_proto > MAX_LINK_PROTO_TO_LOG) {
+  if (link_proto > MAX_LINK_PROTO) {
     log_warn(LD_BUG, "Can't log link protocol %u", link_proto);
     return;
   }
@@ -3228,6 +3269,165 @@ rep_hist_note_negotiated_link_proto(unsigned link_proto, int started_here)
   link_proto_count[link_proto][started_here]++;
 }
 
+/**
+ * Update the maximum count of total pending channel padding timers
+ * in this period.
+ */
+void
+rep_hist_padding_count_timers(uint64_t num_timers)
+{
+  if (num_timers > padding_current.maximum_chanpad_timers) {
+    padding_current.maximum_chanpad_timers = num_timers;
+  }
+}
+
+/**
+ * Count a cell that we sent for padding overhead statistics.
+ *
+ * RELAY_COMMAND_DROP and CELL_PADDING are accounted separately. Both should be
+ * counted for PADDING_TYPE_TOTAL.
+ */
+void
+rep_hist_padding_count_write(padding_type_t type)
+{
+  switch (type) {
+    case PADDING_TYPE_DROP:
+      padding_current.write_drop_cell_count++;
+      break;
+    case PADDING_TYPE_CELL:
+      padding_current.write_pad_cell_count++;
+      break;
+    case PADDING_TYPE_TOTAL:
+      padding_current.write_cell_count++;
+      break;
+    case PADDING_TYPE_ENABLED_TOTAL:
+      padding_current.enabled_write_cell_count++;
+      break;
+    case PADDING_TYPE_ENABLED_CELL:
+      padding_current.enabled_write_pad_cell_count++;
+      break;
+  }
+}
+
+/**
+ * Count a cell that we've received for padding overhead statistics.
+ *
+ * RELAY_COMMAND_DROP and CELL_PADDING are accounted separately. Both should be
+ * counted for PADDING_TYPE_TOTAL.
+ */
+void
+rep_hist_padding_count_read(padding_type_t type)
+{
+  switch (type) {
+    case PADDING_TYPE_DROP:
+      padding_current.read_drop_cell_count++;
+      break;
+    case PADDING_TYPE_CELL:
+      padding_current.read_pad_cell_count++;
+      break;
+    case PADDING_TYPE_TOTAL:
+      padding_current.read_cell_count++;
+      break;
+    case PADDING_TYPE_ENABLED_TOTAL:
+      padding_current.enabled_read_cell_count++;
+      break;
+    case PADDING_TYPE_ENABLED_CELL:
+      padding_current.enabled_read_pad_cell_count++;
+      break;
+  }
+}
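+
+/* Illustrative (hypothetical) call-site sketch for the two counters above:
+ * a caller on the cell-write path would count the total first, and then the
+ * padding subtype if applicable, e.g.:
+ *
+ *   rep_hist_padding_count_write(PADDING_TYPE_TOTAL);
+ *   if (cell->command == CELL_PADDING)
+ *     rep_hist_padding_count_write(PADDING_TYPE_CELL);
+ *
+ * and similarly with rep_hist_padding_count_read() on the read path. */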
+
+/**
+ * Reset our current padding statistics. Called once every 24 hours.
+ */
+void
+rep_hist_reset_padding_counts(void)
+{
+  memset(&padding_current, 0, sizeof(padding_current));
+}
+
+/**
+ * Copy our current cell counts into a structure for listing in our
+ * extra-info descriptor. Also perform appropriate rounding and redaction.
+ *
+ * This function is called once every 24 hours.
+ */
+#define MIN_CELL_COUNTS_TO_PUBLISH 1
+#define ROUND_CELL_COUNTS_TO 10000
+void
+rep_hist_prep_published_padding_counts(time_t now)
+{
+  memcpy(&padding_published, &padding_current, sizeof(padding_published));
+
+  if (padding_published.read_cell_count < MIN_CELL_COUNTS_TO_PUBLISH ||
+      padding_published.write_cell_count < MIN_CELL_COUNTS_TO_PUBLISH) {
+    memset(&padding_published, 0, sizeof(padding_published));
+    return;
+  }
+
+  format_iso_time(padding_published.first_published_at, now);
+#define ROUND_AND_SET_COUNT(x) (x) = round_uint64_to_next_multiple_of((x), \
+                                      ROUND_CELL_COUNTS_TO)
+  ROUND_AND_SET_COUNT(padding_published.read_pad_cell_count);
+  ROUND_AND_SET_COUNT(padding_published.write_pad_cell_count);
+  ROUND_AND_SET_COUNT(padding_published.read_drop_cell_count);
+  ROUND_AND_SET_COUNT(padding_published.write_drop_cell_count);
+  ROUND_AND_SET_COUNT(padding_published.write_cell_count);
+  ROUND_AND_SET_COUNT(padding_published.read_cell_count);
+  ROUND_AND_SET_COUNT(padding_published.enabled_read_cell_count);
+  ROUND_AND_SET_COUNT(padding_published.enabled_read_pad_cell_count);
+  ROUND_AND_SET_COUNT(padding_published.enabled_write_cell_count);
+  ROUND_AND_SET_COUNT(padding_published.enabled_write_pad_cell_count);
+#undef ROUND_AND_SET_COUNT
+}
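+
+/* Worked example of the rounding and redaction above: with
+ * ROUND_CELL_COUNTS_TO at 10000, a raw count of 123456 is published as
+ * 130000 (rounded up to the next multiple of 10000); if either 24 hour
+ * total is zero, the entire record is zeroed and nothing is published. */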
+
+/**
+ * Return a newly allocated string containing the padding statistics line
+ * for our extra-info descriptor, covering the last 24 hour interval, or
+ * NULL if there is nothing to publish.
+ */
+char *
+rep_hist_get_padding_count_lines(void)
+{
+  char *result = NULL;
+
+  if (!padding_published.read_cell_count ||
+          !padding_published.write_cell_count) {
+    return NULL;
+  }
+
+  tor_asprintf(&result, "padding-counts %s (%d s)"
+                        " bin-size="U64_FORMAT
+                        " write-drop="U64_FORMAT
+                        " write-pad="U64_FORMAT
+                        " write-total="U64_FORMAT
+                        " read-drop="U64_FORMAT
+                        " read-pad="U64_FORMAT
+                        " read-total="U64_FORMAT
+                        " enabled-read-pad="U64_FORMAT
+                        " enabled-read-total="U64_FORMAT
+                        " enabled-write-pad="U64_FORMAT
+                        " enabled-write-total="U64_FORMAT
+                        " max-chanpad-timers="U64_FORMAT
+                        "\n",
+               padding_published.first_published_at,
+               REPHIST_CELL_PADDING_COUNTS_INTERVAL,
+               U64_PRINTF_ARG(ROUND_CELL_COUNTS_TO),
+               U64_PRINTF_ARG(padding_published.write_drop_cell_count),
+               U64_PRINTF_ARG(padding_published.write_pad_cell_count),
+               U64_PRINTF_ARG(padding_published.write_cell_count),
+               U64_PRINTF_ARG(padding_published.read_drop_cell_count),
+               U64_PRINTF_ARG(padding_published.read_pad_cell_count),
+               U64_PRINTF_ARG(padding_published.read_cell_count),
+               U64_PRINTF_ARG(padding_published.enabled_read_pad_cell_count),
+               U64_PRINTF_ARG(padding_published.enabled_read_cell_count),
+               U64_PRINTF_ARG(padding_published.enabled_write_pad_cell_count),
+               U64_PRINTF_ARG(padding_published.enabled_write_cell_count),
+               U64_PRINTF_ARG(padding_published.maximum_chanpad_timers)
+               );
+
+  return result;
+}
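+
+/* For illustration, a published line (values hypothetical) would look like:
+ *
+ *   padding-counts 2017-05-10 01:48:43 (86400 s) bin-size=10000
+ *     write-drop=0 write-pad=10000 write-total=10000 read-drop=0
+ *     read-pad=10000 read-total=70000 enabled-read-pad=0
+ *     enabled-read-total=0 enabled-write-pad=0 enabled-write-total=0
+ *     max-chanpad-timers=0
+ *
+ * (wrapped here for readability; the descriptor entry is a single line). */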
+
 /** Log a heartbeat message explaining how many connections of each link
  * protocol version we have used.
  */
diff --git a/src/or/rephist.h b/src/or/rephist.h
index ff4810a56d85499deb581b0c5b9d1cfeaafafcb1..4d95bff0d0052ebf91c08f061594499ca41002be 100644
--- a/src/or/rephist.h
+++ b/src/or/rephist.h
@@ -119,5 +119,30 @@ extern int onion_handshakes_requested[MAX_ONION_HANDSHAKE_TYPE+1];
 extern int onion_handshakes_assigned[MAX_ONION_HANDSHAKE_TYPE+1];
 #endif
 
+/**
+ * Represents the type of a cell for padding accounting
+ */
+typedef enum padding_type_t {
+    /** A RELAY_DROP cell */
+    PADDING_TYPE_DROP,
+    /** A CELL_PADDING cell */
+    PADDING_TYPE_CELL,
+    /** Total counts of padding and non-padding together */
+    PADDING_TYPE_TOTAL,
+    /** Total cell counts for all padding-enabled channels */
+    PADDING_TYPE_ENABLED_TOTAL,
+    /** CELL_PADDING counts for all padding-enabled channels */
+    PADDING_TYPE_ENABLED_CELL
+} padding_type_t;
+
+/** The interval, in seconds, over which the padding cell counts are
+ * accumulated before being published. */
+#define REPHIST_CELL_PADDING_COUNTS_INTERVAL (24*60*60)
+void rep_hist_padding_count_read(padding_type_t type);
+void rep_hist_padding_count_write(padding_type_t type);
+char *rep_hist_get_padding_count_lines(void);
+void rep_hist_reset_padding_counts(void);
+void rep_hist_prep_published_padding_counts(time_t now);
+void rep_hist_padding_count_timers(uint64_t num_timers);
+
 #endif
 
diff --git a/src/or/router.c b/src/or/router.c
index e4fa72a28376ca85ae2be26c4b34582f87303665..82f963a3f76d5a20202bf07d9392ec6a195737b1 100644
--- a/src/or/router.c
+++ b/src/or/router.c
@@ -3199,6 +3199,12 @@ extrainfo_dump_to_string(char **s_out, extrainfo_t *extrainfo,
     }
   }
 
+  if (options->PaddingStatistics) {
+    contents = rep_hist_get_padding_count_lines();
+    if (contents)
+      smartlist_add(chunks, contents);
+  }
+
   /* Add information about the pluggable transports we support. */
   if (options->ServerTransportPlugin) {
     char *pluggable_transports = pt_get_extra_info_descriptor_string();
diff --git a/src/test/Makefile.nmake b/src/test/Makefile.nmake
index 0ba56d7036b0006c60d35ce7daeb931fb19342cd..69a06eec4d0d806323460f09818da61f437bc518 100644
--- a/src/test/Makefile.nmake
+++ b/src/test/Makefile.nmake
@@ -17,6 +17,7 @@ TEST_OBJECTS = test.obj test_addr.obj test_channel.obj test_channeltls.obj \
 	test_checkdir.obj test_microdesc.obj test_pt.obj test_util.obj \
         test_config.obj test_connection.obj \
 	test_cell_formats.obj test_relay.obj test_replay.obj \
+	test_channelpadding.obj \
 	test_scheduler.obj test_introduce.obj test_hs.obj tinytest.obj
 
 tinytest.obj: ..\ext\tinytest.c
diff --git a/src/test/include.am b/src/test/include.am
index 1c0726fd3a4e110dabd4edf6f9ad01657c340120..bcc2b56c18ef138810397a3ed19a53aacb5db847 100644
--- a/src/test/include.am
+++ b/src/test/include.am
@@ -78,6 +78,7 @@ src_test_test_SOURCES = \
 	src/test/test_cell_formats.c \
 	src/test/test_cell_queue.c \
 	src/test/test_channel.c \
+	src/test/test_channelpadding.c \
 	src/test/test_channeltls.c \
 	src/test/test_checkdir.c \
 	src/test/test_circuitlist.c \
diff --git a/src/test/test.c b/src/test/test.c
index 866408e8568d90c1dc4004298e52ae9329be1dc5..9a78859ef24e12375cb04d87b6b660cee2b9dd08 100644
--- a/src/test/test.c
+++ b/src/test/test.c
@@ -1186,6 +1186,7 @@ struct testgroup_t testgroups[] = {
   { "cellfmt/", cell_format_tests },
   { "cellqueue/", cell_queue_tests },
   { "channel/", channel_tests },
+  { "channelpadding/", channelpadding_tests },
   { "channeltls/", channeltls_tests },
   { "checkdir/", checkdir_tests },
   { "circuitlist/", circuitlist_tests },
diff --git a/src/test/test.h b/src/test/test.h
index 2bd58f51c86fa4cd52a079c4ff4eb4dbe70c6978..1f12a9d6c264d1dc3acd9bc1fd2c3af3246f3a05 100644
--- a/src/test/test.h
+++ b/src/test/test.h
@@ -181,6 +181,7 @@ extern struct testcase_t buffer_tests[];
 extern struct testcase_t cell_format_tests[];
 extern struct testcase_t cell_queue_tests[];
 extern struct testcase_t channel_tests[];
+extern struct testcase_t channelpadding_tests[];
 extern struct testcase_t channeltls_tests[];
 extern struct testcase_t checkdir_tests[];
 extern struct testcase_t circuitlist_tests[];
diff --git a/src/test/test_channelpadding.c b/src/test/test_channelpadding.c
new file mode 100644
index 0000000000000000000000000000000000000000..b6591eac7b51794d0172b073b2dce579f4e23a82
--- /dev/null
+++ b/src/test/test_channelpadding.c
@@ -0,0 +1,839 @@
+#define TOR_CHANNEL_INTERNAL_
+#include "or.h"
+#include "test.h"
+#include "testsupport.h"
+#include "connection.h"
+#include "connection_or.h"
+#include "channel.h"
+#include "channeltls.h"
+#include "channelpadding.h"
+#include "compat_libevent.h"
+#include "config.h"
+#include <event.h>
+#include "compat_time.h"
+
+extern smartlist_t *connection_array;
+extern networkstatus_t *current_ns_consensus;
+extern networkstatus_t *current_md_consensus;
+
+int channelpadding_get_netflow_inactive_timeout_ms(channel_t *chan);
+int64_t channelpadding_compute_time_until_pad_for_netflow(channel_t *chan);
+int channelpadding_send_disable_command(channel_t*);
+int channelpadding_find_timerslot(channel_t *chan);
+
+void test_channelpadding_timers(void *arg);
+void test_channelpadding_consensus(void *arg);
+void test_channelpadding_negotiation(void *arg);
+void test_channelpadding_decide_to_pad_channel(void *arg);
+
+void dummy_nop_timer(void);
+
+/* Thing to cast to fake tor_tls_t * to appease assert_connection_ok() */
+static int fake_tortls = 0; /* Bleh... */
+
+static int dont_stop_libevent = 0;
+
+// From test_channel.c
+channel_t * new_fake_channel(void);
+void free_fake_channel(channel_t*);
+
+static int
+mock_channel_has_queued_writes(channel_t *chan)
+{
+  (void)chan;
+  return 0;
+}
+
+static int tried_to_write_cell = 0;
+
+static channel_t *relay1_relay2;
+static channel_t *relay2_relay1;
+static channel_t *relay3_client;
+static channel_t *client_relay3;
+
+static int
+mock_channel_write_cell_relay2(channel_t *chan, cell_t *cell)
+{
+  (void)chan;
+  tried_to_write_cell++;
+  channel_tls_handle_cell(cell, ((channel_tls_t*)relay1_relay2)->conn);
+  event_base_loopbreak(tor_libevent_get_base());
+  return 0;
+}
+
+static int
+mock_channel_write_cell_relay1(channel_t *chan, cell_t *cell)
+{
+  (void)chan;
+  tried_to_write_cell++;
+  channel_tls_handle_cell(cell, ((channel_tls_t*)relay2_relay1)->conn);
+  event_base_loopbreak(tor_libevent_get_base());
+  return 0;
+}
+
+static int
+mock_channel_write_cell_relay3(channel_t *chan, cell_t *cell)
+{
+  (void)chan;
+  tried_to_write_cell++;
+  channel_tls_handle_cell(cell, ((channel_tls_t*)client_relay3)->conn);
+  event_base_loopbreak(tor_libevent_get_base());
+  return 0;
+}
+
+static int
+mock_channel_write_cell_client(channel_t *chan, cell_t *cell)
+{
+  (void)chan;
+  tried_to_write_cell++;
+  channel_tls_handle_cell(cell, ((channel_tls_t*)relay3_client)->conn);
+  event_base_loopbreak(tor_libevent_get_base());
+  return 0;
+}
+
+static int
+mock_channel_write_cell(channel_t *chan, cell_t *cell)
+{
+  tried_to_write_cell++;
+  channel_tls_handle_cell(cell, ((channel_tls_t*)chan)->conn);
+  if (!dont_stop_libevent)
+    event_base_loopbreak(tor_libevent_get_base());
+  return 0;
+}
+
+static void
+setup_fake_connection_for_channel(channel_tls_t *chan)
+{
+  or_connection_t *conn = (or_connection_t*)connection_new(CONN_TYPE_OR,
+                                                           AF_INET);
+
+  conn->base_.conn_array_index = smartlist_len(connection_array);
+  smartlist_add(connection_array, conn);
+
+  connection_or_set_canonical(conn, 1);
+
+  conn->chan = chan;
+  chan->conn = conn;
+
+  conn->base_.magic = OR_CONNECTION_MAGIC;
+  conn->base_.state = OR_CONN_STATE_OPEN;
+  conn->base_.type = CONN_TYPE_OR;
+  conn->base_.socket_family = AF_INET;
+  conn->base_.address = tor_strdup("<fake>");
+
+  conn->base_.port = 4242;
+
+  conn->tls = (tor_tls_t *)((void *)(&fake_tortls));
+
+  conn->link_proto = MIN_LINK_PROTO_FOR_CHANNEL_PADDING;
+}
+
+static channel_tls_t *
+new_fake_channeltls(uint8_t id)
+{
+  channel_tls_t *chan = tor_realloc(new_fake_channel(), sizeof(channel_tls_t));
+  chan->base_.magic = TLS_CHAN_MAGIC;
+  setup_fake_connection_for_channel(chan);
+  chan->base_.channel_usage = CHANNEL_USED_FOR_FULL_CIRCS;
+  chan->base_.has_queued_writes = mock_channel_has_queued_writes;
+  chan->base_.write_cell = mock_channel_write_cell;
+  chan->base_.padding_enabled = 1;
+
+  chan->base_.identity_digest[0] = id;
+  channel_register(&chan->base_);
+
+  return chan;
+}
+
+static void
+free_fake_channeltls(channel_tls_t *chan)
+{
+  channel_unregister(&chan->base_);
+
+  tor_free(((channel_tls_t*)chan)->conn->base_.address);
+  buf_free(((channel_tls_t*)chan)->conn->base_.inbuf);
+  buf_free(((channel_tls_t*)chan)->conn->base_.outbuf);
+  tor_free(((channel_tls_t*)chan)->conn);
+
+  timer_free(chan->base_.padding_timer);
+  channel_handle_free(chan->base_.timer_handle);
+  channel_handles_clear(&chan->base_);
+
+  free_fake_channel(&chan->base_);
+
+  return;
+}
+
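+/* Mock topology sketch (as set up below): relay1_relay2 and relay2_relay1
+ * are the two ends of a relay-to-relay connection, while relay3_client and
+ * client_relay3 are relay3's and the client's ends of a client connection
+ * (relay3_client has is_client set). Each write_cell mock delivers the cell
+ * straight to the peer channel's TLS cell handler. */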
+static void
+setup_mock_network(void)
+{
+  routerstatus_t *relay;
+  connection_array = smartlist_new();
+
+  current_md_consensus = current_ns_consensus
+        = tor_malloc_zero(sizeof(networkstatus_t));
+  current_md_consensus->net_params = smartlist_new();
+  current_md_consensus->routerstatus_list = smartlist_new();
+
+  relay1_relay2 = (channel_t*)new_fake_channeltls(2);
+  relay1_relay2->write_cell = mock_channel_write_cell_relay1;
+  channel_timestamp_active(relay1_relay2);
+  relay = tor_malloc_zero(sizeof(routerstatus_t));
+  relay->identity_digest[0] = 1;
+  smartlist_add(current_md_consensus->routerstatus_list, relay);
+
+  relay2_relay1 = (channel_t*)new_fake_channeltls(1);
+  relay2_relay1->write_cell = mock_channel_write_cell_relay2;
+  channel_timestamp_active(relay2_relay1);
+  relay = tor_malloc_zero(sizeof(routerstatus_t));
+  relay->identity_digest[0] = 2;
+  smartlist_add(current_md_consensus->routerstatus_list, relay);
+
+  relay3_client = (channel_t*)new_fake_channeltls(0);
+  relay3_client->write_cell = mock_channel_write_cell_relay3;
+  relay3_client->is_client = 1;
+  channel_timestamp_active(relay3_client);
+  relay = tor_malloc_zero(sizeof(routerstatus_t));
+  relay->identity_digest[0] = 3;
+  smartlist_add(current_md_consensus->routerstatus_list, relay);
+
+  client_relay3 = (channel_t*)new_fake_channeltls(3);
+  client_relay3->write_cell = mock_channel_write_cell_client;
+  channel_timestamp_active(client_relay3);
+}
+
+static void
+free_mock_network(void)
+{
+  free_fake_channeltls((channel_tls_t*)relay1_relay2);
+  free_fake_channeltls((channel_tls_t*)relay2_relay1);
+  free_fake_channeltls((channel_tls_t*)relay3_client);
+  free_fake_channeltls((channel_tls_t*)client_relay3);
+
+  SMARTLIST_FOREACH(current_md_consensus->routerstatus_list, void *, r,
+                    tor_free(r));
+  smartlist_free(current_md_consensus->routerstatus_list);
+  smartlist_free(current_ns_consensus->net_params);
+  smartlist_free(connection_array);
+  tor_free(current_ns_consensus);
+}
+
+static void
+dummy_timer_cb(tor_timer_t *t, void *arg, const monotime_t *now_mono)
+{
+  (void)t; (void)arg; (void)now_mono;
+  event_base_loopbreak(tor_libevent_get_base());
+  return;
+}
+
+// This hack adds a dummy timer so that the libevent base loop
+// actually returns when we don't expect any timers to fire. Otherwise,
+// the global_timer_event gets scheduled an hour from now, and the
+// base loop never returns.
+void
+dummy_nop_timer(void)
+{
+  tor_timer_t *dummy_timer = timer_new(dummy_timer_cb, NULL);
+  struct timeval timeout;
+  timeout.tv_sec = 1;
+  timeout.tv_usec = 0;
+
+  timer_schedule(dummy_timer, &timeout);
+
+  event_base_loop(tor_libevent_get_base(), 0);
+  timer_free(dummy_timer);
+}
+
+#define CHANNELPADDING_MAX_TIMERS 25
+#define CHANNELS_TO_TEST (CHANNELPADDING_MAX_TIMERS*4)
+/**
+ * Tests to ensure that we handle more than the max number of pending
+ * timers properly.
+ */
+void
+test_channelpadding_timers(void *arg)
+{
+  channelpadding_decision_t decision;
+  channel_t *chans[CHANNELS_TO_TEST];
+  (void)arg;
+  connection_array = smartlist_new();
+
+  monotime_init();
+  timers_initialize();
+
+  for (int i = 0; i < CHANNELS_TO_TEST; i++) {
+    chans[i] = (channel_t*)new_fake_channeltls(0);
+    channel_timestamp_active(chans[i]);
+  }
+
+  for (int j = 0; j < 2; j++) {
+    tried_to_write_cell = 0;
+    int i = 0;
+
+    /* This loop fills our timerslot array with timers of increasing time
+     * until they fire */
+    for (; i < CHANNELPADDING_MAX_TIMERS; i++) {
+      chans[i]->next_padding_time_ms = monotime_coarse_absolute_msec()
+                                        + 10 + i*4;
+      decision = channelpadding_decide_to_pad_channel(chans[i]);
+      tt_int_op(decision, OP_EQ, CHANNELPADDING_PADDING_SCHEDULED);
+      tt_assert(chans[i]->pending_padding_callback);
+      tt_int_op(tried_to_write_cell, OP_EQ, 0);
+    }
+
+    /* This loop should add timers to our existing lists in a weak
+     * pseudorandom pattern.  It ensures that the lists can grow with multiple
+     * timers in them. */
+    for (; i < CHANNELS_TO_TEST/2; i++) {
+      chans[i]->next_padding_time_ms = monotime_coarse_absolute_msec() + 10 +
+          i*3 % CHANNELPADDING_MAX_TIMERS;
+      decision = channelpadding_decide_to_pad_channel(chans[i]);
+      tt_int_op(decision, OP_EQ, CHANNELPADDING_PADDING_SCHEDULED);
+      tt_assert(chans[i]->pending_padding_callback);
+      tt_int_op(tried_to_write_cell, OP_EQ, 0);
+    }
+
+    /* This loop should add timers to the first position in the timerslot
+     * array, since its timeout is before all other timers. */
+    for (; i < CHANNELS_TO_TEST/3; i++) {
+      chans[i]->next_padding_time_ms = monotime_coarse_absolute_msec() + 1;
+      decision = channelpadding_decide_to_pad_channel(chans[i]);
+      tt_int_op(decision, OP_EQ, CHANNELPADDING_PADDING_SCHEDULED);
+      tt_assert(chans[i]->pending_padding_callback);
+      tt_int_op(tried_to_write_cell, OP_EQ, 0);
+    }
+
+    /* This loop should add timers to the last position in the timerslot
+     * array, since its timeout is after all other timers. */
+    for (; i < CHANNELS_TO_TEST; i++) {
+      chans[i]->next_padding_time_ms = monotime_coarse_absolute_msec() + 500 +
+          i % CHANNELPADDING_MAX_TIMERS;
+      decision = channelpadding_decide_to_pad_channel(chans[i]);
+      tt_int_op(decision, OP_EQ, CHANNELPADDING_PADDING_SCHEDULED);
+      tt_assert(chans[i]->pending_padding_callback);
+      tt_int_op(tried_to_write_cell, OP_EQ, 0);
+    }
+
+    // Wait for the timers and then kill the event loop.
+    dont_stop_libevent = 1;
+    dummy_nop_timer();
+
+    tt_int_op(tried_to_write_cell, OP_EQ, CHANNELS_TO_TEST);
+
+    // Check that no callbacks are pending and all slots are now empty
+    for (i = 0; i < CHANNELS_TO_TEST; i++) {
+      tt_assert(!chans[i]->pending_padding_callback);
+    }
+  }
+
+ done:
+  for (int i = 0; i < CHANNELS_TO_TEST; i++) {
+    free_fake_channeltls((channel_tls_t*)chans[i]);
+  }
+  smartlist_free(connection_array);
+
+  timers_shutdown();
+  channel_free_all();
+
+  return;
+}
+
+void
+test_channelpadding_consensus(void *arg)
+{
+  channelpadding_decision_t decision;
+  or_options_t *options = get_options_mutable();
+  int64_t val;
+  (void)arg;
+
+  /*
+   * Params tested:
+   *   nf_pad_before_usage
+   *   nf_pad_relays
+   *   nf_ito_low
+   *   nf_ito_high
+   *
+   * Plan:
+   * 1. Padding can be completely disabled via consensus
+   * 2. Negotiation can't re-enable consensus-disabled padding
+   * 3. Negotiation can't increase padding from relays beyond
+   *    consensus defaults
+   * 4. Relay-to-relay padding can be enabled/disabled in consensus
+   * 5. Can enable/disable padding before actually using a connection
+   */
+  channel_t *chan;
+  routerstatus_t *relay = tor_malloc_zero(sizeof(routerstatus_t));
+  monotime_init();
+  timers_initialize();
+
+  connection_array = smartlist_new();
+  chan = (channel_t*)new_fake_channeltls(0);
+  channel_timestamp_active(chan);
+
+  current_md_consensus = current_ns_consensus
+        = tor_malloc_zero(sizeof(networkstatus_t));
+  current_md_consensus->net_params = smartlist_new();
+  current_md_consensus->routerstatus_list = smartlist_new();
+
+  get_options_mutable()->ORPort_set = 1;
+
+  /* Test 1: Padding can be completely disabled via consensus */
+  tried_to_write_cell = 0;
+  chan->next_padding_time_ms = monotime_coarse_absolute_msec() + 100;
+  decision = channelpadding_decide_to_pad_channel(chan);
+  tt_int_op(decision, OP_EQ, CHANNELPADDING_PADDING_SCHEDULED);
+  tt_assert(chan->pending_padding_callback);
+  tt_int_op(tried_to_write_cell, OP_EQ, 0);
+
+  decision = channelpadding_decide_to_pad_channel(chan);
+  tt_int_op(decision, OP_EQ, CHANNELPADDING_PADDING_ALREADY_SCHEDULED);
+
+  // Wait for the timer
+  event_base_loop(tor_libevent_get_base(), 0);
+  tt_int_op(tried_to_write_cell, OP_EQ, 1);
+  tt_assert(!chan->pending_padding_callback);
+
+  smartlist_add(current_md_consensus->net_params,
+                (void*)"nf_ito_low=0");
+  smartlist_add(current_md_consensus->net_params,
+                (void*)"nf_ito_high=0");
+  get_options_mutable()->ConnectionPadding = 1;
+
+  decision = channelpadding_decide_to_pad_channel(chan);
+  tt_int_op(decision, OP_EQ, CHANNELPADDING_WONTPAD);
+  tt_assert(!chan->pending_padding_callback);
+  val = channelpadding_get_netflow_inactive_timeout_ms(chan);
+  tt_int_op(val, OP_EQ, 0);
+  val = channelpadding_compute_time_until_pad_for_netflow(chan);
+  tt_int_op(val, OP_EQ, -2);
+
+  /* Test 2: Negotiation can't re-enable consensus-disabled padding */
+  channelpadding_send_enable_command(chan, 100, 200);
+  tried_to_write_cell = 0;
+  decision = channelpadding_decide_to_pad_channel(chan);
+  tt_int_op(decision, OP_EQ, CHANNELPADDING_WONTPAD);
+  tt_assert(!chan->pending_padding_callback);
+  val = channelpadding_get_netflow_inactive_timeout_ms(chan);
+  tt_int_op(val, OP_EQ, 0);
+  val = channelpadding_compute_time_until_pad_for_netflow(chan);
+  tt_int_op(val, OP_EQ, -2);
+  tt_assert(!chan->next_padding_time_ms);
+
+  smartlist_clear(current_md_consensus->net_params);
+
+  /* Test 3: Negotiation can't increase padding from relays beyond consensus
+   * values */
+  smartlist_add(current_md_consensus->net_params,
+                (void*)"nf_ito_low=100");
+  smartlist_add(current_md_consensus->net_params,
+                (void*)"nf_ito_high=200");
+
+  tried_to_write_cell = 0;
+  decision = channelpadding_decide_to_pad_channel(chan);
+  tt_int_op(decision, OP_EQ, CHANNELPADDING_PADDING_SCHEDULED);
+  tt_assert(chan->pending_padding_callback);
+  tt_int_op(tried_to_write_cell, OP_EQ, 0);
+  val = channelpadding_get_netflow_inactive_timeout_ms(chan);
+  tt_int_op(val, OP_GE, 100);
+  tt_int_op(val, OP_LE, 200);
+  val = channelpadding_compute_time_until_pad_for_netflow(chan);
+  tt_int_op(val, OP_LE, 200);
+
+  // Wait for the timer
+  event_base_loop(tor_libevent_get_base(), 0);
+  tt_int_op(tried_to_write_cell, OP_EQ, 1);
+  tt_assert(!chan->pending_padding_callback);
+
+  smartlist_clear(current_md_consensus->net_params);
+  smartlist_add(current_md_consensus->net_params,
+                (void*)"nf_ito_low=1500");
+  smartlist_add(current_md_consensus->net_params,
+                (void*)"nf_ito_high=4500");
+
+  channelpadding_send_enable_command(chan, 100, 200);
+  tried_to_write_cell = 0;
+  decision = channelpadding_decide_to_pad_channel(chan);
+  tt_int_op(decision, OP_EQ, CHANNELPADDING_PADLATER);
+  tt_assert(!chan->pending_padding_callback);
+  val = channelpadding_get_netflow_inactive_timeout_ms(chan);
+  tt_int_op(val, OP_GE, 1500);
+  tt_int_op(val, OP_LE, 4500);
+  val = channelpadding_compute_time_until_pad_for_netflow(chan);
+  tt_int_op(val, OP_LE, 4500);
+
+  /* Test 4: Relay-to-relay padding can be enabled/disabled in consensus */
+  /* Make this channel a relay's channel */
+  memcpy(relay->identity_digest,
+          ((channel_tls_t *)chan)->conn->identity_digest, DIGEST_LEN);
+  smartlist_add(current_md_consensus->routerstatus_list, relay);
+
+  tried_to_write_cell = 0;
+  decision = channelpadding_decide_to_pad_channel(chan);
+  tt_int_op(decision, OP_EQ, CHANNELPADDING_WONTPAD);
+  tt_assert(!chan->pending_padding_callback);
+
+  smartlist_add(current_md_consensus->net_params,
+                (void*)"nf_pad_relays=1");
+
+  decision = channelpadding_decide_to_pad_channel(chan);
+  tt_int_op(decision, OP_EQ, CHANNELPADDING_PADLATER);
+  tt_assert(!chan->pending_padding_callback);
+  val = channelpadding_get_netflow_inactive_timeout_ms(chan);
+  tt_int_op(val, OP_GE, 1500);
+  tt_int_op(val, OP_LE, 4500);
+  val = channelpadding_compute_time_until_pad_for_netflow(chan);
+  tt_int_op(val, OP_LE, 4500);
+
+  /* Test 5: If we disable padding before channel usage, does that work? */
+  smartlist_add(current_md_consensus->net_params,
+                (void*)"nf_pad_before_usage=0");
+  tried_to_write_cell = 0;
+  decision = channelpadding_decide_to_pad_channel(chan);
+  tt_int_op(decision, OP_EQ, CHANNELPADDING_WONTPAD);
+  tt_assert(!chan->pending_padding_callback);
+
+ done:
+  free_fake_channeltls((channel_tls_t*)chan);
+  smartlist_free(connection_array);
+  smartlist_free(current_md_consensus->routerstatus_list);
+  smartlist_free(current_ns_consensus->net_params);
+  tor_free(relay);
+  tor_free(current_ns_consensus);
+
+  timers_shutdown();
+  channel_free_all();
+
+  return;
+}
+
+void
+test_channelpadding_negotiation(void *arg)
+{
+  channelpadding_negotiate_t disable;
+  cell_t cell;
+  channelpadding_decision_t decision;
+  int val;
+  (void)arg;
+
+  /* Plan:
+   * 1. Clients reject negotiation, relays accept it.
+   *    * Bridges accept negotiation from their clients,
+   *      but not from relays.
+   * 2. Torrc options can override client-side negotiation
+   * 3. Test a version issue in the channelpadding cell
+   * 4. Test channelpadding_reduced_padding
+   */
+  monotime_init();
+  timers_initialize();
+  setup_mock_network();
+
+  /* Test case #1: Do the right parties ignore negotiation? */
+  /* relay-to-client case: */
+  channelpadding_send_disable_command(relay3_client);
+  tt_assert(client_relay3->padding_enabled);
+
+  /* client-to-relay case: */
+  get_options_mutable()->ORPort_set = 1;
+  channelpadding_disable_padding_on_channel(client_relay3);
+  tt_int_op(channelpadding_decide_to_pad_channel(relay3_client), OP_EQ,
+      CHANNELPADDING_WONTPAD);
+  tt_assert(!relay3_client->padding_enabled);
+  relay3_client->padding_enabled = 1;
+  client_relay3->padding_enabled = 1;
+
+  /* Bridge case from relay */
+  get_options_mutable()->BridgeRelay = 1;
+  channelpadding_disable_padding_on_channel(relay2_relay1);
+  tt_assert(relay1_relay2->padding_enabled);
+
+  /* Bridge case from client */
+  channelpadding_disable_padding_on_channel(client_relay3);
+  tt_assert(!relay3_client->padding_enabled);
+  tt_int_op(channelpadding_decide_to_pad_channel(relay3_client), OP_EQ,
+      CHANNELPADDING_WONTPAD);
+  relay3_client->padding_enabled = 1;
+  client_relay3->padding_enabled = 1;
+  get_options_mutable()->BridgeRelay = 0;
+  get_options_mutable()->ORPort_set = 0;
+
+  /* Test case #2: Torrc options */
+  /* ConnectionPadding auto; relay doesn't support padding */
+  ((channel_tls_t*)relay3_client)->conn->link_proto = 4;
+  relay3_client->padding_enabled = 0;
+  tried_to_write_cell = 0;
+  decision = channelpadding_decide_to_pad_channel(relay3_client);
+  tt_int_op(decision, OP_EQ, CHANNELPADDING_WONTPAD);
+  tt_assert(!relay3_client->pending_padding_callback);
+  tt_int_op(tried_to_write_cell, OP_EQ, 0);
+  ((channel_tls_t*)relay3_client)->conn->link_proto = 5;
+  relay3_client->padding_enabled = 1;
+
+  /* ConnectionPadding 1; relay doesn't support padding */
+  get_options_mutable()->ConnectionPadding = 1;
+  tried_to_write_cell = 0;
+  decision = channelpadding_decide_to_pad_channel(client_relay3);
+  tt_int_op(decision, OP_EQ, CHANNELPADDING_PADLATER);
+  tt_assert(!client_relay3->pending_padding_callback);
+  tt_int_op(tried_to_write_cell, OP_EQ, 0);
+  get_options_mutable()->ConnectionPadding = 0;
+
+  /* Test case #3: Test a version issue in channelpadding cell */
+  get_options_mutable()->ORPort_set = 1;
+  client_relay3->padding_enabled = 1;
+  relay3_client->padding_enabled = 1;
+  memset(&cell, 0, sizeof(cell_t));
+  memset(&disable, 0, sizeof(channelpadding_negotiate_t));
+  cell.command = CELL_PADDING_NEGOTIATE;
+
+  channelpadding_negotiate_set_command(&disable, CHANNELPADDING_COMMAND_STOP);
+  channelpadding_negotiate_encode(cell.payload, CELL_PAYLOAD_SIZE, &disable);
+  ((channelpadding_negotiate_t*)cell.payload)->version = 1;
+  client_relay3->write_cell(client_relay3, &cell);
+  tt_assert(relay3_client->padding_enabled);
+  disable.version = 1;
+  tt_int_op(channelpadding_update_padding_for_channel(client_relay3, &disable),
+          OP_EQ, -1);
+  tt_assert(client_relay3->padding_enabled);
+
+  ((channelpadding_negotiate_t*)cell.payload)->version = 0;
+  disable.version = 0;
+  channelpadding_negotiate_encode(cell.payload, CELL_PAYLOAD_SIZE, &disable);
+  client_relay3->write_cell(client_relay3, &cell);
+  tt_assert(!relay3_client->padding_enabled);
+
+  /* Test case 4: Reducing padding actually reduces it */
+  relay3_client->padding_enabled = 1;
+  client_relay3->padding_enabled = 1;
+
+  decision = channelpadding_decide_to_pad_channel(relay3_client);
+  tt_int_op(decision, OP_EQ, CHANNELPADDING_PADLATER);
+
+  channelpadding_reduce_padding_on_channel(client_relay3);
+
+  tried_to_write_cell = 0;
+  decision = channelpadding_decide_to_pad_channel(relay3_client);
+  tt_int_op(decision, OP_EQ, CHANNELPADDING_WONTPAD);
+
+  get_options_mutable()->ORPort_set = 0;
+  decision = channelpadding_decide_to_pad_channel(client_relay3);
+  tt_int_op(decision, OP_EQ, CHANNELPADDING_PADLATER);
+
+  tt_assert(!client_relay3->pending_padding_callback);
+  val = channelpadding_get_netflow_inactive_timeout_ms(client_relay3);
+  tt_int_op(val, OP_GE, 9000);
+  tt_int_op(val, OP_LE, 14000);
+  val = channelpadding_compute_time_until_pad_for_netflow(client_relay3);
+  tt_int_op(val, OP_LE, 14000);
+
+ done:
+  free_mock_network();
+
+  timers_shutdown();
+  channel_free_all();
+
+  return;
+}
+
+void
+test_channelpadding_decide_to_pad_channel(void *arg)
+{
+  channelpadding_decision_t decision;
+  /**
+   * Test case plan:
+   *
+   * 1. Channel that has "sent a packet" before the timeout.
+   *    + We should decide to pad later
+   * 2. Channel that has not "sent a packet" before the timeout:
+   * 2a. Not within 1.1s of the timeout.
+   *    + We should decide to pad later
+   * 2b. Within 1.1s of the timeout.
+   *    + We should schedule padding
+   *    + We should get feedback that we wrote a cell
+   * 2c. Within 0.1s of the timeout.
+   *    + We should schedule padding
+   *    + We should get feedback that we wrote a cell
+   * 2d. Channel that asks to pad while timeout is scheduled
+   *    + We should schedule padding
+   *    + We should get feedback that we wrote a cell
+   * 2e. 0s of the timeout
+   *    + We should send padding immediately
+   *    + We should get feedback that we wrote a cell
+   * 2f. <0s of the timeout
+   *    + We should send padding immediately
+   *    + We should get feedback that we wrote a cell
+   * 3. Channel that sends a packet while timeout is scheduled
+   *    + We should not get feedback that we wrote a cell
+   * 4. Channel that closes while timeout is scheduled
+   *    + We should not get feedback that we wrote a cell
+   * 5. Make sure the channel still would work if repaired
+   *    + We should be able to schedule padding and resend
+   * 6. Channel is not used for full circuits
+   * 7. Channel that disappears while timeout is scheduled
+   *    + We should not send padding
+   */
+  channel_t *chan;
+  connection_array = smartlist_new();
+  (void)arg;
+
+  monotime_init();
+  timers_initialize();
+
+  chan = (channel_t*)new_fake_channeltls(0);
+  channel_timestamp_active(chan);
+
+  /* Test case #1: Channel that has "sent a packet" before the timeout. */
+  tried_to_write_cell = 0;
+  decision = channelpadding_decide_to_pad_channel(chan);
+  tt_int_op(decision, OP_EQ, CHANNELPADDING_PADLATER);
+  tt_assert(!chan->pending_padding_callback);
+  tt_int_op(tried_to_write_cell, OP_EQ, 0);
+
+  /* Test case #2a: > 1.1s until timeout */
+  tried_to_write_cell = 0;
+  chan->next_padding_time_ms = monotime_coarse_absolute_msec() + 1200;
+  decision = channelpadding_decide_to_pad_channel(chan);
+  tt_int_op(decision, OP_EQ, CHANNELPADDING_PADLATER);
+  tt_assert(!chan->pending_padding_callback);
+  tt_int_op(tried_to_write_cell, OP_EQ, 0);
+
+  /* Test case #2b: >= 1.0s until timeout */
+  tried_to_write_cell = 0;
+  chan->next_padding_time_ms = monotime_coarse_absolute_msec() + 1000;
+  decision = channelpadding_decide_to_pad_channel(chan);
+  tt_int_op(decision, OP_EQ, CHANNELPADDING_PADDING_SCHEDULED);
+  tt_assert(chan->pending_padding_callback);
+  tt_int_op(tried_to_write_cell, OP_EQ, 0);
+
+  // Wait for the timer from case #2b
+  event_base_loop(tor_libevent_get_base(), 0);
+  tt_int_op(tried_to_write_cell, OP_EQ, 1);
+  tt_assert(!chan->pending_padding_callback);
+
+  /* Test case #2c: > 0.1s until timeout */
+  tried_to_write_cell = 0;
+  chan->next_padding_time_ms = monotime_coarse_absolute_msec() + 100;
+  decision = channelpadding_decide_to_pad_channel(chan);
+  tt_int_op(decision, OP_EQ, CHANNELPADDING_PADDING_SCHEDULED);
+  tt_assert(chan->pending_padding_callback);
+  tt_int_op(tried_to_write_cell, OP_EQ, 0);
+
+  /* Test case #2d: Channel that asks to pad while timeout is scheduled */
+  decision = channelpadding_decide_to_pad_channel(chan);
+  tt_int_op(decision, OP_EQ, CHANNELPADDING_PADDING_ALREADY_SCHEDULED);
+
+  // Wait for the timer
+  event_base_loop(tor_libevent_get_base(), 0);
+  tt_int_op(tried_to_write_cell, OP_EQ, 1);
+  tt_assert(!chan->pending_padding_callback);
+
+  /* Test case #2e: 0s until timeout */
+  tried_to_write_cell = 0;
+  chan->next_padding_time_ms = monotime_coarse_absolute_msec();
+  decision = channelpadding_decide_to_pad_channel(chan);
+  tt_int_op(decision, OP_EQ, CHANNELPADDING_PADDING_SENT);
+  tt_int_op(tried_to_write_cell, OP_EQ, 1);
+  tt_assert(!chan->pending_padding_callback);
+
+  /* Test case #2f: <0s until timeout */
+  tried_to_write_cell = 0;
+  chan->next_padding_time_ms = monotime_coarse_absolute_msec() - 100;
+  decision = channelpadding_decide_to_pad_channel(chan);
+  tt_int_op(decision, OP_EQ, CHANNELPADDING_PADDING_SENT);
+  tt_int_op(tried_to_write_cell, OP_EQ, 1);
+  tt_assert(!chan->pending_padding_callback);
+
+  /* Test case #3: Channel that sends a packet while timeout is scheduled */
+  tried_to_write_cell = 0;
+  chan->next_padding_time_ms = monotime_coarse_absolute_msec() + 100;
+  decision = channelpadding_decide_to_pad_channel(chan);
+  tt_int_op(decision, OP_EQ, CHANNELPADDING_PADDING_SCHEDULED);
+  tt_int_op(tried_to_write_cell, OP_EQ, 0);
+  tt_assert(chan->pending_padding_callback);
+
+  // Pretend the channel sent a packet
+  channel_timestamp_active(chan);
+
+  // We don't expect any timer callbacks here. Make a dummy one to be sure.
+  dummy_nop_timer();
+
+  tt_int_op(tried_to_write_cell, OP_EQ, 0);
+  tt_assert(!chan->pending_padding_callback);
+
+  /* Test case #4: Channel that closes while a timeout is scheduled */
+  tried_to_write_cell = 0;
+  chan->next_padding_time_ms = monotime_coarse_absolute_msec() + 100;
+  decision = channelpadding_decide_to_pad_channel(chan);
+  tt_int_op(decision, OP_EQ, CHANNELPADDING_PADDING_SCHEDULED);
+  tt_int_op(tried_to_write_cell, OP_EQ, 0);
+  tt_assert(chan->pending_padding_callback);
+
+  // Pretend the channel is temporarily down
+  chan->state = CHANNEL_STATE_MAINT;
+
+  // We don't expect any timer callbacks here. Make a dummy one to be sure.
+  dummy_nop_timer();
+
+  tt_int_op(tried_to_write_cell, OP_EQ, 0);
+  tt_assert(!chan->pending_padding_callback);
+  chan->state = CHANNEL_STATE_OPEN;
+
+  /* Test case #5: Make sure previous test case didn't break everything */
+  tried_to_write_cell = 0;
+  chan->next_padding_time_ms = monotime_coarse_absolute_msec() + 100;
+  decision = channelpadding_decide_to_pad_channel(chan);
+  tt_int_op(decision, OP_EQ, CHANNELPADDING_PADDING_SCHEDULED);
+  tt_assert(chan->pending_padding_callback);
+  tt_int_op(tried_to_write_cell, OP_EQ, 0);
+
+  // Wait for the timer
+  event_base_loop(tor_libevent_get_base(), 0);
+  tt_int_op(tried_to_write_cell, OP_EQ, 1);
+  tt_assert(!chan->pending_padding_callback);
+
+  /* Test case #6. Channel is not used for full circuits */
+  chan->channel_usage = CHANNEL_USED_NOT_USED_FOR_FULL_CIRCS;
+  decision = channelpadding_decide_to_pad_channel(chan);
+  tt_int_op(decision, OP_EQ, CHANNELPADDING_WONTPAD);
+  tt_assert(!chan->pending_padding_callback);
+  chan->channel_usage = CHANNEL_USED_FOR_FULL_CIRCS;
+
+  /* Test case #7. Channel is closed while timeout is scheduled.
+   *
+   * NOTE: This test deliberately breaks the channel callback mechanism.
+   * It must be last.
+   */
+  tried_to_write_cell = 0;
+  chan->next_padding_time_ms = monotime_coarse_absolute_msec() + 100;
+  decision = channelpadding_decide_to_pad_channel(chan);
+  tt_int_op(decision, OP_EQ, CHANNELPADDING_PADDING_SCHEDULED);
+  tt_int_op(tried_to_write_cell, OP_EQ, 0);
+  tt_assert(chan->pending_padding_callback);
+
+  // Close the connection while the timer is scheduled
+  free_fake_channeltls((channel_tls_t*)chan);
+
+  // We don't expect any timer callbacks here. Make a dummy one to be sure.
+  dummy_nop_timer();
+
+  tt_int_op(tried_to_write_cell, OP_EQ, 0);
+
+ done:
+  smartlist_free(connection_array);
+
+  timers_shutdown();
+  channel_free_all();
+
+  return;
+}
+
+#define TEST_CHANNELPADDING(name, flags) \
+    { #name, test_##name, (flags), NULL, NULL }
+
+struct testcase_t channelpadding_tests[] = {
+  //TEST_CHANNELPADDING(channelpadding_decide_to_pad_channel, 0),
+  TEST_CHANNELPADDING(channelpadding_decide_to_pad_channel, TT_FORK),
+  TEST_CHANNELPADDING(channelpadding_negotiation, TT_FORK),
+  TEST_CHANNELPADDING(channelpadding_consensus, TT_FORK),
+  TEST_CHANNELPADDING(channelpadding_timers, TT_FORK),
+  END_OF_TESTCASES
+};
+
diff --git a/src/trunnel/channelpadding_negotiation.c b/src/trunnel/channelpadding_negotiation.c
new file mode 100644
index 0000000000000000000000000000000000000000..172d6f8a0389510cbb694db07387fa97dd474d08
--- /dev/null
+++ b/src/trunnel/channelpadding_negotiation.c
@@ -0,0 +1,281 @@
+/* channelpadding_negotiation.c -- generated by Trunnel v1.4.3.
+ * https://gitweb.torproject.org/trunnel.git
+ * You probably shouldn't edit this file.
+ */
+#include <stdlib.h>
+#include "trunnel-impl.h"
+
+#include "channelpadding_negotiation.h"
+
+#define TRUNNEL_SET_ERROR_CODE(obj) \
+  do {                              \
+    (obj)->trunnel_error_code_ = 1; \
+  } while (0)
+
+#if defined(__COVERITY__) || defined(__clang_analyzer__)
+/* If we're running a static analysis tool, we don't want it to complain
+ * that some of our remaining-bytes checks are dead-code. */
+int channelpaddingnegotiation_deadcode_dummy__ = 0;
+#define OR_DEADCODE_DUMMY || channelpaddingnegotiation_deadcode_dummy__
+#else
+#define OR_DEADCODE_DUMMY
+#endif
+
+#define CHECK_REMAINING(nbytes, label)                           \
+  do {                                                           \
+    if (remaining < (nbytes) OR_DEADCODE_DUMMY) {                \
+      goto label;                                                \
+    }                                                            \
+  } while (0)
+
+channelpadding_negotiate_t *
+channelpadding_negotiate_new(void)
+{
+  channelpadding_negotiate_t *val = trunnel_calloc(1, sizeof(channelpadding_negotiate_t));
+  if (NULL == val)
+    return NULL;
+  val->command = CHANNELPADDING_COMMAND_START;
+  return val;
+}
+
+/** Release all storage held inside 'obj', but do not free 'obj'.
+ */
+static void
+channelpadding_negotiate_clear(channelpadding_negotiate_t *obj)
+{
+  (void) obj;
+}
+
+void
+channelpadding_negotiate_free(channelpadding_negotiate_t *obj)
+{
+  if (obj == NULL)
+    return;
+  channelpadding_negotiate_clear(obj);
+  trunnel_memwipe(obj, sizeof(channelpadding_negotiate_t));
+  trunnel_free_(obj);
+}
+
+uint8_t
+channelpadding_negotiate_get_version(channelpadding_negotiate_t *inp)
+{
+  return inp->version;
+}
+int
+channelpadding_negotiate_set_version(channelpadding_negotiate_t *inp, uint8_t val)
+{
+  if (! ((val == 0))) {
+     TRUNNEL_SET_ERROR_CODE(inp);
+     return -1;
+  }
+  inp->version = val;
+  return 0;
+}
+uint8_t
+channelpadding_negotiate_get_command(channelpadding_negotiate_t *inp)
+{
+  return inp->command;
+}
+int
+channelpadding_negotiate_set_command(channelpadding_negotiate_t *inp, uint8_t val)
+{
+  if (! ((val == CHANNELPADDING_COMMAND_START || val == CHANNELPADDING_COMMAND_STOP))) {
+     TRUNNEL_SET_ERROR_CODE(inp);
+     return -1;
+  }
+  inp->command = val;
+  return 0;
+}
+uint16_t
+channelpadding_negotiate_get_ito_low_ms(channelpadding_negotiate_t *inp)
+{
+  return inp->ito_low_ms;
+}
+int
+channelpadding_negotiate_set_ito_low_ms(channelpadding_negotiate_t *inp, uint16_t val)
+{
+  inp->ito_low_ms = val;
+  return 0;
+}
+uint16_t
+channelpadding_negotiate_get_ito_high_ms(channelpadding_negotiate_t *inp)
+{
+  return inp->ito_high_ms;
+}
+int
+channelpadding_negotiate_set_ito_high_ms(channelpadding_negotiate_t *inp, uint16_t val)
+{
+  inp->ito_high_ms = val;
+  return 0;
+}
+const char *
+channelpadding_negotiate_check(const channelpadding_negotiate_t *obj)
+{
+  if (obj == NULL)
+    return "Object was NULL";
+  if (obj->trunnel_error_code_)
+    return "A set function failed on this object";
+  if (! (obj->version == 0))
+    return "Integer out of bounds";
+  if (! (obj->command == CHANNELPADDING_COMMAND_START || obj->command == CHANNELPADDING_COMMAND_STOP))
+    return "Integer out of bounds";
+  return NULL;
+}
+
+ssize_t
+channelpadding_negotiate_encoded_len(const channelpadding_negotiate_t *obj)
+{
+  ssize_t result = 0;
+
+  if (NULL != channelpadding_negotiate_check(obj))
+     return -1;
+
+
+  /* Length of u8 version IN [0] */
+  result += 1;
+
+  /* Length of u8 command IN [CHANNELPADDING_COMMAND_START, CHANNELPADDING_COMMAND_STOP] */
+  result += 1;
+
+  /* Length of u16 ito_low_ms */
+  result += 2;
+
+  /* Length of u16 ito_high_ms */
+  result += 2;
+  return result;
+}
+int
+channelpadding_negotiate_clear_errors(channelpadding_negotiate_t *obj)
+{
+  int r = obj->trunnel_error_code_;
+  obj->trunnel_error_code_ = 0;
+  return r;
+}
+ssize_t
+channelpadding_negotiate_encode(uint8_t *output, const size_t avail, const channelpadding_negotiate_t *obj)
+{
+  ssize_t result = 0;
+  size_t written = 0;
+  uint8_t *ptr = output;
+  const char *msg;
+#ifdef TRUNNEL_CHECK_ENCODED_LEN
+  const ssize_t encoded_len = channelpadding_negotiate_encoded_len(obj);
+#endif
+
+  if (NULL != (msg = channelpadding_negotiate_check(obj)))
+    goto check_failed;
+
+#ifdef TRUNNEL_CHECK_ENCODED_LEN
+  trunnel_assert(encoded_len >= 0);
+#endif
+
+  /* Encode u8 version IN [0] */
+  trunnel_assert(written <= avail);
+  if (avail - written < 1)
+    goto truncated;
+  trunnel_set_uint8(ptr, (obj->version));
+  written += 1; ptr += 1;
+
+  /* Encode u8 command IN [CHANNELPADDING_COMMAND_START, CHANNELPADDING_COMMAND_STOP] */
+  trunnel_assert(written <= avail);
+  if (avail - written < 1)
+    goto truncated;
+  trunnel_set_uint8(ptr, (obj->command));
+  written += 1; ptr += 1;
+
+  /* Encode u16 ito_low_ms */
+  trunnel_assert(written <= avail);
+  if (avail - written < 2)
+    goto truncated;
+  trunnel_set_uint16(ptr, trunnel_htons(obj->ito_low_ms));
+  written += 2; ptr += 2;
+
+  /* Encode u16 ito_high_ms */
+  trunnel_assert(written <= avail);
+  if (avail - written < 2)
+    goto truncated;
+  trunnel_set_uint16(ptr, trunnel_htons(obj->ito_high_ms));
+  written += 2; ptr += 2;
+
+
+  trunnel_assert(ptr == output + written);
+#ifdef TRUNNEL_CHECK_ENCODED_LEN
+  {
+    trunnel_assert(encoded_len >= 0);
+    trunnel_assert((size_t)encoded_len == written);
+  }
+
+#endif
+
+  return written;
+
+ truncated:
+  result = -2;
+  goto fail;
+ check_failed:
+  (void)msg;
+  result = -1;
+  goto fail;
+ fail:
+  trunnel_assert(result < 0);
+  return result;
+}
+
+/** As channelpadding_negotiate_parse(), but do not allocate the
+ * output object.
+ */
+static ssize_t
+channelpadding_negotiate_parse_into(channelpadding_negotiate_t *obj, const uint8_t *input, const size_t len_in)
+{
+  const uint8_t *ptr = input;
+  size_t remaining = len_in;
+  ssize_t result = 0;
+  (void)result;
+
+  /* Parse u8 version IN [0] */
+  CHECK_REMAINING(1, truncated);
+  obj->version = (trunnel_get_uint8(ptr));
+  remaining -= 1; ptr += 1;
+  if (! (obj->version == 0))
+    goto fail;
+
+  /* Parse u8 command IN [CHANNELPADDING_COMMAND_START, CHANNELPADDING_COMMAND_STOP] */
+  CHECK_REMAINING(1, truncated);
+  obj->command = (trunnel_get_uint8(ptr));
+  remaining -= 1; ptr += 1;
+  if (! (obj->command == CHANNELPADDING_COMMAND_START || obj->command == CHANNELPADDING_COMMAND_STOP))
+    goto fail;
+
+  /* Parse u16 ito_low_ms */
+  CHECK_REMAINING(2, truncated);
+  obj->ito_low_ms = trunnel_ntohs(trunnel_get_uint16(ptr));
+  remaining -= 2; ptr += 2;
+
+  /* Parse u16 ito_high_ms */
+  CHECK_REMAINING(2, truncated);
+  obj->ito_high_ms = trunnel_ntohs(trunnel_get_uint16(ptr));
+  remaining -= 2; ptr += 2;
+  trunnel_assert(ptr + remaining == input + len_in);
+  return len_in - remaining;
+
+ truncated:
+  return -2;
+ fail:
+  result = -1;
+  return result;
+}
+
+ssize_t
+channelpadding_negotiate_parse(channelpadding_negotiate_t **output, const uint8_t *input, const size_t len_in)
+{
+  ssize_t result;
+  *output = channelpadding_negotiate_new();
+  if (NULL == *output)
+    return -1;
+  result = channelpadding_negotiate_parse_into(*output, input, len_in);
+  if (result < 0) {
+    channelpadding_negotiate_free(*output);
+    *output = NULL;
+  }
+  return result;
+}
diff --git a/src/trunnel/channelpadding_negotiation.h b/src/trunnel/channelpadding_negotiation.h
new file mode 100644
index 0000000000000000000000000000000000000000..e58bda3be14ac55442f7e2d80a02af74bce4ea8b
--- /dev/null
+++ b/src/trunnel/channelpadding_negotiation.h
@@ -0,0 +1,98 @@
+/* channelpadding_negotiation.h -- generated by Trunnel v1.4.3.
+ * https://gitweb.torproject.org/trunnel.git
+ * You probably shouldn't edit this file.
+ */
+#ifndef TRUNNEL_CHANNELPADDING_NEGOTIATION_H
+#define TRUNNEL_CHANNELPADDING_NEGOTIATION_H
+
+#include <stdint.h>
+#include "trunnel.h"
+
+#define CHANNELPADDING_COMMAND_STOP 1
+#define CHANNELPADDING_COMMAND_START 2
+#if !defined(TRUNNEL_OPAQUE) && !defined(TRUNNEL_OPAQUE_CHANNELPADDING_NEGOTIATE)
+struct channelpadding_negotiate_st {
+  uint8_t version;
+  uint8_t command;
+  uint16_t ito_low_ms;
+  uint16_t ito_high_ms;
+  uint8_t trunnel_error_code_;
+};
+#endif
+typedef struct channelpadding_negotiate_st channelpadding_negotiate_t;
+/** Return a newly allocated channelpadding_negotiate with all
+ * elements set to zero.
+ */
+channelpadding_negotiate_t *channelpadding_negotiate_new(void);
+/** Release all storage held by the channelpadding_negotiate in
+ * 'victim'. (Do nothing if 'victim' is NULL.)
+ */
+void channelpadding_negotiate_free(channelpadding_negotiate_t *victim);
+/** Try to parse a channelpadding_negotiate from the buffer in
+ * 'input', using up to 'len_in' bytes from the input buffer. On
+ * success, return the number of bytes consumed and set *output to the
+ * newly allocated channelpadding_negotiate_t. On failure, return -2
+ * if the input appears truncated, and -1 if the input is otherwise
+ * invalid.
+ */
+ssize_t channelpadding_negotiate_parse(channelpadding_negotiate_t **output, const uint8_t *input, const size_t len_in);
+/** Return the number of bytes we expect to need to encode the
+ * channelpadding_negotiate in 'obj'. On failure, return a negative
+ * value. Note that this value may be an overestimate, and can even be
+ * an underestimate for certain unencodeable objects.
+ */
+ssize_t channelpadding_negotiate_encoded_len(const channelpadding_negotiate_t *obj);
+/** Try to encode the channelpadding_negotiate from 'input' into the
+ * buffer at 'output', using up to 'avail' bytes of the output buffer.
+ * On success, return the number of bytes used. On failure, return -2
+ * if the buffer was not long enough, and -1 if the input was invalid.
+ */
+ssize_t channelpadding_negotiate_encode(uint8_t *output, size_t avail, const channelpadding_negotiate_t *input);
+/** Check whether the internal state of the channelpadding_negotiate
+ * in 'obj' is consistent. Return NULL if it is, and a short message
+ * if it is not.
+ */
+const char *channelpadding_negotiate_check(const channelpadding_negotiate_t *obj);
+/** Clear any errors that were set on the object 'obj' by its setter
+ * functions. Return true iff errors were cleared.
+ */
+int channelpadding_negotiate_clear_errors(channelpadding_negotiate_t *obj);
+/** Return the value of the version field of the
+ * channelpadding_negotiate_t in 'inp'
+ */
+uint8_t channelpadding_negotiate_get_version(channelpadding_negotiate_t *inp);
+/** Set the value of the version field of the
+ * channelpadding_negotiate_t in 'inp' to 'val'. Return 0 on success;
+ * return -1 and set the error code on 'inp' on failure.
+ */
+int channelpadding_negotiate_set_version(channelpadding_negotiate_t *inp, uint8_t val);
+/** Return the value of the command field of the
+ * channelpadding_negotiate_t in 'inp'
+ */
+uint8_t channelpadding_negotiate_get_command(channelpadding_negotiate_t *inp);
+/** Set the value of the command field of the
+ * channelpadding_negotiate_t in 'inp' to 'val'. Return 0 on success;
+ * return -1 and set the error code on 'inp' on failure.
+ */
+int channelpadding_negotiate_set_command(channelpadding_negotiate_t *inp, uint8_t val);
+/** Return the value of the ito_low_ms field of the
+ * channelpadding_negotiate_t in 'inp'
+ */
+uint16_t channelpadding_negotiate_get_ito_low_ms(channelpadding_negotiate_t *inp);
+/** Set the value of the ito_low_ms field of the
+ * channelpadding_negotiate_t in 'inp' to 'val'. Return 0 on success;
+ * return -1 and set the error code on 'inp' on failure.
+ */
+int channelpadding_negotiate_set_ito_low_ms(channelpadding_negotiate_t *inp, uint16_t val);
+/** Return the value of the ito_high_ms field of the
+ * channelpadding_negotiate_t in 'inp'
+ */
+uint16_t channelpadding_negotiate_get_ito_high_ms(channelpadding_negotiate_t *inp);
+/** Set the value of the ito_high_ms field of the
+ * channelpadding_negotiate_t in 'inp' to 'val'. Return 0 on success;
+ * return -1 and set the error code on 'inp' on failure.
+ */
+int channelpadding_negotiate_set_ito_high_ms(channelpadding_negotiate_t *inp, uint16_t val);
+
+
+#endif
diff --git a/src/trunnel/channelpadding_negotiation.trunnel b/src/trunnel/channelpadding_negotiation.trunnel
new file mode 100644
index 0000000000000000000000000000000000000000..7f2d4795b04c3dc6f08ba9931e24e96d15003ec6
--- /dev/null
+++ b/src/trunnel/channelpadding_negotiation.trunnel
@@ -0,0 +1,17 @@
+const CHANNELPADDING_COMMAND_STOP = 1;
+const CHANNELPADDING_COMMAND_START = 2;
+
+/* This command tells the relay to alter its min and max netflow
+   timeout range values, and send padding at that rate (resuming
+   if stopped). */
+struct channelpadding_negotiate {
+  u8 version IN [0];
+  u8 command IN [CHANNELPADDING_COMMAND_START, CHANNELPADDING_COMMAND_STOP];
+
+  /* Min must not be lower than the current consensus parameter
+     nf_ito_low. */
+  u16 ito_low_ms;
+
+  /* Max must not be lower than ito_low_ms */
+  u16 ito_high_ms;
+};
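+
+/* For illustration (values hypothetical): a STOP request encodes to the
+   6-byte payload 00 01 00 00 00 00 (version=0, command=STOP, both timeouts
+   zero), and a START request asking for a 1500..9500 ms timeout range
+   encodes to 00 02 05 dc 25 1c, since the u16 fields are big-endian. */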
diff --git a/src/trunnel/include.am b/src/trunnel/include.am
index 9b26d58615bf2d9cdbc97a9f1543d557b69baa20..de6cf4781f3d74f8c91c9eb6c4d0a5a2748ec472 100644
--- a/src/trunnel/include.am
+++ b/src/trunnel/include.am
@@ -11,7 +11,8 @@ AM_CPPFLAGS += -I$(srcdir)/src/ext/trunnel -I$(srcdir)/src/trunnel
 TRUNNELINPUTS = \
 	src/trunnel/ed25519_cert.trunnel \
 	src/trunnel/link_handshake.trunnel \
-	src/trunnel/pwbox.trunnel
+	src/trunnel/pwbox.trunnel \
+	src/trunnel/channelpadding_negotiation.trunnel
 
 TRUNNELSOURCES = \
 	src/ext/trunnel/trunnel.c \
@@ -20,7 +21,8 @@ TRUNNELSOURCES = \
 	src/trunnel/pwbox.c			\
 	src/trunnel/hs/cell_common.c            \
 	src/trunnel/hs/cell_establish_intro.c	\
-	src/trunnel/hs/cell_introduce1.c
+	src/trunnel/hs/cell_introduce1.c \
+	src/trunnel/channelpadding_negotiation.c
 
 TRUNNELHEADERS = \
 	src/ext/trunnel/trunnel.h		\
@@ -31,7 +33,8 @@ TRUNNELHEADERS = \
 	src/trunnel/pwbox.h			\
 	src/trunnel/hs/cell_common.h            \
 	src/trunnel/hs/cell_establish_intro.h	\
-	src/trunnel/hs/cell_introduce1.h
+	src/trunnel/hs/cell_introduce1.h \
+	src/trunnel/channelpadding_negotiation.h
 
 src_trunnel_libor_trunnel_a_SOURCES = $(TRUNNELSOURCES)
 src_trunnel_libor_trunnel_a_CPPFLAGS = -DTRUNNEL_LOCAL_H $(AM_CPPFLAGS)