tcp.h revision b72a0ff7
1/*
2 * Copyright (c) 2016-2019 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 *     http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16#ifndef _vnet_tcp_h_
17#define _vnet_tcp_h_
18
19#include <vnet/vnet.h>
20#include <vnet/ip/ip.h>
21#include <vnet/tcp/tcp_packet.h>
22#include <vnet/tcp/tcp_timer.h>
23#include <vnet/session/transport.h>
24#include <vnet/session/session.h>
25#include <vnet/tcp/tcp_debug.h>
26
27#define TCP_TICK 0.001			/**< TCP tick period (s) */
28#define THZ (u32) (1/TCP_TICK)		/**< TCP tick frequency */
29#define TCP_TSTAMP_RESOLUTION TCP_TICK	/**< Time stamp resolution */
30#define TCP_PAWS_IDLE 24 * 24 * 60 * 60 * THZ /**< 24 days */
31#define TCP_FIB_RECHECK_PERIOD	1 * THZ	/**< Recheck every 1s */
32#define TCP_MAX_OPTION_SPACE 40
33#define TCP_CC_DATA_SZ 24
34#define TCP_MAX_GSO_SZ 65536
35#define TCP_RXT_MAX_BURST 10
36
37#define TCP_DUPACK_THRESHOLD 	3
38#define TCP_IW_N_SEGMENTS 	10
39#define TCP_ALWAYS_ACK		1	/**< On/off delayed acks */
40#define TCP_USE_SACKS		1	/**< Disable only for testing */
41
42/** TCP FSM state definitions as per RFC793. */
43#define foreach_tcp_fsm_state   \
44  _(CLOSED, "CLOSED")           \
45  _(LISTEN, "LISTEN")           \
46  _(SYN_SENT, "SYN_SENT")       \
47  _(SYN_RCVD, "SYN_RCVD")       \
48  _(ESTABLISHED, "ESTABLISHED") \
49  _(CLOSE_WAIT, "CLOSE_WAIT")   \
50  _(FIN_WAIT_1, "FIN_WAIT_1")   \
51  _(LAST_ACK, "LAST_ACK")       \
52  _(CLOSING, "CLOSING")         \
53  _(FIN_WAIT_2, "FIN_WAIT_2")   \
54  _(TIME_WAIT, "TIME_WAIT")
55
56typedef enum _tcp_state
57{
58#define _(sym, str) TCP_STATE_##sym,
59  foreach_tcp_fsm_state
60#undef _
61  TCP_N_STATES
62} tcp_state_t;
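
/*
 * Illustrative sketch (not part of the API): foreach_tcp_fsm_state is an
 * X-macro list. Redefining _() before expanding it derives parallel
 * artifacts from the single list above, e.g. the enum just defined and a
 * hypothetical name table:
 *
 *   #define _(sym, str) TCP_STATE_##sym,
 *   foreach_tcp_fsm_state              // TCP_STATE_CLOSED, TCP_STATE_LISTEN, ...
 *   #undef _
 *
 *   #define _(sym, str) str,
 *   static const char *tcp_state_names[] = { foreach_tcp_fsm_state };
 *   #undef _
 *
 * The same pattern is used below for timers, configuration flags and
 * connection flags.
 */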
63
64format_function_t format_tcp_state;
65format_function_t format_tcp_flags;
66format_function_t format_tcp_sacks;
67format_function_t format_tcp_rcv_sacks;
68
69/** TCP timers */
70#define foreach_tcp_timer               \
71  _(RETRANSMIT, "RETRANSMIT")           \
72  _(DELACK, "DELAYED ACK")              \
73  _(PERSIST, "PERSIST")                 \
74  _(WAITCLOSE, "WAIT CLOSE")            \
75  _(RETRANSMIT_SYN, "RETRANSMIT SYN")   \
76
77typedef enum _tcp_timers
78{
79#define _(sym, str) TCP_TIMER_##sym,
80  foreach_tcp_timer
81#undef _
82  TCP_N_TIMERS
83} tcp_timers_e;
84
85typedef void (timer_expiration_handler) (u32 index, u32 thread_index);
86
87extern timer_expiration_handler tcp_timer_delack_handler;
88extern timer_expiration_handler tcp_timer_retransmit_handler;
89extern timer_expiration_handler tcp_timer_persist_handler;
90extern timer_expiration_handler tcp_timer_retransmit_syn_handler;
91
92#define TCP_TIMER_HANDLE_INVALID ((u32) ~0)
93
94#define TCP_TIMER_TICK		0.1		/**< Timer tick in seconds */
95#define TCP_TO_TIMER_TICK       TCP_TICK*10	/**< Factor for converting
96						     ticks to timer ticks */
97
98#define TCP_RTO_MAX 60 * THZ	/* Max RTO (60s); RFC6298 requires at least 60s */
99#define TCP_RTO_MIN 0.2 * THZ	/* Min RTO (200ms) - lower than standard */
100#define TCP_RTT_MAX 30 * THZ	/* 30s (probably too much) */
101#define TCP_RTO_SYN_RETRIES 3	/* SYN retries without doubling RTO */
102#define TCP_RTO_INIT 1 * THZ	/* Initial retransmit timer */
103#define TCP_RTO_BOFF_MAX 8	/* Max number of retries before reset */
104#define TCP_ESTABLISH_TIME (60 * THZ)	/* Connection establish timeout */
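
/*
 * Worked example (illustrative): the RTO constants above are in TCP ticks
 * (THZ = 1000 ticks/s, i.e. 1 ms per tick), while the timer wheel runs at
 * TCP_TIMER_TICK = 0.1 s. TCP_TO_TIMER_TICK converts between the two:
 *
 *   rto = TCP_RTO_MIN = 0.2 * THZ = 200 ticks (200 ms)
 *   rto * TCP_TO_TIMER_TICK = 200 * 0.01 = 2 timer-wheel ticks
 *
 * This is the conversion done by e.g. tcp_retransmit_timer_set () further
 * down, with a clib_max (..., 1) floor so a timer is never armed with a
 * zero interval.
 */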
105
106/** Connection configuration flags */
107#define foreach_tcp_cfg_flag 			\
108  _(RATE_SAMPLE, "Rate sampling")		\
109  _(NO_CSUM_OFFLOAD, "No csum offload")    	\
110  _(NO_TSO, "TSO off")				\
111  _(TSO, "TSO")					\
112
113typedef enum tcp_cfg_flag_bits_
114{
115#define _(sym, str) TCP_CFG_F_##sym##_BIT,
116  foreach_tcp_cfg_flag
117#undef _
118  TCP_CFG_N_FLAG_BITS
119} tcp_cfg_flag_bits_e;
120
121typedef enum tcp_cfg_flag_
122{
123#define _(sym, str) TCP_CFG_F_##sym = 1 << TCP_CFG_F_##sym##_BIT,
124  foreach_tcp_cfg_flag
125#undef _
126  TCP_CFG_N_FLAGS
127} tcp_cfg_flags_e;
128
129/** TCP connection flags */
130#define foreach_tcp_connection_flag             \
131  _(SNDACK, "Send ACK")                         \
132  _(FINSNT, "FIN sent")				\
133  _(RECOVERY, "Recovery")                    	\
134  _(FAST_RECOVERY, "Fast Recovery")		\
135  _(DCNT_PENDING, "Disconnect pending")		\
136  _(HALF_OPEN_DONE, "Half-open completed")	\
137  _(FINPNDG, "FIN pending")			\
138  _(RXT_PENDING, "Retransmit pending")		\
139  _(FRXT_FIRST, "Retransmit first")		\
140  _(DEQ_PENDING, "Dequeue pending ")		\
141  _(PSH_PENDING, "PSH pending")			\
142  _(FINRCVD, "FIN received")			\
143  _(ZERO_RWND_SENT, "Zero RWND sent")		\
144
145typedef enum tcp_connection_flag_bits_
146{
147#define _(sym, str) TCP_CONN_##sym##_BIT,
148  foreach_tcp_connection_flag
149#undef _
150  TCP_CONN_N_FLAG_BITS
151} tcp_connection_flag_bits_e;
152
153typedef enum tcp_connection_flag_
154{
155#define _(sym, str) TCP_CONN_##sym = 1 << TCP_CONN_##sym##_BIT,
156  foreach_tcp_connection_flag
157#undef _
158  TCP_CONN_N_FLAGS
159} tcp_connection_flags_e;
160
161#define TCP_SCOREBOARD_TRACE (0)
162#define TCP_MAX_SACK_BLOCKS 256	/**< Max number of SACK blocks stored */
163#define TCP_INVALID_SACK_HOLE_INDEX ((u32)~0)
164
165typedef struct _scoreboard_trace_elt
166{
167  u32 start;
168  u32 end;
169  u32 ack;
170  u32 snd_una_max;
171  u32 group;
172} scoreboard_trace_elt_t;
173
174typedef struct _sack_scoreboard_hole
175{
176  u32 next;		/**< Index for next entry in linked list */
177  u32 prev;		/**< Index for previous entry in linked list */
178  u32 start;		/**< Start sequence number */
179  u32 end;		/**< End sequence number */
180  u8 is_lost;		/**< Mark hole as lost */
181} sack_scoreboard_hole_t;
182
183typedef struct _sack_scoreboard
184{
185  sack_scoreboard_hole_t *holes;	/**< Pool of holes */
186  u32 head;				/**< Index of first entry */
187  u32 tail;				/**< Index of last entry */
188  u32 sacked_bytes;			/**< Number of bytes sacked in sb */
189  u32 last_sacked_bytes;		/**< Number of bytes last sacked */
190  u32 last_bytes_delivered;		/**< Sack bytes delivered to app */
191  u32 rxt_sacked;			/**< Rxt bytes last delivered */
192  u32 high_sacked;			/**< Highest byte sacked (fack) */
193  u32 high_rxt;				/**< Highest retransmitted sequence */
194  u32 rescue_rxt;			/**< Rescue sequence number */
195  u32 lost_bytes;			/**< Bytes lost as per RFC6675 */
196  u32 last_lost_bytes;			/**< Number of bytes last lost */
197  u32 cur_rxt_hole;			/**< Retransmitting from this hole */
198  u8 is_reneging;
199
200#if TCP_SCOREBOARD_TRACE
201  scoreboard_trace_elt_t *trace;
202#endif
203
204} sack_scoreboard_t;
205
206#if TCP_SCOREBOARD_TRACE
207#define tcp_scoreboard_trace_add(_tc, _ack) 				\
208{									\
209    static u64 _group = 0;						\
210    sack_scoreboard_t *_sb = &_tc->sack_sb;				\
211    sack_block_t *_sack, *_sacks;					\
212    scoreboard_trace_elt_t *_elt;					\
213    int i;								\
214    _group++;								\
215    _sacks = _tc->rcv_opts.sacks;					\
216    for (i = 0; i < vec_len (_sacks); i++) 				\
217      {									\
218	_sack = &_sacks[i];						\
219	vec_add2 (_sb->trace, _elt, 1);					\
220	_elt->start = _sack->start;					\
221	_elt->end = _sack->end;						\
222	_elt->ack = _elt->end == _ack ? _ack : 0;			\
223	_elt->snd_una_max = _elt->end == _ack ? _tc->snd_una_max : 0;	\
224	_elt->group = _group;						\
225      }									\
226}
227#else
228#define tcp_scoreboard_trace_add(_tc, _ack)
229#endif
230
231sack_scoreboard_hole_t *scoreboard_next_rxt_hole (sack_scoreboard_t * sb,
232						  sack_scoreboard_hole_t *
233						  start, u8 have_sent_1_smss,
234						  u8 * can_rescue,
235						  u8 * snd_limited);
236sack_scoreboard_hole_t *scoreboard_get_hole (sack_scoreboard_t * sb,
237					     u32 index);
238
239sack_scoreboard_hole_t *scoreboard_next_hole (sack_scoreboard_t * sb,
240					      sack_scoreboard_hole_t * hole);
241sack_scoreboard_hole_t *scoreboard_prev_hole (sack_scoreboard_t * sb,
242					      sack_scoreboard_hole_t * hole);
243sack_scoreboard_hole_t *scoreboard_first_hole (sack_scoreboard_t * sb);
244sack_scoreboard_hole_t *scoreboard_last_hole (sack_scoreboard_t * sb);
245
246void scoreboard_clear (sack_scoreboard_t * sb);
247void scoreboard_clear_reneging (sack_scoreboard_t * sb, u32 start, u32 end);
248void scoreboard_init (sack_scoreboard_t * sb);
249void scoreboard_init_rxt (sack_scoreboard_t * sb, u32 snd_una);
250u8 *format_tcp_scoreboard (u8 * s, va_list * args);
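
/*
 * Illustrative sketch (assumes a valid tcp_connection_t *tc): scoreboard
 * holes live in a pool and form a doubly linked list, so they are walked
 * with the accessors above rather than by pointer arithmetic:
 *
 *   sack_scoreboard_t *sb = &tc->sack_sb;
 *   sack_scoreboard_hole_t *hole = scoreboard_first_hole (sb);
 *   while (hole)
 *     {
 *       // hole->start .. hole->end has not been sacked by the peer
 *       hole = scoreboard_next_hole (sb, hole);
 *     }
 */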
251
252#define TCP_BTS_INVALID_INDEX	((u32)~0)
253
254typedef enum tcp_bts_flags_
255{
256  TCP_BTS_IS_RXT = 1,
257  TCP_BTS_IS_APP_LIMITED = 1 << 1,
258  TCP_BTS_IS_SACKED = 1 << 2,
259  TCP_BTS_IS_RXT_LOST = 1 << 3,
260} __clib_packed tcp_bts_flags_t;
261
262typedef struct tcp_bt_sample_
263{
264  u32 next;			/**< Next sample index in list */
265  u32 prev;			/**< Previous sample index in list */
266  u32 min_seq;			/**< Min seq number in sample */
267  u32 max_seq;			/**< Max seq number. Set for rxt samples */
268  u64 delivered;		/**< Total delivered bytes for sample */
269  f64 delivered_time;		/**< Delivered time when sample taken */
270  f64 tx_time;			/**< Transmit time for the burst */
271  f64 first_tx_time;		/**< Connection first tx time at tx */
272  tcp_bts_flags_t flags;	/**< Sample flag */
273} tcp_bt_sample_t;
274
275typedef struct tcp_rate_sample_
276{
277  u64 prior_delivered;		/**< Delivered of sample used for rate, i.e.,
278				     total bytes delivered at prior_time */
279  f64 prior_time;		/**< Delivered time of sample used for rate */
280  f64 interval_time;		/**< Time to ack the bytes delivered */
281  f64 rtt_time;			/**< RTT for sample */
282  u32 delivered;		/**< Bytes delivered in interval_time */
283  u32 acked_and_sacked;		/**< Bytes acked + sacked now */
284  u32 lost;			/**< Bytes lost now */
285  tcp_bts_flags_t flags;	/**< Rate sample flags from bt sample */
286} tcp_rate_sample_t;
287
288typedef struct tcp_byte_tracker_
289{
290  tcp_bt_sample_t *samples;	/**< Pool of samples */
291  rb_tree_t sample_lookup;	/**< Rbtree for sample lookup by min_seq */
292  u32 head;			/**< Head of samples linked list */
293  u32 tail;			/**< Tail of samples linked list */
294  u32 last_ooo;			/**< Cached last ooo sample */
295} tcp_byte_tracker_t;
296
297typedef enum _tcp_cc_algorithm_type
298{
299  TCP_CC_NEWRENO,
300  TCP_CC_CUBIC,
301  TCP_CC_LAST = TCP_CC_CUBIC
302} tcp_cc_algorithm_type_e;
303
304typedef struct _tcp_cc_algorithm tcp_cc_algorithm_t;
305
306typedef enum _tcp_cc_ack_t
307{
308  TCP_CC_ACK,
309  TCP_CC_DUPACK,
310  TCP_CC_PARTIALACK
311} tcp_cc_ack_t;
312
313typedef enum tcp_cc_event_
314{
315  TCP_CC_EVT_START_TX,
316} tcp_cc_event_t;
317
318/*
319 * As per RFC4898 tcpEStatsStackSoftErrors
320 */
321typedef struct tcp_errors_
322{
323  u32 below_data_wnd;	/**< All data in seg is below snd_una */
324  u32 above_data_wnd;	/**< Some data in segment is above snd_wnd */
325  u32 below_ack_wnd;	/**< Acks for data below snd_una */
326  u32 above_ack_wnd;	/**< Acks for data not sent */
327} tcp_errors_t;
328
329typedef struct _tcp_connection
330{
331  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
332  transport_connection_t connection;  /**< Common transport data. First! */
333
334  u8 state;			/**< TCP state as per tcp_state_t */
335  u8 cfg_flags;			/**< Connection configuration flags */
336  u16 flags;			/**< Connection flags (see tcp_conn_flags_e) */
337  u32 timers[TCP_N_TIMERS];	/**< Timer handles into timer wheel */
338
339  u64 segs_in;		/**< RFC4022/4898 tcpHCInSegs/tcpEStatsPerfSegsIn */
340  u64 bytes_in;		/**< RFC4898 tcpEStatsPerfHCDataOctetsIn */
341  u64 segs_out;		/**< RFC4898 tcpEStatsPerfSegsOut */
342  u64 bytes_out;	/**< RFC4898 tcpEStatsPerfHCDataOctetsOut */
343
344  /** Send sequence variables RFC793 */
345  u32 snd_una;		/**< oldest unacknowledged sequence number */
346  u32 snd_una_max;	/**< newest unacknowledged sequence number + 1*/
347  u32 snd_wnd;		/**< send window */
348  u32 snd_wl1;		/**< seq number used for last snd.wnd update */
349  u32 snd_wl2;		/**< ack number used for last snd.wnd update */
350  u32 snd_nxt;		/**< next seq number to be sent */
351  u16 snd_mss;		/**< Effective send max seg (data) size */
352
353  u64 data_segs_in;	/**< RFC4898 tcpEStatsPerfDataSegsIn */
354  u64 data_segs_out;	/**< RFC4898 tcpEStatsPerfDataSegsOut */
355
356  /** Receive sequence variables RFC793 */
357  u32 rcv_nxt;		/**< next sequence number expected */
358  u32 rcv_wnd;		/**< receive window we expect */
359
360  u32 rcv_las;		/**< rcv_nxt at last ack sent/rcv_wnd update */
361  u32 iss;		/**< initial sent sequence */
362  u32 irs;		/**< initial remote sequence */
363
364  /* Options */
365  u8 snd_opts_len;		/**< Tx options len */
366  u8 rcv_wscale;		/**< Window scale to advertise to peer */
367  u8 snd_wscale;		/**< Window scale to use when sending */
368  u32 tsval_recent;		/**< Last timestamp received */
369  u32 tsval_recent_age;		/**< When tsval_recent was last updated */
370  tcp_options_t snd_opts;	/**< Tx options for connection */
371  tcp_options_t rcv_opts;	/**< Rx options for connection */
372
373  sack_block_t *snd_sacks;	/**< Vector of SACKs to send. XXX Fixed size? */
374  u8 snd_sack_pos;		/**< Position in vec of first block to send */
375  sack_block_t *snd_sacks_fl;	/**< Vector for building new list */
376  sack_scoreboard_t sack_sb;	/**< SACK "scoreboard" that tracks holes */
377
378  u16 rcv_dupacks;	/**< Number of recent DUPACKs received */
379  u32 dupacks_in;	/**< RFC4898 tcpEStatsStackDupAcksIn*/
380  u8 pending_dupacks;	/**< Number of DUPACKs to be sent */
381  u32 dupacks_out;	/**< RFC4898 tcpEStatsPathDupAcksOut */
382
383  /* Congestion control */
384  u32 cwnd;		/**< Congestion window */
385  u32 cwnd_acc_bytes;	/**< Bytes accumulated for cwnd increment */
386  u32 ssthresh;		/**< Slow-start threshold */
387  u32 prev_ssthresh;	/**< ssthresh before congestion */
388  u32 prev_cwnd;	/**< cwnd before congestion */
389  u32 bytes_acked;	/**< Bytes acknowledged by current segment */
390  u32 burst_acked;	/**< Bytes acknowledged in current burst */
391  u32 snd_rxt_bytes;	/**< Retransmitted bytes during current cc event */
392  u32 snd_rxt_ts;	/**< Timestamp when first packet is retransmitted */
393  u32 prr_delivered;	/**< RFC6937 bytes delivered during current event */
394  u32 prr_start;	/**< snd_una when prr starts */
395  u32 rxt_delivered;	/**< Rxt bytes delivered during current cc event */
396  u32 rxt_head;		/**< snd_una when the head was last retransmitted */
397  u32 tsecr_last_ack;	/**< Timestamp echoed to us in last healthy ACK */
398  u32 snd_congestion;	/**< snd_una_max when congestion is detected */
399  u32 tx_fifo_size;	/**< Tx fifo size. Used to constrain cwnd */
400  tcp_cc_algorithm_t *cc_algo;	/**< Congestion control algorithm */
401  u8 cc_data[TCP_CC_DATA_SZ];	/**< Congestion control algo private data */
402
403  u32 fr_occurences;	/**< fast-retransmit occurrences RFC4898
404			     tcpEStatsStackFastRetran */
405  u32 tr_occurences;	/**< timer-retransmit occurrences */
406  u64 bytes_retrans;	/**< RFC4898 tcpEStatsPerfOctetsRetrans */
407  u64 segs_retrans;	/**< RFC4898 tcpEStatsPerfSegsRetrans*/
408
409  /* RTT and RTO */
410  u32 rto;		/**< Retransmission timeout */
411  u32 rto_boff;		/**< Index for RTO backoff */
412  u32 srtt;		/**< Smoothed RTT */
413  u32 rttvar;		/**< Smoothed mean RTT difference. Approximates variance */
414  u32 rtt_seq;		/**< Sequence number for tracked ACK */
415  f64 rtt_ts;		/**< Timestamp for tracked ACK */
416  f64 mrtt_us;		/**< High precision mrtt from tracked acks */
417
418  u32 psh_seq;		/**< Add psh header for seg that includes this */
419  u32 next_node_index;	/**< Can be used to control next node in output */
420  u32 next_node_opaque;	/**< Opaque to pass to next node */
421  u32 limited_transmit;	/**< snd_nxt when limited transmit starts */
422  u32 sw_if_index;	/**< Interface for the connection */
423
424  /* Delivery rate estimation */
425  u64 delivered;		/**< Total bytes delivered to peer */
426  u64 app_limited;		/**< Delivered when app-limited detected */
427  f64 delivered_time;		/**< Time last bytes were acked */
428  f64 first_tx_time;		/**< Send time for recently delivered/sent */
429  tcp_byte_tracker_t *bt;	/**< Tx byte tracker */
430
431  tcp_errors_t errors;	/**< Soft connection errors */
432
433  f64 start_ts;		/**< Timestamp when connection initialized */
434  u32 last_fib_check;	/**< Last time we checked fib route for peer */
435  u16 mss;		/**< Our max seg size that includes options */
436  u32 timestamp_delta;	/**< Offset for timestamp */
437  u32 ipv6_flow_label;	/**< flow label for ipv6 header */
438} tcp_connection_t;
439
440/* *INDENT-OFF* */
441struct _tcp_cc_algorithm
442{
443  const char *name;
444  uword (*unformat_cfg) (unformat_input_t * input);
445  void (*init) (tcp_connection_t * tc);
446  void (*cleanup) (tcp_connection_t * tc);
447  void (*rcv_ack) (tcp_connection_t * tc, tcp_rate_sample_t *rs);
448  void (*rcv_cong_ack) (tcp_connection_t * tc, tcp_cc_ack_t ack,
449			tcp_rate_sample_t *rs);
450  void (*congestion) (tcp_connection_t * tc);
451  void (*loss) (tcp_connection_t * tc);
452  void (*recovered) (tcp_connection_t * tc);
453  void (*undo_recovery) (tcp_connection_t * tc);
454  void (*event) (tcp_connection_t *tc, tcp_cc_event_t evt);
455  u64 (*get_pacing_rate) (tcp_connection_t *tc);
456};
457/* *INDENT-ON* */
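
/*
 * Illustrative sketch: a congestion control algorithm is plugged in by
 * filling in this vft and registering it with tcp_cc_algo_new_type () or
 * tcp_cc_algo_register (), both declared further down. Names below are
 * hypothetical:
 *
 *   static void my_cc_init (tcp_connection_t *tc) { ... }
 *   static void my_cc_rcv_ack (tcp_connection_t *tc, tcp_rate_sample_t *rs) { ... }
 *   ...
 *   static const tcp_cc_algorithm_t my_cc_algo = {
 *     .name = "my-cc",
 *     .init = my_cc_init,
 *     .rcv_ack = my_cc_rcv_ack,
 *     .rcv_cong_ack = my_cc_rcv_cong_ack,
 *     .congestion = my_cc_congestion,
 *     .loss = my_cc_loss,
 *     .recovered = my_cc_recovered,
 *   };
 *   tcp_cc_algorithm_type_e my_type = tcp_cc_algo_new_type (&my_cc_algo);
 *
 * Per-connection private state (up to TCP_CC_DATA_SZ bytes) is available
 * through tcp_cc_data (tc).
 */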
458
459#define tcp_fastrecovery_on(tc) (tc)->flags |= TCP_CONN_FAST_RECOVERY
460#define tcp_fastrecovery_off(tc) (tc)->flags &= ~TCP_CONN_FAST_RECOVERY
461#define tcp_recovery_on(tc) (tc)->flags |= TCP_CONN_RECOVERY
462#define tcp_recovery_off(tc) (tc)->flags &= ~TCP_CONN_RECOVERY
463#define tcp_in_fastrecovery(tc) ((tc)->flags & TCP_CONN_FAST_RECOVERY)
464#define tcp_in_recovery(tc) ((tc)->flags & (TCP_CONN_RECOVERY))
465#define tcp_in_slowstart(tc) (tc->cwnd < tc->ssthresh)
466#define tcp_disconnect_pending(tc) ((tc)->flags & TCP_CONN_DCNT_PENDING)
467#define tcp_disconnect_pending_on(tc) ((tc)->flags |= TCP_CONN_DCNT_PENDING)
468#define tcp_disconnect_pending_off(tc) ((tc)->flags &= ~TCP_CONN_DCNT_PENDING)
469#define tcp_fastrecovery_first(tc) ((tc)->flags & TCP_CONN_FRXT_FIRST)
470#define tcp_fastrecovery_first_on(tc) ((tc)->flags |= TCP_CONN_FRXT_FIRST)
471#define tcp_fastrecovery_first_off(tc) ((tc)->flags &= ~TCP_CONN_FRXT_FIRST)
472
473#define tcp_in_cong_recovery(tc) ((tc)->flags & 		\
474	  (TCP_CONN_FAST_RECOVERY | TCP_CONN_RECOVERY))
475
476#define tcp_csum_offload(tc) (!((tc)->cfg_flags & TCP_CFG_F_NO_CSUM_OFFLOAD))
477
478always_inline void
479tcp_cong_recovery_off (tcp_connection_t * tc)
480{
481  tc->flags &= ~(TCP_CONN_FAST_RECOVERY | TCP_CONN_RECOVERY);
482  tcp_fastrecovery_first_off (tc);
483}
484
485#define tcp_zero_rwnd_sent(tc) ((tc)->flags & TCP_CONN_ZERO_RWND_SENT)
486#define tcp_zero_rwnd_sent_on(tc) (tc)->flags |= TCP_CONN_ZERO_RWND_SENT
487#define tcp_zero_rwnd_sent_off(tc) (tc)->flags &= ~TCP_CONN_ZERO_RWND_SENT
488
489typedef enum _tcp_error
490{
491#define tcp_error(n,s) TCP_ERROR_##n,
492#include <vnet/tcp/tcp_error.def>
493#undef tcp_error
494  TCP_N_ERROR,
495} tcp_error_t;
496
497typedef struct _tcp_lookup_dispatch
498{
499  u8 next, error;
500} tcp_lookup_dispatch_t;
501
502typedef struct tcp_worker_ctx_
503{
504  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
505  /** worker time */
506  u32 time_now;
507
508  /** worker timer wheel */
509  tw_timer_wheel_16t_2w_512sl_t timer_wheel;
510
511  /** tx buffer free list */
512  u32 *tx_buffers;
513
514  /** tx frames for ip 4/6 lookup nodes */
515  vlib_frame_t *ip_lookup_tx_frames[2];
516
517  /** vector of pending ack dequeues */
518  u32 *pending_deq_acked;
519
520  /** vector of pending disconnect notifications */
521  u32 *pending_disconnects;
522
523  /** convenience pointer to this thread's vlib main */
524  vlib_main_t *vm;
525
526    CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
527
528  /** cached 'on the wire' options for bursts */
529  u8 cached_opts[40];
530
531} tcp_worker_ctx_t;
532
533typedef struct tcp_iss_seed_
534{
535  u64 first;
536  u64 second;
537} tcp_iss_seed_t;
538
539typedef struct tcp_configuration_
540{
541  /** Max rx fifo size for a session (in bytes). It is used to compute the
542   *  RFC 7323 window scaling factor */
543  u32 max_rx_fifo;
544
545  /** Min rx fifo for a session (in bytes) */
546  u32 min_rx_fifo;
547
548  /** Default MTU to be used when establishing connections */
549  u16 default_mtu;
550
551  /** Initial CWND multiplier, which multiplies MSS to determine the initial CWND.
552   *  Set to 0 to fall back to the RFC 5681 defaults (see tcp_initial_cwnd) */
553  u16 initial_cwnd_multiplier;
554
555  /** Enable tx pacing for new connections */
556  u8 enable_tx_pacing;
557
558  /** Allow use of TSO whenever available */
559  u8 allow_tso;
560
561  /** Set if csum offloading is enabled */
562  u8 csum_offload;
563
564  /** Default congestion control algorithm type */
565  tcp_cc_algorithm_type_e cc_algo;
566
567  /** Min rwnd, as a number of snd_mss segments, required before a window
568   * update ack is sent following a zero rwnd advertisement */
569  u32 rwnd_min_update_ack;
570
571  /** Delayed ack time (disabled) */
572  u16 delack_time;
573
574  /** Timer ticks to wait for close from app */
575  u16 closewait_time;
576
577  /** Timer ticks to wait in time-wait. Also known as 2MSL */
578  u16 timewait_time;
579
580  /** Timer ticks to wait in fin-wait1 to send fin and rcv fin-ack */
581  u16 finwait1_time;
582
583  /** Timer ticks to wait in last ack for ack */
584  u16 lastack_time;
585
586  /** Timer ticks to wait in fin-wait2 for fin */
587  u16 finwait2_time;
588
589  /** Timer ticks to wait in closing for fin ack */
590  u16 closing_time;
591
592  /** Timer ticks to wait before cleaning up the connection */
593  u16 cleanup_time;
594
595  /** Number of preallocated connections */
596  u32 preallocated_connections;
597
598  /** Number of preallocated half-open connections */
599  u32 preallocated_half_open_connections;
600
601  /** Vectors of src addresses. Optional unless one needs > 63K active-opens */
602  ip4_address_t *ip4_src_addrs;
603  ip6_address_t *ip6_src_addrs;
604
605  /** Fault-injection. Debug only */
606  f64 buffer_fail_fraction;
607} tcp_configuration_t;
608
609typedef struct _tcp_main
610{
611  /* Per-worker thread tcp connection pools */
612  tcp_connection_t **connections;
613
614  /* Pool of listeners. */
615  tcp_connection_t *listener_pool;
616
617  /** Dispatch table by state and flags */
618  tcp_lookup_dispatch_t dispatch_table[TCP_N_STATES][64];
619
620  u8 log2_tstamp_clocks_per_tick;
621  f64 tstamp_ticks_per_clock;
622
623  /** per-worker context */
624  tcp_worker_ctx_t *wrk_ctx;
625
626  /** Pool of half-open connections on which we've sent a SYN */
627  tcp_connection_t *half_open_connections;
628  clib_spinlock_t half_open_lock;
629
630  /** vlib buffer size */
631  u32 bytes_per_buffer;
632
633  /** Seed used to generate random iss */
634  tcp_iss_seed_t iss_seed;
635
636  /** Congestion control algorithms registered */
637  tcp_cc_algorithm_t *cc_algos;
638
639  /** Hash table of cc algorithms by name */
640  uword *cc_algo_by_name;
641
642  /** Last cc algo registered */
643  tcp_cc_algorithm_type_e cc_last_type;
644
645  /** Flag that indicates if stack is on or off */
646  u8 is_enabled;
647
648  /** Flag that indicates if v4 punting is enabled */
649  u8 punt_unknown4;
650
651  /** Flag that indicates if v6 punting is enabled */
652  u8 punt_unknown6;
653
654  /** Rotor for v4 source addresses */
655  u32 last_v4_addr_rotor;
656
657  /** Rotor for v6 source addresses */
658  u32 last_v6_addr_rotor;
659
660  /** Protocol configuration */
661  tcp_configuration_t cfg;
662} tcp_main_t;
663
664extern tcp_main_t tcp_main;
665extern vlib_node_registration_t tcp4_input_node;
666extern vlib_node_registration_t tcp6_input_node;
667extern vlib_node_registration_t tcp4_output_node;
668extern vlib_node_registration_t tcp6_output_node;
669extern vlib_node_registration_t tcp4_established_node;
670extern vlib_node_registration_t tcp6_established_node;
671extern vlib_node_registration_t tcp4_syn_sent_node;
672extern vlib_node_registration_t tcp6_syn_sent_node;
673extern vlib_node_registration_t tcp4_rcv_process_node;
674extern vlib_node_registration_t tcp6_rcv_process_node;
675extern vlib_node_registration_t tcp4_listen_node;
676extern vlib_node_registration_t tcp6_listen_node;
677
678#define tcp_cfg tcp_main.cfg
679#define tcp_node_index(node_id, is_ip4) 				\
680  ((is_ip4) ? tcp4_##node_id##_node.index : tcp6_##node_id##_node.index)
681
682always_inline tcp_main_t *
683vnet_get_tcp_main ()
684{
685  return &tcp_main;
686}
687
688always_inline tcp_worker_ctx_t *
689tcp_get_worker (u32 thread_index)
690{
691  return &tcp_main.wrk_ctx[thread_index];
692}
693
694always_inline tcp_header_t *
695tcp_buffer_hdr (vlib_buffer_t * b)
696{
697  ASSERT ((signed) b->current_data >= (signed) -VLIB_BUFFER_PRE_DATA_SIZE);
698  return (tcp_header_t *) (b->data + b->current_data
699			   + vnet_buffer (b)->tcp.hdr_offset);
700}
701
702#if (VLIB_BUFFER_TRACE_TRAJECTORY)
703#define tcp_trajectory_add_start(b, start)			\
704{								\
705    (*vlib_buffer_trace_trajectory_cb) (b, start);		\
706}
707#else
708#define tcp_trajectory_add_start(b, start)
709#endif
710
711clib_error_t *vnet_tcp_enable_disable (vlib_main_t * vm, u8 is_en);
712
713void tcp_punt_unknown (vlib_main_t * vm, u8 is_ip4, u8 is_add);
714
715always_inline tcp_connection_t *
716tcp_connection_get (u32 conn_index, u32 thread_index)
717{
718  if (PREDICT_FALSE
719      (pool_is_free_index (tcp_main.connections[thread_index], conn_index)))
720    return 0;
721  return pool_elt_at_index (tcp_main.connections[thread_index], conn_index);
722}
723
724always_inline tcp_connection_t *
725tcp_connection_get_if_valid (u32 conn_index, u32 thread_index)
726{
727  if (tcp_main.connections[thread_index] == 0)
728    return 0;
729  if (pool_is_free_index (tcp_main.connections[thread_index], conn_index))
730    return 0;
731  return pool_elt_at_index (tcp_main.connections[thread_index], conn_index);
732}
733
734always_inline tcp_connection_t *
735tcp_get_connection_from_transport (transport_connection_t * tconn)
736{
737  return (tcp_connection_t *) tconn;
738}
739
740always_inline void
741tcp_connection_set_state (tcp_connection_t * tc, tcp_state_t state)
742{
743  tc->state = state;
744  TCP_EVT (TCP_EVT_STATE_CHANGE, tc);
745}
746
747void tcp_connection_close (tcp_connection_t * tc);
748void tcp_connection_cleanup (tcp_connection_t * tc);
749void tcp_connection_del (tcp_connection_t * tc);
750int tcp_half_open_connection_cleanup (tcp_connection_t * tc);
751tcp_connection_t *tcp_connection_alloc (u8 thread_index);
752tcp_connection_t *tcp_connection_alloc_w_base (u8 thread_index,
753					       tcp_connection_t * base);
754void tcp_connection_free (tcp_connection_t * tc);
755void tcp_connection_reset (tcp_connection_t * tc);
756int tcp_configure_v4_source_address_range (vlib_main_t * vm,
757					   ip4_address_t * start,
758					   ip4_address_t * end, u32 table_id);
759int tcp_configure_v6_source_address_range (vlib_main_t * vm,
760					   ip6_address_t * start,
761					   ip6_address_t * end, u32 table_id);
762void tcp_api_reference (void);
763u8 *format_tcp_connection (u8 * s, va_list * args);
764
765always_inline tcp_connection_t *
766tcp_listener_get (u32 tli)
767{
768  return pool_elt_at_index (tcp_main.listener_pool, tli);
769}
770
771always_inline tcp_connection_t *
772tcp_half_open_connection_get (u32 conn_index)
773{
774  tcp_connection_t *tc = 0;
775  clib_spinlock_lock_if_init (&tcp_main.half_open_lock);
776  if (!pool_is_free_index (tcp_main.half_open_connections, conn_index))
777    tc = pool_elt_at_index (tcp_main.half_open_connections, conn_index);
778  clib_spinlock_unlock_if_init (&tcp_main.half_open_lock);
779  return tc;
780}
781
782void tcp_make_fin (tcp_connection_t * tc, vlib_buffer_t * b);
783void tcp_make_synack (tcp_connection_t * ts, vlib_buffer_t * b);
784void tcp_send_reset_w_pkt (tcp_connection_t * tc, vlib_buffer_t * pkt,
785			   u32 thread_index, u8 is_ip4);
786void tcp_send_reset (tcp_connection_t * tc);
787void tcp_send_syn (tcp_connection_t * tc);
788void tcp_send_synack (tcp_connection_t * tc);
789void tcp_send_fin (tcp_connection_t * tc);
790void tcp_send_ack (tcp_connection_t * tc);
791void tcp_update_burst_snd_vars (tcp_connection_t * tc);
792void tcp_update_rto (tcp_connection_t * tc);
793void tcp_flush_frames_to_output (tcp_worker_ctx_t * wrk);
794void tcp_send_window_update_ack (tcp_connection_t * tc);
795
796void tcp_program_ack (tcp_connection_t * tc);
797void tcp_program_dupack (tcp_connection_t * tc);
798void tcp_program_retransmit (tcp_connection_t * tc);
799
800/*
801 * Rate estimation
802 */
803
804/**
805 * Byte tracker initialize
806 *
807 * @param tc	connection for which the byte tracker should be allocated and
808 * 		initialized
809 */
810void tcp_bt_init (tcp_connection_t * tc);
811/**
812 * Byte tracker cleanup
813 *
814 * @param tc	connection for which the byte tracker should be cleaned up
815 */
816void tcp_bt_cleanup (tcp_connection_t * tc);
817/**
818 * Flush byte tracker samples
819 *
820 * @param tc	tcp connection for which samples should be flushed
821 */
822void tcp_bt_flush_samples (tcp_connection_t * tc);
823/**
824 * Track a tcp tx burst
825 *
826 * @param tc	tcp connection
827 */
828void tcp_bt_track_tx (tcp_connection_t * tc, u32 len);
829/**
830 * Track a tcp retransmission
831 *
832 * @param tc	tcp connection
833 * @param start	start sequence number
834 * @param end	end sequence number
835 */
836void tcp_bt_track_rxt (tcp_connection_t * tc, u32 start, u32 end);
837/**
838 * Generate a delivery rate sample from recently acked bytes
839 *
840 * @param tc	tcp connection
841 * @param rs	resulting rate sample
842 */
843void tcp_bt_sample_delivery_rate (tcp_connection_t * tc,
844				  tcp_rate_sample_t * rs);
845/**
846 * Check if sample to be generated is app limited
847 *
848 * @param tc	tcp connection
849 */
850void tcp_bt_check_app_limited (tcp_connection_t * tc);
851/**
852 * Check if the byte tracker is in sane state
853 *
854 * Should be used only for testing
855 *
856 * @param bt	byte tracker
857 */
858int tcp_bt_is_sane (tcp_byte_tracker_t * bt);
859u8 *format_tcp_bt (u8 * s, va_list * args);
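
/*
 * Illustrative sketch of the byte tracker / delivery rate lifecycle, as
 * used when rate sampling (TCP_CFG_F_RATE_SAMPLE) is enabled for a
 * connection; the exact call sites live in the TCP data path:
 *
 *   tcp_bt_init (tc);                       // allocate and initialize tracker
 *   ...
 *   tcp_bt_check_app_limited (tc);          // check if sample would be app limited
 *   tcp_bt_track_tx (tc, len);              // record each transmitted burst
 *   tcp_bt_track_rxt (tc, start, end);      // record each retransmission
 *   ...
 *   tcp_rate_sample_t rs = { 0 };
 *   tcp_bt_sample_delivery_rate (tc, &rs);  // on ack processing
 *   ...
 *   tcp_bt_cleanup (tc);                    // on connection cleanup
 */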
860
861always_inline u32
862tcp_end_seq (tcp_header_t * th, u32 len)
863{
864  return th->seq_number + tcp_is_syn (th) + tcp_is_fin (th) + len;
865}
866
867/* Modulo arithmetic for TCP sequence numbers */
868#define seq_lt(_s1, _s2) ((i32)((_s1)-(_s2)) < 0)
869#define seq_leq(_s1, _s2) ((i32)((_s1)-(_s2)) <= 0)
870#define seq_gt(_s1, _s2) ((i32)((_s1)-(_s2)) > 0)
871#define seq_geq(_s1, _s2) ((i32)((_s1)-(_s2)) >= 0)
872#define seq_max(_s1, _s2) (seq_gt((_s1), (_s2)) ? (_s1) : (_s2))
873
874/* Modulo arithmetic for timestamps */
875#define timestamp_lt(_t1, _t2) ((i32)((_t1)-(_t2)) < 0)
876#define timestamp_leq(_t1, _t2) ((i32)((_t1)-(_t2)) <= 0)
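
/*
 * Worked example (illustrative): the comparisons above rely on signed
 * 32-bit wrap-around, so they remain correct across sequence number
 * roll-over:
 *
 *   seq_lt (0xfffffff0, 0x00000010)   // 1: (i32) (0xfffffff0 - 0x10) = -32 < 0
 *   seq_gt (0x00000010, 0xfffffff0)   // 1
 *
 * They are only meaningful while the two values are within 2^31 of each
 * other, which TCP window sizes guarantee.
 */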
877
878/**
879 * Our estimate of the number of bytes that have left the network
880 */
881always_inline u32
882tcp_bytes_out (const tcp_connection_t * tc)
883{
884  if (tcp_opts_sack_permitted (&tc->rcv_opts))
885    return tc->sack_sb.sacked_bytes + tc->sack_sb.lost_bytes;
886  else
887    return clib_min (tc->rcv_dupacks * tc->snd_mss,
888		     tc->snd_nxt - tc->snd_una);
889}
890
891/**
892 * Our estimate of the number of bytes in flight (pipe size)
893 */
894always_inline u32
895tcp_flight_size (const tcp_connection_t * tc)
896{
897  int flight_size;
898
899  flight_size = (int) (tc->snd_nxt - tc->snd_una) - tcp_bytes_out (tc)
900    + tc->snd_rxt_bytes - tc->rxt_delivered;
901
902  ASSERT (flight_size >= 0);
903
904  return flight_size;
905}
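
/*
 * Worked example (illustrative): with snd_una = 1000, snd_nxt = 11000,
 * 2000 bytes sacked, nothing marked lost and no outstanding retransmits:
 *
 *   tcp_bytes_out (tc)   = 2000                     (sacked + lost)
 *   tcp_flight_size (tc) = (11000 - 1000) - 2000 = 8000 bytes
 *
 * i.e. the RFC6675 "pipe": data sent but, as far as we know, neither
 * acked, sacked nor lost.
 */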
906
907/**
908 * Initial cwnd as per RFC5681
909 */
910always_inline u32
911tcp_initial_cwnd (const tcp_connection_t * tc)
912{
913  if (tcp_cfg.initial_cwnd_multiplier > 0)
914    return tcp_cfg.initial_cwnd_multiplier * tc->snd_mss;
915
916  if (tc->snd_mss > 2190)
917    return 2 * tc->snd_mss;
918  else if (tc->snd_mss > 1095)
919    return 3 * tc->snd_mss;
920  else
921    return 4 * tc->snd_mss;
922}
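
/*
 * Worked example (illustrative): with tcp_cfg.initial_cwnd_multiplier == 0
 * and snd_mss = 1460, the RFC5681 branch applies:
 *
 *   1460 > 1095 and 1460 <= 2190  =>  initial cwnd = 3 * 1460 = 4380 bytes
 *
 * With initial_cwnd_multiplier = 10 the same connection would instead
 * start with 10 * 1460 = 14600 bytes.
 */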
923
924/*
925 * Accumulate acked bytes for cwnd increase
926 *
927 * Once threshold bytes are accumulated, snd_mss bytes are added
928 * to the cwnd.
929 */
930always_inline void
931tcp_cwnd_accumulate (tcp_connection_t * tc, u32 thresh, u32 bytes)
932{
933  tc->cwnd_acc_bytes += bytes;
934  if (tc->cwnd_acc_bytes >= thresh)
935    {
936      u32 inc = tc->cwnd_acc_bytes / thresh;
937      tc->cwnd_acc_bytes -= inc * thresh;
938      tc->cwnd += inc * tc->snd_mss;
939      tc->cwnd = clib_min (tc->cwnd, tc->tx_fifo_size);
940    }
941}
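
/*
 * Worked example (illustrative, assuming the typical congestion-avoidance
 * usage where a caller passes thresh = cwnd): with cwnd = 14600 and
 * snd_mss = 1460, once 14600 acked bytes have accumulated, cwnd grows by
 * one snd_mss to 16060, i.e. roughly one MSS per RTT, capped at
 * tx_fifo_size.
 */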
942
943always_inline u32
944tcp_loss_wnd (const tcp_connection_t * tc)
945{
946  /* Whatever we have in flight + the packet we're about to send */
947  return tcp_flight_size (tc) + tc->snd_mss;
948}
949
950always_inline u32
951tcp_available_snd_wnd (const tcp_connection_t * tc)
952{
953  return clib_min (tc->cwnd, tc->snd_wnd);
954}
955
956always_inline u32
957tcp_available_output_snd_space (const tcp_connection_t * tc)
958{
959  u32 available_wnd = tcp_available_snd_wnd (tc);
960  int flight_size = (int) (tc->snd_nxt - tc->snd_una);
961
962  if (available_wnd <= flight_size)
963    return 0;
964
965  return available_wnd - flight_size;
966}
967
968/**
969 * Estimate of how many bytes we can still push into the network
970 */
971always_inline u32
972tcp_available_cc_snd_space (const tcp_connection_t * tc)
973{
974  u32 available_wnd = tcp_available_snd_wnd (tc);
975  u32 flight_size = tcp_flight_size (tc);
976
977  if (available_wnd <= flight_size)
978    return 0;
979
980  return available_wnd - flight_size;
981}
982
983always_inline u8
984tcp_is_lost_fin (tcp_connection_t * tc)
985{
986  if ((tc->flags & TCP_CONN_FINSNT) && (tc->snd_una_max - tc->snd_una == 1))
987    return 1;
988  return 0;
989}
990
991u32 tcp_snd_space (tcp_connection_t * tc);
992int tcp_fastrecovery_prr_snd_space (tcp_connection_t * tc);
993
994fib_node_index_t tcp_lookup_rmt_in_fib (tcp_connection_t * tc);
995
996/* Made public for unit testing only */
997void tcp_update_sack_list (tcp_connection_t * tc, u32 start, u32 end);
998u32 tcp_sack_list_bytes (tcp_connection_t * tc);
999
1000always_inline u32
1001tcp_time_now (void)
1002{
1003  return tcp_main.wrk_ctx[vlib_get_thread_index ()].time_now;
1004}
1005
1006always_inline u32
1007tcp_time_now_w_thread (u32 thread_index)
1008{
1009  return tcp_main.wrk_ctx[thread_index].time_now;
1010}
1011
1012/**
1013 * Generate timestamp for tcp connection
1014 */
1015always_inline u32
1016tcp_tstamp (tcp_connection_t * tc)
1017{
1018  return (tcp_main.wrk_ctx[tc->c_thread_index].time_now -
1019	  tc->timestamp_delta);
1020}
1021
1022always_inline f64
1023tcp_time_now_us (u32 thread_index)
1024{
1025  return transport_time_now (thread_index);
1026}
1027
1028always_inline u32
1029tcp_set_time_now (tcp_worker_ctx_t * wrk)
1030{
1031  wrk->time_now = clib_cpu_time_now () * tcp_main.tstamp_ticks_per_clock;
1032  return wrk->time_now;
1033}
1034
1035u32 tcp_session_push_header (transport_connection_t * tconn,
1036			     vlib_buffer_t * b);
1037int tcp_session_custom_tx (void *conn, u32 max_burst_size);
1038
1039void tcp_connection_timers_init (tcp_connection_t * tc);
1040void tcp_connection_timers_reset (tcp_connection_t * tc);
1041void tcp_init_snd_vars (tcp_connection_t * tc);
1042void tcp_connection_init_vars (tcp_connection_t * tc);
1043void tcp_connection_tx_pacer_update (tcp_connection_t * tc);
1044void tcp_connection_tx_pacer_reset (tcp_connection_t * tc, u32 window,
1045				    u32 start_bucket);
1046
1047always_inline void
1048tcp_cc_rcv_ack (tcp_connection_t * tc, tcp_rate_sample_t * rs)
1049{
1050  tc->cc_algo->rcv_ack (tc, rs);
1051  tc->tsecr_last_ack = tc->rcv_opts.tsecr;
1052}
1053
1054static inline void
1055tcp_cc_rcv_cong_ack (tcp_connection_t * tc, tcp_cc_ack_t ack_type,
1056		     tcp_rate_sample_t * rs)
1057{
1058  tc->cc_algo->rcv_cong_ack (tc, ack_type, rs);
1059}
1060
1061static inline void
1062tcp_cc_congestion (tcp_connection_t * tc)
1063{
1064  tc->cc_algo->congestion (tc);
1065}
1066
1067static inline void
1068tcp_cc_loss (tcp_connection_t * tc)
1069{
1070  tc->cc_algo->loss (tc);
1071}
1072
1073static inline void
1074tcp_cc_recovered (tcp_connection_t * tc)
1075{
1076  tc->cc_algo->recovered (tc);
1077}
1078
1079static inline void
1080tcp_cc_undo_recovery (tcp_connection_t * tc)
1081{
1082  if (tc->cc_algo->undo_recovery)
1083    tc->cc_algo->undo_recovery (tc);
1084}
1085
1086static inline void
1087tcp_cc_event (tcp_connection_t * tc, tcp_cc_event_t evt)
1088{
1089  if (tc->cc_algo->event)
1090    tc->cc_algo->event (tc, evt);
1091}
1092
1093static inline u64
1094tcp_cc_get_pacing_rate (tcp_connection_t * tc)
1095{
1096  if (tc->cc_algo->get_pacing_rate)
1097    return tc->cc_algo->get_pacing_rate (tc);
1098
1099  f64 srtt = clib_min ((f64) tc->srtt * TCP_TICK, tc->mrtt_us);
1100
1101  /* TODO should constrain to interface's max throughput but
1102   * we don't have link speeds for sw ifs ..*/
1103  return ((f64) tc->cwnd / srtt);
1104}
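
/*
 * Worked example (illustrative): with cwnd = 43800 bytes and a smoothed
 * RTT of 10 ms (0.01 s), the default pacing rate is 43800 / 0.01 =
 * 4,380,000 bytes/s, i.e. one cwnd per smoothed RTT. A cc algorithm can
 * override this by providing get_pacing_rate in its vft.
 */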
1105
1106always_inline void
1107tcp_timer_set (tcp_connection_t * tc, u8 timer_id, u32 interval)
1108{
1109  ASSERT (tc->c_thread_index == vlib_get_thread_index ());
1110  ASSERT (tc->timers[timer_id] == TCP_TIMER_HANDLE_INVALID);
1111  tc->timers[timer_id] =
1112    tw_timer_start_16t_2w_512sl (&tcp_main.
1113				 wrk_ctx[tc->c_thread_index].timer_wheel,
1114				 tc->c_c_index, timer_id, interval);
1115}
1116
1117always_inline void
1118tcp_timer_reset (tcp_connection_t * tc, u8 timer_id)
1119{
1120  ASSERT (tc->c_thread_index == vlib_get_thread_index ());
1121  if (tc->timers[timer_id] == TCP_TIMER_HANDLE_INVALID)
1122    return;
1123
1124  tw_timer_stop_16t_2w_512sl (&tcp_main.
1125			      wrk_ctx[tc->c_thread_index].timer_wheel,
1126			      tc->timers[timer_id]);
1127  tc->timers[timer_id] = TCP_TIMER_HANDLE_INVALID;
1128}
1129
1130always_inline void
1131tcp_timer_update (tcp_connection_t * tc, u8 timer_id, u32 interval)
1132{
1133  ASSERT (tc->c_thread_index == vlib_get_thread_index ());
1134  if (tc->timers[timer_id] != TCP_TIMER_HANDLE_INVALID)
1135    tw_timer_update_16t_2w_512sl (&tcp_main.
1136				  wrk_ctx[tc->c_thread_index].timer_wheel,
1137				  tc->timers[timer_id], interval);
1138  else
1139    tc->timers[timer_id] =
1140      tw_timer_start_16t_2w_512sl (&tcp_main.
1141				   wrk_ctx[tc->c_thread_index].timer_wheel,
1142				   tc->c_c_index, timer_id, interval);
1143}
1144
1145always_inline void
1146tcp_retransmit_timer_set (tcp_connection_t * tc)
1147{
1148  ASSERT (tc->snd_una != tc->snd_una_max);
1149  tcp_timer_set (tc, TCP_TIMER_RETRANSMIT,
1150		 clib_max (tc->rto * TCP_TO_TIMER_TICK, 1));
1151}
1152
1153always_inline void
1154tcp_retransmit_timer_reset (tcp_connection_t * tc)
1155{
1156  tcp_timer_reset (tc, TCP_TIMER_RETRANSMIT);
1157}
1158
1159always_inline void
1160tcp_retransmit_timer_force_update (tcp_connection_t * tc)
1161{
1162  tcp_timer_update (tc, TCP_TIMER_RETRANSMIT,
1163		    clib_max (tc->rto * TCP_TO_TIMER_TICK, 1));
1164}
1165
1166always_inline void
1167tcp_persist_timer_set (tcp_connection_t * tc)
1168{
1169  /* Reuse RTO. It is backed off in the handler */
1170  tcp_timer_set (tc, TCP_TIMER_PERSIST,
1171		 clib_max (tc->rto * TCP_TO_TIMER_TICK, 1));
1172}
1173
1174always_inline void
1175tcp_persist_timer_update (tcp_connection_t * tc)
1176{
1177  u32 interval;
1178
1179  if (seq_leq (tc->snd_una, tc->snd_congestion + tc->burst_acked))
1180    interval = 1;
1181  else
1182    interval = clib_max (tc->rto * TCP_TO_TIMER_TICK, 1);
1183
1184  tcp_timer_update (tc, TCP_TIMER_PERSIST, interval);
1185}
1186
1187always_inline void
1188tcp_persist_timer_reset (tcp_connection_t * tc)
1189{
1190  tcp_timer_reset (tc, TCP_TIMER_PERSIST);
1191}
1192
1193always_inline void
1194tcp_retransmit_timer_update (tcp_connection_t * tc)
1195{
1196  if (tc->snd_una == tc->snd_nxt)
1197    {
1198      tcp_retransmit_timer_reset (tc);
1199      if (tc->snd_wnd < tc->snd_mss)
1200	tcp_persist_timer_update (tc);
1201    }
1202  else
1203    tcp_timer_update (tc, TCP_TIMER_RETRANSMIT,
1204		      clib_max (tc->rto * TCP_TO_TIMER_TICK, 1));
1205}
1206
1207always_inline u8
1208tcp_timer_is_active (tcp_connection_t * tc, tcp_timers_e timer)
1209{
1210  return tc->timers[timer] != TCP_TIMER_HANDLE_INVALID;
1211}
1212
1213#define tcp_validate_txf_size(_tc, _a) 					\
1214  ASSERT(_tc->state != TCP_STATE_ESTABLISHED 				\
1215	 || transport_max_tx_dequeue (&_tc->connection) >= _a)
1216
1217void tcp_rcv_sacks (tcp_connection_t * tc, u32 ack);
1218u8 *tcp_scoreboard_replay (u8 * s, tcp_connection_t * tc, u8 verbose);
1219
1220/**
1221 * Register existing cc algo type
1222 */
1223void tcp_cc_algo_register (tcp_cc_algorithm_type_e type,
1224			   const tcp_cc_algorithm_t * vft);
1225
1226/**
1227 * Register new cc algo type
1228 */
1229tcp_cc_algorithm_type_e tcp_cc_algo_new_type (const tcp_cc_algorithm_t * vft);
1230tcp_cc_algorithm_t *tcp_cc_algo_get (tcp_cc_algorithm_type_e type);
1231
1232static inline void *
1233tcp_cc_data (tcp_connection_t * tc)
1234{
1235  return (void *) tc->cc_data;
1236}
1237
1238void newreno_rcv_cong_ack (tcp_connection_t * tc, tcp_cc_ack_t ack_type,
1239			   tcp_rate_sample_t * rs);
1240
1241/**
1242 * Push TCP header to buffer
1243 *
1244 * @param vm - vlib_main
1245 * @param b - buffer to write the header to
1246 * @param sp_net - source port net order
1247 * @param dp_net - destination port net order
1248 * @param seq - sequence number net order
1249 * @param ack - ack number net order
1250 * @param tcp_hdr_opts_len - header and options length in bytes
1251 * @param flags - header flags
1252 * @param wnd - window size
1253 *
1254 * @return - pointer to start of TCP header
1255 */
1256always_inline void *
1257vlib_buffer_push_tcp_net_order (vlib_buffer_t * b, u16 sp, u16 dp, u32 seq,
1258				u32 ack, u8 tcp_hdr_opts_len, u8 flags,
1259				u16 wnd)
1260{
1261  tcp_header_t *th;
1262
1263  th = vlib_buffer_push_uninit (b, tcp_hdr_opts_len);
1264
1265  th->src_port = sp;
1266  th->dst_port = dp;
1267  th->seq_number = seq;
1268  th->ack_number = ack;
1269  th->data_offset_and_reserved = (tcp_hdr_opts_len >> 2) << 4;
1270  th->flags = flags;
1271  th->window = wnd;
1272  th->checksum = 0;
1273  th->urgent_pointer = 0;
1274  vnet_buffer (b)->l4_hdr_offset = (u8 *) th - b->data;
1275  b->flags |= VNET_BUFFER_F_L4_HDR_OFFSET_VALID;
1276  return th;
1277}
1278
1279/**
1280 * Push TCP header to buffer
1281 *
1282 * @param b - buffer to write the header to
1283 * @param sp_net - source port net order
1284 * @param dp_net - destination port net order
1285 * @param seq - sequence number host order
1286 * @param ack - ack number host order
1287 * @param tcp_hdr_opts_len - header and options length in bytes
1288 * @param flags - header flags
1289 * @param wnd - window size
1290 *
1291 * @return - pointer to start of TCP header
1292 */
1293always_inline void *
1294vlib_buffer_push_tcp (vlib_buffer_t * b, u16 sp_net, u16 dp_net, u32 seq,
1295		      u32 ack, u8 tcp_hdr_opts_len, u8 flags, u16 wnd)
1296{
1297  return vlib_buffer_push_tcp_net_order (b, sp_net, dp_net,
1298					 clib_host_to_net_u32 (seq),
1299					 clib_host_to_net_u32 (ack),
1300					 tcp_hdr_opts_len, flags,
1301					 clib_host_to_net_u16 (wnd));
1302}
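
/*
 * Illustrative sketch (assumes a freshly allocated vlib_buffer_t *b with
 * sufficient pre-data space): pushing a bare, option-less ACK header for
 * connection tc. Real callers size the header from snd_opts_len and use
 * the connection's advertised-window logic; this only shows the call shape:
 *
 *   u8 hdr_len = sizeof (tcp_header_t);   // 20 bytes, no options here
 *   tcp_header_t *th;
 *   th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port,
 *                              tc->snd_nxt, tc->rcv_nxt, hdr_len,
 *                              TCP_FLAG_ACK, tc->rcv_wnd >> tc->rcv_wscale);
 */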
1303
1304#endif /* _vnet_tcp_h_ */
1305
1306/*
1307 * fd.io coding-style-patch-verification: ON
1308 *
1309 * Local Variables:
1310 * eval: (c-set-style "gnu")
1311 * End:
1312 */
1313