/* tcp.h revision 02833ff3 */
/*
2 * Copyright (c) 2016-2019 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 *     http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16#ifndef _vnet_tcp_h_
17#define _vnet_tcp_h_
18
19#include <vnet/vnet.h>
20#include <vnet/ip/ip.h>
21#include <vnet/tcp/tcp_packet.h>
22#include <vnet/tcp/tcp_timer.h>
23#include <vnet/session/transport.h>
24#include <vnet/session/session.h>
25#include <vnet/tcp/tcp_debug.h>
26
#define TCP_TICK 0.001			/**< TCP tick period (s) */
#define THZ ((u32) (1 / TCP_TICK))	/**< TCP tick frequency */
#define TCP_TSTAMP_RESOLUTION TCP_TICK	/**< Time stamp resolution */
#define TCP_PAWS_IDLE (24 * 24 * 60 * 60 * THZ) /**< 24 days */
#define TCP_FIB_RECHECK_PERIOD	(1 * THZ) /**< Recheck every 1s */
#define TCP_MAX_OPTION_SPACE 40		/**< Max tcp option bytes in header */
#define TCP_CC_DATA_SZ 24		/**< Size of cc algo private data */
#define TCP_MAX_GSO_SZ 65536		/**< Max GSO segment size */
#define TCP_RXT_MAX_BURST 10		/**< Max segments per rxt burst */

#define TCP_DUPACK_THRESHOLD 	3	/**< Dupacks before fast retransmit */
#define TCP_IW_N_SEGMENTS 	10	/**< Initial window, in segments */
#define TCP_ALWAYS_ACK		1	/**< On/off delayed acks */
#define TCP_USE_SACKS		1	/**< Disable only for testing */
41
/** TCP FSM state definitions as per RFC793. */
#define foreach_tcp_fsm_state   \
  _(CLOSED, "CLOSED")           \
  _(LISTEN, "LISTEN")           \
  _(SYN_SENT, "SYN_SENT")       \
  _(SYN_RCVD, "SYN_RCVD")       \
  _(ESTABLISHED, "ESTABLISHED") \
  _(CLOSE_WAIT, "CLOSE_WAIT")   \
  _(FIN_WAIT_1, "FIN_WAIT_1")   \
  _(LAST_ACK, "LAST_ACK")       \
  _(CLOSING, "CLOSING")         \
  _(FIN_WAIT_2, "FIN_WAIT_2")   \
  _(TIME_WAIT, "TIME_WAIT")

/** FSM states generated from the list above (TCP_STATE_CLOSED, ...) */
typedef enum _tcp_state
{
#define _(sym, str) TCP_STATE_##sym,
  foreach_tcp_fsm_state
#undef _
  TCP_N_STATES			/**< Number of states; not a valid state */
} tcp_state_t;

/* format helpers for tcp state, header flags and sack blocks */
format_function_t format_tcp_state;
format_function_t format_tcp_flags;
format_function_t format_tcp_sacks;
format_function_t format_tcp_rcv_sacks;
68
/** TCP timers; one handle slot per timer per connection */
#define foreach_tcp_timer               \
  _(RETRANSMIT, "RETRANSMIT")           \
  _(DELACK, "DELAYED ACK")              \
  _(PERSIST, "PERSIST")                 \
  _(WAITCLOSE, "WAIT CLOSE")            \
  _(RETRANSMIT_SYN, "RETRANSMIT SYN")   \

typedef enum _tcp_timers
{
#define _(sym, str) TCP_TIMER_##sym,
  foreach_tcp_timer
#undef _
  TCP_N_TIMERS			/**< Number of timers per connection */
} tcp_timers_e;

/** Signature for timer expiration callbacks; index is the timer user index */
typedef void (timer_expiration_handler) (u32 index);

extern timer_expiration_handler tcp_timer_delack_handler;
extern timer_expiration_handler tcp_timer_retransmit_handler;
extern timer_expiration_handler tcp_timer_persist_handler;
extern timer_expiration_handler tcp_timer_retransmit_syn_handler;

/** Sentinel value for a timer slot that is not running */
#define TCP_TIMER_HANDLE_INVALID ((u32) ~0)
93
#define TCP_TIMER_TICK		0.1		/**< Timer tick in seconds */
/* Parenthesized: the previous unparenthesized expansion (TCP_TICK*10) turned
 * `x / TCP_TO_TIMER_TICK` into `x / TCP_TICK * 10` */
#define TCP_TO_TIMER_TICK       (TCP_TICK * 10)	/**< Factor for converting
						     ticks to timer ticks */

#define TCP_RTO_MAX (60 * THZ)	/* Min max RTO (60s) as per RFC6298 */
#define TCP_RTO_MIN (0.2 * THZ)	/* Min RTO (200ms) - lower than standard */
#define TCP_RTT_MAX (30 * THZ)	/* 30s (probably too much) */
#define TCP_RTO_SYN_RETRIES 3	/* SYN retries without doubling RTO */
#define TCP_RTO_INIT (1 * THZ)	/* Initial retransmit timer */
#define TCP_RTO_BOFF_MAX 8	/* Max number of retries before reset */
#define TCP_ESTABLISH_TIME (60 * THZ)	/* Connection establish timeout */
105
/** TCP connection flags; kept in tcp_connection_t.flags (u16) */
#define foreach_tcp_connection_flag             \
  _(SNDACK, "Send ACK")                         \
  _(FINSNT, "FIN sent")				\
  _(RECOVERY, "Recovery")                    	\
  _(FAST_RECOVERY, "Fast Recovery")		\
  _(DCNT_PENDING, "Disconnect pending")		\
  _(HALF_OPEN_DONE, "Half-open completed")	\
  _(FINPNDG, "FIN pending")			\
  _(RXT_PENDING, "Retransmit pending")		\
  _(FRXT_FIRST, "Fast-retransmit first again")	\
  _(DEQ_PENDING, "Pending dequeue acked")	\
  _(PSH_PENDING, "PSH pending")			\
  _(FINRCVD, "FIN received")			\
  _(RATE_SAMPLE, "Conn does rate sampling")	\
  _(TRACK_BURST, "Track burst")			\
  _(ZERO_RWND_SENT, "Zero RWND sent")		\
  _(NO_CSUM_OFFLOAD, "No Checksum Offload")     \

/** Bit positions for the connection flags above */
typedef enum _tcp_connection_flag_bits
{
#define _(sym, str) TCP_CONN_##sym##_BIT,
  foreach_tcp_connection_flag
#undef _
  TCP_CONN_N_FLAG_BITS
} tcp_connection_flag_bits_e;

/** Connection flag masks (1 << bit) usable directly against .flags */
typedef enum _tcp_connection_flag
{
#define _(sym, str) TCP_CONN_##sym = 1 << TCP_CONN_##sym##_BIT,
  foreach_tcp_connection_flag
#undef _
  TCP_CONN_N_FLAGS
} tcp_connection_flags_e;
140
#define TCP_SCOREBOARD_TRACE (0)	/**< Build-time scoreboard tracing */
#define TCP_MAX_SACK_BLOCKS 256	/**< Max number of SACK blocks stored */
#define TCP_INVALID_SACK_HOLE_INDEX ((u32)~0)	/**< Sentinel hole pool index */

/** One trace record; only populated when TCP_SCOREBOARD_TRACE is enabled */
typedef struct _scoreboard_trace_elt
{
  u32 start;		/**< Sack block start sequence */
  u32 end;		/**< Sack block end sequence */
  u32 ack;		/**< Ack, recorded only when end == ack */
  u32 snd_una_max;	/**< snd_una_max, recorded only when end == ack */
  u32 group;		/**< Id grouping entries from the same ack */
} scoreboard_trace_elt_t;

/** A hole (un-sacked gap) tracked by the SACK scoreboard, pool-allocated */
typedef struct _sack_scoreboard_hole
{
  u32 next;		/**< Index for next entry in linked list */
  u32 prev;		/**< Index for previous entry in linked list */
  u32 start;		/**< Start sequence number */
  u32 end;		/**< End sequence number */
  u8 is_lost;		/**< Mark hole as lost */
} sack_scoreboard_hole_t;

/** Per-connection SACK scoreboard: doubly linked list of holes + counters */
typedef struct _sack_scoreboard
{
  sack_scoreboard_hole_t *holes;	/**< Pool of holes */
  u32 head;				/**< Index of first entry */
  u32 tail;				/**< Index of last entry */
  u32 sacked_bytes;			/**< Number of bytes sacked in sb */
  u32 last_sacked_bytes;		/**< Number of bytes last sacked */
  u32 last_bytes_delivered;		/**< Sack bytes delivered to app */
  u32 rxt_sacked;			/**< Rxt last delivered */
  u32 high_sacked;			/**< Highest byte sacked (fack) */
  u32 high_rxt;				/**< Highest retransmitted sequence */
  u32 rescue_rxt;			/**< Rescue sequence number */
  u32 lost_bytes;			/**< Bytes lost as per RFC6675 */
  u32 last_lost_bytes;			/**< Number of bytes last lost */
  u32 cur_rxt_hole;			/**< Retransmitting from this hole */
  u8 is_reneging;			/**< Set when peer is reneging on
					     previously sacked bytes */

#if TCP_SCOREBOARD_TRACE
  scoreboard_trace_elt_t *trace;	/**< Vector of trace records */
#endif

} sack_scoreboard_t;
185
#if TCP_SCOREBOARD_TRACE
/* Append one trace record per received sack block. Wrapped in
 * do { } while (0) so the macro behaves as a single statement in
 * if/else chains (the bare-brace form did not). */
#define tcp_scoreboard_trace_add(_tc, _ack) 				\
do {									\
    static u64 _group = 0;						\
    sack_scoreboard_t *_sb = &_tc->sack_sb;				\
    sack_block_t *_sack, *_sacks;					\
    scoreboard_trace_elt_t *_elt;					\
    int i;								\
    _group++;								\
    _sacks = _tc->rcv_opts.sacks;					\
    for (i = 0; i < vec_len (_sacks); i++) 				\
      {									\
	_sack = &_sacks[i];						\
	vec_add2 (_sb->trace, _elt, 1);					\
	_elt->start = _sack->start;					\
	_elt->end = _sack->end;						\
	_elt->ack = _elt->end == _ack ? _ack : 0;			\
	_elt->snd_una_max = _elt->end == _ack ? _tc->snd_una_max : 0;	\
	_elt->group = _group;						\
      }									\
} while (0)
#else
#define tcp_scoreboard_trace_add(_tc, _ack)
#endif
210
/* Scoreboard hole lookup and iteration */
sack_scoreboard_hole_t *scoreboard_next_rxt_hole (sack_scoreboard_t * sb,
						  sack_scoreboard_hole_t *
						  start, u8 have_sent_1_smss,
						  u8 * can_rescue,
						  u8 * snd_limited);
sack_scoreboard_hole_t *scoreboard_get_hole (sack_scoreboard_t * sb,
					     u32 index);

sack_scoreboard_hole_t *scoreboard_next_hole (sack_scoreboard_t * sb,
					      sack_scoreboard_hole_t * hole);
sack_scoreboard_hole_t *scoreboard_prev_hole (sack_scoreboard_t * sb,
					      sack_scoreboard_hole_t * hole);
sack_scoreboard_hole_t *scoreboard_first_hole (sack_scoreboard_t * sb);
sack_scoreboard_hole_t *scoreboard_last_hole (sack_scoreboard_t * sb);

/* Scoreboard lifecycle and maintenance */
void scoreboard_clear (sack_scoreboard_t * sb);
void scoreboard_clear_reneging (sack_scoreboard_t * sb, u32 start, u32 end);
void scoreboard_init (sack_scoreboard_t * sb);
void scoreboard_init_high_rxt (sack_scoreboard_t * sb, u32 snd_una);
u8 *format_tcp_scoreboard (u8 * s, va_list * args);
231
#define TCP_BTS_INVALID_INDEX	((u32)~0)	/**< Sentinel bt sample index */

/** Flags attached to byte tracker samples */
typedef enum tcp_bts_flags_
{
  TCP_BTS_IS_RXT = 1,			/**< Sample covers retransmitted bytes */
  TCP_BTS_IS_APP_LIMITED = 1 << 1,	/**< Sample taken while app limited */
} __clib_packed tcp_bts_flags_t;

/** One tracked tx burst, pool-allocated and linked by index */
typedef struct tcp_bt_sample_
{
  u32 next;			/**< Next sample index in list */
  u32 prev;			/**< Previous sample index in list */
  u32 min_seq;			/**< Min seq number in sample */
  u32 max_seq;			/**< Max seq number. Set for rxt samples */
  u64 delivered;		/**< Total delivered bytes for sample */
  f64 delivered_time;		/**< Delivered time when sample taken */
  f64 tx_time;			/**< Transmit time for the burst */
  f64 first_tx_time;		/**< Connection first tx time at tx */
  tcp_bts_flags_t flags;	/**< Sample flag */
} tcp_bt_sample_t;

/** Delivery rate sample generated from acked bytes */
typedef struct tcp_rate_sample_
{
  u64 prior_delivered;		/**< Delivered of sample used for rate, i.e.,
				     total bytes delivered at prior_time */
  f64 prior_time;		/**< Delivered time of sample used for rate */
  f64 interval_time;		/**< Time to ack the bytes delivered */
  f64 rtt_time;			/**< RTT for sample */
  u32 delivered;		/**< Bytes delivered in interval_time */
  u32 acked_and_sacked;		/**< Bytes acked + sacked now */
  u32 lost;			/**< Bytes lost now */
  tcp_bts_flags_t flags;	/**< Rate sample flags from bt sample */
} tcp_rate_sample_t;

/** Byte tracker: samples list plus rbtree lookup by sequence */
typedef struct tcp_byte_tracker_
{
  tcp_bt_sample_t *samples;	/**< Pool of samples */
  rb_tree_t sample_lookup;	/**< Rbtree for sample lookup by min_seq */
  u32 head;			/**< Head of samples linked list */
  u32 tail;			/**< Tail of samples linked list */
  u32 last_ooo;			/**< Cached last ooo sample */
} tcp_byte_tracker_t;

/** Congestion control algorithms available */
typedef enum _tcp_cc_algorithm_type
{
  TCP_CC_NEWRENO,
  TCP_CC_CUBIC,
  TCP_CC_LAST = TCP_CC_CUBIC	/**< Last built-in algo; plugins extend past */
} tcp_cc_algorithm_type_e;

typedef struct _tcp_cc_algorithm tcp_cc_algorithm_t;

/** Ack classification handed to cc algorithms */
typedef enum _tcp_cc_ack_t
{
  TCP_CC_ACK,
  TCP_CC_DUPACK,
  TCP_CC_PARTIALACK
} tcp_cc_ack_t;

/** Events delivered to cc algorithms via the event callback */
typedef enum tcp_cc_event_
{
  TCP_CC_EVT_START_TX,
} tcp_cc_event_t;

/*
 * As per RFC4898 tcpEStatsStackSoftErrors
 */
typedef struct tcp_errors_
{
  u32 below_data_wnd;	/**< All data in seg is below snd_una */
  u32 above_data_wnd;	/**< Some data in segment is above snd_wnd */
  u32 below_ack_wnd;	/**< Acks for data below snd_una */
  u32 above_ack_wnd;	/**< Acks for data not sent */
} tcp_errors_t;
306
/** TCP connection state; one per established/half-open/listening session */
typedef struct _tcp_connection
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  transport_connection_t connection;  /**< Common transport data. First! */

  u8 state;			/**< TCP state as per tcp_state_t */
  u8 is_tso;	  /**< Set if connection can use TSO */
  u16 flags;			/**< Connection flags (see tcp_conn_flags_e) */
  u32 timers[TCP_N_TIMERS];	/**< Timer handles into timer wheel */

  u64 segs_in;		/**< RFC4022/4898 tcpHCInSegs/tcpEStatsPerfSegsIn */
  u64 bytes_in;		/**< RFC4898 tcpEStatsPerfHCDataOctetsIn */
  u64 segs_out;		/**< RFC4898 tcpEStatsPerfSegsOut */
  u64 bytes_out;	/**< RFC4898 tcpEStatsPerfHCDataOctetsOut */

  /** Send sequence variables RFC793 */
  u32 snd_una;		/**< oldest unacknowledged sequence number */
  u32 snd_una_max;	/**< newest unacknowledged sequence number + 1*/
  u32 snd_wnd;		/**< send window */
  u32 snd_wl1;		/**< seq number used for last snd.wnd update */
  u32 snd_wl2;		/**< ack number used for last snd.wnd update */
  u32 snd_nxt;		/**< next seq number to be sent */
  u16 snd_mss;		/**< Effective send max seg (data) size */

  u64 data_segs_in;	/**< RFC4898 tcpEStatsPerfDataSegsIn */
  u64 data_segs_out;	/**< RFC4898 tcpEStatsPerfDataSegsOut */

  /** Receive sequence variables RFC793 */
  u32 rcv_nxt;		/**< next sequence number expected */
  u32 rcv_wnd;		/**< receive window we expect */

  u32 rcv_las;		/**< rcv_nxt at last ack sent/rcv_wnd update */
  u32 iss;		/**< initial sent sequence */
  u32 irs;		/**< initial remote sequence */

  /* Options */
  u8 snd_opts_len;		/**< Tx options len */
  u8 rcv_wscale;		/**< Window scale to advertise to peer */
  u8 snd_wscale;		/**< Window scale to use when sending */
  u32 tsval_recent;		/**< Last timestamp received */
  u32 tsval_recent_age;		/**< When last updated tstamp_recent*/
  tcp_options_t snd_opts;	/**< Tx options for connection */
  tcp_options_t rcv_opts;	/**< Rx options for connection */

  sack_block_t *snd_sacks;	/**< Vector of SACKs to send. XXX Fixed size? */
  u8 snd_sack_pos;		/**< Position in vec of first block to send */
  sack_block_t *snd_sacks_fl;	/**< Vector for building new list */
  sack_scoreboard_t sack_sb;	/**< SACK "scoreboard" that tracks holes */

  u16 rcv_dupacks;	/**< Number of recent DUPACKs received */
  u32 dupacks_in;	/**< RFC4898 tcpEStatsStackDupAcksIn*/
  u8 pending_dupacks;	/**< Number of DUPACKs to be sent */
  u32 dupacks_out;	/**< RFC4898 tcpEStatsPathDupAcksOut */

  /* Congestion control */
  u32 cwnd;		/**< Congestion window */
  u32 cwnd_acc_bytes;	/**< Bytes accumulated for cwnd increment */
  u32 ssthresh;		/**< Slow-start threshold */
  u32 prev_ssthresh;	/**< ssthresh before congestion */
  u32 prev_cwnd;	/**< cwnd before congestion */
  u32 bytes_acked;	/**< Bytes acknowledged by current segment */
  u32 burst_acked;	/**< Bytes acknowledged in current burst */
  u32 snd_rxt_bytes;	/**< Retransmitted bytes during current cc event */
  u32 snd_rxt_ts;	/**< Timestamp when first packet is retransmitted */
  u32 prr_delivered;	/**< RFC6937 bytes delivered during current event */
  u32 rxt_delivered;	/**< Rxt bytes delivered during current cc event */
  u32 tsecr_last_ack;	/**< Timestamp echoed to us in last healthy ACK */
  u32 snd_congestion;	/**< snd_una_max when congestion is detected */
  u32 tx_fifo_size;	/**< Tx fifo size. Used to constrain cwnd */
  tcp_cc_algorithm_t *cc_algo;	/**< Congestion control algorithm */
  u8 cc_data[TCP_CC_DATA_SZ];	/**< Congestion control algo private data */

  u32 fr_occurences;	/**< fast-retransmit occurrences RFC4898
			     tcpEStatsStackFastRetran */
  u32 tr_occurences;	/**< timer-retransmit occurrences */
  u64 bytes_retrans;	/**< RFC4898 tcpEStatsPerfOctetsRetrans */
  u64 segs_retrans;	/**< RFC4898 tcpEStatsPerfSegsRetrans*/

  /* RTT and RTO */
  u32 rto;		/**< Retransmission timeout */
  u32 rto_boff;		/**< Index for RTO backoff */
  u32 srtt;		/**< Smoothed RTT */
  u32 rttvar;		/**< Smoothed mean RTT difference. Approximates variance */
  u32 rtt_seq;		/**< Sequence number for tracked ACK */
  f64 rtt_ts;		/**< Timestamp for tracked ACK */
  f64 mrtt_us;		/**< High precision mrtt from tracked acks */

  u32 psh_seq;		/**< Add psh header for seg that includes this */
  u32 next_node_index;	/**< Can be used to control next node in output */
  u32 next_node_opaque;	/**< Opaque to pass to next node */
  u32 limited_transmit;	/**< snd_nxt when limited transmit starts */
  u32 sw_if_index;	/**< Interface for the connection */

  /* Delivery rate estimation */
  u64 delivered;		/**< Total bytes delivered to peer */
  u64 app_limited;		/**< Delivered when app-limited detected */
  f64 delivered_time;		/**< Time last bytes were acked */
  f64 first_tx_time;		/**< Send time for recently delivered/sent */
  tcp_byte_tracker_t *bt;	/**< Tx byte tracker */

  tcp_errors_t errors;	/**< Soft connection errors */

  f64 start_ts;		/**< Timestamp when connection initialized */
  u32 last_fib_check;	/**< Last time we checked fib route for peer */
  u16 mss;		/**< Our max seg size that includes options */
  u32 timestamp_delta;	/**< Offset for timestamp */
} tcp_connection_t;
414
/* *INDENT-OFF* */
/** Congestion control algorithm virtual function table. Algorithms register
 *  one of these; only rcv_ack/rcv_cong_ack style callbacks they need. */
struct _tcp_cc_algorithm
{
  const char *name;		/**< Algo name used for registration/lookup */
  uword (*unformat_cfg) (unformat_input_t * input);
  void (*init) (tcp_connection_t * tc);
  void (*cleanup) (tcp_connection_t * tc);
  void (*rcv_ack) (tcp_connection_t * tc, tcp_rate_sample_t *rs);
  void (*rcv_cong_ack) (tcp_connection_t * tc, tcp_cc_ack_t ack,
			tcp_rate_sample_t *rs);
  void (*congestion) (tcp_connection_t * tc);
  void (*loss) (tcp_connection_t * tc);
  void (*recovered) (tcp_connection_t * tc);
  void (*undo_recovery) (tcp_connection_t * tc);
  void (*event) (tcp_connection_t *tc, tcp_cc_event_t evt);
  u64 (*get_pacing_rate) (tcp_connection_t *tc);
};
/* *INDENT-ON* */
433
/* Predicates and mutators for recovery-related connection flags. All
 * expansions fully parenthesized so they compose safely in expressions. */
#define tcp_fastrecovery_on(tc) ((tc)->flags |= TCP_CONN_FAST_RECOVERY)
#define tcp_fastrecovery_off(tc) ((tc)->flags &= ~TCP_CONN_FAST_RECOVERY)
#define tcp_recovery_on(tc) ((tc)->flags |= TCP_CONN_RECOVERY)
#define tcp_recovery_off(tc) ((tc)->flags &= ~TCP_CONN_RECOVERY)
#define tcp_in_fastrecovery(tc) ((tc)->flags & TCP_CONN_FAST_RECOVERY)
#define tcp_in_recovery(tc) ((tc)->flags & (TCP_CONN_RECOVERY))
#define tcp_in_slowstart(tc) ((tc)->cwnd < (tc)->ssthresh)
#define tcp_disconnect_pending(tc) ((tc)->flags & TCP_CONN_DCNT_PENDING)
#define tcp_disconnect_pending_on(tc) ((tc)->flags |= TCP_CONN_DCNT_PENDING)
#define tcp_disconnect_pending_off(tc) ((tc)->flags &= ~TCP_CONN_DCNT_PENDING)
#define tcp_fastrecovery_first(tc) ((tc)->flags & TCP_CONN_FRXT_FIRST)
#define tcp_fastrecovery_first_on(tc) ((tc)->flags |= TCP_CONN_FRXT_FIRST)
#define tcp_fastrecovery_first_off(tc) ((tc)->flags &= ~TCP_CONN_FRXT_FIRST)

#define tcp_in_cong_recovery(tc) ((tc)->flags & 		\
	  (TCP_CONN_FAST_RECOVERY | TCP_CONN_RECOVERY))
450
451always_inline void
452tcp_cong_recovery_off (tcp_connection_t * tc)
453{
454  tc->flags &= ~(TCP_CONN_FAST_RECOVERY | TCP_CONN_RECOVERY);
455  tcp_fastrecovery_first_off (tc);
456}
457
/* Zero receive-window advertisement tracking; expansions parenthesized so
 * the mutators are safe inside larger expressions */
#define tcp_zero_rwnd_sent(tc) ((tc)->flags & TCP_CONN_ZERO_RWND_SENT)
#define tcp_zero_rwnd_sent_on(tc) ((tc)->flags |= TCP_CONN_ZERO_RWND_SENT)
#define tcp_zero_rwnd_sent_off(tc) ((tc)->flags &= ~TCP_CONN_ZERO_RWND_SENT)
461
/** Error counters generated from tcp_error.def */
typedef enum _tcp_error
{
#define tcp_error(n,s) TCP_ERROR_##n,
#include <vnet/tcp/tcp_error.def>
#undef tcp_error
  TCP_N_ERROR,			/**< Number of error counters */
} tcp_error_t;

/** One input dispatch table entry: next node index + error counter */
typedef struct _tcp_lookup_dispatch
{
  u8 next, error;
} tcp_lookup_dispatch_t;
474
475typedef struct tcp_worker_ctx_
476{
477  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
478  /** worker time */
479  u32 time_now;
480
481  /** worker timer wheel */
482  tw_timer_wheel_16t_2w_512sl_t timer_wheel;
483
484  /** tx buffer free list */
485  u32 *tx_buffers;
486
487  /** tx frames for tcp 4/6 output nodes */
488  vlib_frame_t *tx_frames[2];
489
490  /** tx frames for ip 4/6 lookup nodes */
491  vlib_frame_t *ip_lookup_tx_frames[2];
492
493  /** vector of pending ack dequeues */
494  u32 *pending_deq_acked;
495
496  /** vector of pending disconnect notifications */
497  u32 *pending_disconnects;
498
499  /** convenience pointer to this thread's vlib main */
500  vlib_main_t *vm;
501
502    CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
503
504  /** cached 'on the wire' options for bursts */
505  u8 cached_opts[40];
506
507} tcp_worker_ctx_t;
508
/** Seed material for initial sequence number generation */
typedef struct tcp_iss_seed_
{
  u64 first;
  u64 second;
} tcp_iss_seed_t;

/** Runtime-configurable tcp stack parameters (see tcp_cfg) */
typedef struct tcp_configuration_
{
  /** Max rx fifo size for a session (in bytes). It is used in to compute the
   *  rfc 7323 window scaling factor */
  u32 max_rx_fifo;

  /** Min rx fifo for a session (in bytes) */
  u32 min_rx_fifo;

  /** Default MTU to be used when establishing connections */
  u16 default_mtu;

  /** Initial CWND multiplier, which multiplies MSS to determine initial CWND.
   *  Set 0 to determine the initial CWND by another way */
  u16 initial_cwnd_multiplier;

  /** Enable tx pacing for new connections */
  u8 enable_tx_pacing;

  /** Default congestion control algorithm type */
  tcp_cc_algorithm_type_e cc_algo;

  /** Min rwnd, as number of snd_mss segments, for update ack to be sent after
   * a zero rwnd advertisement */
  u32 rwnd_min_update_ack;

  /** Delayed ack time (disabled) */
  u16 delack_time;

  /** Timer ticks to wait for close from app */
  u16 closewait_time;

  /** Timer ticks to wait in time-wait. Also known as 2MSL */
  u16 timewait_time;

  /** Timer ticks to wait in fin-wait1 to send fin and rcv fin-ack */
  u16 finwait1_time;

  /** Timer ticks to wait in last ack for ack */
  u16 lastack_time;

  /** Timer ticks to wait in fin-wait2 for fin */
  u16 finwait2_time;

  /** Timer ticks to wait in closing for fin ack */
  u16 closing_time;

  /** Timer ticks to wait before cleaning up the connection */
  u16 cleanup_time;

  /** Number of preallocated connections */
  u32 preallocated_connections;

  /** Number of preallocated half-open connections */
  u32 preallocated_half_open_connections;

  /** Vectors of src addresses. Optional unless one needs > 63K active-opens */
  ip4_address_t *ip4_src_addrs;
  ip6_address_t *ip6_src_addrs;

  /** Fault-injection. Debug only */
  f64 buffer_fail_fraction;
} tcp_configuration_t;
578
/** Global tcp stack state; single instance tcp_main */
typedef struct _tcp_main
{
  /* Per-worker thread tcp connection pools */
  tcp_connection_t **connections;

  /* Pool of listeners. */
  tcp_connection_t *listener_pool;

  /** Dispatch table by state and flags */
  tcp_lookup_dispatch_t dispatch_table[TCP_N_STATES][64];

  u8 log2_tstamp_clocks_per_tick;
  f64 tstamp_ticks_per_clock;

  /** per-worker context */
  tcp_worker_ctx_t *wrk_ctx;

  /** Pool of half-open connections on which we've sent a SYN */
  tcp_connection_t *half_open_connections;
  clib_spinlock_t half_open_lock;

  /** vlib buffer size */
  u32 bytes_per_buffer;

  /** Seed used to generate random iss */
  tcp_iss_seed_t iss_seed;

  /** Congestion control algorithms registered */
  tcp_cc_algorithm_t *cc_algos;

  /** Hash table of cc algorithms by name */
  uword *cc_algo_by_name;

  /** Last cc algo registered */
  tcp_cc_algorithm_type_e cc_last_type;

  /** Flag that indicates if stack is on or off */
  u8 is_enabled;

  /** Flag that indicates if v4 punting is enabled */
  u8 punt_unknown4;

  /** Flag that indicates if v6 punting is enabled */
  u8 punt_unknown6;

  /** Rotor for v4 source addresses */
  u32 last_v4_addr_rotor;

  /** Rotor for v6 source addresses */
  u32 last_v6_addr_rotor;

  /** Protocol configuration */
  tcp_configuration_t cfg;
} tcp_main_t;

extern tcp_main_t tcp_main;
extern vlib_node_registration_t tcp4_input_node;
extern vlib_node_registration_t tcp6_input_node;
extern vlib_node_registration_t tcp4_output_node;
extern vlib_node_registration_t tcp6_output_node;
extern vlib_node_registration_t tcp4_established_node;
extern vlib_node_registration_t tcp6_established_node;
extern vlib_node_registration_t tcp4_syn_sent_node;
extern vlib_node_registration_t tcp6_syn_sent_node;
extern vlib_node_registration_t tcp4_rcv_process_node;
extern vlib_node_registration_t tcp6_rcv_process_node;
extern vlib_node_registration_t tcp4_listen_node;
extern vlib_node_registration_t tcp6_listen_node;

/** Shorthand for the global protocol configuration */
#define tcp_cfg tcp_main.cfg
/** Pick the v4 or v6 variant of a tcp graph node's index */
#define tcp_node_index(node_id, is_ip4) 				\
  ((is_ip4) ? tcp4_##node_id##_node.index : tcp6_##node_id##_node.index)
651
/** Accessor for the global tcp main instance */
always_inline tcp_main_t *
vnet_get_tcp_main ()
{
  return &tcp_main;
}
657
658always_inline tcp_worker_ctx_t *
659tcp_get_worker (u32 thread_index)
660{
661  return &tcp_main.wrk_ctx[thread_index];
662}
663
/** Get pointer to the tcp header within buffer @a b.
 *  NOTE(review): assumes vnet_buffer(b)->tcp.hdr_offset was set upstream. */
always_inline tcp_header_t *
tcp_buffer_hdr (vlib_buffer_t * b)
{
  /* current_data may be negative, but never before the pre-data area */
  ASSERT ((signed) b->current_data >= (signed) -VLIB_BUFFER_PRE_DATA_SIZE);
  return (tcp_header_t *) (b->data + b->current_data
			   + vnet_buffer (b)->tcp.hdr_offset);
}
671
/* Buffer trajectory tracing hook; compiles away when tracing is disabled */
#if (VLIB_BUFFER_TRACE_TRAJECTORY)
#define tcp_trajectory_add_start(b, start)			\
{								\
    (*vlib_buffer_trace_trajectory_cb) (b, start);		\
}
#else
#define tcp_trajectory_add_start(b, start)
#endif

/** Enable/disable the tcp stack */
clib_error_t *vnet_tcp_enable_disable (vlib_main_t * vm, u8 is_en);

/** Add/remove punting of unknown-session tcp packets */
void tcp_punt_unknown (vlib_main_t * vm, u8 is_ip4, u8 is_add);
684
685always_inline tcp_connection_t *
686tcp_connection_get (u32 conn_index, u32 thread_index)
687{
688  if (PREDICT_FALSE
689      (pool_is_free_index (tcp_main.connections[thread_index], conn_index)))
690    return 0;
691  return pool_elt_at_index (tcp_main.connections[thread_index], conn_index);
692}
693
694always_inline tcp_connection_t *
695tcp_connection_get_if_valid (u32 conn_index, u32 thread_index)
696{
697  if (tcp_main.connections[thread_index] == 0)
698    return 0;
699  if (pool_is_free_index (tcp_main.connections[thread_index], conn_index))
700    return 0;
701  return pool_elt_at_index (tcp_main.connections[thread_index], conn_index);
702}
703
/** Downcast from the embedded transport connection; valid because
 *  transport_connection_t is the first member of tcp_connection_t */
always_inline tcp_connection_t *
tcp_get_connection_from_transport (transport_connection_t * tconn)
{
  return (tcp_connection_t *) tconn;
}
709
/** Single point for FSM state transitions so they can all be traced */
always_inline void
tcp_connection_set_state (tcp_connection_t * tc, tcp_state_t state)
{
  tc->state = state;
  TCP_EVT (TCP_EVT_STATE_CHANGE, tc);
}
716
/* Connection lifecycle */
void tcp_connection_close (tcp_connection_t * tc);
void tcp_connection_cleanup (tcp_connection_t * tc);
void tcp_connection_del (tcp_connection_t * tc);
int tcp_half_open_connection_cleanup (tcp_connection_t * tc);
tcp_connection_t *tcp_connection_alloc (u8 thread_index);
tcp_connection_t *tcp_connection_alloc_w_base (u8 thread_index,
					       tcp_connection_t * base);
void tcp_connection_free (tcp_connection_t * tc);
void tcp_connection_reset (tcp_connection_t * tc);
/* Source address pools for high active-open counts */
int tcp_configure_v4_source_address_range (vlib_main_t * vm,
					   ip4_address_t * start,
					   ip4_address_t * end, u32 table_id);
int tcp_configure_v6_source_address_range (vlib_main_t * vm,
					   ip6_address_t * start,
					   ip6_address_t * end, u32 table_id);
void tcp_api_reference (void);
u8 *format_tcp_connection (u8 * s, va_list * args);
734
/** Get listener by pool index; caller must pass a valid index */
always_inline tcp_connection_t *
tcp_listener_get (u32 tli)
{
  return pool_elt_at_index (tcp_main.listener_pool, tli);
}
740
/** Look up a half-open connection; 0 if the index is not allocated.
 *  The pool is guarded by a spinlock (when initialized), so this is safe
 *  to call from any thread. */
always_inline tcp_connection_t *
tcp_half_open_connection_get (u32 conn_index)
{
  tcp_connection_t *tc = 0;
  clib_spinlock_lock_if_init (&tcp_main.half_open_lock);
  if (!pool_is_free_index (tcp_main.half_open_connections, conn_index))
    tc = pool_elt_at_index (tcp_main.half_open_connections, conn_index);
  clib_spinlock_unlock_if_init (&tcp_main.half_open_lock);
  return tc;
}
751
/* Segment construction and transmission */
void tcp_make_fin (tcp_connection_t * tc, vlib_buffer_t * b);
void tcp_make_synack (tcp_connection_t * ts, vlib_buffer_t * b);
void tcp_send_reset_w_pkt (tcp_connection_t * tc, vlib_buffer_t * pkt,
			   u32 thread_index, u8 is_ip4);
void tcp_send_reset (tcp_connection_t * tc);
void tcp_send_syn (tcp_connection_t * tc);
void tcp_send_synack (tcp_connection_t * tc);
void tcp_send_fin (tcp_connection_t * tc);
void tcp_update_burst_snd_vars (tcp_connection_t * tc);
void tcp_update_rto (tcp_connection_t * tc);
void tcp_flush_frame_to_output (tcp_worker_ctx_t * wrk, u8 is_ip4);
void tcp_flush_frames_to_output (tcp_worker_ctx_t * wrk);
void tcp_send_window_update_ack (tcp_connection_t * tc);

/* Deferred output programming */
void tcp_program_ack (tcp_connection_t * tc);
void tcp_program_dupack (tcp_connection_t * tc);
void tcp_program_retransmit (tcp_connection_t * tc);
768void tcp_program_retransmit (tcp_connection_t * tc);
769
770/*
771 * Rate estimation
772 */
773
774/**
775 * Byte tracker initialize
776 *
777 * @param tc	connection for which the byte tracker should be allocated and
778 * 		initialized
779 */
780void tcp_bt_init (tcp_connection_t * tc);
781/**
782 * Byte tracker cleanup
783 *
784 * @param tc	connection for which the byte tracker should be cleaned up
785 */
786void tcp_bt_cleanup (tcp_connection_t * tc);
787/**
788 * Flush byte tracker samples
789 *
790 * @param tc	tcp connection for which samples should be flushed
791 */
792void tcp_bt_flush_samples (tcp_connection_t * tc);
793/**
794 * Track a tcp tx burst
795 *
796 * @param tc	tcp connection
797 */
798void tcp_bt_track_tx (tcp_connection_t * tc);
799/**
800 * Track a tcp retransmission
801 *
802 * @param tc	tcp connection
803 * @param start	start sequence number
804 * @param end	end sequence number
805 */
806void tcp_bt_track_rxt (tcp_connection_t * tc, u32 start, u32 end);
807/**
808 * Generate a delivery rate sample from recently acked bytes
809 *
810 * @param tc	tcp connection
811 * @param rs	resulting rate sample
812 */
813void tcp_bt_sample_delivery_rate (tcp_connection_t * tc,
814				  tcp_rate_sample_t * rs);
815/**
816 * Check if sample to be generated is app limited
817 *
818 * @param tc	tcp connection
819 */
820void tcp_bt_check_app_limited (tcp_connection_t * tc);
821/**
822 * Check if the byte tracker is in sane state
823 *
824 * Should be used only for testing
825 *
826 * @param bt	byte tracker
827 */
828int tcp_bt_is_sane (tcp_byte_tracker_t * bt);
829
830always_inline u32
831tcp_end_seq (tcp_header_t * th, u32 len)
832{
833  return th->seq_number + tcp_is_syn (th) + tcp_is_fin (th) + len;
834}
835
/* Modulo arithmetic for TCP sequence numbers: the i32 cast makes the
 * comparisons wrap-safe across the 2^32 boundary */
#define seq_lt(_s1, _s2) ((i32)((_s1)-(_s2)) < 0)
#define seq_leq(_s1, _s2) ((i32)((_s1)-(_s2)) <= 0)
#define seq_gt(_s1, _s2) ((i32)((_s1)-(_s2)) > 0)
#define seq_geq(_s1, _s2) ((i32)((_s1)-(_s2)) >= 0)
#define seq_max(_s1, _s2) (seq_gt((_s1), (_s2)) ? (_s1) : (_s2))

/* Modulo arithmetic for timestamps */
#define timestamp_lt(_t1, _t2) ((i32)((_t1)-(_t2)) < 0)
#define timestamp_leq(_t1, _t2) ((i32)((_t1)-(_t2)) <= 0)
846
847/**
848 * Our estimate of the number of bytes that have left the network
849 */
850always_inline u32
851tcp_bytes_out (const tcp_connection_t * tc)
852{
853  if (tcp_opts_sack_permitted (&tc->rcv_opts))
854    return tc->sack_sb.sacked_bytes + tc->sack_sb.lost_bytes;
855  else
856    return tc->rcv_dupacks * tc->snd_mss;
857}
858
859/**
860 * Our estimate of the number of bytes in flight (pipe size)
861 */
862always_inline u32
863tcp_flight_size (const tcp_connection_t * tc)
864{
865  int flight_size;
866
867  flight_size = (int) (tc->snd_nxt - tc->snd_una) - tcp_bytes_out (tc)
868    + tc->snd_rxt_bytes - tc->rxt_delivered;
869
870  ASSERT (flight_size >= 0);
871
872  return flight_size;
873}
874
875/**
876 * Initial cwnd as per RFC5681
877 */
878always_inline u32
879tcp_initial_cwnd (const tcp_connection_t * tc)
880{
881  if (tcp_cfg.initial_cwnd_multiplier > 0)
882    return tcp_cfg.initial_cwnd_multiplier * tc->snd_mss;
883
884  if (tc->snd_mss > 2190)
885    return 2 * tc->snd_mss;
886  else if (tc->snd_mss > 1095)
887    return 3 * tc->snd_mss;
888  else
889    return 4 * tc->snd_mss;
890}
891
892/*
893 * Accumulate acked bytes for cwnd increase
894 *
895 * Once threshold bytes are accumulated, snd_mss bytes are added
896 * to the cwnd.
897 */
898always_inline void
899tcp_cwnd_accumulate (tcp_connection_t * tc, u32 thresh, u32 bytes)
900{
901  tc->cwnd_acc_bytes += bytes;
902  if (tc->cwnd_acc_bytes >= thresh)
903    {
904      u32 inc = tc->cwnd_acc_bytes / thresh;
905      tc->cwnd_acc_bytes -= inc * thresh;
906      tc->cwnd += inc * tc->snd_mss;
907      tc->cwnd = clib_min (tc->cwnd, tc->tx_fifo_size);
908    }
909}
910
911always_inline u32
912tcp_loss_wnd (const tcp_connection_t * tc)
913{
914  /* Whatever we have in flight + the packet we're about to send */
915  return tcp_flight_size (tc) + tc->snd_mss;
916}
917
918always_inline u32
919tcp_available_snd_wnd (const tcp_connection_t * tc)
920{
921  return clib_min (tc->cwnd, tc->snd_wnd);
922}
923
924always_inline u32
925tcp_available_output_snd_space (const tcp_connection_t * tc)
926{
927  u32 available_wnd = tcp_available_snd_wnd (tc);
928  int flight_size = (int) (tc->snd_nxt - tc->snd_una);
929
930  if (available_wnd <= flight_size)
931    return 0;
932
933  return available_wnd - flight_size;
934}
935
936/**
937 * Estimate of how many bytes we can still push into the network
938 */
939always_inline u32
940tcp_available_cc_snd_space (const tcp_connection_t * tc)
941{
942  u32 available_wnd = tcp_available_snd_wnd (tc);
943  u32 flight_size = tcp_flight_size (tc);
944
945  if (available_wnd <= flight_size)
946    return 0;
947
948  return available_wnd - flight_size;
949}
950
951always_inline u8
952tcp_is_lost_fin (tcp_connection_t * tc)
953{
954  if ((tc->flags & TCP_CONN_FINSNT) && (tc->snd_una_max - tc->snd_una == 1))
955    return 1;
956  return 0;
957}
958
/* Total bytes we may currently send; see tcp.c for the full policy */
u32 tcp_snd_space (tcp_connection_t * tc);
//void tcp_cc_init_congestion (tcp_connection_t * tc);
//void tcp_cc_fastrecovery_clear (tcp_connection_t * tc);

/* Resolve the connection's remote address in the FIB */
fib_node_index_t tcp_lookup_rmt_in_fib (tcp_connection_t * tc);

/* Made public for unit testing only */
void tcp_update_sack_list (tcp_connection_t * tc, u32 start, u32 end);
u32 tcp_sack_list_bytes (tcp_connection_t * tc);
968
969always_inline u32
970tcp_time_now (void)
971{
972  return tcp_main.wrk_ctx[vlib_get_thread_index ()].time_now;
973}
974
975always_inline u32
976tcp_time_now_w_thread (u32 thread_index)
977{
978  return tcp_main.wrk_ctx[thread_index].time_now;
979}
980
981/**
982 * Generate timestamp for tcp connection
983 */
984always_inline u32
985tcp_tstamp (tcp_connection_t * tc)
986{
987  return (tcp_main.wrk_ctx[tc->c_thread_index].time_now -
988	  tc->timestamp_delta);
989}
990
/* High-resolution time, delegated to the transport layer clock */
always_inline f64
tcp_time_now_us (u32 thread_index)
{
  return transport_time_now (thread_index);
}
996
/* Refresh the worker's cached tcp time from the cpu time counter and
 * return the new value */
always_inline u32
tcp_set_time_now (tcp_worker_ctx_t * wrk)
{
  wrk->time_now = clib_cpu_time_now () * tcp_main.tstamp_ticks_per_clock;
  return wrk->time_now;
}
1003
/* Session layer hooks: header push and custom (retransmit) tx */
u32 tcp_session_push_header (transport_connection_t * tconn,
			     vlib_buffer_t * b);
int tcp_session_custom_tx (void *conn, u32 max_burst_size);

/* Connection setup/teardown helpers implemented in tcp.c */
void tcp_connection_timers_init (tcp_connection_t * tc);
void tcp_connection_timers_reset (tcp_connection_t * tc);
void tcp_init_snd_vars (tcp_connection_t * tc);
void tcp_connection_init_vars (tcp_connection_t * tc);
void tcp_connection_tx_pacer_update (tcp_connection_t * tc);
void tcp_connection_tx_pacer_reset (tcp_connection_t * tc, u32 window,
				    u32 start_bucket);
1015
/* Notify cc algo of a new ack and record the echoed timestamp after the
 * hook runs, so the algo still sees the previous tsecr_last_ack */
always_inline void
tcp_cc_rcv_ack (tcp_connection_t * tc, tcp_rate_sample_t * rs)
{
  tc->cc_algo->rcv_ack (tc, rs);
  tc->tsecr_last_ack = tc->rcv_opts.tsecr;
}
1022
/* Dispatch ack-during-congestion event to the cc algorithm */
static inline void
tcp_cc_rcv_cong_ack (tcp_connection_t * tc, tcp_cc_ack_t ack_type,
		     tcp_rate_sample_t * rs)
{
  tc->cc_algo->rcv_cong_ack (tc, ack_type, rs);
}
1029
/* Dispatch congestion-detected event to the cc algorithm */
static inline void
tcp_cc_congestion (tcp_connection_t * tc)
{
  tc->cc_algo->congestion (tc);
}
1035
/* Dispatch rto/loss event to the cc algorithm */
static inline void
tcp_cc_loss (tcp_connection_t * tc)
{
  tc->cc_algo->loss (tc);
}
1041
/* Dispatch recovery-completed event to the cc algorithm */
static inline void
tcp_cc_recovered (tcp_connection_t * tc)
{
  tc->cc_algo->recovered (tc);
}
1047
/* Dispatch spurious-recovery undo; optional hook, may be unset */
static inline void
tcp_cc_undo_recovery (tcp_connection_t * tc)
{
  if (tc->cc_algo->undo_recovery)
    tc->cc_algo->undo_recovery (tc);
}
1054
/* Dispatch generic cc event; optional hook, may be unset */
static inline void
tcp_cc_event (tcp_connection_t * tc, tcp_cc_event_t evt)
{
  if (tc->cc_algo->event)
    tc->cc_algo->event (tc, evt);
}
1061
/* Pacing rate in bytes/s: provided by the cc algorithm when it has a
 * get_pacing_rate hook, otherwise derived as cwnd / srtt */
static inline u64
tcp_cc_get_pacing_rate (tcp_connection_t * tc)
{
  if (tc->cc_algo->get_pacing_rate)
    return tc->cc_algo->get_pacing_rate (tc);

  /* Use the lower of the tick-resolution and us-resolution rtt estimates.
   * NOTE(review): assumes srtt is non-zero by the time pacing is queried;
   * confirm callers never reach this before an rtt sample is taken. */
  f64 srtt = clib_min ((f64) tc->srtt * TCP_TICK, tc->mrtt_us);

  /* TODO should constrain to interface's max throughput but
   * we don't have link speeds for sw ifs ..*/
  return ((f64) tc->cwnd / srtt);
}
1074
/**
 * Start a per-connection timer. The timer must not already be running.
 *
 * @param tc		connection; must be owned by the calling thread
 * @param timer_id	TCP_TIMER_* slot to arm
 * @param interval	timeout in timer-wheel ticks
 */
always_inline void
tcp_timer_set (tcp_connection_t * tc, u8 timer_id, u32 interval)
{
  ASSERT (tc->c_thread_index == vlib_get_thread_index ());
  ASSERT (tc->timers[timer_id] == TCP_TIMER_HANDLE_INVALID);
  tc->timers[timer_id] =
    tw_timer_start_16t_2w_512sl (&tcp_main.
				 wrk_ctx[tc->c_thread_index].timer_wheel,
				 tc->c_c_index, timer_id, interval);
}
1085
/**
 * Stop a per-connection timer if running; no-op if it is not armed.
 * Invalidates the stored handle after stopping the wheel entry.
 */
always_inline void
tcp_timer_reset (tcp_connection_t * tc, u8 timer_id)
{
  ASSERT (tc->c_thread_index == vlib_get_thread_index ());
  if (tc->timers[timer_id] == TCP_TIMER_HANDLE_INVALID)
    return;

  tw_timer_stop_16t_2w_512sl (&tcp_main.
			      wrk_ctx[tc->c_thread_index].timer_wheel,
			      tc->timers[timer_id]);
  tc->timers[timer_id] = TCP_TIMER_HANDLE_INVALID;
}
1098
/**
 * (Re)arm a per-connection timer: update the expiry if it is already
 * running, otherwise start it.
 */
always_inline void
tcp_timer_update (tcp_connection_t * tc, u8 timer_id, u32 interval)
{
  ASSERT (tc->c_thread_index == vlib_get_thread_index ());
  if (tc->timers[timer_id] != TCP_TIMER_HANDLE_INVALID)
    tw_timer_update_16t_2w_512sl (&tcp_main.
				  wrk_ctx[tc->c_thread_index].timer_wheel,
				  tc->timers[timer_id], interval);
  else
    tc->timers[timer_id] =
      tw_timer_start_16t_2w_512sl (&tcp_main.
				   wrk_ctx[tc->c_thread_index].timer_wheel,
				   tc->c_c_index, timer_id, interval);
}
1113
/* Arm the retransmit timer with the current rto (at least one tick).
 * Only valid while there is unacked data outstanding. */
always_inline void
tcp_retransmit_timer_set (tcp_connection_t * tc)
{
  ASSERT (tc->snd_una != tc->snd_una_max);
  tcp_timer_set (tc, TCP_TIMER_RETRANSMIT,
		 clib_max (tc->rto * TCP_TO_TIMER_TICK, 1));
}
1121
/* Stop the retransmit timer if running */
always_inline void
tcp_retransmit_timer_reset (tcp_connection_t * tc)
{
  tcp_timer_reset (tc, TCP_TIMER_RETRANSMIT);
}
1127
/* (Re)arm the retransmit timer with the current rto regardless of the
 * amount of outstanding data */
always_inline void
tcp_retransmit_timer_force_update (tcp_connection_t * tc)
{
  tcp_timer_update (tc, TCP_TIMER_RETRANSMIT,
		    clib_max (tc->rto * TCP_TO_TIMER_TICK, 1));
}
1134
/* Arm the zero-window persist timer */
always_inline void
tcp_persist_timer_set (tcp_connection_t * tc)
{
  /* Reuse RTO. It's backed off in handler */
  tcp_timer_set (tc, TCP_TIMER_PERSIST,
		 clib_max (tc->rto * TCP_TO_TIMER_TICK, 1));
}
1142
/* (Re)arm the zero-window persist timer with the current rto */
always_inline void
tcp_persist_timer_update (tcp_connection_t * tc)
{
  tcp_timer_update (tc, TCP_TIMER_PERSIST,
		    clib_max (tc->rto * TCP_TO_TIMER_TICK, 1));
}
1149
/* Stop the zero-window persist timer if running */
always_inline void
tcp_persist_timer_reset (tcp_connection_t * tc)
{
  tcp_timer_reset (tc, TCP_TIMER_PERSIST);
}
1155
/**
 * Keep the retransmit timer consistent with the outstanding data.
 *
 * If everything sent has been acked, stop retransmit and, when the
 * peer's window is smaller than one segment, arm persist instead so
 * zero-window probing continues. Otherwise (re)arm retransmit with the
 * current rto.
 */
always_inline void
tcp_retransmit_timer_update (tcp_connection_t * tc)
{
  if (tc->snd_una == tc->snd_nxt)
    {
      tcp_retransmit_timer_reset (tc);
      if (tc->snd_wnd < tc->snd_mss)
	tcp_persist_timer_update (tc);
    }
  else
    tcp_timer_update (tc, TCP_TIMER_RETRANSMIT,
		      clib_max (tc->rto * TCP_TO_TIMER_TICK, 1));
}
1169
1170always_inline u8
1171tcp_timer_is_active (tcp_connection_t * tc, tcp_timers_e timer)
1172{
1173  return tc->timers[timer] != TCP_TIMER_HANDLE_INVALID;
1174}
1175
/* Sanity check: while ESTABLISHED, the tx fifo must hold at least _a
 * bytes, i.e. we never account for more data than can be dequeued */
#define tcp_validate_txf_size(_tc, _a) 					\
  ASSERT(_tc->state != TCP_STATE_ESTABLISHED 				\
	 || transport_max_tx_dequeue (&_tc->connection) >= _a)
1179
/* SACK scoreboard processing and debug replay (implemented in tcp_input.c) */
void tcp_rcv_sacks (tcp_connection_t * tc, u32 ack);
u8 *tcp_scoreboard_replay (u8 * s, tcp_connection_t * tc, u8 verbose);

/**
 * Register existing cc algo type
 */
void tcp_cc_algo_register (tcp_cc_algorithm_type_e type,
			   const tcp_cc_algorithm_t * vft);

/**
 * Register new cc algo type
 */
tcp_cc_algorithm_type_e tcp_cc_algo_new_type (const tcp_cc_algorithm_t * vft);
tcp_cc_algorithm_t *tcp_cc_algo_get (tcp_cc_algorithm_type_e type);
1194
1195static inline void *
1196tcp_cc_data (tcp_connection_t * tc)
1197{
1198  return (void *) tc->cc_data;
1199}
1200
/* Newreno congestion-ack handler, shared with other cc algorithms */
void newreno_rcv_cong_ack (tcp_connection_t * tc, tcp_cc_ack_t ack_type,
			   tcp_rate_sample_t * rs);
1203
1204/**
1205 * Push TCP header to buffer
1206 *
1207 * @param vm - vlib_main
1208 * @param b - buffer to write the header to
1209 * @param sp_net - source port net order
1210 * @param dp_net - destination port net order
1211 * @param seq - sequence number net order
1212 * @param ack - ack number net order
1213 * @param tcp_hdr_opts_len - header and options length in bytes
1214 * @param flags - header flags
1215 * @param wnd - window size
1216 *
1217 * @return - pointer to start of TCP header
1218 */
1219always_inline void *
1220vlib_buffer_push_tcp_net_order (vlib_buffer_t * b, u16 sp, u16 dp, u32 seq,
1221				u32 ack, u8 tcp_hdr_opts_len, u8 flags,
1222				u16 wnd)
1223{
1224  tcp_header_t *th;
1225
1226  th = vlib_buffer_push_uninit (b, tcp_hdr_opts_len);
1227
1228  th->src_port = sp;
1229  th->dst_port = dp;
1230  th->seq_number = seq;
1231  th->ack_number = ack;
1232  th->data_offset_and_reserved = (tcp_hdr_opts_len >> 2) << 4;
1233  th->flags = flags;
1234  th->window = wnd;
1235  th->checksum = 0;
1236  th->urgent_pointer = 0;
1237  vnet_buffer (b)->l4_hdr_offset = (u8 *) th - b->data;
1238  b->flags |= VNET_BUFFER_F_L4_HDR_OFFSET_VALID;
1239  return th;
1240}
1241
1242/**
1243 * Push TCP header to buffer
1244 *
1245 * @param b - buffer to write the header to
1246 * @param sp_net - source port net order
1247 * @param dp_net - destination port net order
1248 * @param seq - sequence number host order
1249 * @param ack - ack number host order
1250 * @param tcp_hdr_opts_len - header and options length in bytes
1251 * @param flags - header flags
1252 * @param wnd - window size
1253 *
1254 * @return - pointer to start of TCP header
1255 */
1256always_inline void *
1257vlib_buffer_push_tcp (vlib_buffer_t * b, u16 sp_net, u16 dp_net, u32 seq,
1258		      u32 ack, u8 tcp_hdr_opts_len, u8 flags, u16 wnd)
1259{
1260  return vlib_buffer_push_tcp_net_order (b, sp_net, dp_net,
1261					 clib_host_to_net_u32 (seq),
1262					 clib_host_to_net_u32 (ack),
1263					 tcp_hdr_opts_len, flags,
1264					 clib_host_to_net_u16 (wnd));
1265}
1266
1267#endif /* _vnet_tcp_h_ */
1268
1269/*
1270 * fd.io coding-style-patch-verification: ON
1271 *
1272 * Local Variables:
1273 * eval: (c-set-style "gnu")
1274 * End:
1275 */
1276