tcp_input.c revision 11e9e351
/*
 * Copyright (c) 2016-2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vppinfra/sparse_vec.h>
#include <vnet/fib/ip4_fib.h>
#include <vnet/fib/ip6_fib.h>
#include <vnet/tcp/tcp_packet.h>
#include <vnet/tcp/tcp.h>
#include <vnet/session/session.h>
#include <math.h>

static char *tcp_error_strings[] = {
#define tcp_error(n,s) s,
#include <vnet/tcp/tcp_error.def>
#undef tcp_error
};

/* All TCP nodes have the same outgoing arcs */
#define foreach_tcp_state_next                  \
  _ (DROP4, "ip4-drop")                         \
  _ (DROP6, "ip6-drop")                         \
  _ (TCP4_OUTPUT, "tcp4-output")                \
  _ (TCP6_OUTPUT, "tcp6-output")

typedef enum _tcp_established_next
{
#define _(s,n) TCP_ESTABLISHED_NEXT_##s,
  foreach_tcp_state_next
#undef _
    TCP_ESTABLISHED_N_NEXT,
} tcp_established_next_t;

typedef enum _tcp_rcv_process_next
{
#define _(s,n) TCP_RCV_PROCESS_NEXT_##s,
  foreach_tcp_state_next
#undef _
    TCP_RCV_PROCESS_N_NEXT,
} tcp_rcv_process_next_t;

typedef enum _tcp_syn_sent_next
{
#define _(s,n) TCP_SYN_SENT_NEXT_##s,
  foreach_tcp_state_next
#undef _
    TCP_SYN_SENT_N_NEXT,
} tcp_syn_sent_next_t;

typedef enum _tcp_listen_next
{
#define _(s,n) TCP_LISTEN_NEXT_##s,
  foreach_tcp_state_next
#undef _
    TCP_LISTEN_N_NEXT,
} tcp_listen_next_t;

/* Generic, state independent indices */
typedef enum _tcp_state_next
{
#define _(s,n) TCP_NEXT_##s,
  foreach_tcp_state_next
#undef _
    TCP_STATE_N_NEXT,
} tcp_state_next_t;

#define tcp_next_output(is_ip4) (is_ip4 ? TCP_NEXT_TCP4_OUTPUT          \
                                        : TCP_NEXT_TCP6_OUTPUT)

#define tcp_next_drop(is_ip4) (is_ip4 ? TCP_NEXT_DROP4                  \
                                      : TCP_NEXT_DROP6)

/**
 * Validate segment sequence number. As per RFC793:
 *
 * Segment Receive Test
 *      Length  Window
 *      ------- -------  -------------------------------------------
 *      0       0       SEG.SEQ = RCV.NXT
 *      0       >0      RCV.NXT =< SEG.SEQ < RCV.NXT+RCV.WND
 *      >0      0       not acceptable
 *      >0      >0      RCV.NXT =< SEG.SEQ < RCV.NXT+RCV.WND
 *                      or RCV.NXT =< SEG.SEQ+SEG.LEN-1 < RCV.NXT+RCV.WND
 *
 * This ultimately consists of checking whether the segment falls within
 * the window. The one important difference compared to RFC793 is that we
 * use rcv_las, i.e., the rcv_nxt at the last ack sent, instead of rcv_nxt,
 * since that's the peer's reference when computing our receive window.
 *
 * This:
 *  seq_leq (end_seq, tc->rcv_las + tc->rcv_wnd) && seq_geq (seq, tc->rcv_las)
 * however, is too strict when we have retransmits. Instead we just check that
 * the seq is not beyond the right edge and that the end of the segment is not
 * less than the left edge.
 *
 * N.B. rcv_nxt and rcv_wnd are both updated in this node if acks are sent, so
 * use rcv_nxt in the right edge window test instead of rcv_las.
 *
 */
always_inline u8
tcp_segment_in_rcv_wnd (tcp_connection_t * tc, u32 seq, u32 end_seq)
{
  return (seq_geq (end_seq, tc->rcv_las)
	  && seq_leq (seq, tc->rcv_nxt + tc->rcv_wnd));
}
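
/* Worked example (hypothetical values): with rcv_las = 1000, rcv_nxt = 1500
 * and rcv_wnd = 10000, a retransmitted segment [900, 1200) is accepted since
 * its end (1200) is not below the left edge (rcv_las = 1000) and its start
 * (900) does not exceed the right edge (rcv_nxt + rcv_wnd = 11500). The
 * strict RFC793 test above would have rejected it because 900 < rcv_las. */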

/**
 * Parse TCP header options.
 *
 * @param th TCP header
 * @param to TCP options data structure to be populated
 * @param is_syn set if packet is syn
 * @return -1 if parsing failed
 */
static inline int
tcp_options_parse (tcp_header_t * th, tcp_options_t * to, u8 is_syn)
{
  const u8 *data;
  u8 opt_len, opts_len, kind;
  int j;
  sack_block_t b;

  opts_len = (tcp_doff (th) << 2) - sizeof (tcp_header_t);
  data = (const u8 *) (th + 1);

  /* Zero out all flags but those set in SYN */
  to->flags &= (TCP_OPTS_FLAG_SACK_PERMITTED | TCP_OPTS_FLAG_WSCALE
		| TCP_OPTS_FLAG_TSTAMP | TCP_OPTS_FLAG_MSS);

  for (; opts_len > 0; opts_len -= opt_len, data += opt_len)
    {
      kind = data[0];

      /* Get options length */
      if (kind == TCP_OPTION_EOL)
	break;
      else if (kind == TCP_OPTION_NOOP)
	{
	  opt_len = 1;
	  continue;
	}
      else
	{
	  /* broken options */
	  if (opts_len < 2)
	    return -1;
	  opt_len = data[1];

	  /* weird option length */
	  if (opt_len < 2 || opt_len > opts_len)
	    return -1;
	}

      /* Parse options */
      switch (kind)
	{
	case TCP_OPTION_MSS:
	  if (!is_syn)
	    break;
	  if ((opt_len == TCP_OPTION_LEN_MSS) && tcp_syn (th))
	    {
	      to->flags |= TCP_OPTS_FLAG_MSS;
	      to->mss = clib_net_to_host_u16 (*(u16 *) (data + 2));
	    }
	  break;
	case TCP_OPTION_WINDOW_SCALE:
	  if (!is_syn)
	    break;
	  if ((opt_len == TCP_OPTION_LEN_WINDOW_SCALE) && tcp_syn (th))
	    {
	      to->flags |= TCP_OPTS_FLAG_WSCALE;
	      to->wscale = data[2];
	      if (to->wscale > TCP_MAX_WND_SCALE)
		to->wscale = TCP_MAX_WND_SCALE;
	    }
	  break;
	case TCP_OPTION_TIMESTAMP:
	  if (is_syn)
	    to->flags |= TCP_OPTS_FLAG_TSTAMP;
	  if ((to->flags & TCP_OPTS_FLAG_TSTAMP)
	      && opt_len == TCP_OPTION_LEN_TIMESTAMP)
	    {
	      to->tsval = clib_net_to_host_u32 (*(u32 *) (data + 2));
	      to->tsecr = clib_net_to_host_u32 (*(u32 *) (data + 6));
	    }
	  break;
	case TCP_OPTION_SACK_PERMITTED:
	  if (!is_syn)
	    break;
	  if (opt_len == TCP_OPTION_LEN_SACK_PERMITTED && tcp_syn (th))
	    to->flags |= TCP_OPTS_FLAG_SACK_PERMITTED;
	  break;
	case TCP_OPTION_SACK_BLOCK:
	  /* If SACK permitted was not advertised or a SYN, break */
	  if ((to->flags & TCP_OPTS_FLAG_SACK_PERMITTED) == 0 || tcp_syn (th))
	    break;

	  /* If too short or not correctly formatted, break */
	  if (opt_len < 10 || ((opt_len - 2) % TCP_OPTION_LEN_SACK_BLOCK))
	    break;

	  to->flags |= TCP_OPTS_FLAG_SACK;
	  to->n_sack_blocks = (opt_len - 2) / TCP_OPTION_LEN_SACK_BLOCK;
	  vec_reset_length (to->sacks);
	  for (j = 0; j < to->n_sack_blocks; j++)
	    {
	      b.start = clib_net_to_host_u32 (*(u32 *) (data + 2 + 8 * j));
	      b.end = clib_net_to_host_u32 (*(u32 *) (data + 6 + 8 * j));
	      vec_add1 (to->sacks, b);
	    }
	  break;
	default:
	  /* Nothing to see here */
	  continue;
	}
    }
  return 0;
}
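
/* Example (hypothetical SYN option bytes): for
 *   02 04 05 b4 01 03 03 07
 * the loop above parses MSS (kind 2, len 4, mss = 0x05b4 = 1460), a NOP
 * (kind 1, advancing a single byte) and window scale (kind 3, len 3,
 * wscale = 7), setting TCP_OPTS_FLAG_MSS and TCP_OPTS_FLAG_WSCALE. */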

/**
 * RFC1323: Check against wrapped sequence numbers (PAWS). If we have a
 * timestamp to echo and it's less than tsval_recent, drop the segment,
 * but still send an ACK in order to retain TCP's mechanism for detecting
 * and recovering from half-open connections
 *
 * Or at least that's what the theory says. It seems that this might not work
 * very well with packet reordering and fast retransmit. XXX
 */
always_inline int
tcp_segment_check_paws (tcp_connection_t * tc)
{
  return tcp_opts_tstamp (&tc->rcv_opts)
    && timestamp_lt (tc->rcv_opts.tsval, tc->tsval_recent);
}
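
/* Example (hypothetical values): with tsval_recent = 5000, a segment
 * carrying tsval = 4990 fails the check above and is treated as wrapped
 * or stale, while a segment with tsval >= 5000 passes. */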

/**
 * Update tsval recent
 */
always_inline void
tcp_update_timestamp (tcp_connection_t * tc, u32 seq, u32 seq_end)
{
  /*
   * RFC1323: If Last.ACK.sent falls within the range of sequence numbers
   * of an incoming segment:
   *    SEG.SEQ <= Last.ACK.sent < SEG.SEQ + SEG.LEN
   * then the TSval from the segment is copied to TS.Recent;
   * otherwise, the TSval is ignored.
   */
  if (tcp_opts_tstamp (&tc->rcv_opts) && seq_leq (seq, tc->rcv_las)
      && seq_leq (tc->rcv_las, seq_end))
    {
      ASSERT (timestamp_leq (tc->tsval_recent, tc->rcv_opts.tsval));
      tc->tsval_recent = tc->rcv_opts.tsval;
      tc->tsval_recent_age = tcp_time_now_w_thread (tc->c_thread_index);
    }
}

/**
 * Validate incoming segment as per RFC793 p. 69 and RFC1323 p. 19
 *
 * It first verifies if the segment has a wrapped sequence number (PAWS) and
 * then does the processing associated with the first four steps (ignoring
 * security and precedence): sequence number, rst bit and syn bit checks.
 *
 * @return 0 if segment passes validation.
 */
static int
tcp_segment_validate (tcp_worker_ctx_t * wrk, tcp_connection_t * tc0,
		      vlib_buffer_t * b0, tcp_header_t * th0, u32 * error0)
{
  /* We could get a burst of RSTs interleaved with acks */
  if (PREDICT_FALSE (tc0->state == TCP_STATE_CLOSED))
    {
      tcp_send_reset (tc0);
      *error0 = TCP_ERROR_CONNECTION_CLOSED;
      goto error;
    }

  if (PREDICT_FALSE (!tcp_ack (th0) && !tcp_rst (th0) && !tcp_syn (th0)))
    {
      *error0 = TCP_ERROR_SEGMENT_INVALID;
      goto error;
    }

  if (PREDICT_FALSE (tcp_options_parse (th0, &tc0->rcv_opts, 0)))
    {
      *error0 = TCP_ERROR_OPTIONS;
      goto error;
    }

  if (PREDICT_FALSE (tcp_segment_check_paws (tc0)))
    {
      *error0 = TCP_ERROR_PAWS;
      TCP_EVT (TCP_EVT_PAWS_FAIL, tc0, vnet_buffer (b0)->tcp.seq_number,
	       vnet_buffer (b0)->tcp.seq_end);

      /* If it just so happens that a segment over 24 days old updates
       * tsval_recent, invalidate tsval_recent. */
      if (timestamp_lt (tc0->tsval_recent_age + TCP_PAWS_IDLE,
			tcp_time_now_w_thread (tc0->c_thread_index)))
	{
	  tc0->tsval_recent = tc0->rcv_opts.tsval;
	  clib_warning ("paws failed: 24-day old segment");
	}
      /* Drop after ack if not rst. Resets can fail paws check as per
       * RFC 7323 sec. 5.2: When an <RST> segment is received, it MUST NOT
       * be subjected to the PAWS check by verifying an acceptable value in
       * SEG.TSval */
      else if (!tcp_rst (th0))
	{
	  tcp_program_ack (tc0);
	  TCP_EVT (TCP_EVT_DUPACK_SENT, tc0, vnet_buffer (b0)->tcp);
	  goto error;
	}
    }

  /* 1st: check sequence number */
  if (!tcp_segment_in_rcv_wnd (tc0, vnet_buffer (b0)->tcp.seq_number,
			       vnet_buffer (b0)->tcp.seq_end))
    {
      /* SYN/SYN-ACK retransmit */
      if (tcp_syn (th0)
	  && vnet_buffer (b0)->tcp.seq_number == tc0->rcv_nxt - 1)
	{
	  tcp_options_parse (th0, &tc0->rcv_opts, 1);
	  if (tc0->state == TCP_STATE_SYN_RCVD)
	    {
	      tcp_send_synack (tc0);
	      TCP_EVT (TCP_EVT_SYN_RCVD, tc0, 0);
	      *error0 = TCP_ERROR_SYNS_RCVD;
	    }
	  else
	    {
	      tcp_program_ack (tc0);
	      TCP_EVT (TCP_EVT_SYNACK_RCVD, tc0);
	      *error0 = TCP_ERROR_SYN_ACKS_RCVD;
	    }
	  goto error;
	}

      /* If our window is 0 and the packet is in sequence, let it pass
       * through for ack processing. It should be dropped later. */
      if (tc0->rcv_wnd < tc0->snd_mss
	  && tc0->rcv_nxt == vnet_buffer (b0)->tcp.seq_number)
	goto check_reset;

      /* If we entered recovery and peer did so as well, there's a chance that
       * dup acks won't be acceptable on either end because seq_end may be less
       * than rcv_las. This can happen if acks are lost in both directions. */
      if (tcp_in_recovery (tc0)
	  && seq_geq (vnet_buffer (b0)->tcp.seq_number,
		      tc0->rcv_las - tc0->rcv_wnd)
	  && seq_leq (vnet_buffer (b0)->tcp.seq_end,
		      tc0->rcv_nxt + tc0->rcv_wnd))
	goto check_reset;

      *error0 = TCP_ERROR_RCV_WND;

      /* If we advertised a zero rcv_wnd and the segment is in the past or the
       * next one that we expect, it is probably a window probe */
      if ((tc0->flags & TCP_CONN_ZERO_RWND_SENT)
	  && seq_lt (vnet_buffer (b0)->tcp.seq_end,
		     tc0->rcv_las + tc0->rcv_opts.mss))
	*error0 = TCP_ERROR_ZERO_RWND;

      tc0->errors.below_data_wnd += seq_lt (vnet_buffer (b0)->tcp.seq_end,
					    tc0->rcv_las);

      /* If not RST, send dup ack */
      if (!tcp_rst (th0))
	{
	  tcp_program_dupack (tc0);
	  TCP_EVT (TCP_EVT_DUPACK_SENT, tc0, vnet_buffer (b0)->tcp);
	}
      goto error;

    check_reset:
      ;
    }

  /* 2nd: check the RST bit */
  if (PREDICT_FALSE (tcp_rst (th0)))
    {
      tcp_connection_reset (tc0);
      *error0 = TCP_ERROR_RST_RCVD;
      goto error;
    }

  /* 3rd: check security and precedence (skip) */

  /* 4th: check the SYN bit (in window) */
  if (PREDICT_FALSE (tcp_syn (th0)))
    {
      /* As per RFC5961 send challenge ack instead of reset */
      tcp_program_ack (tc0);
      *error0 = TCP_ERROR_SPURIOUS_SYN;
      goto error;
    }

  /* If segment in window, save timestamp */
  tcp_update_timestamp (tc0, vnet_buffer (b0)->tcp.seq_number,
			vnet_buffer (b0)->tcp.seq_end);
  return 0;

error:
  return -1;
}

always_inline int
tcp_rcv_ack_no_cc (tcp_connection_t * tc, vlib_buffer_t * b, u32 * error)
{
  /* SND.UNA =< SEG.ACK =< SND.NXT */
  if (!(seq_leq (tc->snd_una, vnet_buffer (b)->tcp.ack_number)
	&& seq_leq (vnet_buffer (b)->tcp.ack_number, tc->snd_nxt)))
    {
      if (seq_leq (vnet_buffer (b)->tcp.ack_number, tc->snd_una_max)
	  && seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_una))
	{
	  tc->snd_nxt = vnet_buffer (b)->tcp.ack_number;
	  goto acceptable;
	}
      *error = TCP_ERROR_ACK_INVALID;
      return -1;
    }

acceptable:
  tc->bytes_acked = vnet_buffer (b)->tcp.ack_number - tc->snd_una;
  tc->snd_una = vnet_buffer (b)->tcp.ack_number;
  *error = TCP_ERROR_ACK_OK;
  return 0;
}

/**
 * Compute smoothed RTT as per VJ's '88 SIGCOMM and RFC6298
 *
 * Note that although in the original article srtt and rttvar are scaled
 * to minimize round-off errors, here we don't scale. Instead, we rely on
 * better precision time measurements.
 *
 * TODO support us rtt resolution
 */
static void
tcp_estimate_rtt (tcp_connection_t * tc, u32 mrtt)
{
  int err, diff;

  if (tc->srtt != 0)
    {
      err = mrtt - tc->srtt;

      /* XXX Drop in RTT results in RTTVAR increase and bigger RTO.
       * The increase should be bound */
      tc->srtt = clib_max ((int) tc->srtt + (err >> 3), 1);
      diff = (clib_abs (err) - (int) tc->rttvar) >> 2;
      tc->rttvar = clib_max ((int) tc->rttvar + diff, 1);
    }
  else
    {
      /* First measurement. */
      tc->srtt = mrtt;
      tc->rttvar = mrtt >> 1;
    }
}
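
/* Numeric sketch (hypothetical values): with srtt = 100 and rttvar = 20,
 * a measurement mrtt = 116 gives err = 16, so srtt becomes
 * 100 + (16 >> 3) = 102 and rttvar becomes 20 + ((16 - 20) >> 2) = 19,
 * i.e. the RFC6298 srtt += err/8 and rttvar += (|err| - rttvar)/4 updates. */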

#ifndef CLIB_MARCH_VARIANT
void
tcp_update_rto (tcp_connection_t * tc)
{
  tc->rto = clib_min (tc->srtt + (tc->rttvar << 2), TCP_RTO_MAX);
  tc->rto = clib_max (tc->rto, TCP_RTO_MIN);
}
#endif /* CLIB_MARCH_VARIANT */
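
/* Continuing the sketch above: srtt = 102 and rttvar = 19 yield
 * rto = 102 + (19 << 2) = 178 ticks, clamped to [TCP_RTO_MIN, TCP_RTO_MAX]. */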

/**
 * Update RTT estimate and RTO timer
 *
 * Measure RTT: We have two sources of RTT measurements: TSOPT and ACK
 * timing. Middle boxes are known to fiddle with TCP options so we
 * should give higher priority to ACK timing.
 *
 * This should be called only if previously sent bytes have been acked.
 *
 * @return 0; note that the function currently always returns 0
 */
static int
tcp_update_rtt (tcp_connection_t * tc, tcp_rate_sample_t * rs, u32 ack)
{
  u32 mrtt = 0;

  /* Karn's rule, part 1. Don't use retransmitted segments to estimate
   * RTT because they're ambiguous. */
  if (tcp_in_cong_recovery (tc))
    {
      /* Accept rtt estimates for samples that have not been retransmitted */
      if ((tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
	  && !(rs->flags & TCP_BTS_IS_RXT))
	{
	  mrtt = rs->rtt_time * THZ;
	  goto estimate_rtt;
	}
      goto done;
    }

  if (tc->rtt_ts && seq_geq (ack, tc->rtt_seq))
    {
      f64 sample = tcp_time_now_us (tc->c_thread_index) - tc->rtt_ts;
      tc->mrtt_us = tc->mrtt_us + (sample - tc->mrtt_us) * 0.125;
      mrtt = clib_max ((u32) (sample * THZ), 1);
      /* Allow measuring of a new RTT */
      tc->rtt_ts = 0;
    }
  /* As per RFC7323 TSecr can be used for RTTM only if the segment advances
   * snd_una, i.e., the left side of the send window:
   * seq_lt (tc->snd_una, ack). This is a condition for calling update_rtt */
  else if (tcp_opts_tstamp (&tc->rcv_opts) && tc->rcv_opts.tsecr)
    {
      u32 now = tcp_tstamp (tc);
      mrtt = clib_max (now - tc->rcv_opts.tsecr, 1);
    }

estimate_rtt:

  /* Ignore dubious measurements */
  if (mrtt == 0 || mrtt > TCP_RTT_MAX)
    goto done;

  tcp_estimate_rtt (tc, mrtt);

done:

  /* If we got here something must've been acked, so make sure boff is 0,
   * even if mrtt is not valid, since we update the rto below */
  tc->rto_boff = 0;
  tcp_update_rto (tc);

  return 0;
}

static void
tcp_estimate_initial_rtt (tcp_connection_t * tc)
{
  u8 thread_index = vlib_num_workers ()? 1 : 0;
  int mrtt;

  if (tc->rtt_ts)
    {
      tc->mrtt_us = tcp_time_now_us (thread_index) - tc->rtt_ts;
      tc->mrtt_us = clib_max (tc->mrtt_us, 0.0001);
      mrtt = clib_max ((u32) (tc->mrtt_us * THZ), 1);
      tc->rtt_ts = 0;
    }
  else
    {
      mrtt = tcp_time_now_w_thread (thread_index) - tc->rcv_opts.tsecr;
      mrtt = clib_max (mrtt, 1);
      /* Due to retransmits we don't know the initial mrtt */
      if (tc->rto_boff && mrtt > 1 * THZ)
	mrtt = 1 * THZ;
      tc->mrtt_us = (f64) mrtt *TCP_TICK;
    }

  if (mrtt > 0 && mrtt < TCP_RTT_MAX)
    tcp_estimate_rtt (tc, mrtt);
  tcp_update_rto (tc);
}

/**
 * Dequeue bytes for connections that have received acks in last burst
 */
static void
tcp_handle_postponed_dequeues (tcp_worker_ctx_t * wrk)
{
  u32 thread_index = wrk->vm->thread_index;
  u32 *pending_deq_acked;
  tcp_connection_t *tc;
  int i;

  if (!vec_len (wrk->pending_deq_acked))
    return;

  pending_deq_acked = wrk->pending_deq_acked;
  for (i = 0; i < vec_len (pending_deq_acked); i++)
    {
      tc = tcp_connection_get (pending_deq_acked[i], thread_index);
      tc->flags &= ~TCP_CONN_DEQ_PENDING;

      if (PREDICT_FALSE (!tc->burst_acked))
	continue;

      /* Dequeue the newly ACKed bytes */
      session_tx_fifo_dequeue_drop (&tc->connection, tc->burst_acked);
      tcp_validate_txf_size (tc, tc->snd_una_max - tc->snd_una);

      if (PREDICT_FALSE (tc->flags & TCP_CONN_PSH_PENDING))
	{
	  if (seq_leq (tc->psh_seq, tc->snd_una))
	    tc->flags &= ~TCP_CONN_PSH_PENDING;
	}

      /* If everything has been acked, stop retransmit timer
       * otherwise update. */
      tcp_retransmit_timer_update (tc);

      /* Update pacer based on our new cwnd estimate */
      tcp_connection_tx_pacer_update (tc);

      tc->burst_acked = 0;
    }
  _vec_len (wrk->pending_deq_acked) = 0;
}

static void
tcp_program_dequeue (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
{
  if (!(tc->flags & TCP_CONN_DEQ_PENDING))
    {
      vec_add1 (wrk->pending_deq_acked, tc->c_c_index);
      tc->flags |= TCP_CONN_DEQ_PENDING;
    }
  tc->burst_acked += tc->bytes_acked;
}

#ifndef CLIB_MARCH_VARIANT
static u32
scoreboard_hole_index (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole)
{
  ASSERT (!pool_is_free_index (sb->holes, hole - sb->holes));
  return hole - sb->holes;
}

static u32
scoreboard_hole_bytes (sack_scoreboard_hole_t * hole)
{
  return hole->end - hole->start;
}

sack_scoreboard_hole_t *
scoreboard_get_hole (sack_scoreboard_t * sb, u32 index)
{
  if (index != TCP_INVALID_SACK_HOLE_INDEX)
    return pool_elt_at_index (sb->holes, index);
  return 0;
}

sack_scoreboard_hole_t *
scoreboard_next_hole (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole)
{
  if (hole->next != TCP_INVALID_SACK_HOLE_INDEX)
    return pool_elt_at_index (sb->holes, hole->next);
  return 0;
}

sack_scoreboard_hole_t *
scoreboard_prev_hole (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole)
{
  if (hole->prev != TCP_INVALID_SACK_HOLE_INDEX)
    return pool_elt_at_index (sb->holes, hole->prev);
  return 0;
}

sack_scoreboard_hole_t *
scoreboard_first_hole (sack_scoreboard_t * sb)
{
  if (sb->head != TCP_INVALID_SACK_HOLE_INDEX)
    return pool_elt_at_index (sb->holes, sb->head);
  return 0;
}

sack_scoreboard_hole_t *
scoreboard_last_hole (sack_scoreboard_t * sb)
{
  if (sb->tail != TCP_INVALID_SACK_HOLE_INDEX)
    return pool_elt_at_index (sb->holes, sb->tail);
  return 0;
}

static void
scoreboard_remove_hole (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole)
{
  sack_scoreboard_hole_t *next, *prev;

  if (hole->next != TCP_INVALID_SACK_HOLE_INDEX)
    {
      next = pool_elt_at_index (sb->holes, hole->next);
      next->prev = hole->prev;
    }
  else
    {
      sb->tail = hole->prev;
    }

  if (hole->prev != TCP_INVALID_SACK_HOLE_INDEX)
    {
      prev = pool_elt_at_index (sb->holes, hole->prev);
      prev->next = hole->next;
    }
  else
    {
      sb->head = hole->next;
    }

  if (scoreboard_hole_index (sb, hole) == sb->cur_rxt_hole)
    sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;

  /* Poison the entry */
  if (CLIB_DEBUG > 0)
    clib_memset (hole, 0xfe, sizeof (*hole));

  pool_put (sb->holes, hole);
}

static sack_scoreboard_hole_t *
scoreboard_insert_hole (sack_scoreboard_t * sb, u32 prev_index,
			u32 start, u32 end)
{
  sack_scoreboard_hole_t *hole, *next, *prev;
  u32 hole_index;

  pool_get (sb->holes, hole);
  clib_memset (hole, 0, sizeof (*hole));

  hole->start = start;
  hole->end = end;
  hole_index = scoreboard_hole_index (sb, hole);

  prev = scoreboard_get_hole (sb, prev_index);
  if (prev)
    {
      hole->prev = prev_index;
      hole->next = prev->next;

      if ((next = scoreboard_next_hole (sb, hole)))
	next->prev = hole_index;
      else
	sb->tail = hole_index;

      prev->next = hole_index;
    }
  else
    {
      sb->head = hole_index;
      hole->prev = TCP_INVALID_SACK_HOLE_INDEX;
      hole->next = TCP_INVALID_SACK_HOLE_INDEX;
    }

  return hole;
}
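
/* Illustration (hypothetical sequence space): after the peer SACKs
 * [200, 300) and [400, 500) with snd_una = 100 and snd_nxt = 600, the
 * scoreboard holds three holes linked head to tail:
 * [100, 200) -> [300, 400) -> [500, 600). scoreboard_insert_hole () splices
 * a new hole after prev_index, or at the head when prev_index is
 * TCP_INVALID_SACK_HOLE_INDEX. */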

always_inline void
scoreboard_update_sacked_rxt (sack_scoreboard_t * sb, u32 start, u32 end,
			      u8 has_rxt)
{
  if (!has_rxt || seq_geq (start, sb->high_rxt))
    return;

  sb->rxt_sacked +=
    seq_lt (end, sb->high_rxt) ? (end - start) : (sb->high_rxt - start);
}

always_inline void
scoreboard_update_bytes (sack_scoreboard_t * sb, u32 ack, u32 snd_mss)
{
  sack_scoreboard_hole_t *left, *right;
  u32 sacked = 0, blks = 0, old_sacked;

  old_sacked = sb->sacked_bytes;

  sb->last_lost_bytes = 0;
  sb->lost_bytes = 0;
  sb->sacked_bytes = 0;

  right = scoreboard_last_hole (sb);
  if (!right)
    {
      sb->sacked_bytes = sb->high_sacked - ack;
      return;
    }

  if (seq_gt (sb->high_sacked, right->end))
    {
      sacked = sb->high_sacked - right->end;
      blks = 1;
    }

  while (sacked < (TCP_DUPACK_THRESHOLD - 1) * snd_mss
	 && blks < TCP_DUPACK_THRESHOLD)
    {
      if (right->is_lost)
	sb->lost_bytes += scoreboard_hole_bytes (right);

      left = scoreboard_prev_hole (sb, right);
      if (!left)
	{
	  ASSERT (right->start == ack || sb->is_reneging);
	  sacked += right->start - ack;
	  right = 0;
	  break;
	}

      sacked += right->start - left->end;
      blks++;
      right = left;
    }

  /* right is first lost */
  while (right)
    {
      sb->lost_bytes += scoreboard_hole_bytes (right);
      sb->last_lost_bytes += right->is_lost ? 0 : (right->end - right->start);
      right->is_lost = 1;
      left = scoreboard_prev_hole (sb, right);
      if (!left)
	{
	  ASSERT (right->start == ack || sb->is_reneging);
	  sacked += right->start - ack;
	  break;
	}
      sacked += right->start - left->end;
      right = left;
    }

  sb->sacked_bytes = sacked;
  sb->last_sacked_bytes = sacked - (old_sacked - sb->last_bytes_delivered);
}
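
/* Using the three-hole illustration above with ack = 100 and
 * high_sacked = 500: walking from the tail, the gaps between holes
 * ([400, 500) and [200, 300)) give sacked = 200. Had the first loop
 * stopped early, i.e. enough sacked bytes/blocks per the dupack
 * threshold, the second loop would mark the remaining holes lost. */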

/**
 * Figure out the next hole to retransmit
 *
 * Follows logic proposed in RFC6675 Sec. 4, NextSeg()
 */
sack_scoreboard_hole_t *
scoreboard_next_rxt_hole (sack_scoreboard_t * sb,
			  sack_scoreboard_hole_t * start,
			  u8 have_unsent, u8 * can_rescue, u8 * snd_limited)
{
  sack_scoreboard_hole_t *hole = 0;

  hole = start ? start : scoreboard_first_hole (sb);
  while (hole && seq_leq (hole->end, sb->high_rxt) && hole->is_lost)
    hole = scoreboard_next_hole (sb, hole);

  /* Nothing, return */
  if (!hole)
    {
      sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
      return 0;
    }

  /* Rule (1): if higher than rxt, less than high_sacked and lost */
  if (hole->is_lost && seq_lt (hole->start, sb->high_sacked))
    {
      sb->cur_rxt_hole = scoreboard_hole_index (sb, hole);
    }
  else
    {
      /* Rule (2): available unsent data */
      if (have_unsent)
	{
	  sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
	  return 0;
	}
      /* Rule (3): if hole not lost */
      else if (seq_lt (hole->start, sb->high_sacked))
	{
	  /* And we didn't already retransmit it */
	  if (seq_leq (hole->end, sb->high_rxt))
	    {
	      sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
	      return 0;
	    }
	  *snd_limited = 0;
	  sb->cur_rxt_hole = scoreboard_hole_index (sb, hole);
	}
      /* Rule (4): if hole beyond high_sacked */
      else
	{
	  ASSERT (seq_geq (hole->start, sb->high_sacked));
	  *snd_limited = 1;
	  *can_rescue = 1;
	  /* HighRxt MUST NOT be updated */
	  return 0;
	}
    }

  if (hole && seq_lt (sb->high_rxt, hole->start))
    sb->high_rxt = hole->start;

  return hole;
}
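
/* Example (hypothetical state): with holes [100, 200) and [300, 400),
 * high_sacked = 400 and high_rxt = 100, rule (1) picks [100, 200) once it
 * is marked lost; otherwise rule (3) still allows it, since it starts below
 * high_sacked and was not fully retransmitted. A hole at or above
 * high_sacked only triggers the rescue path (rule (4)), leaving HighRxt
 * untouched. */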

void
scoreboard_init_rxt (sack_scoreboard_t * sb, u32 snd_una)
{
  sack_scoreboard_hole_t *hole;
  hole = scoreboard_first_hole (sb);
  if (hole)
    {
      snd_una = seq_gt (snd_una, hole->start) ? snd_una : hole->start;
      sb->cur_rxt_hole = sb->head;
    }
  sb->high_rxt = snd_una;
  sb->rescue_rxt = snd_una - 1;
}

void
scoreboard_init (sack_scoreboard_t * sb)
{
  sb->head = TCP_INVALID_SACK_HOLE_INDEX;
  sb->tail = TCP_INVALID_SACK_HOLE_INDEX;
  sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
}

void
scoreboard_clear (sack_scoreboard_t * sb)
{
  sack_scoreboard_hole_t *hole;
  while ((hole = scoreboard_first_hole (sb)))
    {
      scoreboard_remove_hole (sb, hole);
    }
  ASSERT (sb->head == sb->tail && sb->head == TCP_INVALID_SACK_HOLE_INDEX);
  ASSERT (pool_elts (sb->holes) == 0);
  sb->sacked_bytes = 0;
  sb->last_sacked_bytes = 0;
  sb->last_bytes_delivered = 0;
  sb->lost_bytes = 0;
  sb->last_lost_bytes = 0;
  sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
  sb->is_reneging = 0;
}

void
scoreboard_clear_reneging (sack_scoreboard_t * sb, u32 start, u32 end)
{
  sack_scoreboard_hole_t *last_hole;

  clib_warning ("sack reneging");

  scoreboard_clear (sb);
  last_hole = scoreboard_insert_hole (sb, TCP_INVALID_SACK_HOLE_INDEX,
				      start, end);
  last_hole->is_lost = 1;
  sb->tail = scoreboard_hole_index (sb, last_hole);
  sb->high_sacked = start;
  scoreboard_init_rxt (sb, start);
}

#endif /* CLIB_MARCH_VARIANT */

/**
 * Test that scoreboard is sane after recovery
 *
 * Returns 1 if the scoreboard is empty or if the first hole is beyond
 * snd_una.
 */
static u8
tcp_scoreboard_is_sane_post_recovery (tcp_connection_t * tc)
{
  sack_scoreboard_hole_t *hole;
  hole = scoreboard_first_hole (&tc->sack_sb);
  return (!hole || (seq_geq (hole->start, tc->snd_una)
		    && seq_lt (hole->end, tc->snd_nxt)));
}

#ifndef CLIB_MARCH_VARIANT

void
tcp_rcv_sacks (tcp_connection_t * tc, u32 ack)
{
  sack_scoreboard_hole_t *hole, *next_hole;
  sack_scoreboard_t *sb = &tc->sack_sb;
  sack_block_t *blk, *rcv_sacks;
  u32 blk_index = 0, i, j;
  u8 has_rxt;

  sb->last_sacked_bytes = 0;
  sb->last_bytes_delivered = 0;
  sb->rxt_sacked = 0;

  if (!tcp_opts_sack (&tc->rcv_opts)
      && sb->head == TCP_INVALID_SACK_HOLE_INDEX)
    return;

  has_rxt = tcp_in_cong_recovery (tc);

  /* Remove invalid blocks */
  blk = tc->rcv_opts.sacks;
  while (blk < vec_end (tc->rcv_opts.sacks))
    {
      if (seq_lt (blk->start, blk->end)
	  && seq_gt (blk->start, tc->snd_una)
	  && seq_gt (blk->start, ack)
	  && seq_lt (blk->start, tc->snd_nxt)
	  && seq_leq (blk->end, tc->snd_nxt))
	{
	  blk++;
	  continue;
	}
      vec_del1 (tc->rcv_opts.sacks, blk - tc->rcv_opts.sacks);
    }

  /* Add block for cumulative ack */
  if (seq_gt (ack, tc->snd_una))
    {
      vec_add2 (tc->rcv_opts.sacks, blk, 1);
      blk->start = tc->snd_una;
      blk->end = ack;
    }

  if (vec_len (tc->rcv_opts.sacks) == 0)
    return;

  tcp_scoreboard_trace_add (tc, ack);

  /* Make sure blocks are ordered */
  rcv_sacks = tc->rcv_opts.sacks;
  for (i = 0; i < vec_len (rcv_sacks); i++)
    for (j = i + 1; j < vec_len (rcv_sacks); j++)
      if (seq_lt (rcv_sacks[j].start, rcv_sacks[i].start))
	{
	  sack_block_t tmp = rcv_sacks[i];
	  rcv_sacks[i] = rcv_sacks[j];
	  rcv_sacks[j] = tmp;
	}

  if (sb->head == TCP_INVALID_SACK_HOLE_INDEX)
    {
      /* Handle reneging as a special case */
      if (PREDICT_FALSE (sb->is_reneging))
	{
	  /* No holes, only sacked bytes */
	  if (seq_leq (tc->snd_nxt, sb->high_sacked))
	    {
	      /* No progress made so return */
	      if (seq_leq (ack, tc->snd_una))
		return;

	      /* Update sacked bytes delivered and return */
	      sb->last_bytes_delivered = ack - tc->snd_una;
	      sb->sacked_bytes -= sb->last_bytes_delivered;
	      sb->is_reneging = seq_lt (ack, sb->high_sacked);
	      return;
	    }

	  /* New hole above high sacked. Add it and process normally */
	  hole = scoreboard_insert_hole (sb, TCP_INVALID_SACK_HOLE_INDEX,
					 sb->high_sacked, tc->snd_nxt);
	  sb->tail = scoreboard_hole_index (sb, hole);
	}
      /* Not reneging and no holes. Insert the first hole, which covers all
       * outstanding bytes */
      else
	{
	  hole = scoreboard_insert_hole (sb, TCP_INVALID_SACK_HOLE_INDEX,
					 tc->snd_una, tc->snd_nxt);
	  sb->tail = scoreboard_hole_index (sb, hole);
	}
      sb->high_sacked = rcv_sacks[vec_len (rcv_sacks) - 1].end;
    }
  else
    {
      /* If we have holes but snd_nxt is beyond the last hole, update
       * last hole end or add new hole after high sacked */
      hole = scoreboard_last_hole (sb);
      if (seq_gt (tc->snd_nxt, hole->end))
	{
	  if (seq_geq (hole->start, sb->high_sacked))
	    {
	      hole->end = tc->snd_nxt;
	    }
	  /* New hole after high sacked block */
	  else if (seq_lt (sb->high_sacked, tc->snd_nxt))
	    {
	      scoreboard_insert_hole (sb, sb->tail, sb->high_sacked,
				      tc->snd_nxt);
	    }
	}

      /* Keep track of max byte sacked for when the last hole
       * is acked */
      sb->high_sacked = seq_max (rcv_sacks[vec_len (rcv_sacks) - 1].end,
				 sb->high_sacked);
    }

  /* Walk the holes with the SACK blocks */
  hole = pool_elt_at_index (sb->holes, sb->head);

  if (PREDICT_FALSE (sb->is_reneging))
    sb->last_bytes_delivered += hole->start - tc->snd_una;

  while (hole && blk_index < vec_len (rcv_sacks))
    {
      blk = &rcv_sacks[blk_index];
      if (seq_leq (blk->start, hole->start))
	{
	  /* Block covers hole. Remove hole */
	  if (seq_geq (blk->end, hole->end))
	    {
	      next_hole = scoreboard_next_hole (sb, hole);

	      /* If covered by ack, compute delivered bytes */
	      if (blk->end == ack)
		{
		  u32 sacked = next_hole ? next_hole->start : sb->high_sacked;
		  if (PREDICT_FALSE (seq_lt (ack, sacked)))
		    {
		      sb->last_bytes_delivered += ack - hole->end;
		      sb->is_reneging = 1;
		    }
		  else
		    {
		      sb->last_bytes_delivered += sacked - hole->end;
		      sb->is_reneging = 0;
		    }
		}
	      scoreboard_update_sacked_rxt (sb, hole->start, hole->end,
					    has_rxt);
	      scoreboard_remove_hole (sb, hole);
	      hole = next_hole;
	    }
	  /* Partial 'head' overlap */
	  else
	    {
	      if (seq_gt (blk->end, hole->start))
		{
		  scoreboard_update_sacked_rxt (sb, hole->start, blk->end,
						has_rxt);
		  hole->start = blk->end;
		}
	      blk_index++;
	    }
	}
      else
	{
	  /* Hole must be split */
	  if (seq_lt (blk->end, hole->end))
	    {
	      u32 hole_index = scoreboard_hole_index (sb, hole);
	      next_hole = scoreboard_insert_hole (sb, hole_index, blk->end,
						  hole->end);
	      /* Pool might've moved */
	      hole = scoreboard_get_hole (sb, hole_index);
	      hole->end = blk->start;

	      scoreboard_update_sacked_rxt (sb, blk->start, blk->end,
					    has_rxt);

	      blk_index++;
	      ASSERT (hole->next == scoreboard_hole_index (sb, next_hole));
	    }
	  else if (seq_lt (blk->start, hole->end))
	    {
	      scoreboard_update_sacked_rxt (sb, blk->start, hole->end,
					    has_rxt);
	      hole->end = blk->start;
	    }
	  hole = scoreboard_next_hole (sb, hole);
	}
    }

  scoreboard_update_bytes (sb, ack, tc->snd_mss);

  ASSERT (sb->last_sacked_bytes <= sb->sacked_bytes || tcp_in_recovery (tc));
  ASSERT (sb->sacked_bytes == 0 || tcp_in_recovery (tc)
	  || sb->sacked_bytes <= tc->snd_nxt - seq_max (tc->snd_una, ack));
  ASSERT (sb->last_sacked_bytes + sb->lost_bytes <= tc->snd_nxt
	  - seq_max (tc->snd_una, ack) || tcp_in_recovery (tc));
  ASSERT (sb->head == TCP_INVALID_SACK_HOLE_INDEX || tcp_in_recovery (tc)
	  || sb->is_reneging || sb->holes[sb->head].start == ack);
  ASSERT (sb->last_lost_bytes <= sb->lost_bytes);
  ASSERT ((ack - tc->snd_una) + sb->last_sacked_bytes
	  - sb->last_bytes_delivered >= sb->rxt_sacked);
  ASSERT ((ack - tc->snd_una) >= tc->sack_sb.last_bytes_delivered
	  || (tc->flags & TCP_CONN_FINSNT));

  TCP_EVT (TCP_EVT_CC_SCOREBOARD, tc);
}
#endif /* CLIB_MARCH_VARIANT */

/**
 * Try to update snd_wnd based on feedback received from peer.
 *
 * If successful, and new window is 'effectively' 0, activate persist
 * timer.
 */
static void
tcp_update_snd_wnd (tcp_connection_t * tc, u32 seq, u32 ack, u32 snd_wnd)
{
  /* If (SND.WL1 < SEG.SEQ or (SND.WL1 = SEG.SEQ and SND.WL2 =< SEG.ACK)), set
   * SND.WND <- SEG.WND, set SND.WL1 <- SEG.SEQ, and set SND.WL2 <- SEG.ACK */
  if (seq_lt (tc->snd_wl1, seq)
      || (tc->snd_wl1 == seq && seq_leq (tc->snd_wl2, ack)))
    {
      tc->snd_wnd = snd_wnd;
      tc->snd_wl1 = seq;
      tc->snd_wl2 = ack;
      TCP_EVT (TCP_EVT_SND_WND, tc);

      if (PREDICT_FALSE (tc->snd_wnd < tc->snd_mss))
	{
	  /* Set persist timer if not set and we just got 0 wnd */
	  if (!tcp_timer_is_active (tc, TCP_TIMER_PERSIST)
	      && !tcp_timer_is_active (tc, TCP_TIMER_RETRANSMIT))
	    tcp_persist_timer_set (tc);
	}
      else
	{
	  tcp_persist_timer_reset (tc);
	  if (PREDICT_FALSE (!tcp_in_recovery (tc) && tc->rto_boff > 0))
	    {
	      tc->rto_boff = 0;
	      tcp_update_rto (tc);
	    }
	}
    }
}
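
/* Example (hypothetical values): with snd_wl1 = 1000 and snd_wl2 = 2000,
 * a segment with seq = 1000, ack = 2100 and a 16 KB advertised window
 * passes the test above (same WL1, newer ack) and refreshes snd_wnd,
 * whereas a stale segment with seq = 900 would be ignored. */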

/**
 * Init loss recovery/fast recovery.
 *
 * Triggered by dup acks as opposed to timer timeout. Note that cwnd is
 * updated in @ref tcp_cc_handle_event after fast retransmit
 */
static void
tcp_cc_init_congestion (tcp_connection_t * tc)
{
  tcp_fastrecovery_on (tc);
  tc->snd_congestion = tc->snd_nxt;
  tc->cwnd_acc_bytes = 0;
  tc->snd_rxt_bytes = 0;
  tc->rxt_delivered = 0;
  tc->prr_delivered = 0;
  tc->prr_start = tc->snd_una;
  tc->prev_ssthresh = tc->ssthresh;
  tc->prev_cwnd = tc->cwnd;

  tc->snd_rxt_ts = tcp_tstamp (tc);
  tcp_cc_congestion (tc);

  /* Post retransmit update cwnd to ssthresh and account for the
   * three segments that have left the network and should've been
   * buffered at the receiver XXX */
  if (!tcp_opts_sack_permitted (&tc->rcv_opts))
    tc->cwnd += 3 * tc->snd_mss;

  tc->fr_occurences += 1;
  TCP_EVT (TCP_EVT_CC_EVT, tc, 4);
}

static void
tcp_cc_congestion_undo (tcp_connection_t * tc)
{
  tc->cwnd = tc->prev_cwnd;
  tc->ssthresh = tc->prev_ssthresh;
  tcp_cc_undo_recovery (tc);
  ASSERT (tc->rto_boff == 0);
  TCP_EVT (TCP_EVT_CC_EVT, tc, 5);
}

static inline u8
tcp_cc_is_spurious_timeout_rxt (tcp_connection_t * tc)
{
  return (tcp_in_recovery (tc) && tc->rto_boff == 1
	  && tc->snd_rxt_ts
	  && tcp_opts_tstamp (&tc->rcv_opts)
	  && timestamp_lt (tc->rcv_opts.tsecr, tc->snd_rxt_ts));
}

static inline u8
tcp_cc_is_spurious_retransmit (tcp_connection_t * tc)
{
  return (tcp_cc_is_spurious_timeout_rxt (tc));
}

static inline u8
tcp_should_fastrecover_sack (tcp_connection_t * tc)
{
  return (tc->sack_sb.lost_bytes
	  || ((TCP_DUPACK_THRESHOLD - 1) * tc->snd_mss
	      < tc->sack_sb.sacked_bytes));
}

static inline u8
tcp_should_fastrecover (tcp_connection_t * tc, u8 has_sack)
{
  if (!has_sack)
    {
1298    {
1299      /* If of of the two conditions lower hold, reset dupacks because
1300       * we're probably after timeout (RFC6582 heuristics).
1301       * If Cumulative ack does not cover more than congestion threshold,
1302       * and:
1303       * 1) The following doesn't hold: The congestion window is greater
1304       *    than SMSS bytes and the difference between highest_ack
1305       *    and prev_highest_ack is at most 4*SMSS bytes
1306       * 2) Echoed timestamp in the last non-dup ack does not equal the
1307       *    stored timestamp
1308       */
      if (seq_leq (tc->snd_una, tc->snd_congestion)
	  && ((!(tc->cwnd > tc->snd_mss
		 && tc->bytes_acked <= 4 * tc->snd_mss))
	      || (tc->rcv_opts.tsecr != tc->tsecr_last_ack)))
	{
	  tc->rcv_dupacks = 0;
	  return 0;
	}
    }
  return ((tc->rcv_dupacks == TCP_DUPACK_THRESHOLD)
	  || tcp_should_fastrecover_sack (tc));
}

static int
tcp_cc_recover (tcp_connection_t * tc)
{
  sack_scoreboard_hole_t *hole;
  u8 is_spurious = 0;

  ASSERT (tcp_in_cong_recovery (tc));

  if (tcp_cc_is_spurious_retransmit (tc))
    {
      tcp_cc_congestion_undo (tc);
      is_spurious = 1;
    }

  tcp_connection_tx_pacer_reset (tc, tc->cwnd, 0 /* start bucket */ );
  tc->rcv_dupacks = 0;

  /* Previous recovery left us congested. Continue sending as part
   * of the current recovery event with an updated snd_congestion */
  if (tc->sack_sb.sacked_bytes)
    {
      tc->snd_congestion = tc->snd_nxt;
      tcp_program_retransmit (tc);
      return is_spurious;
    }

  tc->rxt_delivered = 0;
  tc->snd_rxt_bytes = 0;
  tc->snd_rxt_ts = 0;
  tc->prr_delivered = 0;
  tc->rtt_ts = 0;
  tc->flags &= ~TCP_CONN_RXT_PENDING;

  hole = scoreboard_first_hole (&tc->sack_sb);
  if (hole && hole->start == tc->snd_una && hole->end == tc->snd_nxt)
    scoreboard_clear (&tc->sack_sb);

  if (!tcp_in_recovery (tc) && !is_spurious)
    tcp_cc_recovered (tc);

  tcp_fastrecovery_off (tc);
  tcp_fastrecovery_first_off (tc);
  tcp_recovery_off (tc);
  TCP_EVT (TCP_EVT_CC_EVT, tc, 3);

  ASSERT (tc->rto_boff == 0);
  ASSERT (!tcp_in_cong_recovery (tc));
  ASSERT (tcp_scoreboard_is_sane_post_recovery (tc));
  return is_spurious;
}

static void
tcp_cc_update (tcp_connection_t * tc, tcp_rate_sample_t * rs)
{
  ASSERT (!tcp_in_cong_recovery (tc) || tcp_is_lost_fin (tc));

  /* Congestion avoidance */
  tcp_cc_rcv_ack (tc, rs);

  /* If a cumulative ack, make sure dupacks is 0 */
  tc->rcv_dupacks = 0;

  /* When dupacks hits the threshold we only enter fast retransmit if
   * cumulative ack covers more than snd_congestion. Should snd_una
   * wrap this test may fail under otherwise valid circumstances.
   * Therefore, proactively update snd_congestion when wrap detected. */
  if (PREDICT_FALSE
      (seq_leq (tc->snd_congestion, tc->snd_una - tc->bytes_acked)
       && seq_gt (tc->snd_congestion, tc->snd_una)))
    tc->snd_congestion = tc->snd_una - 1;
}

/**
 * One function to rule them all ... and in the darkness bind them
 */
static void
tcp_cc_handle_event (tcp_connection_t * tc, tcp_rate_sample_t * rs,
		     u32 is_dack)
{
  u8 has_sack = tcp_opts_sack_permitted (&tc->rcv_opts);

  /*
   * If not in recovery, figure out if we should enter
   */
  if (!tcp_in_cong_recovery (tc))
    {
      ASSERT (is_dack);

      tc->rcv_dupacks++;
      TCP_EVT (TCP_EVT_DUPACK_RCVD, tc, 1);
      tcp_cc_rcv_cong_ack (tc, TCP_CC_DUPACK, rs);

      if (tcp_should_fastrecover (tc, has_sack))
	{
	  tcp_cc_init_congestion (tc);

	  if (has_sack)
	    scoreboard_init_rxt (&tc->sack_sb, tc->snd_una);

	  tcp_connection_tx_pacer_reset (tc, tc->cwnd, 0 /* start bucket */ );
	  tcp_program_retransmit (tc);
	}

      return;
    }

  /*
   * Already in recovery
   */

  /*
   * Process (re)transmit feedback. Output path uses this to decide how much
   * more data to release into the network
   */
  if (has_sack)
    {
      if (!tc->bytes_acked && tc->sack_sb.rxt_sacked)
	tcp_fastrecovery_first_on (tc);

      tc->rxt_delivered += tc->sack_sb.rxt_sacked;
      tc->prr_delivered += tc->bytes_acked + tc->sack_sb.last_sacked_bytes
	- tc->sack_sb.last_bytes_delivered;

      tcp_program_retransmit (tc);
    }
  else
    {
      if (is_dack)
	{
	  tc->rcv_dupacks += 1;
	  TCP_EVT (TCP_EVT_DUPACK_RCVD, tc, 1);
	}
      tc->rxt_delivered = clib_max (tc->rxt_delivered + tc->bytes_acked,
				    tc->snd_rxt_bytes);
      if (is_dack)
	tc->prr_delivered += clib_min (tc->snd_mss,
				       tc->snd_nxt - tc->snd_una);
      else
	tc->prr_delivered += tc->bytes_acked - clib_min (tc->bytes_acked,
							 tc->snd_mss *
							 tc->rcv_dupacks);

      /* If partial ack, assume that the first un-acked segment was lost */
      if (tc->bytes_acked || tc->rcv_dupacks == TCP_DUPACK_THRESHOLD)
	tcp_fastrecovery_first_on (tc);

      tcp_program_retransmit (tc);
    }

  /*
   * See if we can exit and stop retransmitting
   */
  if (seq_geq (tc->snd_una, tc->snd_congestion))
    {
      /* If spurious return, we've already updated everything */
      if (tcp_cc_recover (tc))
	{
	  tc->tsecr_last_ack = tc->rcv_opts.tsecr;
	  return;
	}

      /* Treat as congestion avoidance ack */
      tcp_cc_rcv_ack (tc, rs);
      return;
    }

  /*
   * Notify cc of the event
   */

  if (!tc->bytes_acked)
    {
      tcp_cc_rcv_cong_ack (tc, TCP_CC_DUPACK, rs);
      return;
    }

  /* RFC6675: If the incoming ACK is a cumulative acknowledgment,
   * reset dupacks to 0. Also needed if in congestion recovery */
  tc->rcv_dupacks = 0;

  if (tcp_in_recovery (tc))
    tcp_cc_rcv_ack (tc, rs);
  else
    tcp_cc_rcv_cong_ack (tc, TCP_CC_PARTIALACK, rs);
}

/**
 * Check if duplicate ack as per RFC5681 Sec. 2
 */
always_inline u8
tcp_ack_is_dupack (tcp_connection_t * tc, vlib_buffer_t * b, u32 prev_snd_wnd,
		   u32 prev_snd_una)
{
  return ((vnet_buffer (b)->tcp.ack_number == prev_snd_una)
	  && seq_gt (tc->snd_nxt, tc->snd_una)
	  && (vnet_buffer (b)->tcp.seq_end == vnet_buffer (b)->tcp.seq_number)
	  && (prev_snd_wnd == tc->snd_wnd));
}
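
/* Example: an ack that repeats prev_snd_una while data is outstanding
 * (snd_nxt > snd_una), carries no payload (seq_end == seq_number) and
 * leaves the advertised window unchanged counts as a duplicate; if any of
 * the four conditions fails, e.g. on a window update, it does not. */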

/**
 * Checks if ack is a congestion control event.
 */
static u8
tcp_ack_is_cc_event (tcp_connection_t * tc, vlib_buffer_t * b,
		     u32 prev_snd_wnd, u32 prev_snd_una, u8 * is_dack)
{
  /* Check if ack is duplicate. Per RFC 6675, ACKs that SACK new data are
   * defined to be 'duplicate' as well */
  *is_dack = tc->sack_sb.last_sacked_bytes
    || tcp_ack_is_dupack (tc, b, prev_snd_wnd, prev_snd_una);

  /* If reneging, wait for timer based retransmits */
  if (PREDICT_FALSE (tcp_is_lost_fin (tc) || tc->sack_sb.is_reneging))
    return 0;

  return (*is_dack || tcp_in_cong_recovery (tc));
}

/**
 * Process incoming ACK
 */
static int
tcp_rcv_ack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc, vlib_buffer_t * b,
	     tcp_header_t * th, u32 * error)
{
  u32 prev_snd_wnd, prev_snd_una;
  tcp_rate_sample_t rs = { 0 };
  u8 is_dack;

  TCP_EVT (TCP_EVT_CC_STAT, tc);

  /* If the ACK acks something not yet sent (SEG.ACK > SND.NXT) */
  if (PREDICT_FALSE (seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_nxt)))
    {
      /* We've probably entered recovery and the peer still has some
       * of the data we've sent. Update snd_nxt and accept the ack */
      if (seq_leq (vnet_buffer (b)->tcp.ack_number, tc->snd_una_max)
	  && seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_una))
	{
	  tc->snd_nxt = vnet_buffer (b)->tcp.ack_number;
	  goto process_ack;
	}

      tc->errors.above_ack_wnd += 1;
      *error = TCP_ERROR_ACK_FUTURE;
      TCP_EVT (TCP_EVT_ACK_RCV_ERR, tc, 0, vnet_buffer (b)->tcp.ack_number);
      return -1;
    }

  /* If old ACK, probably it's an old dupack */
  if (PREDICT_FALSE (seq_lt (vnet_buffer (b)->tcp.ack_number, tc->snd_una)))
    {
      tc->errors.below_ack_wnd += 1;
      *error = TCP_ERROR_ACK_OLD;
      TCP_EVT (TCP_EVT_ACK_RCV_ERR, tc, 1, vnet_buffer (b)->tcp.ack_number);
      if (tcp_in_fastrecovery (tc) && tc->rcv_dupacks == TCP_DUPACK_THRESHOLD)
	tcp_cc_handle_event (tc, 0, 1);
      /* Don't drop yet */
      return 0;
    }

process_ack:

  /*
   * Looks okay, process feedback
   */

  if (tcp_opts_sack_permitted (&tc->rcv_opts))
    tcp_rcv_sacks (tc, vnet_buffer (b)->tcp.ack_number);

  prev_snd_wnd = tc->snd_wnd;
  prev_snd_una = tc->snd_una;
  tcp_update_snd_wnd (tc, vnet_buffer (b)->tcp.seq_number,
		      vnet_buffer (b)->tcp.ack_number,
		      clib_net_to_host_u16 (th->window) << tc->snd_wscale);
  tc->bytes_acked = vnet_buffer (b)->tcp.ack_number - tc->snd_una;
  tc->snd_una = vnet_buffer (b)->tcp.ack_number;
  tcp_validate_txf_size (tc, tc->bytes_acked);

  if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
    tcp_bt_sample_delivery_rate (tc, &rs);

  if (tc->bytes_acked)
    {
      tcp_program_dequeue (wrk, tc);
      tcp_update_rtt (tc, &rs, vnet_buffer (b)->tcp.ack_number);
    }

  TCP_EVT (TCP_EVT_ACK_RCVD, tc);

  /*
   * Check if we have congestion event
   */

  if (tcp_ack_is_cc_event (tc, b, prev_snd_wnd, prev_snd_una, &is_dack))
    {
      tcp_cc_handle_event (tc, &rs, is_dack);
      tc->dupacks_in += is_dack;
      if (!tcp_in_cong_recovery (tc))
	{
	  *error = TCP_ERROR_ACK_OK;
	  return 0;
	}
      *error = TCP_ERROR_ACK_DUP;
      if (vnet_buffer (b)->tcp.data_len || tcp_is_fin (th))
	return 0;
      return -1;
    }

  /*
   * Update congestion control (slow start/congestion avoidance)
   */
  tcp_cc_update (tc, &rs);
  *error = TCP_ERROR_ACK_OK;
  return 0;
}

static void
tcp_program_disconnect (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
{
  if (!tcp_disconnect_pending (tc))
    {
      vec_add1 (wrk->pending_disconnects, tc->c_c_index);
      tcp_disconnect_pending_on (tc);
    }
}

static void
tcp_handle_disconnects (tcp_worker_ctx_t * wrk)
{
  u32 thread_index, *pending_disconnects;
  tcp_connection_t *tc;
  int i;

  if (!vec_len (wrk->pending_disconnects))
    return;

  thread_index = wrk->vm->thread_index;
  pending_disconnects = wrk->pending_disconnects;
  for (i = 0; i < vec_len (pending_disconnects); i++)
    {
      tc = tcp_connection_get (pending_disconnects[i], thread_index);
      tcp_disconnect_pending_off (tc);
      session_transport_closing_notify (&tc->connection);
    }
  _vec_len (wrk->pending_disconnects) = 0;
}

static void
tcp_rcv_fin (tcp_worker_ctx_t * wrk, tcp_connection_t * tc, vlib_buffer_t * b,
	     u32 * error)
{
  /* Reject out-of-order fins */
  if (vnet_buffer (b)->tcp.seq_end != tc->rcv_nxt)
    return;

  /* Account for the FIN and send ack */
  tc->rcv_nxt += 1;
  tc->flags |= TCP_CONN_FINRCVD;
  tcp_program_ack (tc);
  /* Enter CLOSE-WAIT and notify session. To avoid lingering
   * in CLOSE-WAIT, set timer (reuse WAITCLOSE). */
  tcp_connection_set_state (tc, TCP_STATE_CLOSE_WAIT);
  tcp_program_disconnect (wrk, tc);
  tcp_timer_update (tc, TCP_TIMER_WAITCLOSE, tcp_cfg.closewait_time);
  TCP_EVT (TCP_EVT_FIN_RCVD, tc);
  *error = TCP_ERROR_FIN_RCVD;
}

#ifndef CLIB_MARCH_VARIANT
static u8
tcp_sack_vector_is_sane (sack_block_t * sacks)
{
  int i;
  for (i = 1; i < vec_len (sacks); i++)
    {
      if (sacks[i - 1].end == sacks[i].start)
	return 0;
    }
  return 1;
}

/**
 * Build SACK list as per RFC2018.
 *
 * Makes sure the first block contains the segment that generated the current
 * ACK and the following ones are the ones most recently reported in SACK
 * blocks.
 *
 * @param tc TCP connection for which the SACK list is updated
 * @param start Start sequence number of the newest SACK block
 * @param end End sequence of the newest SACK block
 */
void
tcp_update_sack_list (tcp_connection_t * tc, u32 start, u32 end)
{
  sack_block_t *new_list = tc->snd_sacks_fl, *block = 0;
  int i;

  /* If the first segment is ooo add it to the list. Last write might've moved
   * rcv_nxt over the first segment. */
  if (seq_lt (tc->rcv_nxt, start))
    {
      vec_add2 (new_list, block, 1);
      block->start = start;
      block->end = end;
    }

  /* Find the blocks still worth keeping. */
  for (i = 0; i < vec_len (tc->snd_sacks); i++)
    {
      /* Discard if rcv_nxt advanced beyond current block */
      if (seq_leq (tc->snd_sacks[i].start, tc->rcv_nxt))
	continue;

      /* Merge or drop if segment overlapped by the new segment */
      if (block && (seq_geq (tc->snd_sacks[i].end, new_list[0].start)
		    && seq_leq (tc->snd_sacks[i].start, new_list[0].end)))
	{
	  if (seq_lt (tc->snd_sacks[i].start, new_list[0].start))
	    new_list[0].start = tc->snd_sacks[i].start;
	  if (seq_lt (new_list[0].end, tc->snd_sacks[i].end))
	    new_list[0].end = tc->snd_sacks[i].end;
	  continue;
	}

      /* Save to new SACK list if we have space. */
      if (vec_len (new_list) < TCP_MAX_SACK_BLOCKS)
	vec_add1 (new_list, tc->snd_sacks[i]);
    }

  ASSERT (vec_len (new_list) <= TCP_MAX_SACK_BLOCKS);

  /* Replace old vector with new one */
  vec_reset_length (tc->snd_sacks);
  tc->snd_sacks_fl = tc->snd_sacks;
  tc->snd_sacks = new_list;

  /* Segments should not 'touch' */
  ASSERT (tcp_sack_vector_is_sane (tc->snd_sacks));
}
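
/* Example (hypothetical values): with rcv_nxt = 100 and an existing block
 * [300, 400), receiving ooo segment [200, 300) yields new_list[0] =
 * [200, 300), which the merge step above coalesces with [300, 400) into a
 * single reported block [200, 400), per the RFC2018 ordering rules. */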

u32
tcp_sack_list_bytes (tcp_connection_t * tc)
{
  u32 bytes = 0, i;
  for (i = 0; i < vec_len (tc->snd_sacks); i++)
    bytes += tc->snd_sacks[i].end - tc->snd_sacks[i].start;
  return bytes;
}
#endif /* CLIB_MARCH_VARIANT */

/** Enqueue data for delivery to application */
static int
tcp_session_enqueue_data (tcp_connection_t * tc, vlib_buffer_t * b,
			  u16 data_len)
{
  int written, error = TCP_ERROR_ENQUEUED;

  ASSERT (seq_geq (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt));
  ASSERT (data_len);
  written = session_enqueue_stream_connection (&tc->connection, b, 0,
					       1 /* queue event */ , 1);
  tc->bytes_in += written;

  TCP_EVT (TCP_EVT_INPUT, tc, 0, data_len, written);

  /* Update rcv_nxt */
  if (PREDICT_TRUE (written == data_len))
    {
      tc->rcv_nxt += written;
    }
  /* If more data written than expected, account for out-of-order bytes. */
  else if (written > data_len)
    {
      tc->rcv_nxt += written;
      TCP_EVT (TCP_EVT_CC_INPUT, tc, data_len, written);
    }
  else if (written > 0)
    {
      /* We've written something but FIFO is probably full now */
      tc->rcv_nxt += written;
      error = TCP_ERROR_PARTIALLY_ENQUEUED;
    }
  else
    {
      return TCP_ERROR_FIFO_FULL;
    }

  /* Update SACK list if need be */
  if (tcp_opts_sack_permitted (&tc->rcv_opts))
    {
      /* Remove SACK blocks that have been delivered */
      tcp_update_sack_list (tc, tc->rcv_nxt, tc->rcv_nxt);
    }

  return error;
}

/** Enqueue out-of-order data */
static int
tcp_session_enqueue_ooo (tcp_connection_t * tc, vlib_buffer_t * b,
			 u16 data_len)
{
  session_t *s0;
  int rv, offset;

  ASSERT (seq_gt (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt));
  ASSERT (data_len);

  /* Enqueue out-of-order data with relative offset */
  rv = session_enqueue_stream_connection (&tc->connection, b,
					  vnet_buffer (b)->tcp.seq_number -
					  tc->rcv_nxt, 0 /* queue event */ ,
					  0);

  /* Nothing written */
  if (rv)
    {
      TCP_EVT (TCP_EVT_INPUT, tc, 1, data_len, 0);
      return TCP_ERROR_FIFO_FULL;
    }

  TCP_EVT (TCP_EVT_INPUT, tc, 1, data_len, data_len);
  tc->bytes_in += data_len;

  /* Update SACK list if in use */
  if (tcp_opts_sack_permitted (&tc->rcv_opts))
    {
      ooo_segment_t *newest;
      u32 start, end;

      s0 = session_get (tc->c_s_index, tc->c_thread_index);

      /* Get the newest segment from the fifo */
      newest = svm_fifo_newest_ooo_segment (s0->rx_fifo);
      if (newest)
	{
	  offset = ooo_segment_offset_prod (s0->rx_fifo, newest);
	  ASSERT (offset <= vnet_buffer (b)->tcp.seq_number - tc->rcv_nxt);
	  start = tc->rcv_nxt + offset;
	  end = start + ooo_segment_length (s0->rx_fifo, newest);
	  tcp_update_sack_list (tc, start, end);
	  svm_fifo_newest_ooo_segment_reset (s0->rx_fifo);
	  TCP_EVT (TCP_EVT_CC_SACKS, tc);
	}
    }

  return TCP_ERROR_ENQUEUED_OOO;
}

1873/**
1874 * Check if the ACK can be delayed. Returns 1 if the ACK may be delayed,
1875 * 0 if it must be sent immediately.
1876 */
1877always_inline int
1878tcp_can_delack (tcp_connection_t * tc)
1879{
1880  /* Send ack if ... */
1881  if (TCP_ALWAYS_ACK
1882      /* just sent a rcv wnd 0
1883         || (tc->flags & TCP_CONN_SENT_RCV_WND0) != 0 */
1884      /* constrained to send ack */
1885      || (tc->flags & TCP_CONN_SNDACK) != 0
1886      /* we're almost out of tx wnd */
1887      || tcp_available_cc_snd_space (tc) < 4 * tc->snd_mss)
1888    return 0;
1889
1890  return 1;
1891}
1892
1893static int
1894tcp_buffer_discard_bytes (vlib_buffer_t * b, u32 n_bytes_to_drop)
1895{
1896  u32 discard, first = b->current_length;
1897  vlib_main_t *vm = vlib_get_main ();
1898
1899  /* Handle multi-buffer segments */
1900  if (n_bytes_to_drop > b->current_length)
1901    {
1902      if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
1903	return -1;
1904      do
1905	{
1906	  discard = clib_min (n_bytes_to_drop, b->current_length);
1907	  vlib_buffer_advance (b, discard);
1908	  b = vlib_get_buffer (vm, b->next_buffer);
1909	  n_bytes_to_drop -= discard;
1910	}
1911      while (n_bytes_to_drop);
1912      if (n_bytes_to_drop > first)
1913	b->total_length_not_including_first_buffer -= n_bytes_to_drop - first;
1914    }
1915  else
1916    vlib_buffer_advance (b, n_bytes_to_drop);
1917  vnet_buffer (b)->tcp.data_len -= n_bytes_to_drop;
1918  return 0;
1919}
1920
1921/**
1922 * Receive segment payload for a connection and program acks
1923 *
1924 * Handles both in-order and out-of-order data.
1925 */
1926static int
1927tcp_segment_rcv (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
1928		 vlib_buffer_t * b)
1929{
1930  u32 error, n_bytes_to_drop, n_data_bytes;
1931
1932  vlib_buffer_advance (b, vnet_buffer (b)->tcp.data_offset);
1933  n_data_bytes = vnet_buffer (b)->tcp.data_len;
1934  ASSERT (n_data_bytes);
1935  tc->data_segs_in += 1;
1936
1937  /* Handle out-of-order data */
1938  if (PREDICT_FALSE (vnet_buffer (b)->tcp.seq_number != tc->rcv_nxt))
1939    {
1940      /* Old sequence numbers allowed through because they overlapped
1941       * the rx window */
1942      if (seq_lt (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt))
1943	{
1944	  /* Completely in the past (possible retransmit). Ack
1945	   * retransmissions since we may not have any data to send */
1946	  if (seq_leq (vnet_buffer (b)->tcp.seq_end, tc->rcv_nxt))
1947	    {
1948	      tcp_program_ack (tc);
1949	      error = TCP_ERROR_SEGMENT_OLD;
1950	      goto done;
1951	    }
1952
1953	  /* Chop off the bytes in the past and see if what is left
1954	   * can be enqueued in order */
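	  /* Example: rcv_nxt = 1000, seq_number = 900, 200 data bytes:
	   * drop the first 100 bytes and enqueue [1000, 1100) in order. */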
1955	  n_bytes_to_drop = tc->rcv_nxt - vnet_buffer (b)->tcp.seq_number;
1956	  n_data_bytes -= n_bytes_to_drop;
1957	  vnet_buffer (b)->tcp.seq_number = tc->rcv_nxt;
1958	  if (tcp_buffer_discard_bytes (b, n_bytes_to_drop))
1959	    {
1960	      error = TCP_ERROR_SEGMENT_OLD;
1961	      goto done;
1962	    }
1963	  goto in_order;
1964	}
1965
1966      /* RFC2581: Enqueue and send DUPACK for fast retransmit */
1967      error = tcp_session_enqueue_ooo (tc, b, n_data_bytes);
1968      tcp_program_dupack (tc);
1969      TCP_EVT (TCP_EVT_DUPACK_SENT, tc, vnet_buffer (b)->tcp);
1970      tc->errors.above_data_wnd += seq_gt (vnet_buffer (b)->tcp.seq_end,
1971					   tc->rcv_las + tc->rcv_wnd);
1972      goto done;
1973    }
1974
1975in_order:
1976
1977  /* In order data, enqueue. Fifo figures out by itself if any out-of-order
1978   * segments can be enqueued after fifo tail offset changes. */
1979  error = tcp_session_enqueue_data (tc, b, n_data_bytes);
1980  if (tcp_can_delack (tc))
1981    {
1982      if (!tcp_timer_is_active (tc, TCP_TIMER_DELACK))
1983	tcp_timer_set (tc, TCP_TIMER_DELACK, tcp_cfg.delack_time);
1984      goto done;
1985    }
1986
1987  tcp_program_ack (tc);
1988
1989done:
1990  return error;
1991}
1992
1993typedef struct
1994{
1995  tcp_header_t tcp_header;
1996  tcp_connection_t tcp_connection;
1997} tcp_rx_trace_t;
1998
1999static u8 *
2000format_tcp_rx_trace (u8 * s, va_list * args)
2001{
2002  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
2003  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
2004  tcp_rx_trace_t *t = va_arg (*args, tcp_rx_trace_t *);
2005  u32 indent = format_get_indent (s);
2006
2007  s = format (s, "%U\n%U%U",
2008	      format_tcp_header, &t->tcp_header, 128,
2009	      format_white_space, indent,
2010	      format_tcp_connection, &t->tcp_connection, 1);
2011
2012  return s;
2013}
2014
2015static u8 *
2016format_tcp_rx_trace_short (u8 * s, va_list * args)
2017{
2018  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
2019  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
2020  tcp_rx_trace_t *t = va_arg (*args, tcp_rx_trace_t *);
2021
2022  s = format (s, "%d -> %d (%U)",
2023	      clib_net_to_host_u16 (t->tcp_header.dst_port),
2024	      clib_net_to_host_u16 (t->tcp_header.src_port), format_tcp_state,
2025	      t->tcp_connection.state);
2026
2027  return s;
2028}
2029
2030static void
2031tcp_set_rx_trace_data (tcp_rx_trace_t * t0, tcp_connection_t * tc0,
2032		       tcp_header_t * th0, vlib_buffer_t * b0, u8 is_ip4)
2033{
2034  if (tc0)
2035    {
2036      clib_memcpy_fast (&t0->tcp_connection, tc0,
2037			sizeof (t0->tcp_connection));
2038    }
2039  else
2040    {
2041      th0 = tcp_buffer_hdr (b0);
2042    }
2043  clib_memcpy_fast (&t0->tcp_header, th0, sizeof (t0->tcp_header));
2044}
2045
2046static void
2047tcp_established_trace_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
2048			     vlib_frame_t * frame, u8 is_ip4)
2049{
2050  u32 *from, n_left;
2051
2052  n_left = frame->n_vectors;
2053  from = vlib_frame_vector_args (frame);
2054
2055  while (n_left >= 1)
2056    {
2057      tcp_connection_t *tc0;
2058      tcp_rx_trace_t *t0;
2059      tcp_header_t *th0;
2060      vlib_buffer_t *b0;
2061      u32 bi0;
2062
2063      bi0 = from[0];
2064      b0 = vlib_get_buffer (vm, bi0);
2065
2066      if (b0->flags & VLIB_BUFFER_IS_TRACED)
2067	{
2068	  t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
2069	  tc0 = tcp_connection_get (vnet_buffer (b0)->tcp.connection_index,
2070				    vm->thread_index);
2071	  th0 = tcp_buffer_hdr (b0);
2072	  tcp_set_rx_trace_data (t0, tc0, th0, b0, is_ip4);
2073	}
2074
2075      from += 1;
2076      n_left -= 1;
2077    }
2078}
2079
2080always_inline void
2081tcp_node_inc_counter_i (vlib_main_t * vm, u32 tcp4_node, u32 tcp6_node,
2082			u8 is_ip4, u32 evt, u32 val)
2083{
2084  if (is_ip4)
2085    vlib_node_increment_counter (vm, tcp4_node, evt, val);
2086  else
2087    vlib_node_increment_counter (vm, tcp6_node, evt, val);
2088}
2089
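/* Error counting helpers. The err_counter variants accumulate errors in a
 * per-frame array that is flushed once via tcp_store_err_counters, limiting
 * the number of vlib counter updates per frame. */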
2090#define tcp_maybe_inc_counter(node_id, err, count)			\
2091{									\
2092  if (next0 != tcp_next_drop (is_ip4))					\
2093    tcp_node_inc_counter_i (vm, tcp4_##node_id##_node.index,		\
2094                            tcp6_##node_id##_node.index, is_ip4, err, 	\
2095			    1);						\
2096}
2097#define tcp_inc_counter(node_id, err, count)				\
2098  tcp_node_inc_counter_i (vm, tcp4_##node_id##_node.index,		\
2099	                   tcp6_##node_id##_node.index, is_ip4,		\
2100	                   err, count)
2101#define tcp_maybe_inc_err_counter(cnts, err)				\
2102{									\
2103  cnts[err] += (next0 != tcp_next_drop (is_ip4));			\
2104}
2105#define tcp_inc_err_counter(cnts, err, val)				\
2106{									\
2107  cnts[err] += val;							\
2108}
2109#define tcp_store_err_counters(node_id, cnts)				\
2110{									\
2111  int i;								\
2112  for (i = 0; i < TCP_N_ERROR; i++)					\
2113    if (cnts[i])							\
2114      tcp_inc_counter(node_id, i, cnts[i]);				\
2115}
2116
2117
2118always_inline uword
2119tcp46_established_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
2120			  vlib_frame_t * frame, int is_ip4)
2121{
2122  u32 thread_index = vm->thread_index, errors = 0;
2123  tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
2124  u32 n_left_from, *from, *first_buffer;
2125  u16 err_counters[TCP_N_ERROR] = { 0 };
2126
2127  if (node->flags & VLIB_NODE_FLAG_TRACE)
2128    tcp_established_trace_frame (vm, node, frame, is_ip4);
2129
2130  first_buffer = from = vlib_frame_vector_args (frame);
2131  n_left_from = frame->n_vectors;
2132
2133  while (n_left_from > 0)
2134    {
2135      u32 bi0, error0 = TCP_ERROR_ACK_OK;
2136      vlib_buffer_t *b0;
2137      tcp_header_t *th0;
2138      tcp_connection_t *tc0;
2139
2140      if (n_left_from > 1)
2141	{
2142	  vlib_buffer_t *pb;
2143	  pb = vlib_get_buffer (vm, from[1]);
2144	  vlib_prefetch_buffer_header (pb, LOAD);
2145	  CLIB_PREFETCH (pb->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
2146	}
2147
2148      bi0 = from[0];
2149      from += 1;
2150      n_left_from -= 1;
2151
2152      b0 = vlib_get_buffer (vm, bi0);
2153      tc0 = tcp_connection_get (vnet_buffer (b0)->tcp.connection_index,
2154				thread_index);
2155
2156      if (PREDICT_FALSE (tc0 == 0))
2157	{
2158	  error0 = TCP_ERROR_INVALID_CONNECTION;
2159	  goto done;
2160	}
2161
2162      th0 = tcp_buffer_hdr (b0);
2163
2164      /* TODO header prediction fast path */
2165
2166      /* 1-4: check SEQ, RST, SYN */
2167      if (PREDICT_FALSE (tcp_segment_validate (wrk, tc0, b0, th0, &error0)))
2168	{
2169	  TCP_EVT (TCP_EVT_SEG_INVALID, tc0, vnet_buffer (b0)->tcp);
2170	  goto done;
2171	}
2172
2173      /* 5: check the ACK field  */
2174      if (PREDICT_FALSE (tcp_rcv_ack (wrk, tc0, b0, th0, &error0)))
2175	goto done;
2176
2177      /* 6: check the URG bit TODO */
2178
2179      /* 7: process the segment text */
2180      if (vnet_buffer (b0)->tcp.data_len)
2181	error0 = tcp_segment_rcv (wrk, tc0, b0);
2182
2183      /* 8: check the FIN bit */
2184      if (PREDICT_FALSE (tcp_is_fin (th0)))
2185	tcp_rcv_fin (wrk, tc0, b0, &error0);
2186
2187    done:
2188      tcp_inc_err_counter (err_counters, error0, 1);
2189    }
2190
2191  errors = session_main_flush_enqueue_events (TRANSPORT_PROTO_TCP,
2192					      thread_index);
2193  err_counters[TCP_ERROR_MSG_QUEUE_FULL] = errors;
2194  tcp_store_err_counters (established, err_counters);
2195  tcp_handle_postponed_dequeues (wrk);
2196  tcp_handle_disconnects (wrk);
2197  vlib_buffer_free (vm, first_buffer, frame->n_vectors);
2198
2199  return frame->n_vectors;
2200}
2201
2202VLIB_NODE_FN (tcp4_established_node) (vlib_main_t * vm,
2203				      vlib_node_runtime_t * node,
2204				      vlib_frame_t * from_frame)
2205{
2206  return tcp46_established_inline (vm, node, from_frame, 1 /* is_ip4 */ );
2207}
2208
2209VLIB_NODE_FN (tcp6_established_node) (vlib_main_t * vm,
2210				      vlib_node_runtime_t * node,
2211				      vlib_frame_t * from_frame)
2212{
2213  return tcp46_established_inline (vm, node, from_frame, 0 /* is_ip4 */ );
2214}
2215
2216/* *INDENT-OFF* */
2217VLIB_REGISTER_NODE (tcp4_established_node) =
2218{
2219  .name = "tcp4-established",
2220  /* Takes a vector of packets. */
2221  .vector_size = sizeof (u32),
2222  .n_errors = TCP_N_ERROR,
2223  .error_strings = tcp_error_strings,
2224  .n_next_nodes = TCP_ESTABLISHED_N_NEXT,
2225  .next_nodes =
2226  {
2227#define _(s,n) [TCP_ESTABLISHED_NEXT_##s] = n,
2228    foreach_tcp_state_next
2229#undef _
2230  },
2231  .format_trace = format_tcp_rx_trace_short,
2232};
2233/* *INDENT-ON* */
2234
2235/* *INDENT-OFF* */
2236VLIB_REGISTER_NODE (tcp6_established_node) =
2237{
2238  .name = "tcp6-established",
2239  /* Takes a vector of packets. */
2240  .vector_size = sizeof (u32),
2241  .n_errors = TCP_N_ERROR,
2242  .error_strings = tcp_error_strings,
2243  .n_next_nodes = TCP_ESTABLISHED_N_NEXT,
2244  .next_nodes =
2245  {
2246#define _(s,n) [TCP_ESTABLISHED_NEXT_##s] = n,
2247    foreach_tcp_state_next
2248#undef _
2249  },
2250  .format_trace = format_tcp_rx_trace_short,
2251};
2252/* *INDENT-ON* */
2253
2254
2255static u8
2256tcp_lookup_is_valid (tcp_connection_t * tc, vlib_buffer_t * b,
2257		     tcp_header_t * hdr)
2258{
2259  transport_connection_t *tmp = 0;
2260  u64 handle;
2261
2262  if (!tc)
2263    return 1;
2264
2265  /* Proxy case */
2266  if (tc->c_lcl_port == 0 && tc->state == TCP_STATE_LISTEN)
2267    return 1;
2268
2269  u8 is_ip_valid = 0, val_l, val_r;
2270
2271  if (tc->connection.is_ip4)
2272    {
2273      ip4_header_t *ip4_hdr = (ip4_header_t *) vlib_buffer_get_current (b);
2274
2275      val_l = !ip4_address_compare (&ip4_hdr->dst_address,
2276				    &tc->connection.lcl_ip.ip4);
2277      val_l = val_l || ip_is_zero (&tc->connection.lcl_ip, 1);
2278      val_r = !ip4_address_compare (&ip4_hdr->src_address,
2279				    &tc->connection.rmt_ip.ip4);
2280      val_r = val_r || tc->state == TCP_STATE_LISTEN;
2281      is_ip_valid = val_l && val_r;
2282    }
2283  else
2284    {
2285      ip6_header_t *ip6_hdr = (ip6_header_t *) vlib_buffer_get_current (b);
2286
2287      val_l = !ip6_address_compare (&ip6_hdr->dst_address,
2288				    &tc->connection.lcl_ip.ip6);
2289      val_l = val_l || ip_is_zero (&tc->connection.lcl_ip, 0);
2290      val_r = !ip6_address_compare (&ip6_hdr->src_address,
2291				    &tc->connection.rmt_ip.ip6);
2292      val_r = val_r || tc->state == TCP_STATE_LISTEN;
2293      is_ip_valid = val_l && val_r;
2294    }
2295
2296  u8 is_valid = (tc->c_lcl_port == hdr->dst_port
2297		 && (tc->state == TCP_STATE_LISTEN
2298		     || tc->c_rmt_port == hdr->src_port) && is_ip_valid);
2299
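  /* The tuple may still be tracked as a half-open connection, e.g., a
   * SYN-ACK handled before the owning thread removes the half-open.
   * Check that table before declaring the lookup invalid. */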
2300  if (!is_valid)
2301    {
2302      handle = session_lookup_half_open_handle (&tc->connection);
2303      tmp = session_lookup_half_open_connection (handle & 0xFFFFFFFF,
2304						 tc->c_proto, tc->c_is_ip4);
2305
2306      if (tmp)
2307	{
2308	  if (tmp->lcl_port == hdr->dst_port
2309	      && tmp->rmt_port == hdr->src_port)
2310	    {
2311	      TCP_DBG ("half-open is valid!");
2312	      is_valid = 1;
2313	    }
2314	}
2315    }
2316  return is_valid;
2317}
2318
2319/**
2320 * Lookup transport connection using the packet's 5-tuple
2321 */
2322static tcp_connection_t *
2323tcp_lookup_connection (u32 fib_index, vlib_buffer_t * b, u8 thread_index,
2324		       u8 is_ip4)
2325{
2326  tcp_header_t *tcp;
2327  transport_connection_t *tconn;
2328  tcp_connection_t *tc;
2329  u8 is_filtered = 0;
2330  if (is_ip4)
2331    {
2332      ip4_header_t *ip4;
2333      ip4 = vlib_buffer_get_current (b);
2334      tcp = ip4_next_header (ip4);
2335      tconn = session_lookup_connection_wt4 (fib_index,
2336					     &ip4->dst_address,
2337					     &ip4->src_address,
2338					     tcp->dst_port,
2339					     tcp->src_port,
2340					     TRANSPORT_PROTO_TCP,
2341					     thread_index, &is_filtered);
2342      tc = tcp_get_connection_from_transport (tconn);
2343      ASSERT (tcp_lookup_is_valid (tc, b, tcp));
2344    }
2345  else
2346    {
2347      ip6_header_t *ip6;
2348      ip6 = vlib_buffer_get_current (b);
2349      tcp = ip6_next_header (ip6);
2350      tconn = session_lookup_connection_wt6 (fib_index,
2351					     &ip6->dst_address,
2352					     &ip6->src_address,
2353					     tcp->dst_port,
2354					     tcp->src_port,
2355					     TRANSPORT_PROTO_TCP,
2356					     thread_index, &is_filtered);
2357      tc = tcp_get_connection_from_transport (tconn);
2358      ASSERT (tcp_lookup_is_valid (tc, b, tcp));
2359    }
2360  return tc;
2361}
2362
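/**
 * Check if the egress interface towards the peer supports GSO and, if so,
 * enable TSO for the connection. Only the first load-balance bucket of the
 * FIB lookup result is considered.
 */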
2363always_inline void
2364tcp_check_tx_offload (tcp_connection_t * tc, int is_ipv4)
2365{
2366  vnet_main_t *vnm = vnet_get_main ();
2367  const dpo_id_t *dpo;
2368  const load_balance_t *lb;
2369  vnet_hw_interface_t *hw_if;
2370  u32 sw_if_idx, lb_idx;
2371
2372  if (is_ipv4)
2373    {
2374      ip4_address_t *dst_addr = &(tc->c_rmt_ip.ip4);
2375      lb_idx = ip4_fib_forwarding_lookup (tc->c_fib_index, dst_addr);
2376    }
2377  else
2378    {
2379      ip6_address_t *dst_addr = &(tc->c_rmt_ip.ip6);
2380      lb_idx = ip6_fib_table_fwding_lookup (tc->c_fib_index, dst_addr);
2381    }
2382
2383  lb = load_balance_get (lb_idx);
2384  dpo = load_balance_get_bucket_i (lb, 0);
2385
2386  sw_if_idx = dpo->dpoi_index;
2387  hw_if = vnet_get_sup_hw_interface (vnm, sw_if_idx);
2388
2389  if (hw_if->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO)
2390    tc->cfg_flags |= TCP_CFG_F_TSO;
2391}
2392
2393always_inline uword
2394tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
2395		       vlib_frame_t * from_frame, int is_ip4)
2396{
2397  u32 n_left_from, *from, *first_buffer, errors = 0;
2398  u32 my_thread_index = vm->thread_index;
2399  tcp_worker_ctx_t *wrk = tcp_get_worker (my_thread_index);
2400
2401  from = first_buffer = vlib_frame_vector_args (from_frame);
2402  n_left_from = from_frame->n_vectors;
2403
2404  while (n_left_from > 0)
2405    {
2406      u32 bi0, ack0, seq0, error0 = TCP_ERROR_NONE;
2407      tcp_connection_t *tc0, *new_tc0;
2408      tcp_header_t *tcp0 = 0;
2409      tcp_rx_trace_t *t0;
2410      vlib_buffer_t *b0;
2411
2412      bi0 = from[0];
2413      from += 1;
2414      n_left_from -= 1;
2415
2416      b0 = vlib_get_buffer (vm, bi0);
2417      tc0 =
2418	tcp_half_open_connection_get (vnet_buffer (b0)->tcp.connection_index);
2419      if (PREDICT_FALSE (tc0 == 0))
2420	{
2421	  error0 = TCP_ERROR_INVALID_CONNECTION;
2422	  goto drop;
2423	}
2424
2425      /* Half-open completed recently but the connection wasn't removed
2426       * yet by the owning thread */
2427      if (PREDICT_FALSE (tc0->flags & TCP_CONN_HALF_OPEN_DONE))
2428	{
2429	  /* Make sure the connection actually exists */
2430	  ASSERT (tcp_lookup_connection (tc0->c_fib_index, b0,
2431					 my_thread_index, is_ip4));
2432	  error0 = TCP_ERROR_SPURIOUS_SYN_ACK;
2433	  goto drop;
2434	}
2435
2436      ack0 = vnet_buffer (b0)->tcp.ack_number;
2437      seq0 = vnet_buffer (b0)->tcp.seq_number;
2438      tcp0 = tcp_buffer_hdr (b0);
2439
2440      /* Crude check to see if the connection handle matches the packet.
2441       * If not, the connection probably just switched to established */
2442      if (PREDICT_FALSE (tcp0->dst_port != tc0->c_lcl_port
2443			 || tcp0->src_port != tc0->c_rmt_port))
2444	{
2445	  error0 = TCP_ERROR_INVALID_CONNECTION;
2446	  goto drop;
2447	}
2448
2449      if (PREDICT_FALSE (!tcp_ack (tcp0) && !tcp_rst (tcp0)
2450			 && !tcp_syn (tcp0)))
2451	{
2452	  error0 = TCP_ERROR_SEGMENT_INVALID;
2453	  goto drop;
2454	}
2455
2456      /* SYNs consume sequence numbers */
2457      vnet_buffer (b0)->tcp.seq_end += tcp_is_syn (tcp0);
2458
2459      /*
2460       *  1. check the ACK bit
2461       */
2462
2463      /*
2464       *   If the ACK bit is set
2465       *     If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send a reset (unless
2466       *     the RST bit is set, if so drop the segment and return)
2467       *       <SEQ=SEG.ACK><CTL=RST>
2468       *     and discard the segment.  Return.
2469       *     If SND.UNA =< SEG.ACK =< SND.NXT then the ACK is acceptable.
2470       */
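      /* Example: with iss = 1000, the SYN consumed sequence number 1000,
       * so snd_nxt = 1001 and the only acceptable ACK here is 1001. */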
2471      if (tcp_ack (tcp0))
2472	{
2473	  if (seq_leq (ack0, tc0->iss) || seq_gt (ack0, tc0->snd_nxt))
2474	    {
2475	      if (!tcp_rst (tcp0))
2476		tcp_send_reset_w_pkt (tc0, b0, my_thread_index, is_ip4);
2477	      error0 = TCP_ERROR_RCV_WND;
2478	      goto drop;
2479	    }
2480
2481	  /* Make sure ACK is valid */
2482	  if (seq_gt (tc0->snd_una, ack0))
2483	    {
2484	      error0 = TCP_ERROR_ACK_INVALID;
2485	      goto drop;
2486	    }
2487	}
2488
2489      /*
2490       * 2. check the RST bit
2491       */
2492
2493      if (tcp_rst (tcp0))
2494	{
2495	  /* If the ACK is acceptable, signal the client that the peer is
2496	   * not willing to accept the connection, then drop it */
2497	  if (tcp_ack (tcp0))
2498	    tcp_connection_reset (tc0);
2499	  error0 = TCP_ERROR_RST_RCVD;
2500	  goto drop;
2501	}
2502
2503      /*
2504       * 3. check the security and precedence (skipped)
2505       */
2506
2507      /*
2508       * 4. check the SYN bit
2509       */
2510
2511      /* No SYN flag. Drop. */
2512      if (!tcp_syn (tcp0))
2513	{
2514	  error0 = TCP_ERROR_SEGMENT_INVALID;
2515	  goto drop;
2516	}
2517
2518      /* Parse options */
2519      if (tcp_options_parse (tcp0, &tc0->rcv_opts, 1))
2520	{
2521	  error0 = TCP_ERROR_OPTIONS;
2522	  goto drop;
2523	}
2524
2525      /* Valid SYN or SYN-ACK. Move connection from half-open pool to
2526       * current thread pool. */
2527      new_tc0 = tcp_connection_alloc_w_base (my_thread_index, tc0);
2528      new_tc0->rcv_nxt = vnet_buffer (b0)->tcp.seq_end;
2529      new_tc0->irs = seq0;
2530      new_tc0->timers[TCP_TIMER_RETRANSMIT_SYN] = TCP_TIMER_HANDLE_INVALID;
2531      new_tc0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
2532
2533      /* If this is not the owning thread, flag the half-open so the
2534       * owning thread cleans it up when the syn retransmit expires */
2535      if (tcp_half_open_connection_cleanup (tc0))
2536	tc0->flags |= TCP_CONN_HALF_OPEN_DONE;
2537
2538      if (tcp_opts_tstamp (&new_tc0->rcv_opts))
2539	{
2540	  new_tc0->tsval_recent = new_tc0->rcv_opts.tsval;
2541	  new_tc0->tsval_recent_age = tcp_time_now ();
2542	}
2543
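      /* RFC 7323: window scaling is only in effect if both SYNs carried
       * the option, so stop scaling our receive window when the peer did
       * not advertise a window scale. */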
2544      if (tcp_opts_wscale (&new_tc0->rcv_opts))
2545	new_tc0->snd_wscale = new_tc0->rcv_opts.wscale;
2546      else
2547	new_tc0->rcv_wscale = 0;
2548
2549      new_tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window)
2550	<< new_tc0->snd_wscale;
2551      new_tc0->snd_wl1 = seq0;
2552      new_tc0->snd_wl2 = ack0;
2553
2554      tcp_connection_init_vars (new_tc0);
2555
2556      /* SYN-ACK: See if we can switch to ESTABLISHED state */
2557      if (PREDICT_TRUE (tcp_ack (tcp0)))
2558	{
2559	  /* Our SYN is ACKed: we have iss < ack = snd_una */
2560
2561	  /* TODO Dequeue acknowledged segments if we support Fast Open */
2562	  new_tc0->snd_una = ack0;
2563	  new_tc0->state = TCP_STATE_ESTABLISHED;
2564
2565	  /* Make sure las is initialized for the wnd computation */
2566	  new_tc0->rcv_las = new_tc0->rcv_nxt;
2567
2568	  /* Notify the app that we have a connection. If the session layer
2569	   * can't allocate a session, send a reset */
2570	  if (session_stream_connect_notify (&new_tc0->connection, 0))
2571	    {
2572	      tcp_send_reset_w_pkt (new_tc0, b0, my_thread_index, is_ip4);
2573	      tcp_connection_cleanup (new_tc0);
2574	      error0 = TCP_ERROR_CREATE_SESSION_FAIL;
2575	      goto drop;
2576	    }
2577
2578	  new_tc0->tx_fifo_size =
2579	    transport_tx_fifo_size (&new_tc0->connection);
2580	  /* Update rtt with the syn-ack sample */
2581	  tcp_estimate_initial_rtt (new_tc0);
2582	  TCP_EVT (TCP_EVT_SYNACK_RCVD, new_tc0);
2583	  error0 = TCP_ERROR_SYN_ACKS_RCVD;
2584	}
2585      /* SYN: Simultaneous open. Change state to SYN-RCVD and send SYN-ACK */
2586      else
2587	{
2588	  new_tc0->state = TCP_STATE_SYN_RCVD;
2589
2590	  /* Notify app that we have connection */
2591	  if (session_stream_connect_notify (&new_tc0->connection, 0))
2592	    {
2593	      tcp_connection_cleanup (new_tc0);
2594	      tcp_send_reset_w_pkt (tc0, b0, my_thread_index, is_ip4);
2595	      TCP_EVT (TCP_EVT_RST_SENT, tc0);
2596	      error0 = TCP_ERROR_CREATE_SESSION_FAIL;
2597	      goto drop;
2598	    }
2599
2600	  new_tc0->tx_fifo_size =
2601	    transport_tx_fifo_size (&new_tc0->connection);
2602	  new_tc0->rtt_ts = 0;
2603	  tcp_init_snd_vars (new_tc0);
2604	  tcp_send_synack (new_tc0);
2605	  error0 = TCP_ERROR_SYNS_RCVD;
2606	  goto drop;
2607	}
2608
2609      if (!(new_tc0->cfg_flags & TCP_CFG_F_NO_TSO))
2610	tcp_check_tx_offload (new_tc0, is_ip4);
2611
2612      /* Read data, if any */
2613      if (PREDICT_FALSE (vnet_buffer (b0)->tcp.data_len))
2614	{
2615	  clib_warning ("rcvd data in syn-sent");
2616	  error0 = tcp_segment_rcv (wrk, new_tc0, b0);
2617	  if (error0 == TCP_ERROR_ACK_OK)
2618	    error0 = TCP_ERROR_SYN_ACKS_RCVD;
2619	}
2620      else
2621	{
2622	  /* Send ack now instead of programming it because connection was
2623	   * just established and it's not optional. */
2624	  tcp_send_ack (new_tc0);
2625	}
2626
2627    drop:
2628
2629      tcp_inc_counter (syn_sent, error0, 1);
2630      if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED) && tcp0 != 0))
2631	{
2632	  t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
2633	  clib_memcpy_fast (&t0->tcp_header, tcp0, sizeof (t0->tcp_header));
2634	  clib_memcpy_fast (&t0->tcp_connection, tc0,
2635			    sizeof (t0->tcp_connection));
2636	}
2637    }
2638
2639  errors = session_main_flush_enqueue_events (TRANSPORT_PROTO_TCP,
2640					      my_thread_index);
2641  tcp_inc_counter (syn_sent, TCP_ERROR_MSG_QUEUE_FULL, errors);
2642  vlib_buffer_free (vm, first_buffer, from_frame->n_vectors);
2643
2644  return from_frame->n_vectors;
2645}
2646
2647VLIB_NODE_FN (tcp4_syn_sent_node) (vlib_main_t * vm,
2648				   vlib_node_runtime_t * node,
2649				   vlib_frame_t * from_frame)
2650{
2651  return tcp46_syn_sent_inline (vm, node, from_frame, 1 /* is_ip4 */ );
2652}
2653
2654VLIB_NODE_FN (tcp6_syn_sent_node) (vlib_main_t * vm,
2655				   vlib_node_runtime_t * node,
2656				   vlib_frame_t * from_frame)
2657{
2658  return tcp46_syn_sent_inline (vm, node, from_frame, 0 /* is_ip4 */ );
2659}
2660
2661/* *INDENT-OFF* */
2662VLIB_REGISTER_NODE (tcp4_syn_sent_node) =
2663{
2664  .name = "tcp4-syn-sent",
2665  /* Takes a vector of packets. */
2666  .vector_size = sizeof (u32),
2667  .n_errors = TCP_N_ERROR,
2668  .error_strings = tcp_error_strings,
2669  .n_next_nodes = TCP_SYN_SENT_N_NEXT,
2670  .next_nodes =
2671  {
2672#define _(s,n) [TCP_SYN_SENT_NEXT_##s] = n,
2673    foreach_tcp_state_next
2674#undef _
2675  },
2676  .format_trace = format_tcp_rx_trace_short,
2677};
2678/* *INDENT-ON* */
2679
2680/* *INDENT-OFF* */
2681VLIB_REGISTER_NODE (tcp6_syn_sent_node) =
2682{
2683  .name = "tcp6-syn-sent",
2684  /* Takes a vector of packets. */
2685  .vector_size = sizeof (u32),
2686  .n_errors = TCP_N_ERROR,
2687  .error_strings = tcp_error_strings,
2688  .n_next_nodes = TCP_SYN_SENT_N_NEXT,
2689  .next_nodes =
2690  {
2691#define _(s,n) [TCP_SYN_SENT_NEXT_##s] = n,
2692    foreach_tcp_state_next
2693#undef _
2694  },
2695  .format_trace = format_tcp_rx_trace_short,
2696};
2697/* *INDENT-ON* */
2698
2699/**
2700 * Handles reception for all states except LISTEN, SYN-SENT and ESTABLISHED
2701 * as per RFC793 p. 64
2702 */
2703always_inline uword
2704tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
2705			  vlib_frame_t * from_frame, int is_ip4)
2706{
2707  u32 thread_index = vm->thread_index, errors = 0, *first_buffer;
2708  tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
2709  u32 n_left_from, *from, max_dequeue;
2710
2711  from = first_buffer = vlib_frame_vector_args (from_frame);
2712  n_left_from = from_frame->n_vectors;
2713
2714  while (n_left_from > 0)
2715    {
2716      u32 bi0, error0 = TCP_ERROR_NONE;
2717      tcp_header_t *tcp0 = 0;
2718      tcp_connection_t *tc0;
2719      vlib_buffer_t *b0;
2720      u8 is_fin0;
2721
2722      bi0 = from[0];
2723      from += 1;
2724      n_left_from -= 1;
2725
2726      b0 = vlib_get_buffer (vm, bi0);
2727      tc0 = tcp_connection_get (vnet_buffer (b0)->tcp.connection_index,
2728				thread_index);
2729      if (PREDICT_FALSE (tc0 == 0))
2730	{
2731	  error0 = TCP_ERROR_INVALID_CONNECTION;
2732	  goto drop;
2733	}
2734
2735      tcp0 = tcp_buffer_hdr (b0);
2736      is_fin0 = tcp_is_fin (tcp0);
2737
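      /* Debug builds: cross-check that the lookup table still maps this
       * packet to a connection in the same state. */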
2738      if (CLIB_DEBUG)
2739	{
2740	  if (!(tc0->connection.flags & TRANSPORT_CONNECTION_F_NO_LOOKUP))
2741	    {
2742	      tcp_connection_t *tmp;
2743	      tmp = tcp_lookup_connection (tc0->c_fib_index, b0, thread_index,
2744					   is_ip4);
2745	      if (tmp->state != tc0->state)
2746		{
2747		  if (tc0->state != TCP_STATE_CLOSED)
2748		    clib_warning ("state changed");
2749		  goto drop;
2750		}
2751	    }
2752	}
2753
2754      /*
2755       * Special treatment for CLOSED
2756       */
2757      if (PREDICT_FALSE (tc0->state == TCP_STATE_CLOSED))
2758	{
2759	  error0 = TCP_ERROR_CONNECTION_CLOSED;
2760	  goto drop;
2761	}
2762
2763      /*
2764       * For all other states (except LISTEN)
2765       */
2766
2767      /* 1-4: check SEQ, RST, SYN */
2768      if (PREDICT_FALSE (tcp_segment_validate (wrk, tc0, b0, tcp0, &error0)))
2769	goto drop;
2770
2771      /* 5: check the ACK field  */
2772      switch (tc0->state)
2773	{
2774	case TCP_STATE_SYN_RCVD:
2775
2776	  /* Segment must start exactly at rcv_nxt and must not carry a FIN */
2777	  if (tc0->rcv_nxt != vnet_buffer (b0)->tcp.seq_number || is_fin0)
2778	    {
2779	      tcp_connection_reset (tc0);
2780	      error0 = TCP_ERROR_SEGMENT_INVALID;
2781	      goto drop;
2782	    }
2783
2784	  /*
2785	   * If the segment acknowledgment is not acceptable, form a
2786	   * reset segment,
2787	   *  <SEQ=SEG.ACK><CTL=RST>
2788	   * and send it.
2789	   */
2790	  if (tcp_rcv_ack_no_cc (tc0, b0, &error0))
2791	    {
2792	      tcp_connection_reset (tc0);
2793	      goto drop;
2794	    }
2795
2796	  /* Update rtt and rto */
2797	  tcp_estimate_initial_rtt (tc0);
2798	  tcp_connection_tx_pacer_update (tc0);
2799
2800	  /* Switch state to ESTABLISHED */
2801	  tc0->state = TCP_STATE_ESTABLISHED;
2802	  TCP_EVT (TCP_EVT_STATE_CHANGE, tc0);
2803
2804	  if (!(tc0->cfg_flags & TCP_CFG_F_NO_TSO))
2805	    tcp_check_tx_offload (tc0, is_ip4);
2806
2807	  /* Initialize session variables */
2808	  tc0->snd_una = vnet_buffer (b0)->tcp.ack_number;
2809	  tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window)
2810	    << tc0->rcv_opts.wscale;
2811	  tc0->snd_wl1 = vnet_buffer (b0)->tcp.seq_number;
2812	  tc0->snd_wl2 = vnet_buffer (b0)->tcp.ack_number;
2813
2814	  /* Reset SYN-ACK retransmit and SYN_RCV establish timers */
2815	  tcp_retransmit_timer_reset (tc0);
2816	  if (session_stream_accept_notify (&tc0->connection))
2817	    {
2818	      error0 = TCP_ERROR_MSG_QUEUE_FULL;
2819	      tcp_connection_reset (tc0);
2820	      goto drop;
2821	    }
2822	  error0 = TCP_ERROR_ACK_OK;
2823	  break;
2824	case TCP_STATE_ESTABLISHED:
2825	  /* We can get packets in established state here because they
2826	   * were enqueued before state change */
2827	  if (tcp_rcv_ack (wrk, tc0, b0, tcp0, &error0))
2828	    goto drop;
2829
2830	  break;
2831	case TCP_STATE_FIN_WAIT_1:
2832	  /* In addition to the processing for the ESTABLISHED state, if
2833	   * our FIN is now acknowledged then enter FIN-WAIT-2 and
2834	   * continue processing in that state. */
2835	  if (tcp_rcv_ack (wrk, tc0, b0, tcp0, &error0))
2836	    goto drop;
2837
2838	  /* Still have to send the FIN */
2839	  if (tc0->flags & TCP_CONN_FINPNDG)
2840	    {
2841	      /* TX fifo finally drained */
2842	      max_dequeue = transport_max_tx_dequeue (&tc0->connection);
2843	      if (max_dequeue <= tc0->burst_acked)
2844		tcp_send_fin (tc0);
2845	      /* If a FIN was received and data was acked, extend the wait */
2846	      else if ((tc0->flags & TCP_CONN_FINRCVD) && tc0->bytes_acked)
2847		tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE,
2848				  tcp_cfg.closewait_time);
2849	    }
2850	  /* If FIN is ACKed */
2851	  else if (tc0->snd_una == tc0->snd_nxt)
2852	    {
2853	      /* Stop all retransmit timers because we have nothing more
2854	       * to send. */
2855	      tcp_connection_timers_reset (tc0);
2856
2857	      /* We already have a FIN but didn't transition to CLOSING
2858	       * because of outstanding tx data. Close the connection. */
2859	      if (tc0->flags & TCP_CONN_FINRCVD)
2860		{
2861		  tcp_connection_set_state (tc0, TCP_STATE_CLOSED);
2862		  tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE,
2863				 tcp_cfg.cleanup_time);
2864		  session_transport_closed_notify (&tc0->connection);
2865		  goto drop;
2866		}
2867
2868	      tcp_connection_set_state (tc0, TCP_STATE_FIN_WAIT_2);
2869	      /* Enable waitclose because we're willing to wait for peer's
2870	       * FIN but not indefinitely. */
2871	      tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, tcp_cfg.finwait2_time);
2872
2873	      /* Don't try to dequeue the acked FIN */
2874	      if (tc0->burst_acked > 1)
2875		session_tx_fifo_dequeue_drop (&tc0->connection,
2876					      tc0->burst_acked - 1);
2877	      tc0->burst_acked = 0;
2878	    }
2879	  break;
2880	case TCP_STATE_FIN_WAIT_2:
2881	  /* In addition to the processing for the ESTABLISHED state, if
2882	   * the retransmission queue is empty, the user's CLOSE can be
2883	   * acknowledged ("ok") but do not delete the TCB. */
2884	  if (tcp_rcv_ack_no_cc (tc0, b0, &error0))
2885	    goto drop;
2886	  tc0->burst_acked = 0;
2887	  break;
2888	case TCP_STATE_CLOSE_WAIT:
2889	  /* Do the same processing as for the ESTABLISHED state. */
2890	  if (tcp_rcv_ack (wrk, tc0, b0, tcp0, &error0))
2891	    goto drop;
2892
2893	  if (!(tc0->flags & TCP_CONN_FINPNDG))
2894	    break;
2895
2896	  /* Still have outstanding tx data */
2897	  max_dequeue = transport_max_tx_dequeue (&tc0->connection);
2898	  if (max_dequeue > tc0->burst_acked)
2899	    break;
2900
2901	  tcp_send_fin (tc0);
2902	  tcp_connection_timers_reset (tc0);
2903	  tcp_connection_set_state (tc0, TCP_STATE_LAST_ACK);
2904	  tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, tcp_cfg.lastack_time);
2905	  break;
2906	case TCP_STATE_CLOSING:
2907	  /* In addition to the processing for the ESTABLISHED state, if
2908	   * the ACK acknowledges our FIN then enter the TIME-WAIT state,
2909	   * otherwise ignore the segment. */
2910	  if (tcp_rcv_ack_no_cc (tc0, b0, &error0))
2911	    goto drop;
2912
2913	  if (tc0->snd_una != tc0->snd_nxt)
2914	    goto drop;
2915
2916	  tcp_connection_timers_reset (tc0);
2917	  tcp_connection_set_state (tc0, TCP_STATE_TIME_WAIT);
2918	  tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, tcp_cfg.timewait_time);
2919	  session_transport_closed_notify (&tc0->connection);
2920	  goto drop;
2921
2922	  break;
2923	case TCP_STATE_LAST_ACK:
2924	  /* The only thing that [should] arrive in this state is an
2925	   * acknowledgment of our FIN. If our FIN is now acknowledged,
2926	   * delete the TCB, enter the CLOSED state, and return. */
2927
2928	  if (tcp_rcv_ack_no_cc (tc0, b0, &error0))
2929	    goto drop;
2930
2931	  /* Apparently our ACK for the peer's FIN was lost */
2932	  if (is_fin0 && tc0->snd_una != tc0->snd_nxt)
2933	    {
2934	      tcp_send_fin (tc0);
2935	      goto drop;
2936	    }
2937
2938	  tcp_connection_set_state (tc0, TCP_STATE_CLOSED);
2939	  session_transport_closed_notify (&tc0->connection);
2940
2941	  /* Don't free the connection from the data path since
2942	   * we can't ensure that we have no packets already enqueued
2943	   * to output. Rely instead on the waitclose timer */
2944	  tcp_connection_timers_reset (tc0);
2945	  tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, tcp_cfg.cleanup_time);
2946
2947	  goto drop;
2948
2949	  break;
2950	case TCP_STATE_TIME_WAIT:
2951	  /* The only thing that can arrive in this state is a
2952	   * retransmission of the remote FIN. Acknowledge it, and restart
2953	   * the 2 MSL timeout. */
2954
2955	  if (tcp_rcv_ack_no_cc (tc0, b0, &error0))
2956	    goto drop;
2957
2958	  if (!is_fin0)
2959	    goto drop;
2960
2961	  tcp_program_ack (tc0);
2962	  tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, tcp_cfg.timewait_time);
2963	  goto drop;
2964
2965	  break;
2966	default:
2967	  ASSERT (0);
2968	}
2969
2970      /* 6: check the URG bit TODO */
2971
2972      /* 7: process the segment text */
2973      switch (tc0->state)
2974	{
2975	case TCP_STATE_ESTABLISHED:
2976	case TCP_STATE_FIN_WAIT_1:
2977	case TCP_STATE_FIN_WAIT_2:
2978	  if (vnet_buffer (b0)->tcp.data_len)
2979	    error0 = tcp_segment_rcv (wrk, tc0, b0);
2980	  break;
2981	case TCP_STATE_CLOSE_WAIT:
2982	case TCP_STATE_CLOSING:
2983	case TCP_STATE_LAST_ACK:
2984	case TCP_STATE_TIME_WAIT:
2985	  /* This should not occur, since a FIN has been received from the
2986	   * remote side.  Ignore the segment text. */
2987	  break;
2988	}
2989
2990      /* 8: check the FIN bit */
2991      if (!is_fin0)
2992	goto drop;
2993
2994      TCP_EVT (TCP_EVT_FIN_RCVD, tc0);
2995
2996      switch (tc0->state)
2997	{
2998	case TCP_STATE_ESTABLISHED:
2999	  /* Account for the FIN and send ack */
3000	  tc0->rcv_nxt += 1;
3001	  tcp_program_ack (tc0);
3002	  tcp_connection_set_state (tc0, TCP_STATE_CLOSE_WAIT);
3003	  tcp_program_disconnect (wrk, tc0);
3004	  tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, tcp_cfg.closewait_time);
3005	  break;
3006	case TCP_STATE_SYN_RCVD:
3007	  /* Send FIN-ACK, enter LAST-ACK and because the app was not
3008	   * notified yet, set a cleanup timer instead of relying on
3009	   * disconnect notify and the implicit close call. */
3010	  tcp_connection_timers_reset (tc0);
3011	  tc0->rcv_nxt += 1;
3012	  tcp_send_fin (tc0);
3013	  tcp_connection_set_state (tc0, TCP_STATE_LAST_ACK);
3014	  tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, tcp_cfg.lastack_time);
3015	  break;
3016	case TCP_STATE_CLOSE_WAIT:
3017	case TCP_STATE_CLOSING:
3018	case TCP_STATE_LAST_ACK:
3019	  /* move along .. */
3020	  break;
3021	case TCP_STATE_FIN_WAIT_1:
3022	  tc0->rcv_nxt += 1;
3023
3024	  if (tc0->flags & TCP_CONN_FINPNDG)
3025	    {
3026	      /* If data is outstanding, stay in FIN_WAIT_1 and try to finish
3027	       * sending it. Since we already received a fin, do not wait
3028	       * for too long. */
3029	      tc0->flags |= TCP_CONN_FINRCVD;
3030	      tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE,
3031				tcp_cfg.closewait_time);
3032	    }
3033	  else
3034	    {
3035	      tcp_connection_set_state (tc0, TCP_STATE_CLOSING);
3036	      tcp_program_ack (tc0);
3037	      /* Wait for ACK for our FIN but not forever */
3038	      tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE,
3039				tcp_cfg.closing_time);
3040	    }
3041	  break;
3042	case TCP_STATE_FIN_WAIT_2:
3043	  /* Got FIN, send ACK! Be more aggressive with resource cleanup */
3044	  tc0->rcv_nxt += 1;
3045	  tcp_connection_set_state (tc0, TCP_STATE_TIME_WAIT);
3046	  tcp_connection_timers_reset (tc0);
3047	  tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, tcp_cfg.timewait_time);
3048	  tcp_program_ack (tc0);
3049	  session_transport_closed_notify (&tc0->connection);
3050	  break;
3051	case TCP_STATE_TIME_WAIT:
3052	  /* Remain in the TIME-WAIT state. Restart the time-wait
3053	   * timeout.
3054	   */
3055	  tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, tcp_cfg.timewait_time);
3056	  break;
3057	}
3058      error0 = TCP_ERROR_FIN_RCVD;
3059
3060    drop:
3061
3062      tcp_inc_counter (rcv_process, error0, 1);
3063      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
3064	{
3065	  tcp_rx_trace_t *t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
3066	  tcp_set_rx_trace_data (t0, tc0, tcp0, b0, is_ip4);
3067	}
3068    }
3069
3070  errors = session_main_flush_enqueue_events (TRANSPORT_PROTO_TCP,
3071					      thread_index);
3072  tcp_inc_counter (rcv_process, TCP_ERROR_MSG_QUEUE_FULL, errors);
3073  tcp_handle_postponed_dequeues (wrk);
3074  tcp_handle_disconnects (wrk);
3075  vlib_buffer_free (vm, first_buffer, from_frame->n_vectors);
3076
3077  return from_frame->n_vectors;
3078}
3079
3080VLIB_NODE_FN (tcp4_rcv_process_node) (vlib_main_t * vm,
3081				      vlib_node_runtime_t * node,
3082				      vlib_frame_t * from_frame)
3083{
3084  return tcp46_rcv_process_inline (vm, node, from_frame, 1 /* is_ip4 */ );
3085}
3086
3087VLIB_NODE_FN (tcp6_rcv_process_node) (vlib_main_t * vm,
3088				      vlib_node_runtime_t * node,
3089				      vlib_frame_t * from_frame)
3090{
3091  return tcp46_rcv_process_inline (vm, node, from_frame, 0 /* is_ip4 */ );
3092}
3093
3094/* *INDENT-OFF* */
3095VLIB_REGISTER_NODE (tcp4_rcv_process_node) =
3096{
3097  .name = "tcp4-rcv-process",
3098  /* Takes a vector of packets. */
3099  .vector_size = sizeof (u32),
3100  .n_errors = TCP_N_ERROR,
3101  .error_strings = tcp_error_strings,
3102  .n_next_nodes = TCP_RCV_PROCESS_N_NEXT,
3103  .next_nodes =
3104  {
3105#define _(s,n) [TCP_RCV_PROCESS_NEXT_##s] = n,
3106    foreach_tcp_state_next
3107#undef _
3108  },
3109  .format_trace = format_tcp_rx_trace_short,
3110};
3111/* *INDENT-ON* */
3112
3113/* *INDENT-OFF* */
3114VLIB_REGISTER_NODE (tcp6_rcv_process_node) =
3115{
3116  .name = "tcp6-rcv-process",
3117  /* Takes a vector of packets. */
3118  .vector_size = sizeof (u32),
3119  .n_errors = TCP_N_ERROR,
3120  .error_strings = tcp_error_strings,
3121  .n_next_nodes = TCP_RCV_PROCESS_N_NEXT,
3122  .next_nodes =
3123  {
3124#define _(s,n) [TCP_RCV_PROCESS_NEXT_##s] = n,
3125    foreach_tcp_state_next
3126#undef _
3127  },
3128  .format_trace = format_tcp_rx_trace_short,
3129};
3130/* *INDENT-ON* */
3131
3132/**
3133 * LISTEN state processing as per RFC 793 p. 65
3134 */
3135always_inline uword
3136tcp46_listen_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
3137		     vlib_frame_t * from_frame, int is_ip4)
3138{
3139  u32 n_left_from, *from, n_syns = 0, *first_buffer;
3140  u32 my_thread_index = vm->thread_index;
3141
3142  from = first_buffer = vlib_frame_vector_args (from_frame);
3143  n_left_from = from_frame->n_vectors;
3144
3145  while (n_left_from > 0)
3146    {
3147      u32 bi0;
3148      vlib_buffer_t *b0;
3149      tcp_rx_trace_t *t0;
3150      tcp_header_t *th0 = 0;
3151      tcp_connection_t *lc0;
3152      ip4_header_t *ip40;
3153      ip6_header_t *ip60;
3154      tcp_connection_t *child0;
3155      u32 error0 = TCP_ERROR_NONE;
3156
3157      bi0 = from[0];
3158      from += 1;
3159      n_left_from -= 1;
3160
3161      b0 = vlib_get_buffer (vm, bi0);
3162      lc0 = tcp_listener_get (vnet_buffer (b0)->tcp.connection_index);
3163
3164      if (is_ip4)
3165	{
3166	  ip40 = vlib_buffer_get_current (b0);
3167	  th0 = ip4_next_header (ip40);
3168	}
3169      else
3170	{
3171	  ip60 = vlib_buffer_get_current (b0);
3172	  th0 = ip6_next_header (ip60);
3173	}
3174
3175      /* Create child session. For SYN-flood protection, use the filter */
3176
3177      /* 1. first check for an RST: handled in dispatch */
3178      /* if (tcp_rst (th0))
3179         goto drop;
3180       */
3181
3182      /* 2. second check for an ACK: handled in dispatch */
3183      /* if (tcp_ack (th0))
3184         {
3185         tcp_send_reset (b0, is_ip4);
3186         goto drop;
3187         }
3188       */
3189
3190      /* 3. check for a SYN (did that already) */
3191
3192      /* Make sure connection wasn't just created */
3193      child0 = tcp_lookup_connection (lc0->c_fib_index, b0, my_thread_index,
3194				      is_ip4);
3195      if (PREDICT_FALSE (child0->state != TCP_STATE_LISTEN))
3196	{
3197	  error0 = TCP_ERROR_CREATE_EXISTS;
3198	  goto drop;
3199	}
3200
3201      /* Create child session and send SYN-ACK */
3202      child0 = tcp_connection_alloc (my_thread_index);
3203      child0->c_lcl_port = th0->dst_port;
3204      child0->c_rmt_port = th0->src_port;
3205      child0->c_is_ip4 = is_ip4;
3206      child0->state = TCP_STATE_SYN_RCVD;
3207      child0->c_fib_index = lc0->c_fib_index;
3208      child0->cc_algo = lc0->cc_algo;
3209
3210      if (is_ip4)
3211	{
3212	  child0->c_lcl_ip4.as_u32 = ip40->dst_address.as_u32;
3213	  child0->c_rmt_ip4.as_u32 = ip40->src_address.as_u32;
3214	}
3215      else
3216	{
3217	  clib_memcpy_fast (&child0->c_lcl_ip6, &ip60->dst_address,
3218			    sizeof (ip6_address_t));
3219	  clib_memcpy_fast (&child0->c_rmt_ip6, &ip60->src_address,
3220			    sizeof (ip6_address_t));
3221	}
3222
3223      if (tcp_options_parse (th0, &child0->rcv_opts, 1))
3224	{
3225	  error0 = TCP_ERROR_OPTIONS;
3226	  tcp_connection_free (child0);
3227	  goto drop;
3228	}
3229
3230      child0->irs = vnet_buffer (b0)->tcp.seq_number;
3231      child0->rcv_nxt = vnet_buffer (b0)->tcp.seq_number + 1;
3232      child0->rcv_las = child0->rcv_nxt;
3233      child0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
3234
3235      /* RFC1323: TSval timestamps sent on {SYN} and {SYN,ACK}
3236       * segments are used to initialize PAWS. */
3237      if (tcp_opts_tstamp (&child0->rcv_opts))
3238	{
3239	  child0->tsval_recent = child0->rcv_opts.tsval;
3240	  child0->tsval_recent_age = tcp_time_now ();
3241	}
3242
3243      if (tcp_opts_wscale (&child0->rcv_opts))
3244	child0->snd_wscale = child0->rcv_opts.wscale;
3245
3246      child0->snd_wnd = clib_net_to_host_u16 (th0->window)
3247	<< child0->snd_wscale;
3248      child0->snd_wl1 = vnet_buffer (b0)->tcp.seq_number;
3249      child0->snd_wl2 = vnet_buffer (b0)->tcp.ack_number;
3250
3251      tcp_connection_init_vars (child0);
3252      child0->rto = TCP_RTO_MIN;
3253
3254      if (session_stream_accept (&child0->connection, lc0->c_s_index,
3255				 lc0->c_thread_index, 0 /* notify */ ))
3256	{
3257	  tcp_connection_cleanup (child0);
3258	  error0 = TCP_ERROR_CREATE_SESSION_FAIL;
3259	  goto drop;
3260	}
3261
3262      TCP_EVT (TCP_EVT_SYN_RCVD, child0, 1);
3263      child0->tx_fifo_size = transport_tx_fifo_size (&child0->connection);
3264      tcp_send_synack (child0);
3265
3266    drop:
3267
3268      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
3269	{
3270	  t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
3271	  clib_memcpy_fast (&t0->tcp_header, th0, sizeof (t0->tcp_header));
3272	  clib_memcpy_fast (&t0->tcp_connection, lc0,
3273			    sizeof (t0->tcp_connection));
3274	}
3275
3276      n_syns += (error0 == TCP_ERROR_NONE);
3277    }
3278
3279  tcp_inc_counter (listen, TCP_ERROR_SYNS_RCVD, n_syns);
3280  vlib_buffer_free (vm, first_buffer, from_frame->n_vectors);
3281
3282  return from_frame->n_vectors;
3283}
3284
3285VLIB_NODE_FN (tcp4_listen_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
3286				 vlib_frame_t * from_frame)
3287{
3288  return tcp46_listen_inline (vm, node, from_frame, 1 /* is_ip4 */ );
3289}
3290
3291VLIB_NODE_FN (tcp6_listen_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
3292				 vlib_frame_t * from_frame)
3293{
3294  return tcp46_listen_inline (vm, node, from_frame, 0 /* is_ip4 */ );
3295}
3296
3297/* *INDENT-OFF* */
3298VLIB_REGISTER_NODE (tcp4_listen_node) =
3299{
3300  .name = "tcp4-listen",
3301  /* Takes a vector of packets. */
3302  .vector_size = sizeof (u32),
3303  .n_errors = TCP_N_ERROR,
3304  .error_strings = tcp_error_strings,
3305  .n_next_nodes = TCP_LISTEN_N_NEXT,
3306  .next_nodes =
3307  {
3308#define _(s,n) [TCP_LISTEN_NEXT_##s] = n,
3309    foreach_tcp_state_next
3310#undef _
3311  },
3312  .format_trace = format_tcp_rx_trace_short,
3313};
3314/* *INDENT-ON* */
3315
3316/* *INDENT-OFF* */
3317VLIB_REGISTER_NODE (tcp6_listen_node) =
3318{
3319  .name = "tcp6-listen",
3320  /* Takes a vector of packets. */
3321  .vector_size = sizeof (u32),
3322  .n_errors = TCP_N_ERROR,
3323  .error_strings = tcp_error_strings,
3324  .n_next_nodes = TCP_LISTEN_N_NEXT,
3325  .next_nodes =
3326  {
3327#define _(s,n) [TCP_LISTEN_NEXT_##s] = n,
3328    foreach_tcp_state_next
3329#undef _
3330  },
3331  .format_trace = format_tcp_rx_trace_short,
3332};
3333/* *INDENT-ON* */
3334
3335typedef enum _tcp_input_next
3336{
3337  TCP_INPUT_NEXT_DROP,
3338  TCP_INPUT_NEXT_LISTEN,
3339  TCP_INPUT_NEXT_RCV_PROCESS,
3340  TCP_INPUT_NEXT_SYN_SENT,
3341  TCP_INPUT_NEXT_ESTABLISHED,
3342  TCP_INPUT_NEXT_RESET,
3343  TCP_INPUT_NEXT_PUNT,
3344  TCP_INPUT_N_NEXT
3345} tcp_input_next_t;
3346
3347#define foreach_tcp4_input_next                 \
3348  _ (DROP, "ip4-drop")                          \
3349  _ (LISTEN, "tcp4-listen")                     \
3350  _ (RCV_PROCESS, "tcp4-rcv-process")           \
3351  _ (SYN_SENT, "tcp4-syn-sent")                 \
3352  _ (ESTABLISHED, "tcp4-established")		\
3353  _ (RESET, "tcp4-reset")			\
3354  _ (PUNT, "ip4-punt")
3355
3356#define foreach_tcp6_input_next                 \
3357  _ (DROP, "ip6-drop")                          \
3358  _ (LISTEN, "tcp6-listen")                     \
3359  _ (RCV_PROCESS, "tcp6-rcv-process")           \
3360  _ (SYN_SENT, "tcp6-syn-sent")                 \
3361  _ (ESTABLISHED, "tcp6-established")		\
3362  _ (RESET, "tcp6-reset")			\
3363  _ (PUNT, "ip6-punt")
3364
3365#define filter_flags (TCP_FLAG_SYN|TCP_FLAG_ACK|TCP_FLAG_RST|TCP_FLAG_FIN)
3366
3367static void
3368tcp_input_trace_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
3369		       vlib_buffer_t ** bs, u32 n_bufs, u8 is_ip4)
3370{
3371  tcp_connection_t *tc;
3372  tcp_header_t *tcp;
3373  tcp_rx_trace_t *t;
3374  int i;
3375
3376  for (i = 0; i < n_bufs; i++)
3377    {
3378      if (bs[i]->flags & VLIB_BUFFER_IS_TRACED)
3379	{
3380	  t = vlib_add_trace (vm, node, bs[i], sizeof (*t));
3381	  tc = tcp_connection_get (vnet_buffer (bs[i])->tcp.connection_index,
3382				   vm->thread_index);
3383	  tcp = vlib_buffer_get_current (bs[i]);
3384	  tcp_set_rx_trace_data (t, tc, tcp, bs[i], is_ip4);
3385	}
3386    }
3387}
3388
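/**
 * Pick next node and error for buffers with no matching connection: drop
 * if filtered or on the wrong thread, punt if configured for the address
 * family, otherwise send a reset.
 */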
3389static void
3390tcp_input_set_error_next (tcp_main_t * tm, u16 * next, u32 * error, u8 is_ip4)
3391{
3392  if (*error == TCP_ERROR_FILTERED || *error == TCP_ERROR_WRONG_THREAD)
3393    {
3394      *next = TCP_INPUT_NEXT_DROP;
3395    }
3396  else if ((is_ip4 && tm->punt_unknown4) || (!is_ip4 && tm->punt_unknown6))
3397    {
3398      *next = TCP_INPUT_NEXT_PUNT;
3399      *error = TCP_ERROR_PUNT;
3400    }
3401  else
3402    {
3403      *next = TCP_INPUT_NEXT_RESET;
3404      *error = TCP_ERROR_NO_LISTENER;
3405    }
3406}
3407
3408always_inline tcp_connection_t *
3409tcp_input_lookup_buffer (vlib_buffer_t * b, u8 thread_index, u32 * error,
3410			 u8 is_ip4, u8 is_nolookup)
3411{
3412  u32 fib_index = vnet_buffer (b)->ip.fib_index;
3413  int n_advance_bytes, n_data_bytes;
3414  transport_connection_t *tc;
3415  tcp_header_t *tcp;
3416  u8 result = 0;
3417
3418  if (is_ip4)
3419    {
3420      ip4_header_t *ip4 = vlib_buffer_get_current (b);
3421      int ip_hdr_bytes = ip4_header_bytes (ip4);
3422      if (PREDICT_FALSE (b->current_length < ip_hdr_bytes + sizeof (*tcp)))
3423	{
3424	  *error = TCP_ERROR_LENGTH;
3425	  return 0;
3426	}
3427      tcp = ip4_next_header (ip4);
3428      vnet_buffer (b)->tcp.hdr_offset = (u8 *) tcp - (u8 *) ip4;
3429      n_advance_bytes = (ip_hdr_bytes + tcp_header_bytes (tcp));
3430      n_data_bytes = clib_net_to_host_u16 (ip4->length) - n_advance_bytes;
3431
3432      /* Length check. Checksum was already computed by ipx-local; no need to redo it */
3433      if (PREDICT_FALSE (n_data_bytes < 0))
3434	{
3435	  *error = TCP_ERROR_LENGTH;
3436	  return 0;
3437	}
3438
3439      if (!is_nolookup)
3440	tc = session_lookup_connection_wt4 (fib_index, &ip4->dst_address,
3441					    &ip4->src_address, tcp->dst_port,
3442					    tcp->src_port,
3443					    TRANSPORT_PROTO_TCP, thread_index,
3444					    &result);
3445    }
3446  else
3447    {
3448      ip6_header_t *ip6 = vlib_buffer_get_current (b);
3449      if (PREDICT_FALSE (b->current_length < sizeof (*ip6) + sizeof (*tcp)))
3450	{
3451	  *error = TCP_ERROR_LENGTH;
3452	  return 0;
3453	}
3454      tcp = ip6_next_header (ip6);
3455      vnet_buffer (b)->tcp.hdr_offset = (u8 *) tcp - (u8 *) ip6;
3456      n_advance_bytes = tcp_header_bytes (tcp);
3457      n_data_bytes = clib_net_to_host_u16 (ip6->payload_length)
3458	- n_advance_bytes;
3459      n_advance_bytes += sizeof (ip6[0]);
3460
3461      if (PREDICT_FALSE (n_data_bytes < 0))
3462	{
3463	  *error = TCP_ERROR_LENGTH;
3464	  return 0;
3465	}
3466
3467      if (!is_nolookup)
3468	{
3469	  if (PREDICT_FALSE
3470	      (ip6_address_is_link_local_unicast (&ip6->dst_address)))
3471	    {
3472	      ip4_main_t *im = &ip4_main;
3473	      fib_index = vec_elt (im->fib_index_by_sw_if_index,
3474				   vnet_buffer (b)->sw_if_index[VLIB_RX]);
3475	    }
3476
3477	  tc = session_lookup_connection_wt6 (fib_index, &ip6->dst_address,
3478					      &ip6->src_address,
3479					      tcp->dst_port, tcp->src_port,
3480					      TRANSPORT_PROTO_TCP,
3481					      thread_index, &result);
3482	}
3483    }
3484
3485  if (is_nolookup)
3486    tc =
3487      (transport_connection_t *) tcp_connection_get (vnet_buffer (b)->
3488						     tcp.connection_index,
3489						     thread_index);
3490
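  /* Stash the parsed header fields in the buffer metadata so the
   * per-state nodes do not have to re-parse the TCP header. */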
3491  vnet_buffer (b)->tcp.seq_number = clib_net_to_host_u32 (tcp->seq_number);
3492  vnet_buffer (b)->tcp.ack_number = clib_net_to_host_u32 (tcp->ack_number);
3493  vnet_buffer (b)->tcp.data_offset = n_advance_bytes;
3494  vnet_buffer (b)->tcp.data_len = n_data_bytes;
3495  vnet_buffer (b)->tcp.seq_end = vnet_buffer (b)->tcp.seq_number
3496    + n_data_bytes;
3497  vnet_buffer (b)->tcp.flags = 0;
3498
3499  *error = result ? TCP_ERROR_NONE + result : *error;
3500
3501  return tcp_get_connection_from_transport (tc);
3502}
3503
3504static inline void
3505tcp_input_dispatch_buffer (tcp_main_t * tm, tcp_connection_t * tc,
3506			   vlib_buffer_t * b, u16 * next, u32 * error)
3507{
3508  tcp_header_t *tcp;
3509  u8 flags;
3510
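  /* The dispatch table maps (connection state, flags) to a next node and
   * an error, with flags pre-masked to SYN/ACK/RST/FIN (filter_flags). */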
3511  tcp = tcp_buffer_hdr (b);
3512  flags = tcp->flags & filter_flags;
3513  *next = tm->dispatch_table[tc->state][flags].next;
3514  *error = tm->dispatch_table[tc->state][flags].error;
3515  tc->segs_in += 1;
3516
3517  if (PREDICT_FALSE (*error == TCP_ERROR_DISPATCH
3518		     || *next == TCP_INPUT_NEXT_RESET))
3519    {
3520      /* Overload tcp flags to store state */
3521      tcp_state_t state = tc->state;
3522      vnet_buffer (b)->tcp.flags = tc->state;
3523
3524      if (*error == TCP_ERROR_DISPATCH)
3525	clib_warning ("tcp conn %u disp error state %U flags %U",
3526		      tc->c_c_index, format_tcp_state, state,
3527		      format_tcp_flags, (int) flags);
3528    }
3529}
3530
3531always_inline uword
3532tcp46_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
3533		    vlib_frame_t * frame, int is_ip4, u8 is_nolookup)
3534{
3535  u32 n_left_from, *from, thread_index = vm->thread_index;
3536  tcp_main_t *tm = vnet_get_tcp_main ();
3537  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
3538  u16 nexts[VLIB_FRAME_SIZE], *next;
3539
3540  tcp_set_time_now (tcp_get_worker (thread_index));
3541
3542  from = vlib_frame_vector_args (frame);
3543  n_left_from = frame->n_vectors;
3544  vlib_get_buffers (vm, from, bufs, n_left_from);
3545
3546  b = bufs;
3547  next = nexts;
3548
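  /* Dual loop: handle two buffers per iteration and prefetch the next
   * two to hide memory latency. */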
3549  while (n_left_from >= 4)
3550    {
3551      u32 error0 = TCP_ERROR_NO_LISTENER, error1 = TCP_ERROR_NO_LISTENER;
3552      tcp_connection_t *tc0, *tc1;
3553
3554      {
3555	vlib_prefetch_buffer_header (b[2], STORE);
3556	CLIB_PREFETCH (b[2]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
3557
3558	vlib_prefetch_buffer_header (b[3], STORE);
3559	CLIB_PREFETCH (b[3]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
3560      }
3561
3562      next[0] = next[1] = TCP_INPUT_NEXT_DROP;
3563
3564      tc0 = tcp_input_lookup_buffer (b[0], thread_index, &error0, is_ip4,
3565				     is_nolookup);
3566      tc1 = tcp_input_lookup_buffer (b[1], thread_index, &error1, is_ip4,
3567				     is_nolookup);
3568
3569      if (PREDICT_TRUE (!tc0 + !tc1 == 0))
3570	{
3571	  ASSERT (tcp_lookup_is_valid (tc0, b[0], tcp_buffer_hdr (b[0])));
3572	  ASSERT (tcp_lookup_is_valid (tc1, b[1], tcp_buffer_hdr (b[1])));
3573
3574	  vnet_buffer (b[0])->tcp.connection_index = tc0->c_c_index;
3575	  vnet_buffer (b[1])->tcp.connection_index = tc1->c_c_index;
3576
3577	  tcp_input_dispatch_buffer (tm, tc0, b[0], &next[0], &error0);
3578	  tcp_input_dispatch_buffer (tm, tc1, b[1], &next[1], &error1);
3579	}
3580      else
3581	{
3582	  if (PREDICT_TRUE (tc0 != 0))
3583	    {
3584	      ASSERT (tcp_lookup_is_valid (tc0, b[0], tcp_buffer_hdr (b[0])));
3585	      vnet_buffer (b[0])->tcp.connection_index = tc0->c_c_index;
3586	      tcp_input_dispatch_buffer (tm, tc0, b[0], &next[0], &error0);
3587	    }
3588	  else
3589	    tcp_input_set_error_next (tm, &next[0], &error0, is_ip4);
3590
3591	  if (PREDICT_TRUE (tc1 != 0))
3592	    {
3593	      ASSERT (tcp_lookup_is_valid (tc1, b[1], tcp_buffer_hdr (b[1])));
3594	      vnet_buffer (b[1])->tcp.connection_index = tc1->c_c_index;
3595	      tcp_input_dispatch_buffer (tm, tc1, b[1], &next[1], &error1);
3596	    }
3597	  else
3598	    tcp_input_set_error_next (tm, &next[1], &error1, is_ip4);
3599	}
3600
3601      b += 2;
3602      next += 2;
3603      n_left_from -= 2;
3604    }
  while (n_left_from > 0)
    {
      tcp_connection_t *tc0;
      u32 error0 = TCP_ERROR_NO_LISTENER;

      if (n_left_from > 1)
	{
	  vlib_prefetch_buffer_header (b[1], STORE);
	  CLIB_PREFETCH (b[1]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
	}

      next[0] = TCP_INPUT_NEXT_DROP;
      tc0 = tcp_input_lookup_buffer (b[0], thread_index, &error0, is_ip4,
				     is_nolookup);
      if (PREDICT_TRUE (tc0 != 0))
	{
	  ASSERT (tcp_lookup_is_valid (tc0, b[0], tcp_buffer_hdr (b[0])));
	  vnet_buffer (b[0])->tcp.connection_index = tc0->c_c_index;
	  tcp_input_dispatch_buffer (tm, tc0, b[0], &next[0], &error0);
	}
      else
	tcp_input_set_error_next (tm, &next[0], &error0, is_ip4);

      b += 1;
      next += 1;
      n_left_from -= 1;
    }

  if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
    tcp_input_trace_frame (vm, node, bufs, frame->n_vectors, is_ip4);

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
  return frame->n_vectors;
}

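/* Note: the nolookup variants skip the connection lookup and instead
 * trust the connection index already stored in the buffer metadata. */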
VLIB_NODE_FN (tcp4_input_nolookup_node) (vlib_main_t * vm,
					 vlib_node_runtime_t * node,
					 vlib_frame_t * from_frame)
{
  return tcp46_input_inline (vm, node, from_frame, 1 /* is_ip4 */ ,
			     1 /* is_nolookup */ );
}

VLIB_NODE_FN (tcp6_input_nolookup_node) (vlib_main_t * vm,
					 vlib_node_runtime_t * node,
					 vlib_frame_t * from_frame)
{
  return tcp46_input_inline (vm, node, from_frame, 0 /* is_ip4 */ ,
			     1 /* is_nolookup */ );
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (tcp4_input_nolookup_node) =
{
  .name = "tcp4-input-nolookup",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_INPUT_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_INPUT_NEXT_##s] = n,
    foreach_tcp4_input_next
#undef _
  },
  .format_buffer = format_tcp_header,
  .format_trace = format_tcp_rx_trace,
};
/* *INDENT-ON* */

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (tcp6_input_nolookup_node) =
{
  .name = "tcp6-input-nolookup",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_INPUT_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_INPUT_NEXT_##s] = n,
    foreach_tcp6_input_next
#undef _
  },
  .format_buffer = format_tcp_header,
  .format_trace = format_tcp_rx_trace,
};
/* *INDENT-ON* */

VLIB_NODE_FN (tcp4_input_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
				vlib_frame_t * from_frame)
{
  return tcp46_input_inline (vm, node, from_frame, 1 /* is_ip4 */ ,
			     0 /* is_nolookup */ );
}

VLIB_NODE_FN (tcp6_input_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
				vlib_frame_t * from_frame)
{
  return tcp46_input_inline (vm, node, from_frame, 0 /* is_ip4 */ ,
			     0 /* is_nolookup */ );
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (tcp4_input_node) =
{
  .name = "tcp4-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_INPUT_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_INPUT_NEXT_##s] = n,
    foreach_tcp4_input_next
#undef _
  },
  .format_buffer = format_tcp_header,
  .format_trace = format_tcp_rx_trace,
};
/* *INDENT-ON* */

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (tcp6_input_node) =
{
  .name = "tcp6-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_INPUT_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_INPUT_NEXT_##s] = n,
    foreach_tcp6_input_next
#undef _
  },
  .format_buffer = format_tcp_header,
  .format_trace = format_tcp_rx_trace,
};
/* *INDENT-ON* */

#ifndef CLIB_MARCH_VARIANT
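/**
 * Build the (state, flags) -> (next node, error) dispatch table. Every
 * entry defaults to drop with TCP_ERROR_DISPATCH; the valid combinations
 * are then filled in explicitly below.
 */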
static void
tcp_dispatch_table_init (tcp_main_t * tm)
{
  int i, j;
  for (i = 0; i < ARRAY_LEN (tm->dispatch_table); i++)
    for (j = 0; j < ARRAY_LEN (tm->dispatch_table[i]); j++)
      {
	tm->dispatch_table[i][j].next = TCP_INPUT_NEXT_DROP;
	tm->dispatch_table[i][j].error = TCP_ERROR_DISPATCH;
      }

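  /* Shorthand: _ (state, flags, next-node, error) fills in one entry,
   * e.g. the LISTEN/SYN entry below sends SYNs to tcp-listen. */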
#define _(t,f,n,e)                                           	\
do {                                                       	\
    tm->dispatch_table[TCP_STATE_##t][f].next = (n);         	\
    tm->dispatch_table[TCP_STATE_##t][f].error = (e);        	\
} while (0)

  /* RFC 793: In LISTEN, drop if RST; if ACK, return RST */
  _(LISTEN, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
  _(LISTEN, TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET, TCP_ERROR_ACK_INVALID);
  _(LISTEN, TCP_FLAG_RST, TCP_INPUT_NEXT_DROP, TCP_ERROR_INVALID_CONNECTION);
  _(LISTEN, TCP_FLAG_SYN, TCP_INPUT_NEXT_LISTEN, TCP_ERROR_NONE);
  _(LISTEN, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET,
    TCP_ERROR_ACK_INVALID);
  _(LISTEN, TCP_FLAG_SYN | TCP_FLAG_RST, TCP_INPUT_NEXT_DROP,
    TCP_ERROR_SEGMENT_INVALID);
  _(LISTEN, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP,
    TCP_ERROR_SEGMENT_INVALID);
  _(LISTEN, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP,
    TCP_ERROR_INVALID_CONNECTION);
  _(LISTEN, TCP_FLAG_FIN, TCP_INPUT_NEXT_RESET, TCP_ERROR_SEGMENT_INVALID);
  _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET,
    TCP_ERROR_SEGMENT_INVALID);
  _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_RST, TCP_INPUT_NEXT_DROP,
    TCP_ERROR_SEGMENT_INVALID);
  _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP,
    TCP_ERROR_NONE);
  _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_SYN, TCP_INPUT_NEXT_DROP,
    TCP_ERROR_SEGMENT_INVALID);
  _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP,
    TCP_ERROR_SEGMENT_INVALID);
  _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST, TCP_INPUT_NEXT_DROP,
    TCP_ERROR_SEGMENT_INVALID);
  _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
  /* ACK for a SYN-ACK -> tcp-rcv-process. */
  _(SYN_RCVD, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_SYN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(SYN_RCVD, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
  /* SYN-ACK for a SYN */
  _(SYN_SENT, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT,
    TCP_ERROR_NONE);
  _(SYN_SENT, TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT, TCP_ERROR_NONE);
  _(SYN_SENT, TCP_FLAG_RST, TCP_INPUT_NEXT_SYN_SENT, TCP_ERROR_NONE);
  _(SYN_SENT, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT,
    TCP_ERROR_NONE);
  _(SYN_SENT, TCP_FLAG_FIN, TCP_INPUT_NEXT_SYN_SENT, TCP_ERROR_NONE);
  _(SYN_SENT, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT,
    TCP_ERROR_NONE);
  /* ACK for established connection -> tcp-established. */
  _(ESTABLISHED, TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  /* FIN for established connection -> tcp-established. */
  _(ESTABLISHED, TCP_FLAG_FIN, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED,
    TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_RST, TCP_INPUT_NEXT_ESTABLISHED,
    TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_SYN, TCP_INPUT_NEXT_ESTABLISHED,
    TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST,
    TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_RST, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED,
    TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_SYN, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED,
    TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_SYN | TCP_FLAG_RST, TCP_INPUT_NEXT_ESTABLISHED,
    TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  _(ESTABLISHED, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
  /* ACK or FIN-ACK to our FIN */
  _(FIN_WAIT_1, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_ACK | TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(FIN_WAIT_1, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
  /* FIN in reply to our FIN from the other side */
  _(FIN_WAIT_1, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_SYN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(CLOSING, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
  _(CLOSING, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_SYN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  /* FIN confirming that the peer (app) has closed */
  _(FIN_WAIT_2, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_2, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_2, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(FIN_WAIT_2, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_2, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(CLOSE_WAIT, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSE_WAIT, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(CLOSE_WAIT, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSE_WAIT, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(LAST_ACK, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
  _(LAST_ACK, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_SYN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(TIME_WAIT, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(TIME_WAIT, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(TIME_WAIT, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(TIME_WAIT, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(TIME_WAIT, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(TIME_WAIT, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  /* RFC793 CLOSED: An incoming segment containing a RST is discarded. An
   * incoming segment not containing a RST causes a RST to be sent in
   * response. */
  _(CLOSED, TCP_FLAG_RST, TCP_INPUT_NEXT_DROP, TCP_ERROR_CONNECTION_CLOSED);
  _(CLOSED, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP,
    TCP_ERROR_CONNECTION_CLOSED);
  _(CLOSED, TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET, TCP_ERROR_NONE);
  _(CLOSED, TCP_FLAG_SYN, TCP_INPUT_NEXT_RESET, TCP_ERROR_NONE);
  _(CLOSED, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET,
    TCP_ERROR_NONE);
#undef _
}

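/* Runs at startup, after tcp_init, to build the dispatch table once. */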
static clib_error_t *
tcp_input_init (vlib_main_t * vm)
{
  clib_error_t *error = 0;
  tcp_main_t *tm = vnet_get_tcp_main ();

  if ((error = vlib_call_init_function (vm, tcp_init)))
    return error;

  /* Initialize dispatch table. */
  tcp_dispatch_table_init (tm);

  return error;
}

VLIB_INIT_FUNCTION (tcp_input_init);

#endif /* CLIB_MARCH_VARIANT */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */