/*---------------------------------------------------------------------------
 * Copyright (c) 2009-2014 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *---------------------------------------------------------------------------
 */
/*
 * IPv4 and IPv6 fragmentation nodes
 */

#include "ip_frag.h"

#include <vnet/ip/ip.h>

typedef struct
{
  u8 ipv6;
  u16 mtu;
  u8 next;
  u16 n_fragments;
} ip_frag_trace_t;

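/*
 * Trace formatter; a trace line renders as, e.g.:
 *   IPv4 mtu: 1500 fragments: 2 next: 3
 */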
static u8 *
format_ip_frag_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  ip_frag_trace_t *t = va_arg (*args, ip_frag_trace_t *);
  s = format (s, "IPv%s mtu: %u fragments: %u next: %d",
	      t->ipv6 ? "6" : "4", t->mtu, t->n_fragments, t->next);
  return s;
}

static u32 running_fragment_id;
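/* Note: a single, non-atomic global counter; fragment ids are therefore
 * predictable and may repeat across worker threads. */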

static void
frag_set_sw_if_index (vlib_buffer_t * to, vlib_buffer_t * from)
{
  vnet_buffer (to)->sw_if_index[VLIB_RX] =
    vnet_buffer (from)->sw_if_index[VLIB_RX];
  vnet_buffer (to)->sw_if_index[VLIB_TX] =
    vnet_buffer (from)->sw_if_index[VLIB_TX];

  /* Copy the adjacency indices so that when a DPO-based node hands a
   * packet to fragmentation, the fragments are sent back to the proper
   * DPO next node and index.
   */
  vnet_buffer (to)->ip.adj_index[VLIB_RX] =
    vnet_buffer (from)->ip.adj_index[VLIB_RX];
  vnet_buffer (to)->ip.adj_index[VLIB_TX] =
    vnet_buffer (from)->ip.adj_index[VLIB_TX];

  /* Copy QoS bits */
  if (PREDICT_TRUE (from->flags & VNET_BUFFER_F_QOS_DATA_VALID))
    {
      vnet_buffer2 (to)->qos = vnet_buffer2 (from)->qos;
      to->flags |= VNET_BUFFER_F_QOS_DATA_VALID;
    }
}

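/*
 * Allocate a single buffer for a fragment, inheriting the trace
 * trajectory and trace flag from the original buffer.  Returns NULL if
 * the buffer pool is exhausted.
 */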
static vlib_buffer_t *
frag_buffer_alloc (vlib_buffer_t * org_b, u32 * bi)
{
  vlib_main_t *vm = vlib_get_main ();
  if (vlib_buffer_alloc (vm, bi, 1) != 1)
    return 0;

  vlib_buffer_t *b = vlib_get_buffer (vm, *bi);
  VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b);
  vlib_buffer_copy_trace_flag (vm, org_b, *bi);

  return b;
}

/*
 * Limitation: follows buffer chains in the packet to fragment, but does
 * not generate buffer chains; i.e. each fragment is always contained
 * within a single buffer and limited to the maximum buffer size.
 * from_bi: the buffer's current pointer must point to the IPv4 header.
 */
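
/*
 * Worked example (assuming the default buffer data size is at least the
 * MTU): a 2020-byte IPv4 packet (20-byte header + 2000-byte payload)
 * fragmented to an MTU of 1500 gives
 *   max = (1500 - 20) & ~7 = 1480
 *   fragment 1: offset 0, 1480 payload bytes, MF set
 *   fragment 2: offset 185 (= 1480 / 8), 520 payload bytes, MF clear
 * and each fragment carries its own copy of the IPv4 header.
 */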
ip_frag_error_t
ip4_frag_do_fragment (vlib_main_t * vm, u32 from_bi, u16 mtu,
		      u16 l2unfragmentablesize, u32 ** buffer)
{
  vlib_buffer_t *from_b;
  ip4_header_t *ip4;
  u16 len, max, rem, ip_frag_id, ip_frag_offset;
  u8 *org_from_packet, more;

  from_b = vlib_get_buffer (vm, from_bi);
  org_from_packet = vlib_buffer_get_current (from_b);
  ip4 = vlib_buffer_get_current (from_b) + l2unfragmentablesize;

  rem = clib_net_to_host_u16 (ip4->length) - sizeof (ip4_header_t);
  max =
    (clib_min (mtu, vlib_buffer_get_default_data_size (vm)) -
     sizeof (ip4_header_t)) & ~0x7;

  if (rem >
      (vlib_buffer_length_in_chain (vm, from_b) - sizeof (ip4_header_t)))
    {
      return IP_FRAG_ERROR_MALFORMED;
    }

  if (mtu < sizeof (ip4_header_t))
    {
      return IP_FRAG_ERROR_CANT_FRAGMENT_HEADER;
    }

  if (ip4->flags_and_fragment_offset &
      clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT))
    {
      return IP_FRAG_ERROR_DONT_FRAGMENT_SET;
    }

  if (ip4_is_fragment (ip4))
    {
      ip_frag_id = ip4->fragment_id;
      ip_frag_offset = ip4_get_fragment_offset (ip4);
      more = !! (ip4->flags_and_fragment_offset &
		 clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS));
    }
  else
    {
      ip_frag_id = (++running_fragment_id);
      ip_frag_offset = 0;
      more = 0;
    }

  u8 *from_data = (void *) (ip4 + 1);
  vlib_buffer_t *org_from_b = from_b;
  u16 fo = 0;
  u16 left_in_from_buffer =
    from_b->current_length - (l2unfragmentablesize + sizeof (ip4_header_t));
  u16 ptr = 0;

  /* Do the actual fragmentation */
  while (rem)
    {
      u32 to_bi;
      vlib_buffer_t *to_b;
      ip4_header_t *to_ip4;
      u8 *to_data;

      len = (rem > max ? max : rem);
      if (len != rem)		/* The last fragment need not be divisible by 8 */
	len &= ~0x7;
      if ((to_b = frag_buffer_alloc (org_from_b, &to_bi)) == 0)
	{
	  return IP_FRAG_ERROR_MEMORY;
	}
      vec_add1 (*buffer, to_bi);
      frag_set_sw_if_index (to_b, org_from_b);

      /* Copy the ip4 header */
      to_data = vlib_buffer_get_current (to_b);
      clib_memcpy_fast (to_data, org_from_packet,
			l2unfragmentablesize + sizeof (ip4_header_t));
      to_ip4 = (ip4_header_t *) (to_data + l2unfragmentablesize);
      to_data = (void *) (to_ip4 + 1);
      vnet_buffer (to_b)->l3_hdr_offset = to_b->current_data;
      vlib_buffer_copy_trace_flag (vm, from_b, to_bi);
      to_b->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;

      if (from_b->flags & VNET_BUFFER_F_L4_HDR_OFFSET_VALID)
	{
	  vnet_buffer (to_b)->l4_hdr_offset =
	    (vnet_buffer (to_b)->l3_hdr_offset +
	     (vnet_buffer (from_b)->l4_hdr_offset -
	      vnet_buffer (from_b)->l3_hdr_offset));
	  to_b->flags |= VNET_BUFFER_F_L4_HDR_OFFSET_VALID;
	}

      /* Spin through the from buffers, filling up the to buffer */
      u16 left_in_to_buffer = len, to_ptr = 0;
      while (1)
	{
	  u16 bytes_to_copy;

	  /* Figure out how many bytes we can safely copy */
	  bytes_to_copy = left_in_to_buffer <= left_in_from_buffer ?
	    left_in_to_buffer : left_in_from_buffer;
	  clib_memcpy_fast (to_data + to_ptr, from_data + ptr, bytes_to_copy);
	  left_in_to_buffer -= bytes_to_copy;
	  ptr += bytes_to_copy;
	  left_in_from_buffer -= bytes_to_copy;
	  if (left_in_to_buffer == 0)
	    break;

	  /* left_in_from_buffer is unsigned, so this means "exhausted" */
	  ASSERT (left_in_from_buffer == 0);
	  /* Move to the next buffer in the chain */
	  if (!(from_b->flags & VLIB_BUFFER_NEXT_PRESENT))
	    {
	      return IP_FRAG_ERROR_MALFORMED;
	    }
	  from_b = vlib_get_buffer (vm, from_b->next_buffer);
	  from_data = (u8 *) vlib_buffer_get_current (from_b);
	  ptr = 0;
	  left_in_from_buffer = from_b->current_length;
	  to_ptr += bytes_to_copy;
	}

      to_b->flags |= VNET_BUFFER_F_IS_IP4;
      to_b->current_length =
	len + sizeof (ip4_header_t) + l2unfragmentablesize;

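      /*
       * flags_and_fragment_offset layout: bit 15 reserved, bit 14 DF,
       * bit 13 MF, bits 12-0 fragment offset in 8-byte units.  MF is set
       * on every fragment but the last, or unconditionally when the
       * original packet was itself a non-final fragment.
       */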
      to_ip4->fragment_id = ip_frag_id;
      to_ip4->flags_and_fragment_offset =
	clib_host_to_net_u16 ((fo >> 3) + ip_frag_offset);
      to_ip4->flags_and_fragment_offset |=
	clib_host_to_net_u16 (((len != rem) || more) << 13);
      to_ip4->length = clib_host_to_net_u16 (len + sizeof (ip4_header_t));
      to_ip4->checksum = ip4_header_checksum (to_ip4);

      /* we've just done the IP checksum .. */
      to_b->flags &= ~VNET_BUFFER_F_OFFLOAD_IP_CKSUM;

      rem -= len;
      fo += len;
    }

  return IP_FRAG_ERROR_NONE;
}
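
/*
 * Usage sketch (not a drop-in): the caller owns from_bi and a u32 vector
 * that receives the fragment buffer indices, and must free the original
 * buffer itself on success:
 *
 *   u32 *fragments = 0;
 *   ip_frag_error_t err =
 *     ip4_frag_do_fragment (vm, bi, mtu, 0, &fragments);
 *   if (err == IP_FRAG_ERROR_NONE)
 *     vlib_buffer_free_one (vm, bi);
 *   // ... enqueue the fragments, then vec_free (fragments)
 */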

void
ip_frag_set_vnet_buffer (vlib_buffer_t * b, u16 mtu, u8 next_index, u8 flags)
{
  vnet_buffer (b)->ip_frag.mtu = mtu;
  vnet_buffer (b)->ip_frag.next_index = next_index;
  vnet_buffer (b)->ip_frag.flags = flags;
}
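
/*
 * For example, a feature that needs a packet fragmented and then looked
 * up again would typically prime the buffer before handing it to the
 * ip4-frag / ip6-frag node (values are illustrative):
 *
 *   ip_frag_set_vnet_buffer (b, 1280, IP_FRAG_NEXT_IP6_LOOKUP, 0);
 */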

static inline uword
frag_node_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
		  vlib_frame_t * frame, u32 node_index, bool is_ip6)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node = vlib_node_get_runtime (vm, node_index);
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  u32 frag_sent = 0, small_packets = 0;
  u32 *buffer = 0;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 pi0, *frag_from, frag_left;
	  vlib_buffer_t *p0;
	  ip_frag_error_t error0;
	  int next0;

	  /*
	   * Note: the packet is not enqueued here; its fragments (or, on
	   * error, the packet itself) are collected in a vector and
	   * enqueued together below.
	   */
	  pi0 = from[0];
	  from += 1;
	  n_left_from -= 1;

	  p0 = vlib_get_buffer (vm, pi0);
	  u16 mtu = vnet_buffer (p0)->ip_frag.mtu;
	  if (is_ip6)
	    error0 = ip6_frag_do_fragment (vm, pi0, mtu, 0, &buffer);
	  else
	    error0 = ip4_frag_do_fragment (vm, pi0, mtu, 0, &buffer);

	  if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      ip_frag_trace_t *tr =
		vlib_add_trace (vm, node, p0, sizeof (*tr));
	      tr->mtu = mtu;
	      tr->ipv6 = is_ip6 ? 1 : 0;
	      tr->n_fragments = vec_len (buffer);
	      tr->next = vnet_buffer (p0)->ip_frag.next_index;
	    }

	  if (!is_ip6 && error0 == IP_FRAG_ERROR_DONT_FRAGMENT_SET)
	    {
	      icmp4_error_set_vnet_buffer (p0, ICMP4_destination_unreachable,
					   ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set,
					   vnet_buffer (p0)->ip_frag.mtu);
	      next0 = IP_FRAG_NEXT_ICMP_ERROR;
	    }
	  else
	    {
	      next0 = (error0 == IP_FRAG_ERROR_NONE ?
		       vnet_buffer (p0)->ip_frag.next_index :
		       IP_FRAG_NEXT_DROP);
	    }

	  if (error0 == IP_FRAG_ERROR_NONE)
	    {
	      frag_sent += vec_len (buffer);
	      small_packets += (vec_len (buffer) == 1);
	      vlib_buffer_free_one (vm, pi0);	/* Free the original packet */
	    }
	  else
	    {
	      vlib_error_count (vm, node_index, error0, 1);
	      /* Forward the original buffer to the error next */
	      vec_add1 (buffer, pi0);
	    }

	  /* Enqueue the fragments collected in the vector */
	  frag_from = buffer;
	  frag_left = vec_len (buffer);

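	  /*
	   * Fragmentation may produce more buffers than there is room
	   * left in the current frame; hand off full frames and continue
	   * with fresh ones until every fragment is enqueued.
	   */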
	  while (frag_left > 0)
	    {
	      while (frag_left > 0 && n_left_to_next > 0)
		{
		  u32 i;
		  i = to_next[0] = frag_from[0];
		  frag_from += 1;
		  frag_left -= 1;
		  to_next += 1;
		  n_left_to_next -= 1;

		  vlib_get_buffer (vm, i)->error = error_node->errors[error0];
		  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
						   to_next, n_left_to_next, i,
						   next0);
		}
	      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
	      vlib_get_next_frame (vm, node, next_index, to_next,
				   n_left_to_next);
	    }
	  vec_reset_length (buffer);
	}
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vec_free (buffer);

  vlib_node_increment_counter (vm, node_index,
			       IP_FRAG_ERROR_FRAGMENT_SENT, frag_sent);
  vlib_node_increment_counter (vm, node_index,
			       IP_FRAG_ERROR_SMALL_PACKET, small_packets);

  return frame->n_vectors;
}

static uword
ip4_frag (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  return frag_node_inline (vm, node, frame, ip4_frag_node.index,
			   0 /* is_ip6 */ );
}

static uword
ip6_frag (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  return frag_node_inline (vm, node, frame, ip6_frag_node.index,
			   1 /* is_ip6 */ );
}

/*
 * Fragments the packet given in from_bi.  Fragments are returned in the
 * buffer vector; the caller must free the original packet itself.
 * from_bi: the buffer's current pointer must point to the IPv6 header.
 */
ip_frag_error_t
ip6_frag_do_fragment (vlib_main_t * vm, u32 from_bi, u16 mtu,
		      u16 l2unfragmentablesize, u32 ** buffer)
{
  vlib_buffer_t *from_b;
  ip6_header_t *ip6;
  u16 len, max, rem, ip_frag_id;
  u8 *org_from_packet;

  from_b = vlib_get_buffer (vm, from_bi);
  org_from_packet = vlib_buffer_get_current (from_b);
  ip6 = vlib_buffer_get_current (from_b) + l2unfragmentablesize;

  rem = clib_net_to_host_u16 (ip6->payload_length);
  max = (mtu - sizeof (ip6_header_t) - sizeof (ip6_frag_hdr_t)) & ~0x7;
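  /* Sanity check for max: with mtu = 1500, max = (1500 - 40 - 8) & ~7 =
   * 1448, so each fragment is at most 40 (IPv6 header) + 8 (fragment
   * header) + 1448 = 1496 bytes, which fits the MTU. */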

  if (rem >
      (vlib_buffer_length_in_chain (vm, from_b) - sizeof (ip6_header_t)))
    {
      return IP_FRAG_ERROR_MALFORMED;
    }

  /* TODO: Look through the header chain for a fragmentation header */
  if (ip6->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION)
    {
      return IP_FRAG_ERROR_MALFORMED;
    }

  u8 *from_data = (void *) (ip6 + 1);
  vlib_buffer_t *org_from_b = from_b;
  u16 fo = 0;
  u16 left_in_from_buffer =
    from_b->current_length - (l2unfragmentablesize + sizeof (ip6_header_t));
  u16 ptr = 0;

  ip_frag_id = ++running_fragment_id;	/* TODO: needs a better id source */

  /* Do the actual fragmentation */
  while (rem)
    {
      u32 to_bi;
      vlib_buffer_t *to_b;
      ip6_header_t *to_ip6;
      ip6_frag_hdr_t *to_frag_hdr;
      u8 *to_data;

      len =
	(rem >
	 (mtu - sizeof (ip6_header_t) - sizeof (ip6_frag_hdr_t)) ? max : rem);
      if (len != rem)		/* The last fragment need not be divisible by 8 */
	len &= ~0x7;
      if ((to_b = frag_buffer_alloc (org_from_b, &to_bi)) == 0)
	{
	  return IP_FRAG_ERROR_MEMORY;
	}
      vec_add1 (*buffer, to_bi);
      frag_set_sw_if_index (to_b, org_from_b);

      /* Copy the ip6 header */
      clib_memcpy_fast (to_b->data, org_from_packet,
			l2unfragmentablesize + sizeof (ip6_header_t));
      to_ip6 = vlib_buffer_get_current (to_b);
      to_frag_hdr = (ip6_frag_hdr_t *) (to_ip6 + 1);
      to_data = (void *) (to_frag_hdr + 1);

      vnet_buffer (to_b)->l3_hdr_offset = to_b->current_data;
      to_b->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;

      if (from_b->flags & VNET_BUFFER_F_L4_HDR_OFFSET_VALID)
	{
	  vnet_buffer (to_b)->l4_hdr_offset =
	    (vnet_buffer (to_b)->l3_hdr_offset +
	     (vnet_buffer (from_b)->l4_hdr_offset -
	      vnet_buffer (from_b)->l3_hdr_offset));
	  to_b->flags |= VNET_BUFFER_F_L4_HDR_OFFSET_VALID;
	}
      to_b->flags |= VNET_BUFFER_F_IS_IP6;

      /* Spin through the from buffers, filling up the to buffer */
      u16 left_in_to_buffer = len, to_ptr = 0;
      while (1)
	{
	  u16 bytes_to_copy;

	  /* Figure out how many bytes we can safely copy */
	  bytes_to_copy = left_in_to_buffer <= left_in_from_buffer ?
	    left_in_to_buffer : left_in_from_buffer;
	  clib_memcpy_fast (to_data + to_ptr, from_data + ptr, bytes_to_copy);
	  left_in_to_buffer -= bytes_to_copy;
	  ptr += bytes_to_copy;
	  left_in_from_buffer -= bytes_to_copy;
	  if (left_in_to_buffer == 0)
	    break;

	  /* left_in_from_buffer is unsigned, so this means "exhausted" */
	  ASSERT (left_in_from_buffer == 0);
	  /* Move to the next buffer in the chain */
	  if (!(from_b->flags & VLIB_BUFFER_NEXT_PRESENT))
	    {
	      return IP_FRAG_ERROR_MALFORMED;
	    }
	  from_b = vlib_get_buffer (vm, from_b->next_buffer);
	  from_data = (u8 *) vlib_buffer_get_current (from_b);
	  ptr = 0;
	  left_in_from_buffer = from_b->current_length;
	  to_ptr += bytes_to_copy;
	}

      to_b->current_length =
	len + sizeof (ip6_header_t) + sizeof (ip6_frag_hdr_t);
      to_ip6->payload_length =
	clib_host_to_net_u16 (len + sizeof (ip6_frag_hdr_t));
      to_ip6->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
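      /*
       * Fragment header layout (RFC 8200): next_hdr, a reserved octet,
       * a 16-bit field carrying the offset in 8-byte units in its upper
       * 13 bits and the M (more-fragments) flag in bit 0, then a 32-bit
       * identification.
       */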
      to_frag_hdr->fragment_offset_and_more =
	ip6_frag_hdr_offset_and_more ((fo >> 3), len != rem);
      to_frag_hdr->identification = ip_frag_id;
      to_frag_hdr->next_hdr = ip6->protocol;
      to_frag_hdr->rsv = 0;

      rem -= len;
      fo += len;
    }

  return IP_FRAG_ERROR_NONE;
}
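
/*
 * Resulting layout, schematically: an input packet [IPv6|L4|data]
 * becomes N fragments, each [IPv6|frag-hdr|piece], where the copied
 * IPv6 header's next-header is rewritten to 44 (fragmentation) and the
 * fragment header's next_hdr carries the original protocol.
 */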

static char *ip4_frag_error_strings[] = {
#define _(sym,string) string,
  foreach_ip_frag_error
#undef _
};

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip4_frag_node) = {
  .function = ip4_frag,
  .name = IP4_FRAG_NODE_NAME,
  .vector_size = sizeof (u32),
  .format_trace = format_ip_frag_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = IP_FRAG_N_ERROR,
  .error_strings = ip4_frag_error_strings,

  .n_next_nodes = IP_FRAG_N_NEXT,
  .next_nodes = {
    [IP_FRAG_NEXT_IP_REWRITE] = "ip4-rewrite",
    [IP_FRAG_NEXT_IP_REWRITE_MIDCHAIN] = "ip4-midchain",
    [IP_FRAG_NEXT_IP4_LOOKUP] = "ip4-lookup",
    [IP_FRAG_NEXT_IP6_LOOKUP] = "ip6-lookup",
    [IP_FRAG_NEXT_ICMP_ERROR] = "ip4-icmp-error",
    [IP_FRAG_NEXT_DROP] = "ip4-drop"
  },
};
/* *INDENT-ON* */

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip6_frag_node) = {
  .function = ip6_frag,
  .name = IP6_FRAG_NODE_NAME,
  .vector_size = sizeof (u32),
  .format_trace = format_ip_frag_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = IP_FRAG_N_ERROR,
  .error_strings = ip4_frag_error_strings,

  .n_next_nodes = IP_FRAG_N_NEXT,
  .next_nodes = {
    [IP_FRAG_NEXT_IP_REWRITE] = "ip6-rewrite",
    [IP_FRAG_NEXT_IP_REWRITE_MIDCHAIN] = "ip6-midchain",
    [IP_FRAG_NEXT_IP4_LOOKUP] = "ip4-lookup",
    [IP_FRAG_NEXT_IP6_LOOKUP] = "ip6-lookup",
    [IP_FRAG_NEXT_ICMP_ERROR] = "error-drop",
    [IP_FRAG_NEXT_DROP] = "ip6-drop"
  },
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */