/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <stdbool.h>
#include <vppinfra/error.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vlib/vlib.h>
#include <vnet/fib/fib_types.h>
#include <vnet/fib/ip4_fib.h>
#include <vnet/adj/adj.h>
#include <vnet/dpo/load_balance.h>
#include "lpm.h"
#include <vppinfra/lock.h>

#define MAP_SKIP_IP6_LOOKUP 1

#define MAP_ERR_GOOD			0
#define MAP_ERR_BAD_POOL_SIZE		-1
#define MAP_ERR_BAD_HT_RATIO		-2
#define MAP_ERR_BAD_LIFETIME		-3
#define MAP_ERR_BAD_BUFFERS		-4
#define MAP_ERR_BAD_BUFFERS_TOO_LARGE	-5
#define MAP_ERR_UNSUPPORTED             -6

int map_create_domain (ip4_address_t * ip4_prefix, u8 ip4_prefix_len,
		       ip6_address_t * ip6_prefix, u8 ip6_prefix_len,
		       ip6_address_t * ip6_src, u8 ip6_src_len,
		       u8 ea_bits_len, u8 psid_offset, u8 psid_length,
		       u32 * map_domain_index, u16 mtu, u8 flags, u8 * tag);
int map_delete_domain (u32 map_domain_index);
int map_add_del_psid (u32 map_domain_index, u16 psid, ip6_address_t * tep,
		      bool is_add);
int map_if_enable_disable (bool is_enable, u32 sw_if_index,
			   bool is_translation);
u8 *format_map_trace (u8 * s, va_list * args);

int map_param_set_fragmentation (bool inner, bool ignore_df);
int map_param_set_icmp (ip4_address_t * ip4_err_relay_src);
int map_param_set_icmp6 (u8 enable_unreachable);
void map_pre_resolve (ip4_address_t * ip4, ip6_address_t * ip6, bool is_del);
int map_param_set_security_check (bool enable, bool fragments);
int map_param_set_traffic_class (bool copy, u8 tc);
int map_param_set_tcp (u16 tcp_mss);


typedef enum
{
  MAP_DOMAIN_PREFIX = 1 << 0,
  MAP_DOMAIN_TRANSLATION = 1 << 1,	// The domain uses MAP-T
  MAP_DOMAIN_RFC6052 = 1 << 2,
} __attribute__ ((__packed__)) map_domain_flags_e;

//#define IP6_MAP_T_OVERRIDE_TOS 0

/*
 * This structure _MUST_ be no larger than a single cache line (64 bytes).
 * If more space is needed, make a union of ip6_prefix and *rules, as
 * those are mutually exclusive.
 */
typedef struct
{
  /* Required for pool_get_aligned */
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  ip6_address_t ip6_src;
  ip6_address_t ip6_prefix;
  ip6_address_t *rules;
  u32 suffix_mask;
  ip4_address_t ip4_prefix;
  u16 psid_mask;
  u16 mtu;
  map_domain_flags_e flags;
  u8 ip6_prefix_len;
  u8 ip6_src_len;
  u8 ea_bits_len;
  u8 psid_offset;
  u8 psid_length;

  /* helpers */
  u8 psid_shift;
  u8 suffix_shift;
  u8 ea_shift;

  /* not used by forwarding */
  u8 ip4_prefix_len;
} map_domain_t;

STATIC_ASSERT ((sizeof (map_domain_t) <= CLIB_CACHE_LINE_BYTES),
	       "MAP domain fits in one cacheline");
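
/*
 * Note on the "helpers" above (a sketch, not a normative description of
 * how map_create_domain() derives them): following the RFC 7597
 * port-mapping algorithm, a 16-bit port is laid out as
 * | psid_offset bits | psid_length bits | remaining bits |, so the
 * forwarding code extracts
 *
 *   psid = (port >> psid_shift) & psid_mask
 *
 * with psid_mask = (1 << psid_length) - 1 and psid_shift spanning the
 * bits to the right of the PSID field.  suffix_shift/suffix_mask select
 * the EA-bit portion of the IPv4 address in the same way, and ea_shift
 * positions the combined EA bits inside the upper 64 bits of the rule
 * IPv6 prefix (see map_get_pfx() below).
 */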

/*
 * Extra data about a domain that doesn't need to be time/space critical.
 * This structure is in a vector parallel to the main map_domain_t,
 * and indexed by the same map-domain-index values.
 */
typedef struct
{
  u8 *tag;			/* User-assigned tag, typically a domain name. */
} map_domain_extra_t;

#define MAP_REASS_INDEX_NONE ((u16)0xffff)

/*
 * MAP domain counters
 */
typedef enum
{
  /* Simple counters */
  MAP_DOMAIN_IPV4_FRAGMENT = 0,
  /* Combined counters */
  MAP_DOMAIN_COUNTER_RX = 0,
  MAP_DOMAIN_COUNTER_TX,
  MAP_N_DOMAIN_COUNTER
} map_domain_counter_t;
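
/*
 * The two "= 0" entries above are intentional: simple and combined
 * counters live in separate vectors (simple_domain_counters and
 * domain_counters in map_main_t below), so their index spaces overlap.
 * Illustrative update from a forwarding node (the local variable names
 * are the caller's, not part of this API):
 *
 *   vlib_increment_combined_counter (map_main.domain_counters +
 *                                    MAP_DOMAIN_COUNTER_TX,
 *                                    thread_index, map_domain_index, 1,
 *                                    vlib_buffer_length_in_chain (vm, b0));
 */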

#ifdef MAP_SKIP_IP6_LOOKUP
/**
 * A pre-resolved next-hop
 */
typedef struct map_main_pre_resolved_t_
{
  /**
   * Linkage into the FIB graph
   */
  fib_node_t node;

  /**
   * The FIB entry index of the next-hop
   */
  fib_node_index_t fei;

  /**
   * This object's sibling index on the FIB entry's child dependency list
   */
  u32 sibling;

  /**
   * The load-balance DPO to use for forwarding
   */
  dpo_id_t dpo;
} map_main_pre_resolved_t;

/**
 * Pre-resolved next hops for v4 and v6. Note that these are global,
 * not per-domain.
 */
extern map_main_pre_resolved_t pre_resolved[FIB_PROTOCOL_MAX];
#endif

typedef struct
{
  /* pool of MAP domains */
  map_domain_t *domains;
  map_domain_extra_t *domain_extras;

  /* MAP Domain packet/byte counters indexed by map domain index */
  vlib_simple_counter_main_t *simple_domain_counters;
  vlib_combined_counter_main_t *domain_counters;
  volatile u32 *counter_lock;

  /* API message id base */
  u16 msg_id_base;

  /* Traffic class: zero, copy (~0) or fixed value */
  u8 tc;
  bool tc_copy;

  bool sec_check;		/* Inbound security check */
  bool sec_check_frag;		/* Inbound security check for (subsequent) fragments */
  bool icmp6_enabled;		/* Send destination unreachable for security check failure */

  u16 tcp_mss;			/* TCP MSS clamp value */

  /* ICMPv6 -> ICMPv4 relay parameters */
  ip4_address_t icmp4_src_address;
  vlib_simple_counter_main_t icmp_relayed;

  /* convenience */
  vlib_main_t *vlib_main;
  vnet_main_t *vnet_main;

  bool frag_inner;		/* Inner or outer fragmentation */
  bool frag_ignore_df;		/* Fragment (outer) packet even if DF is set */

  /* Graph node state */
  uword *bm_trans_enabled_by_sw_if;
  uword *bm_encap_enabled_by_sw_if;

  /* Lookup tables */
  lpm_t *ip4_prefix_tbl;
  lpm_t *ip6_prefix_tbl;
  lpm_t *ip6_src_prefix_tbl;

  uword ip4_sv_reass_custom_next_index;
} map_main_t;

/*
 * MAP Error counters/messages
 */
#define foreach_map_error				\
  /* Must be first. */					\
 _(NONE, "valid MAP packets")				\
 _(BAD_PROTOCOL, "bad protocol")			\
 _(SEC_CHECK, "security check failed")			\
 _(ENCAP_SEC_CHECK, "encap security check failed")	\
 _(DECAP_SEC_CHECK, "decap security check failed")	\
 _(ICMP, "unable to translate ICMP")			\
 _(ICMP_RELAY, "unable to relay ICMP")			\
 _(UNKNOWN, "unknown")					\
 _(NO_BINDING, "no binding")				\
 _(NO_DOMAIN, "no domain")				\
 _(FRAGMENTED, "packet is a fragment")                  \
 _(FRAGMENT_MEMORY, "could not cache fragment")	        \
 _(FRAGMENT_MALFORMED, "fragment has unexpected format")\
 _(FRAGMENT_DROPPED, "dropped cached fragment")         \
 _(MALFORMED, "malformed packet")			\
 _(DF_SET, "can't fragment, DF set")			\
 _(TIME_EXCEEDED, "time exceeded")			\

typedef enum
{
#define _(sym,str) MAP_ERROR_##sym,
  foreach_map_error
#undef _
    MAP_N_ERROR,
} map_error_t;

u64 map_error_counter_get (u32 node_index, map_error_t map_error);

typedef struct
{
  u32 map_domain_index;
  u16 port;
} map_trace_t;

always_inline void
map_add_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
	       vlib_buffer_t * b, u32 map_domain_index, u16 port)
{
  map_trace_t *tr = vlib_add_trace (vm, node, b, sizeof (*tr));
  tr->map_domain_index = map_domain_index;
  tr->port = port;
}
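
/*
 * Typical call site in a MAP graph node (illustrative; p0,
 * map_domain_index0 and port0 are the caller's locals):
 *
 *   if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
 *     map_add_trace (vm, node, p0, map_domain_index0, port0);
 *
 * format_map_trace() above is the matching trace formatter.
 */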

extern map_main_t map_main;

extern vlib_node_registration_t ip4_map_node;
extern vlib_node_registration_t ip6_map_node;

extern vlib_node_registration_t ip4_map_t_node;
extern vlib_node_registration_t ip4_map_t_fragmented_node;
extern vlib_node_registration_t ip4_map_t_tcp_udp_node;
extern vlib_node_registration_t ip4_map_t_icmp_node;

extern vlib_node_registration_t ip6_map_t_node;
extern vlib_node_registration_t ip6_map_t_fragmented_node;
extern vlib_node_registration_t ip6_map_t_tcp_udp_node;
extern vlib_node_registration_t ip6_map_t_icmp_node;

/*
 * map_get_pfx: compute the upper 64 bits of the MAP IPv6 address for a
 * given IPv4 address and port.
 */
static_always_inline u64
map_get_pfx (map_domain_t * d, u32 addr, u16 port)
{
  u16 psid = (port >> d->psid_shift) & d->psid_mask;

  if (d->ea_bits_len == 0 && d->rules)
    return clib_net_to_host_u64 (d->rules[psid].as_u64[0]);

  u32 suffix = (addr >> d->suffix_shift) & d->suffix_mask;
  u64 ea =
    d->ea_bits_len == 0 ? 0 : (((u64) suffix << d->psid_length)) | psid;

  return clib_net_to_host_u64 (d->ip6_prefix.as_u64[0]) | ea << d->ea_shift;
}
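
/*
 * Worked example (illustrative values, not a configured domain):
 * rule IPv6 prefix 2001:db8::/40, rule IPv4 prefix 192.0.2.0/24,
 * ea_bits_len 16, psid_offset 6, psid_length 8.  For 192.0.2.18 the
 * IPv4 suffix is 0x12; a port whose PSID is 0x34 gives EA bits
 * (0x12 << 8) | 0x34 = 0x1234, placed right after the /40 rule prefix,
 * so map_get_pfx() returns 0x20010db800123400, i.e. the /64
 * 2001:db8:12:3400::.
 */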

static_always_inline u64
map_get_pfx_net (map_domain_t * d, u32 addr, u16 port)
{
  return clib_host_to_net_u64 (map_get_pfx (d, clib_net_to_host_u32 (addr),
					    clib_net_to_host_u16 (port)));
}

/*
 * map_get_sfx: compute the lower 64 bits (interface identifier) of the
 * MAP IPv6 address for a given IPv4 address and port.
 */
static_always_inline u64
map_get_sfx (map_domain_t * d, u32 addr, u16 port)
{
  u16 psid = (port >> d->psid_shift) & d->psid_mask;

  /* Shared 1:1 mode. */
  if (d->ea_bits_len == 0 && d->rules)
    return clib_net_to_host_u64 (d->rules[psid].as_u64[1]);
  if (d->ip6_prefix_len == 128)
    return clib_net_to_host_u64 (d->ip6_prefix.as_u64[1]);

  if (d->ip6_src_len == 96)
    return (clib_net_to_host_u64 (d->ip6_prefix.as_u64[1]) | addr);

  /* IPv4 prefix */
  if (d->flags & MAP_DOMAIN_PREFIX)
    return (u64) (addr & (0xFFFFFFFF << d->suffix_shift)) << 16;

  /* Shared or full IPv4 address */
  return ((u64) addr << 16) | psid;
}
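
/*
 * For the shared/full IPv4 case the return value is the RFC 7597
 * interface identifier: 16 zero bits, the 32-bit IPv4 address, then the
 * 16-bit PSID.  Continuing the example above, 192.0.2.18 with PSID 0x34
 * yields 0x0000c00002120034, so the complete MAP CE address is
 * 2001:db8:12:3400:0:c000:212:34.  The MAP_DOMAIN_PREFIX case instead
 * embeds the (masked) IPv4 address in the same position with a zero
 * PSID field.
 */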

static_always_inline u64
map_get_sfx_net (map_domain_t * d, u32 addr, u16 port)
{
  return clib_host_to_net_u64 (map_get_sfx (d, clib_net_to_host_u32 (addr),
					    clib_net_to_host_u16 (port)));
}

static_always_inline u32
map_get_ip4 (ip6_address_t * addr, u16 prefix_len)
{
  ASSERT (prefix_len == 64 || prefix_len == 96);
  if (prefix_len == 96)
    return clib_host_to_net_u32 (clib_net_to_host_u64 (addr->as_u64[1]));
  else
    return clib_host_to_net_u32 (clib_net_to_host_u64 (addr->as_u64[1]) >>
				 16);
}
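
/*
 * map_get_ip4() is the reverse operation: for a 96-bit prefix the IPv4
 * address is simply the last four bytes of the IPv6 address; for a
 * 64-bit prefix it sits between the 16 zero bits and the PSID of the
 * interface identifier, hence the extra shift by 16.
 */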

static_always_inline map_domain_t *
ip4_map_get_domain (ip4_address_t * addr, u32 * map_domain_index, u8 * error)
{
  map_main_t *mm = &map_main;

  u32 mdi = mm->ip4_prefix_tbl->lookup (mm->ip4_prefix_tbl, addr, 32);
  if (mdi == ~0)
    {
      *error = MAP_ERROR_NO_DOMAIN;
      return 0;
    }
  *map_domain_index = mdi;
  return pool_elt_at_index (mm->domains, mdi);
}

/*
 * Get the MAP domain from an IPv6 address.
 * If the IPv6 address or prefix is shared, the IPv4 address must be used.
 */
static_always_inline map_domain_t *
ip6_map_get_domain (ip6_address_t * addr, u32 * map_domain_index, u8 * error)
{
  map_main_t *mm = &map_main;
  u32 mdi =
    mm->ip6_src_prefix_tbl->lookup (mm->ip6_src_prefix_tbl, addr, 128);
  if (mdi == ~0)
    {
      *error = MAP_ERROR_NO_DOMAIN;
      return 0;
    }

  *map_domain_index = mdi;
  return pool_elt_at_index (mm->domains, mdi);
}

clib_error_t *map_plugin_api_hookup (vlib_main_t * vm);

void map_ip6_drop_pi (u32 pi);

/*
 * Embed an IPv4 address in an IPv6 source address.
 * Supports ip6_src prefix lengths of 96 and 64 (the latter skips the
 * 'u' octet at byte 8).
 */
static_always_inline void
ip4_map_t_embedded_address (map_domain_t * d,
			    ip6_address_t * ip6, const ip4_address_t * ip4)
{
  ASSERT (d->ip6_src_len == 96 || d->ip6_src_len == 64);	//No support for other lengths for now
  u8 offset = d->ip6_src_len == 64 ? 9 : 12;
  ip6->as_u64[0] = d->ip6_src.as_u64[0];
  ip6->as_u64[1] = d->ip6_src.as_u64[1];
  clib_memcpy_fast (&ip6->as_u8[offset], ip4, 4);
}
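
/*
 * The offsets follow RFC 6052: for a /96 source prefix the IPv4 address
 * occupies the last four bytes (e.g. 64:ff9b:: plus 192.0.2.18 gives
 * 64:ff9b::c000:212), while for a /64 prefix it occupies bytes 9..12,
 * leaving the 'u' octet at byte 8 untouched.
 */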

static_always_inline u32
ip6_map_t_embedded_address (map_domain_t * d, ip6_address_t * addr)
{
  ASSERT (d->ip6_src_len == 64 || d->ip6_src_len == 96);
  u32 x;
  u8 offset = d->ip6_src_len == 64 ? 9 : 12;
  clib_memcpy (&x, &addr->as_u8[offset], 4);
  return x;
}

static inline void
map_domain_counter_lock (map_main_t * mm)
{
  if (mm->counter_lock)
    while (clib_atomic_test_and_set (mm->counter_lock))
      /* zzzz */ ;
}

static inline void
map_domain_counter_unlock (map_main_t * mm)
{
  if (mm->counter_lock)
    clib_atomic_release (mm->counter_lock);
}
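
/*
 * The lock is typically taken around slow-path counter maintenance so it
 * does not race with readers.  A minimal usage sketch (assuming mm points
 * at map_main):
 *
 *   map_domain_counter_lock (mm);
 *   vlib_validate_combined_counter (&mm->domain_counters[MAP_DOMAIN_COUNTER_TX],
 *                                   map_domain_index);
 *   map_domain_counter_unlock (mm);
 */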


static_always_inline void
map_send_all_to_node (vlib_main_t * vm, u32 * pi_vector,
		      vlib_node_runtime_t * node, vlib_error_t * error,
		      u32 next)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  /* Enqueue all buffers (e.g. fragments that are ready) to the same next node */
  from = pi_vector;
  n_left_from = vec_len (pi_vector);
  next_index = node->cached_next_index;
  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 pi0 = to_next[0] = from[0];
	  from += 1;
	  n_left_from -= 1;
	  to_next += 1;
	  n_left_to_next -= 1;
	  vlib_buffer_t *p0 = vlib_get_buffer (vm, pi0);
	  p0->error = *error;
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
					   n_left_to_next, pi0, next);
	}
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
}

static_always_inline void
map_mss_clamping (tcp_header_t * tcp, ip_csum_t * sum, u16 mss_clamping)
{
  u8 *data;
  u8 opt_len, opts_len, kind;
  u16 mss;
  u16 mss_value_net = clib_host_to_net_u16 (mss_clamping);

  if (!tcp_syn (tcp))
    return;

  opts_len = (tcp_doff (tcp) << 2) - sizeof (tcp_header_t);
  data = (u8 *) (tcp + 1);
  for (; opts_len > 0; opts_len -= opt_len, data += opt_len)
    {
      kind = data[0];

      if (kind == TCP_OPTION_EOL)
	break;
      else if (kind == TCP_OPTION_NOOP)
	{
	  opt_len = 1;
	  continue;
	}
      else
	{
	  if (opts_len < 2)
	    return;
	  opt_len = data[1];

	  if (opt_len < 2 || opt_len > opts_len)
	    return;
	}

      if (kind == TCP_OPTION_MSS)
	{
	  /* The MSS option is 4 bytes; don't read past a malformed one */
	  if (opt_len < 4)
	    return;
	  mss = *(u16 *) (data + 2);
	  if (clib_net_to_host_u16 (mss) > mss_clamping)
	    {
	      *sum =
		ip_csum_update (*sum, mss, mss_value_net, ip4_header_t,
				length);
	      clib_memcpy (data + 2, &mss_value_net, 2);
	    }
	  return;
	}
    }
}
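
/*
 * map_mss_clamping() only touches SYN segments and leaves *sum as an
 * unfolded checksum.  A minimal caller sketch (assuming tcp points at the
 * TCP header being rewritten):
 *
 *   ip_csum_t sum = tcp->checksum;
 *   map_mss_clamping (tcp, &sum, map_main.tcp_mss);
 *   tcp->checksum = ip_csum_fold (sum);
 */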

static_always_inline bool
ip4_map_ip6_lookup_bypass (vlib_buffer_t * p0, ip4_header_t * ip)
{
#ifdef MAP_SKIP_IP6_LOOKUP
  if (FIB_NODE_INDEX_INVALID != pre_resolved[FIB_PROTOCOL_IP6].fei)
    {
      vnet_buffer (p0)->ip.adj_index[VLIB_TX] =
	pre_resolved[FIB_PROTOCOL_IP6].dpo.dpoi_index;
      return (true);
    }
#endif
  return (false);
}

static_always_inline bool
ip6_map_ip4_lookup_bypass (vlib_buffer_t * p0, ip4_header_t * ip)
{
#ifdef MAP_SKIP_IP6_LOOKUP
  if (FIB_NODE_INDEX_INVALID != pre_resolved[FIB_PROTOCOL_IP4].fei)
    {
      vnet_buffer (p0)->ip.adj_index[VLIB_TX] =
	pre_resolved[FIB_PROTOCOL_IP4].dpo.dpoi_index;
      return (true);
    }
#endif
  return (false);
}
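
/*
 * These bypass helpers only set vnet_buffer(b)->ip.adj_index[VLIB_TX] to
 * the pre-resolved load-balance DPO; when they return true it is up to
 * the calling node to steer the packet past the FIB lookup node.  See
 * map_pre_resolve() for how the pre_resolved[] entries are installed.
 */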

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */