device.c revision 7ca5aaac
/*
 *------------------------------------------------------------------
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#define _GNU_SOURCE
#include <stdint.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/ip/ip6_packet.h>
#include <vnet/ip/ip6_hop_by_hop_packet.h>
#include <vnet/bonding/node.h>
#include <vppinfra/lb_hash_hash.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/arp_packet.h>

#define foreach_bond_tx_error     \
  _(NONE, "no error")             \
  _(IF_DOWN, "interface down")    \
  _(NO_SLAVE, "no slave")

typedef enum
{
#define _(f,s) BOND_TX_ERROR_##f,
  foreach_bond_tx_error
#undef _
    BOND_TX_N_ERROR,
} bond_tx_error_t;

static char *bond_tx_error_strings[] = {
#define _(n,s) s,
  foreach_bond_tx_error
#undef _
};
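
/*
 * The X-macro above expands once into the BOND_TX_ERROR_NONE,
 * BOND_TX_ERROR_IF_DOWN and BOND_TX_ERROR_NO_SLAVE enum values and once
 * into the matching counter strings, keeping the error table and its
 * display strings in sync from a single definition.
 */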

static u8 *
format_bond_tx_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  bond_packet_trace_t *t = va_arg (*args, bond_packet_trace_t *);
  vnet_hw_interface_t *hw, *hw1;
  vnet_main_t *vnm = vnet_get_main ();

  hw = vnet_get_sup_hw_interface (vnm, t->sw_if_index);
  hw1 = vnet_get_sup_hw_interface (vnm, t->bond_sw_if_index);
  s = format (s, "src %U, dst %U, %s -> %s",
	      format_ethernet_address, t->ethernet.src_address,
	      format_ethernet_address, t->ethernet.dst_address,
	      hw->name, hw1->name);

  return s;
}

#ifndef CLIB_MARCH_VARIANT
u8 *
format_bond_interface_name (u8 * s, va_list * args)
{
  u32 dev_instance = va_arg (*args, u32);
  bond_main_t *bm = &bond_main;
  bond_if_t *bif = pool_elt_at_index (bm->interfaces, dev_instance);

  s = format (s, "BondEthernet%lu", bif->id);

  return s;
}
#endif

static __clib_unused clib_error_t *
bond_set_l2_mode_function (vnet_main_t * vnm,
			   struct vnet_hw_interface_t *bif_hw,
			   i32 l2_if_adjust)
{
  bond_if_t *bif;
  u32 *sw_if_index;
  struct vnet_hw_interface_t *sif_hw;

  bif = bond_get_master_by_sw_if_index (bif_hw->sw_if_index);
  if (!bif)
    return 0;

  if ((bif_hw->l2_if_count == 1) && (l2_if_adjust == 1))
    {
      /* Just added first L2 interface on this port */
      vec_foreach (sw_if_index, bif->slaves)
      {
	sif_hw = vnet_get_sup_hw_interface (vnm, *sw_if_index);
	ethernet_set_flags (vnm, sif_hw->hw_if_index,
			    ETHERNET_INTERFACE_FLAG_ACCEPT_ALL);

	/* ensure all packets go to ethernet-input */
	ethernet_set_rx_redirect (vnm, sif_hw, 1);
      }
    }
  else if ((bif_hw->l2_if_count == 0) && (l2_if_adjust == -1))
    {
      /* Just removed last L2 subinterface on this port */
      vec_foreach (sw_if_index, bif->slaves)
      {
	sif_hw = vnet_get_sup_hw_interface (vnm, *sw_if_index);

	/* Allow ip packets to go directly to ip4-input etc */
	ethernet_set_rx_redirect (vnm, sif_hw, 0);
      }
    }

  return 0;
}

static __clib_unused clib_error_t *
bond_subif_add_del_function (vnet_main_t * vnm, u32 hw_if_index,
			     struct vnet_sw_interface_t *st, int is_add)
{
  /* Nothing for now */
  return 0;
}

static clib_error_t *
bond_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
{
  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
  uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
  bond_main_t *bm = &bond_main;
  bond_if_t *bif = pool_elt_at_index (bm->interfaces, hif->dev_instance);

  bif->admin_up = is_up;
  if (is_up && vec_len (bif->active_slaves))
    vnet_hw_interface_set_flags (vnm, bif->hw_if_index,
				 VNET_HW_INTERFACE_FLAG_LINK_UP);
  return 0;
}

static_always_inline void
bond_tx_add_to_queue (bond_per_thread_data_t * ptd, u32 port, u32 bi)
{
  u32 idx = ptd->per_port_queue[port].n_buffers++;
  ptd->per_port_queue[port].buffers[idx] = bi;
}
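
/*
 * Buffers accumulate on a per-thread, per-port queue: a load-balance
 * result of, say, 2 appends the buffer index to per_port_queue[2].  The
 * queues are drained into one frame per slave port at the end of the tx
 * function (see the done: label below).
 */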

static_always_inline u32
bond_lb_broadcast (vlib_main_t * vm,
		   bond_if_t * bif, vlib_buffer_t * b0, uword n_slaves)
{
  bond_main_t *bm = &bond_main;
  vlib_buffer_t *c0;
  int port;
  u32 sw_if_index;
  u16 thread_index = vm->thread_index;
  bond_per_thread_data_t *ptd = vec_elt_at_index (bm->per_thread_data,
						  thread_index);

  /* queue a copy on ports 1..n-1; the caller sends the original on port 0 */
  for (port = 1; port < n_slaves; port++)
    {
      sw_if_index = *vec_elt_at_index (bif->active_slaves, port);
      c0 = vlib_buffer_copy (vm, b0);
      if (PREDICT_TRUE (c0 != 0))
	{
	  vnet_buffer (c0)->sw_if_index[VLIB_TX] = sw_if_index;
	  bond_tx_add_to_queue (ptd, port, vlib_get_buffer_index (vm, c0));
	}
    }

  return 0;
}

static_always_inline u32
bond_lb_l2 (vlib_buffer_t * b0)
{
  ethernet_header_t *eth = vlib_buffer_get_current (b0);
  u64 *dst = (u64 *) & eth->dst_address[0];
  u64 a = clib_mem_unaligned (dst, u64);
  u32 *src = (u32 *) & eth->src_address[2];
  u32 b = clib_mem_unaligned (src, u32);

  return lb_hash_hash_2_tuples (a, b);
}
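
/*
 * The two unaligned loads above cover the full 12-byte MAC pair: the u64
 * at dst_address[0] takes the 6-byte destination MAC plus the first two
 * source bytes, and the u32 at src_address[2] takes the remaining four
 * source bytes, so every address bit feeds the hash.
 */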

static_always_inline u16 *
bond_locate_ethertype (ethernet_header_t * eth)
{
  u16 *ethertype_p;
  ethernet_vlan_header_t *vlan;

  if (!ethernet_frame_is_tagged (clib_net_to_host_u16 (eth->type)))
    {
      ethertype_p = &eth->type;
    }
  else
    {
      vlan = (void *) (eth + 1);
      ethertype_p = &vlan->type;
      if (*ethertype_p == ntohs (ETHERNET_TYPE_VLAN))
	{
	  vlan++;
	  ethertype_p = &vlan->type;
	}
    }
  return ethertype_p;
}
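
/*
 * Resulting offsets from the start of the ethernet header: untagged
 * frames keep the ethertype at byte 12, one 802.1Q tag moves it to byte
 * 16, and a double-tagged (QinQ) frame moves it to byte 20.  At most two
 * tags are skipped.
 */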

static_always_inline u32
bond_lb_l23 (vlib_buffer_t * b0)
{
  ethernet_header_t *eth = vlib_buffer_get_current (b0);
  u8 ip_version;
  ip4_header_t *ip4;
  u16 ethertype, *ethertype_p;
  u32 *mac1, *mac2, *mac3;

  ethertype_p = bond_locate_ethertype (eth);
  ethertype = clib_mem_unaligned (ethertype_p, u16);

  if ((ethertype != htons (ETHERNET_TYPE_IP4)) &&
      (ethertype != htons (ETHERNET_TYPE_IP6)))
    return bond_lb_l2 (b0);

  ip4 = (ip4_header_t *) (ethertype_p + 1);
  ip_version = (ip4->ip_version_and_header_length >> 4);

  if (ip_version == 0x4)
    {
      u32 a, c;

      mac1 = (u32 *) & eth->dst_address[0];
      mac2 = (u32 *) & eth->dst_address[4];
      mac3 = (u32 *) & eth->src_address[2];

      a = clib_mem_unaligned (mac1, u32) ^ clib_mem_unaligned (mac2, u32) ^
	clib_mem_unaligned (mac3, u32);
      c =
	lb_hash_hash_2_tuples (clib_mem_unaligned (&ip4->address_pair, u64),
			       a);
      return c;
    }
  else if (ip_version == 0x6)
    {
      u64 a;
      u32 c;
      ip6_header_t *ip6 = (ip6_header_t *) (eth + 1);

      mac1 = (u32 *) & eth->dst_address[0];
      mac2 = (u32 *) & eth->dst_address[4];
      mac3 = (u32 *) & eth->src_address[2];

      a = clib_mem_unaligned (mac1, u32) ^ clib_mem_unaligned (mac2, u32) ^
	clib_mem_unaligned (mac3, u32);
      c =
	lb_hash_hash (clib_mem_unaligned
		      (&ip6->src_address.as_uword[0], uword),
		      clib_mem_unaligned (&ip6->src_address.as_uword[1],
					  uword),
		      clib_mem_unaligned (&ip6->dst_address.as_uword[0],
					  uword),
		      clib_mem_unaligned (&ip6->dst_address.as_uword[1],
					  uword), a);
      return c;
    }
  return bond_lb_l2 (b0);
}
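
/*
 * Non-IP ethertypes and unrecognized IP version nibbles fall back to the
 * pure L2 hash, so such traffic still spreads deterministically across
 * the slaves rather than being pinned to one port.
 */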

static_always_inline u32
bond_lb_l34 (vlib_buffer_t * b0)
{
  ethernet_header_t *eth = vlib_buffer_get_current (b0);
  u8 ip_version;
  uword is_tcp_udp;
  ip4_header_t *ip4;
  u16 ethertype, *ethertype_p;

  ethertype_p = bond_locate_ethertype (eth);
  ethertype = clib_mem_unaligned (ethertype_p, u16);

  if ((ethertype != htons (ETHERNET_TYPE_IP4)) &&
      (ethertype != htons (ETHERNET_TYPE_IP6)))
    return (bond_lb_l2 (b0));

  ip4 = (ip4_header_t *) (ethertype_p + 1);
  ip_version = (ip4->ip_version_and_header_length >> 4);

  if (ip_version == 0x4)
    {
      u32 a, t1, t2;
      tcp_header_t *tcp = (void *) (ip4 + 1);

      is_tcp_udp = (ip4->protocol == IP_PROTOCOL_TCP) ||
	(ip4->protocol == IP_PROTOCOL_UDP);
      t1 = is_tcp_udp ? clib_mem_unaligned (&tcp->src, u16) : 0;
      t2 = is_tcp_udp ? clib_mem_unaligned (&tcp->dst, u16) : 0;
      a = t1 ^ t2;
      return
	lb_hash_hash_2_tuples (clib_mem_unaligned (&ip4->address_pair, u64),
			       a);
    }
  else if (ip_version == 0x6)
    {
      u64 a;
      u32 c, t1, t2;
      ip6_header_t *ip6 = (ip6_header_t *) (eth + 1);
      tcp_header_t *tcp = (void *) (ip6 + 1);

      is_tcp_udp = 0;
      if (PREDICT_TRUE ((ip6->protocol == IP_PROTOCOL_TCP) ||
			(ip6->protocol == IP_PROTOCOL_UDP)))
	{
	  is_tcp_udp = 1;
	  tcp = (void *) (ip6 + 1);
	}
      else if (ip6->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)
	{
	  ip6_hop_by_hop_header_t *hbh =
	    (ip6_hop_by_hop_header_t *) (ip6 + 1);
	  if ((hbh->protocol == IP_PROTOCOL_TCP)
	      || (hbh->protocol == IP_PROTOCOL_UDP))
	    {
	      is_tcp_udp = 1;
	      tcp = (tcp_header_t *) ((u8 *) hbh + ((hbh->length + 1) << 3));
	    }
	}
      t1 = is_tcp_udp ? clib_mem_unaligned (&tcp->src, u16) : 0;
      t2 = is_tcp_udp ? clib_mem_unaligned (&tcp->dst, u16) : 0;
      a = t1 ^ t2;
      c =
	lb_hash_hash (clib_mem_unaligned
		      (&ip6->src_address.as_uword[0], uword),
		      clib_mem_unaligned (&ip6->src_address.as_uword[1],
					  uword),
		      clib_mem_unaligned (&ip6->dst_address.as_uword[0],
					  uword),
		      clib_mem_unaligned (&ip6->dst_address.as_uword[1],
					  uword), a);
      return c;
    }

  return bond_lb_l2 (b0);
}
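
/*
 * Note on the hop-by-hop skip above: the extension header's length field
 * counts 8-octet units beyond the first 8 octets, so the header occupies
 * (hbh->length + 1) << 3 bytes and the TCP/UDP header starts exactly
 * that many bytes past hbh.
 */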

static_always_inline u32
bond_lb_round_robin (bond_if_t * bif, vlib_buffer_t * b0, uword n_slaves)
{
  bif->lb_rr_last_index++;
  if (bif->lb_rr_last_index >= n_slaves)
    bif->lb_rr_last_index = 0;

  return bif->lb_rr_last_index;
}

static_always_inline void
bond_tx_inline (vlib_main_t * vm, bond_if_t * bif, vlib_buffer_t ** b,
		u32 * h, u32 n_left, uword n_slaves, u32 lb_alg)
{
  while (n_left >= 4)
    {
      // Prefetch next iteration
      if (n_left >= 8)
	{
	  vlib_buffer_t **pb = b + 4;

	  vlib_prefetch_buffer_header (pb[0], LOAD);
	  vlib_prefetch_buffer_header (pb[1], LOAD);
	  vlib_prefetch_buffer_header (pb[2], LOAD);
	  vlib_prefetch_buffer_header (pb[3], LOAD);

	  CLIB_PREFETCH (pb[0]->data, CLIB_CACHE_LINE_BYTES, LOAD);
	  CLIB_PREFETCH (pb[1]->data, CLIB_CACHE_LINE_BYTES, LOAD);
	  CLIB_PREFETCH (pb[2]->data, CLIB_CACHE_LINE_BYTES, LOAD);
	  CLIB_PREFETCH (pb[3]->data, CLIB_CACHE_LINE_BYTES, LOAD);
	}

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[1]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[2]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[3]);

      if (lb_alg == BOND_LB_L2)
	{
	  h[0] = bond_lb_l2 (b[0]);
	  h[1] = bond_lb_l2 (b[1]);
	  h[2] = bond_lb_l2 (b[2]);
	  h[3] = bond_lb_l2 (b[3]);
	}
      else if (lb_alg == BOND_LB_L34)
	{
	  h[0] = bond_lb_l34 (b[0]);
	  h[1] = bond_lb_l34 (b[1]);
	  h[2] = bond_lb_l34 (b[2]);
	  h[3] = bond_lb_l34 (b[3]);
	}
      else if (lb_alg == BOND_LB_L23)
	{
	  h[0] = bond_lb_l23 (b[0]);
	  h[1] = bond_lb_l23 (b[1]);
	  h[2] = bond_lb_l23 (b[2]);
	  h[3] = bond_lb_l23 (b[3]);
	}
      else if (lb_alg == BOND_LB_RR)
	{
	  h[0] = bond_lb_round_robin (bif, b[0], n_slaves);
	  h[1] = bond_lb_round_robin (bif, b[1], n_slaves);
	  h[2] = bond_lb_round_robin (bif, b[2], n_slaves);
	  h[3] = bond_lb_round_robin (bif, b[3], n_slaves);
	}
      else if (lb_alg == BOND_LB_BC)
	{
	  h[0] = bond_lb_broadcast (vm, bif, b[0], n_slaves);
	  h[1] = bond_lb_broadcast (vm, bif, b[1], n_slaves);
	  h[2] = bond_lb_broadcast (vm, bif, b[2], n_slaves);
	  h[3] = bond_lb_broadcast (vm, bif, b[3], n_slaves);
	}
      else
	{
	  ASSERT (0);
	}

      n_left -= 4;
      b += 4;
      h += 4;
    }

  while (n_left > 0)
    {
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);

      /* use lb_alg (a compile-time constant equal to bif->lb) so the
         scalar tail specializes the same way as the quad loop above */
      if (lb_alg == BOND_LB_L2)
	h[0] = bond_lb_l2 (b[0]);
      else if (lb_alg == BOND_LB_L34)
	h[0] = bond_lb_l34 (b[0]);
      else if (lb_alg == BOND_LB_L23)
	h[0] = bond_lb_l23 (b[0]);
      else if (lb_alg == BOND_LB_RR)
	h[0] = bond_lb_round_robin (bif, b[0], n_slaves);
      else if (lb_alg == BOND_LB_BC)
	h[0] = bond_lb_broadcast (vm, bif, b[0], n_slaves);
      else
	{
	  ASSERT (0);
	}

      n_left -= 1;
      b += 1;
      h += 1;
    }
}
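
/*
 * bond_tx_inline hashes four buffers per iteration, prefetching the next
 * four buffer headers and the first cache line of their data; the scalar
 * loop mops up the remainder.  Since lb_alg is constant at every call
 * site, the compiler drops the untaken branches in each specialization.
 */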

static_always_inline void
bond_hash_to_port (u32 * h, u32 n_left, u32 n_slaves, int use_modulo_shortcut)
{
  u32 mask = n_slaves - 1;

#ifdef CLIB_HAVE_VEC256
  /* only lower 16 bits of hash due to single precision fp arithmetic */
  u32x8 mask8, sc8u, h8a, h8b;
  f32x8 sc8f;

  if (use_modulo_shortcut)
    {
      mask8 = u32x8_splat (mask);
    }
  else
    {
      mask8 = u32x8_splat (0xffff);
      sc8u = u32x8_splat (n_slaves);
      sc8f = f32x8_from_u32x8 (sc8u);
    }

  while (n_left > 16)
    {
      h8a = u32x8_load_unaligned (h) & mask8;
      h8b = u32x8_load_unaligned (h + 8) & mask8;

      if (use_modulo_shortcut == 0)
	{
	  h8a -= sc8u * u32x8_from_f32x8 (f32x8_from_u32x8 (h8a) / sc8f);
	  h8b -= sc8u * u32x8_from_f32x8 (f32x8_from_u32x8 (h8b) / sc8f);
	}

      u32x8_store_unaligned (h8a, h);
      u32x8_store_unaligned (h8b, h + 8);
      n_left -= 16;
      h += 16;
    }
#endif

  while (n_left > 4)
    {
      if (use_modulo_shortcut)
	{
	  h[0] &= mask;
	  h[1] &= mask;
	  h[2] &= mask;
	  h[3] &= mask;
	}
      else
	{
	  h[0] %= n_slaves;
	  h[1] %= n_slaves;
	  h[2] %= n_slaves;
	  h[3] %= n_slaves;
	}
      n_left -= 4;
      h += 4;
    }
  while (n_left)
    {
      if (use_modulo_shortcut)
	h[0] &= mask;
      else
	h[0] %= n_slaves;
      n_left -= 1;
      h += 1;
    }
}
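
/*
 * Worked example: with n_slaves = 4 the modulo shortcut applies and
 * h % 4 == h & 3, one AND per element.  With n_slaves = 3 the AVX2 path
 * first masks each hash to its low 16 bits, which are exact in single
 * precision, then computes h - 3 * (u32) ((f32) h / 3), i.e. the
 * remainder, avoiding an integer divide per element.
 */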

static_always_inline void
bond_update_sw_if_index (bond_per_thread_data_t * ptd, bond_if_t * bif,
			 u32 * bi, vlib_buffer_t ** b, u32 * data, u32 n_left,
			 int single_sw_if_index)
{
  u32 sw_if_index = data[0];
  u32 *h = data;

  while (n_left >= 4)
    {
      // Prefetch next iteration
      if (n_left >= 8)
	{
	  vlib_buffer_t **pb = b + 4;
	  vlib_prefetch_buffer_header (pb[0], LOAD);
	  vlib_prefetch_buffer_header (pb[1], LOAD);
	  vlib_prefetch_buffer_header (pb[2], LOAD);
	  vlib_prefetch_buffer_header (pb[3], LOAD);
	}

      if (PREDICT_FALSE (single_sw_if_index))
	{
	  vnet_buffer (b[0])->sw_if_index[VLIB_TX] = sw_if_index;
	  vnet_buffer (b[1])->sw_if_index[VLIB_TX] = sw_if_index;
	  vnet_buffer (b[2])->sw_if_index[VLIB_TX] = sw_if_index;
	  vnet_buffer (b[3])->sw_if_index[VLIB_TX] = sw_if_index;

	  bond_tx_add_to_queue (ptd, 0, bi[0]);
	  bond_tx_add_to_queue (ptd, 0, bi[1]);
	  bond_tx_add_to_queue (ptd, 0, bi[2]);
	  bond_tx_add_to_queue (ptd, 0, bi[3]);
	}
      else
	{
	  u32 sw_if_index[4];

	  sw_if_index[0] = *vec_elt_at_index (bif->active_slaves, h[0]);
	  sw_if_index[1] = *vec_elt_at_index (bif->active_slaves, h[1]);
	  sw_if_index[2] = *vec_elt_at_index (bif->active_slaves, h[2]);
	  sw_if_index[3] = *vec_elt_at_index (bif->active_slaves, h[3]);

	  vnet_buffer (b[0])->sw_if_index[VLIB_TX] = sw_if_index[0];
	  vnet_buffer (b[1])->sw_if_index[VLIB_TX] = sw_if_index[1];
	  vnet_buffer (b[2])->sw_if_index[VLIB_TX] = sw_if_index[2];
	  vnet_buffer (b[3])->sw_if_index[VLIB_TX] = sw_if_index[3];

	  bond_tx_add_to_queue (ptd, h[0], bi[0]);
	  bond_tx_add_to_queue (ptd, h[1], bi[1]);
	  bond_tx_add_to_queue (ptd, h[2], bi[2]);
	  bond_tx_add_to_queue (ptd, h[3], bi[3]);
	}

      bi += 4;
      h += 4;
      b += 4;
      n_left -= 4;
    }
  while (n_left)
    {
      if (PREDICT_FALSE (single_sw_if_index))
	{
	  vnet_buffer (b[0])->sw_if_index[VLIB_TX] = sw_if_index;
	  bond_tx_add_to_queue (ptd, 0, bi[0]);
	}
      else
	{
	  u32 sw_if_index0 = *vec_elt_at_index (bif->active_slaves, h[0]);

	  vnet_buffer (b[0])->sw_if_index[VLIB_TX] = sw_if_index0;
	  bond_tx_add_to_queue (ptd, h[0], bi[0]);
	}

      bi += 1;
      h += 1;
      b += 1;
      n_left -= 1;
    }
}
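
/*
 * By the time this runs for the hashed modes, bond_hash_to_port has
 * already reduced each entry of data[] to a port index in [0, n_slaves),
 * so h[i] serves both as the active_slaves lookup index and as the
 * per-port queue number.
 */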

static_always_inline void
bond_tx_trace (vlib_main_t * vm, vlib_node_runtime_t * node, bond_if_t * bif,
	       vlib_buffer_t ** b, u32 n_left, u32 * h)
{
  uword n_trace = vlib_get_trace_count (vm, node);

  while (n_trace > 0 && n_left > 0)
    {
      bond_packet_trace_t *t0;
      ethernet_header_t *eth;
      u32 next0 = 0;

      vlib_trace_buffer (vm, node, next0, b[0], 0 /* follow_chain */ );
      vlib_set_trace_count (vm, node, --n_trace);
      t0 = vlib_add_trace (vm, node, b[0], sizeof (*t0));
      eth = vlib_buffer_get_current (b[0]);
      t0->ethernet = *eth;
      t0->sw_if_index = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
      if (!h)
	{
	  t0->bond_sw_if_index = *vec_elt_at_index (bif->active_slaves, 0);
	}
      else
	{
	  t0->bond_sw_if_index = *vec_elt_at_index (bif->active_slaves, h[0]);
	  h++;
	}
      b++;
      n_left--;
    }
}

VNET_DEVICE_CLASS_TX_FN (bond_dev_class) (vlib_main_t * vm,
					  vlib_node_runtime_t * node,
					  vlib_frame_t * frame)
{
  vnet_interface_output_runtime_t *rund = (void *) node->runtime_data;
  bond_main_t *bm = &bond_main;
  u16 thread_index = vm->thread_index;
  bond_if_t *bif = pool_elt_at_index (bm->interfaces, rund->dev_instance);
  uword n_slaves;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;
  u32 hashes[VLIB_FRAME_SIZE], *h;
  vnet_main_t *vnm = vnet_get_main ();
  bond_per_thread_data_t *ptd = vec_elt_at_index (bm->per_thread_data,
						  thread_index);
  u32 p, sw_if_index;

  if (PREDICT_FALSE (bif->admin_up == 0))
    {
      vlib_buffer_free (vm, vlib_frame_vector_args (frame), frame->n_vectors);
      vlib_increment_simple_counter (vnet_main.interface_main.sw_if_counters +
				     VNET_INTERFACE_COUNTER_DROP,
				     thread_index, bif->sw_if_index,
				     frame->n_vectors);
      vlib_error_count (vm, node->node_index, BOND_TX_ERROR_IF_DOWN,
			frame->n_vectors);
      return frame->n_vectors;
    }

  n_slaves = vec_len (bif->active_slaves);
  if (PREDICT_FALSE (n_slaves == 0))
    {
      vlib_buffer_free (vm, vlib_frame_vector_args (frame), frame->n_vectors);
      vlib_increment_simple_counter (vnet_main.interface_main.sw_if_counters +
				     VNET_INTERFACE_COUNTER_DROP,
				     thread_index, bif->sw_if_index,
				     frame->n_vectors);
      vlib_error_count (vm, node->node_index, BOND_TX_ERROR_NO_SLAVE,
			frame->n_vectors);
      return frame->n_vectors;
    }

  vlib_get_buffers (vm, from, bufs, n_left);

  /* active-backup mode, ship everything to first sw if index */
  if ((bif->lb == BOND_LB_AB) || PREDICT_FALSE (n_slaves == 1))
    {
      sw_if_index = *vec_elt_at_index (bif->active_slaves, 0);

      bond_tx_trace (vm, node, bif, bufs, frame->n_vectors, 0);
      bond_update_sw_if_index (ptd, bif, from, bufs, &sw_if_index, n_left,
			       /* single_sw_if_index */ 1);
      goto done;
    }

  if (bif->lb == BOND_LB_BC)
    {
      sw_if_index = *vec_elt_at_index (bif->active_slaves, 0);

      bond_tx_inline (vm, bif, bufs, hashes, n_left, n_slaves, BOND_LB_BC);
      bond_tx_trace (vm, node, bif, bufs, frame->n_vectors, 0);
      bond_update_sw_if_index (ptd, bif, from, bufs, &sw_if_index, n_left,
			       /* single_sw_if_index */ 1);
      goto done;
    }

  /* if there is at least one slave on the local numa node, only slaves on
     the local numa node will transmit pkts when bif->local_numa_only is
     enabled */
  if (bif->n_numa_slaves >= 1)
    n_slaves = bif->n_numa_slaves;

  if (bif->lb == BOND_LB_L2)
    bond_tx_inline (vm, bif, bufs, hashes, n_left, n_slaves, BOND_LB_L2);
  else if (bif->lb == BOND_LB_L34)
    bond_tx_inline (vm, bif, bufs, hashes, n_left, n_slaves, BOND_LB_L34);
  else if (bif->lb == BOND_LB_L23)
    bond_tx_inline (vm, bif, bufs, hashes, n_left, n_slaves, BOND_LB_L23);
  else if (bif->lb == BOND_LB_RR)
    bond_tx_inline (vm, bif, bufs, hashes, n_left, n_slaves, BOND_LB_RR);
  else
    ASSERT (0);

  /* calculate port out of hash */
  h = hashes;
  if (BOND_MODULO_SHORTCUT (n_slaves))
    bond_hash_to_port (h, frame->n_vectors, n_slaves, 1);
  else
    bond_hash_to_port (h, frame->n_vectors, n_slaves, 0);

  bond_tx_trace (vm, node, bif, bufs, frame->n_vectors, h);

  bond_update_sw_if_index (ptd, bif, from, bufs, hashes, frame->n_vectors,
			   /* single_sw_if_index */ 0);

done:
  for (p = 0; p < n_slaves; p++)
    {
      vlib_frame_t *f;
      u32 *to_next;

      sw_if_index = *vec_elt_at_index (bif->active_slaves, p);
      if (PREDICT_TRUE (ptd->per_port_queue[p].n_buffers))
	{
	  f = vnet_get_frame_to_sw_interface (vnm, sw_if_index);
	  f->n_vectors = ptd->per_port_queue[p].n_buffers;
	  to_next = vlib_frame_vector_args (f);
	  clib_memcpy_fast (to_next, ptd->per_port_queue[p].buffers,
			    f->n_vectors * sizeof (u32));
	  vnet_put_frame_to_sw_interface (vnm, sw_if_index, f);
	  ptd->per_port_queue[p].n_buffers = 0;
	}
    }
  return frame->n_vectors;
}
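
/*
 * The dispatch above covers every load-balance mode: active-backup and
 * single-slave bonds bypass hashing, broadcast replicates buffers in
 * bond_lb_broadcast, and the hashed modes (l2/l23/l34) plus round-robin
 * go through bond_hash_to_port before the per-port frames are flushed at
 * done:.  The mode itself is chosen when the bond is created, e.g. via
 * the bonding CLI ("create bond mode xor load-balance l34" or similar).
 */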

static walk_rc_t
bond_active_interface_switch_cb (vnet_main_t * vnm, u32 sw_if_index,
				 void *arg)
{
  bond_main_t *bm = &bond_main;

  send_ip4_garp (bm->vlib_main, sw_if_index);
  send_ip6_na (bm->vlib_main, sw_if_index);

  return (WALK_CONTINUE);
}
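
/*
 * The gratuitous ARP and unsolicited neighbor advertisement sent above
 * let attached peers refresh their ARP/ND caches when the active slave
 * changes, so traffic moves to the new port without waiting for cache
 * entries to age out.
 */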

static uword
bond_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
{
  vnet_main_t *vnm = vnet_get_main ();
  uword event_type, *event_data = 0;

  while (1)
    {
      u32 i;
      u32 hw_if_index;

      vlib_process_wait_for_event (vm);
      event_type = vlib_process_get_events (vm, &event_data);
      ASSERT (event_type == BOND_SEND_GARP_NA);
      for (i = 0; i < vec_len (event_data); i++)
	{
	  hw_if_index = event_data[i];
	  if (vnet_get_hw_interface_or_null (vnm, hw_if_index))
	    /* walk hw interface to process all subinterfaces */
	    vnet_hw_interface_walk_sw (vnm, hw_if_index,
				       bond_active_interface_switch_cb, 0);
	}
      vec_reset_length (event_data);
    }
  return 0;
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (bond_process_node) = {
  .function = bond_process,
  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
  .type = VLIB_NODE_TYPE_PROCESS,
  .name = "bond-process",
};
/* *INDENT-ON* */

/* *INDENT-OFF* */
VNET_DEVICE_CLASS (bond_dev_class) = {
  .name = "bond",
  .tx_function_n_errors = BOND_TX_N_ERROR,
  .tx_function_error_strings = bond_tx_error_strings,
  .format_device_name = format_bond_interface_name,
  .set_l2_mode_function = bond_set_l2_mode_function,
  .admin_up_down_function = bond_interface_admin_up_down,
  .subif_add_del_function = bond_subif_add_del_function,
  .format_tx_trace = format_bond_tx_trace,
};
/* *INDENT-ON* */

static clib_error_t *
bond_slave_interface_add_del (vnet_main_t * vnm, u32 sw_if_index, u32 is_add)
{
  bond_main_t *bm = &bond_main;
  slave_if_t *sif;
  bond_detach_slave_args_t args = { 0 };

  if (is_add)
    return 0;
  sif = bond_get_slave_by_sw_if_index (sw_if_index);
  if (!sif)
    return 0;
  args.slave = sw_if_index;
  bond_detach_slave (bm->vlib_main, &args);
  return args.error;
}

VNET_SW_INTERFACE_ADD_DEL_FUNCTION (bond_slave_interface_add_del);

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */