input.c revision 7ca5aaac
/*
 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>

#include <avf/avf.h>

#define foreach_avf_input_error \
  _(BUFFER_ALLOC, "buffer alloc error")

typedef enum
{
#define _(f,s) AVF_INPUT_ERROR_##f,
  foreach_avf_input_error
#undef _
    AVF_INPUT_N_ERROR,
} avf_input_error_t;

static __clib_unused char *avf_input_error_strings[] = {
#define _(n,s) s,
  foreach_avf_input_error
#undef _
};
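
/* For reference, the X-macro table above expands to
 *
 *   typedef enum { AVF_INPUT_ERROR_BUFFER_ALLOC, AVF_INPUT_N_ERROR }
 *     avf_input_error_t;
 *   static char *avf_input_error_strings[] = { "buffer alloc error" };
 *
 * so adding a counter only takes a new _(NAME, "description") entry. */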

#define AVF_INPUT_REFILL_THRESHOLD 32

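/* Initialize one rx descriptor: buffer address in qword[0], qword[1]
 * cleared so the DD status bit starts at zero.  With 256-bit SIMD a
 * single unaligned 32-byte store covers the whole descriptor; the
 * scalar fallback initializes the first two qwords. */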
static_always_inline void
avf_rx_desc_write (avf_rx_desc_t * d, u64 addr)
{
#ifdef CLIB_HAVE_VEC256
  u64x4 v = { addr, 0, 0, 0 };
  u64x4_store_unaligned (v, (void *) d);
#else
  d->qword[0] = addr;
  d->qword[1] = 0;
#endif
}

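/* Refill the rx ring with fresh buffers.  n_refill is the number of
 * free slots (one less than the ring size, minus what is already
 * enqueued); the refill is skipped while it is at or below
 * AVF_INPUT_REFILL_THRESHOLD and is rounded down to a multiple of 8 so
 * descriptors are always written in groups of eight.  The tail
 * register is bumped once at the end, behind a store barrier, so the
 * device never sees a half-written descriptor. */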
static_always_inline void
avf_rxq_refill (vlib_main_t * vm, vlib_node_runtime_t * node, avf_rxq_t * rxq,
		int use_va_dma)
{
  u16 n_refill, mask, n_alloc, slot, size;
  vlib_buffer_t *b[8];
  avf_rx_desc_t *d, *first_d;
  void *p[8];

  size = rxq->size;
  mask = size - 1;
  n_refill = mask - rxq->n_enqueued;
  if (PREDICT_TRUE (n_refill <= AVF_INPUT_REFILL_THRESHOLD))
    return;

  slot = (rxq->next - n_refill - 1) & mask;

  n_refill &= ~7;		/* round down to a multiple of 8 */
  n_alloc =
    vlib_buffer_alloc_to_ring_from_pool (vm, rxq->bufs, slot, size, n_refill,
					 rxq->buffer_pool_index);

  if (PREDICT_FALSE (n_alloc != n_refill))
    {
      vlib_error_count (vm, node->node_index,
			AVF_INPUT_ERROR_BUFFER_ALLOC, 1);
      if (n_alloc)
	vlib_buffer_free_from_ring (vm, rxq->bufs, slot, size, n_alloc);
      return;
    }

  rxq->n_enqueued += n_alloc;
  first_d = rxq->descs;

  ASSERT (slot % 8 == 0);

  while (n_alloc >= 8)
    {
      d = first_d + slot;

      if (use_va_dma)
	{
	  vlib_get_buffers_with_offset (vm, rxq->bufs + slot, p, 8,
					sizeof (vlib_buffer_t));
	  avf_rx_desc_write (d + 0, pointer_to_uword (p[0]));
	  avf_rx_desc_write (d + 1, pointer_to_uword (p[1]));
	  avf_rx_desc_write (d + 2, pointer_to_uword (p[2]));
	  avf_rx_desc_write (d + 3, pointer_to_uword (p[3]));
	  avf_rx_desc_write (d + 4, pointer_to_uword (p[4]));
	  avf_rx_desc_write (d + 5, pointer_to_uword (p[5]));
	  avf_rx_desc_write (d + 6, pointer_to_uword (p[6]));
	  avf_rx_desc_write (d + 7, pointer_to_uword (p[7]));
	}
      else
	{
	  vlib_get_buffers (vm, rxq->bufs + slot, b, 8);
	  avf_rx_desc_write (d + 0, vlib_buffer_get_pa (vm, b[0]));
	  avf_rx_desc_write (d + 1, vlib_buffer_get_pa (vm, b[1]));
	  avf_rx_desc_write (d + 2, vlib_buffer_get_pa (vm, b[2]));
	  avf_rx_desc_write (d + 3, vlib_buffer_get_pa (vm, b[3]));
	  avf_rx_desc_write (d + 4, vlib_buffer_get_pa (vm, b[4]));
	  avf_rx_desc_write (d + 5, vlib_buffer_get_pa (vm, b[5]));
	  avf_rx_desc_write (d + 6, vlib_buffer_get_pa (vm, b[6]));
	  avf_rx_desc_write (d + 7, vlib_buffer_get_pa (vm, b[7]));
	}

      /* next */
      slot = (slot + 8) & mask;
      n_alloc -= 8;
    }

  CLIB_MEMORY_STORE_BARRIER ();
  *(rxq->qrx_tail) = slot;
}

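/* Attach the tail buffers of a multi-segment packet to head buffer hb.
 * qw1 is the head descriptor's status qword; the per-packet tail
 * record t carries the qword[1]s and buffer indices collected while
 * walking the descriptor ring.  Returns the accumulated tail length so
 * the caller can add it to the byte counters. */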
static_always_inline uword
avf_rx_attach_tail (vlib_main_t * vm, vlib_buffer_t * bt, vlib_buffer_t * b,
		    u64 qw1, avf_rx_tail_t * t)
{
  vlib_buffer_t *hb = b;
  u32 tlnifb = 0, i = 0;

  if (qw1 & AVF_RXD_STATUS_EOP)
    return 0;

  while ((qw1 & AVF_RXD_STATUS_EOP) == 0)
    {
      ASSERT (i < AVF_RX_MAX_DESC_IN_CHAIN - 1);
      ASSERT (qw1 & AVF_RXD_STATUS_DD);
      qw1 = t->qw1s[i];
      b->next_buffer = t->buffers[i];
      b->flags |= VLIB_BUFFER_NEXT_PRESENT;
      b = vlib_get_buffer (vm, b->next_buffer);
      vlib_buffer_copy_template (b, bt);
      tlnifb += b->current_length = qw1 >> AVF_RXD_LEN_SHIFT;
      i++;
    }

  hb->total_length_not_including_first_buffer = tlnifb;
  hb->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
  return tlnifb;
}

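/* Turn the raw qword[1]s collected from the ring into initialized vlib
 * buffers: apply the buffer template, set current_length from the
 * descriptor length field and, when maybe_multiseg is set, stitch
 * chained buffers together.  Packets are processed four at a time with
 * buffer headers prefetched two quads ahead; maybe_multiseg is a
 * compile-time constant in both callers, so the single-segment path
 * pays nothing for the multi-segment code. */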
static_always_inline uword
avf_process_rx_burst (vlib_main_t * vm, vlib_node_runtime_t * node,
		      avf_per_thread_data_t * ptd, u32 n_left,
		      int maybe_multiseg)
{
  vlib_buffer_t bt;
  vlib_buffer_t **b = ptd->bufs;
  u64 *qw1 = ptd->qw1s;
  avf_rx_tail_t *tail = ptd->tails;
  uword n_rx_bytes = 0;

  /* copy the template into a local variable - saves a load per packet */
  vlib_buffer_copy_template (&bt, &ptd->buffer_template);

  while (n_left >= 4)
    {
      if (n_left >= 12)
	{
	  vlib_prefetch_buffer_header (b[8], LOAD);
	  vlib_prefetch_buffer_header (b[9], LOAD);
	  vlib_prefetch_buffer_header (b[10], LOAD);
	  vlib_prefetch_buffer_header (b[11], LOAD);
	}

      vlib_buffer_copy_template (b[0], &bt);
      vlib_buffer_copy_template (b[1], &bt);
      vlib_buffer_copy_template (b[2], &bt);
      vlib_buffer_copy_template (b[3], &bt);

      n_rx_bytes += b[0]->current_length = qw1[0] >> AVF_RXD_LEN_SHIFT;
      n_rx_bytes += b[1]->current_length = qw1[1] >> AVF_RXD_LEN_SHIFT;
      n_rx_bytes += b[2]->current_length = qw1[2] >> AVF_RXD_LEN_SHIFT;
      n_rx_bytes += b[3]->current_length = qw1[3] >> AVF_RXD_LEN_SHIFT;

      if (maybe_multiseg)
	{
	  n_rx_bytes += avf_rx_attach_tail (vm, &bt, b[0], qw1[0], tail + 0);
	  n_rx_bytes += avf_rx_attach_tail (vm, &bt, b[1], qw1[1], tail + 1);
	  n_rx_bytes += avf_rx_attach_tail (vm, &bt, b[2], qw1[2], tail + 2);
	  n_rx_bytes += avf_rx_attach_tail (vm, &bt, b[3], qw1[3], tail + 3);
	}

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[1]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[2]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[3]);

      /* next */
      qw1 += 4;
      tail += 4;
      b += 4;
      n_left -= 4;
    }
  while (n_left)
    {
      vlib_buffer_copy_template (b[0], &bt);

      n_rx_bytes += b[0]->current_length = qw1[0] >> AVF_RXD_LEN_SHIFT;

      if (maybe_multiseg)
	n_rx_bytes += avf_rx_attach_tail (vm, &bt, b[0], qw1[0], tail + 0);

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);

      /* next */
      qw1 += 1;
      tail += 1;
      b += 1;
      n_left -= 1;
    }
  return n_rx_bytes;
}

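/* Per-queue device input: collect completed descriptors from the rx
 * ring into the per-thread rx vector, refill the ring, initialize the
 * buffers and hand them off to the next node (normally
 * ethernet-input). */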
static_always_inline uword
avf_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
			 vlib_frame_t * frame, avf_device_t * ad, u16 qid)
{
  avf_main_t *am = &avf_main;
  vnet_main_t *vnm = vnet_get_main ();
  u32 thr_idx = vlib_get_thread_index ();
  avf_per_thread_data_t *ptd =
    vec_elt_at_index (am->per_thread_data, thr_idx);
  avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, qid);
  u32 n_trace, n_rx_packets = 0, n_rx_bytes = 0;
  u16 n_tail_desc = 0;
  u64 or_qw1 = 0;
  u32 *bi, *to_next, n_left_to_next;
  vlib_buffer_t *bt = &ptd->buffer_template;
  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  u16 next = rxq->next;
  u16 size = rxq->size;
  u16 mask = size - 1;
  avf_rx_desc_t *d, *fd = rxq->descs;
#ifdef CLIB_HAVE_VEC256
  u64x4 q1x4, or_q1x4 = { 0 };
  u64x4 dd_eop_mask4 = u64x4_splat (AVF_RXD_STATUS_DD | AVF_RXD_STATUS_EOP);
#endif

  /* is there anything on the ring? */
  d = fd + next;
  if ((d->qword[1] & AVF_RXD_STATUS_DD) == 0)
    goto done;

  if (PREDICT_FALSE (ad->per_interface_next_index != ~0))
    next_index = ad->per_interface_next_index;

  if (PREDICT_FALSE (vnet_device_input_have_features (ad->sw_if_index)))
    vnet_feature_start_device_input_x1 (ad->sw_if_index, &next_index, bt);

  vlib_get_new_next_frame (vm, node, next_index, to_next, n_left_to_next);

  /* fetch up to AVF_RX_VECTOR_SZ completed descriptors from the rx ring
     and copy the data we need from each descriptor into the rx vector */
  bi = to_next;

  while (n_rx_packets < AVF_RX_VECTOR_SZ)
    {
      if (next + 11 < size)
	{
	  int stride = 8;
	  CLIB_PREFETCH ((void *) (fd + (next + stride)),
			 CLIB_CACHE_LINE_BYTES, LOAD);
	  CLIB_PREFETCH ((void *) (fd + (next + stride + 1)),
			 CLIB_CACHE_LINE_BYTES, LOAD);
	  CLIB_PREFETCH ((void *) (fd + (next + stride + 2)),
			 CLIB_CACHE_LINE_BYTES, LOAD);
	  CLIB_PREFETCH ((void *) (fd + (next + stride + 3)),
			 CLIB_CACHE_LINE_BYTES, LOAD);
	}

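      /* fast path: gather qword[1] from four consecutive descriptors
         and accept all four at once when each has both DD and EOP set,
         i.e. they are all complete single-segment packets */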
#ifdef CLIB_HAVE_VEC256
      if (n_rx_packets >= AVF_RX_VECTOR_SZ - 4 || next >= size - 4)
	goto one_by_one;

      q1x4 = u64x4_gather ((void *) &d[0].qword[1], (void *) &d[1].qword[1],
			   (void *) &d[2].qword[1], (void *) &d[3].qword[1]);

      /* not all packets are ready or at least one of them is chained */
      if (!u64x4_is_equal (q1x4 & dd_eop_mask4, dd_eop_mask4))
	goto one_by_one;

      or_q1x4 |= q1x4;
      u64x4_store_unaligned (q1x4, ptd->qw1s + n_rx_packets);
      vlib_buffer_copy_indices (bi, rxq->bufs + next, 4);

      /* next */
      next = (next + 4) & mask;
      d = fd + next;
      n_rx_packets += 4;
      bi += 4;
      continue;
    one_by_one:
#endif
      CLIB_PREFETCH ((void *) (fd + ((next + 8) & mask)),
		     CLIB_CACHE_LINE_BYTES, LOAD);

      if (avf_rxd_is_not_dd (d))
	break;

      bi[0] = rxq->bufs[next];

      /* deal with chained buffers */
      if (PREDICT_FALSE (avf_rxd_is_not_eop (d)))
	{
	  u16 tail_desc = 0;
	  u16 tail_next = next;
	  avf_rx_tail_t *tail = ptd->tails + n_rx_packets;
	  avf_rx_desc_t *td;
	  do
	    {
	      tail_next = (tail_next + 1) & mask;
	      td = fd + tail_next;

	      /* bail out in case of incomplete transaction */
	      if (avf_rxd_is_not_dd (td))
		goto no_more_desc;

	      or_qw1 |= tail->qw1s[tail_desc] = td[0].qword[1];
	      tail->buffers[tail_desc] = rxq->bufs[tail_next];
	      tail_desc++;
	    }
	  while (avf_rxd_is_not_eop (td));
	  next = tail_next;
	  n_tail_desc += tail_desc;
	}

      or_qw1 |= ptd->qw1s[n_rx_packets] = d[0].qword[1];

      /* next */
      next = (next + 1) & mask;
      d = fd + next;
      n_rx_packets++;
      bi++;
    }
no_more_desc:

  if (n_rx_packets == 0)
    goto done;

  rxq->next = next;
  rxq->n_enqueued -= n_rx_packets + n_tail_desc;

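  /* fold the SIMD accumulator into the scalar OR so the checks below
     also see status bits from packets taken on the four-at-a-time
     path */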
#ifdef CLIB_HAVE_VEC256
  or_qw1 |= or_q1x4[0] | or_q1x4[1] | or_q1x4[2] | or_q1x4[3];
#endif

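  /* refill the rx ring; use_va_dma is passed as a compile-time
     constant, so each call below inlines a specialization of
     avf_rxq_refill with the per-descriptor branch resolved */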
  if (ad->flags & AVF_DEVICE_F_VA_DMA)
    avf_rxq_refill (vm, node, rxq, 1 /* use_va_dma */ );
  else
    avf_rxq_refill (vm, node, rxq, 0 /* use_va_dma */ );

  vlib_get_buffers (vm, to_next, ptd->bufs, n_rx_packets);

  vnet_buffer (bt)->sw_if_index[VLIB_RX] = ad->sw_if_index;
  vnet_buffer (bt)->sw_if_index[VLIB_TX] = ~0;
  bt->buffer_pool_index = rxq->buffer_pool_index;
  bt->ref_count = 1;

  if (n_tail_desc)
    n_rx_bytes = avf_process_rx_burst (vm, node, ptd, n_rx_packets, 1);
  else
    n_rx_bytes = avf_process_rx_burst (vm, node, ptd, n_rx_packets, 0);

  /* packet trace if enabled */
  if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node))))
    {
      u32 n_left = n_rx_packets, i = 0, j;
      bi = to_next;

      while (n_trace && n_left)
	{
	  vlib_buffer_t *b;
	  avf_input_trace_t *tr;
	  b = vlib_get_buffer (vm, bi[0]);
	  vlib_trace_buffer (vm, node, next_index, b, /* follow_chain */ 0);
	  tr = vlib_add_trace (vm, node, b, sizeof (*tr));
	  tr->next_index = next_index;
	  tr->qid = qid;
	  tr->hw_if_index = ad->hw_if_index;
	  tr->qw1s[0] = ptd->qw1s[i];
	  for (j = 1; j < AVF_RX_MAX_DESC_IN_CHAIN; j++)
	    tr->qw1s[j] = ptd->tails[i].qw1s[j - 1];

	  /* next */
	  n_trace--;
	  n_left--;
	  bi++;
	  i++;
	}
      vlib_set_trace_count (vm, node, n_trace);
    }

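  /* when the frame goes straight to ethernet-input, decorate it with
     hints that node can exploit: every packet shares one sw_if_index,
     and the IPv4 checksum is known good if no descriptor in the burst
     reported an IP checksum error (or_qw1 is the OR of every qword[1]
     seen).  vlib_frame_no_append () keeps other enqueuers from adding
     packets that might not satisfy these hints. */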
  if (PREDICT_TRUE (next_index == VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT))
    {
      vlib_next_frame_t *nf;
      vlib_frame_t *f;
      ethernet_input_frame_t *ef;
      nf = vlib_node_runtime_get_next_frame (vm, node, next_index);
      f = vlib_get_frame (vm, nf->frame);
      f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;

      ef = vlib_frame_scalar_args (f);
      ef->sw_if_index = ad->sw_if_index;
      ef->hw_if_index = ad->hw_if_index;

      if ((or_qw1 & AVF_RXD_ERROR_IPE) == 0)
	f->flags |= ETH_INPUT_FRAME_F_IP4_CKSUM_OK;
      vlib_frame_no_append (f);
    }

  n_left_to_next -= n_rx_packets;
  vlib_put_next_frame (vm, node, next_index, n_left_to_next);

  vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters
				   + VNET_INTERFACE_COUNTER_RX, thr_idx,
				   ad->hw_if_index, n_rx_packets, n_rx_bytes);

done:
  return n_rx_packets;
}

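/* Node dispatch function: poll every rx queue assigned to this worker
 * thread, skipping devices that are not admin-up. */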
VLIB_NODE_FN (avf_input_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
			       vlib_frame_t * frame)
{
  u32 n_rx = 0;
  avf_main_t *am = &avf_main;
  vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
  vnet_device_and_queue_t *dq;

  foreach_device_and_queue (dq, rt->devices_and_queues)
  {
    avf_device_t *ad;
    ad = vec_elt_at_index (am->devices, dq->dev_instance);
    if ((ad->flags & AVF_DEVICE_F_ADMIN_UP) == 0)
      continue;
    n_rx += avf_device_input_inline (vm, node, frame, ad, dq->queue_id);
  }
  return n_rx;
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (avf_input_node) = {
  .name = "avf-input",
  .sibling_of = "device-input",
  .format_trace = format_avf_input_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_DISABLED,
  .n_errors = AVF_INPUT_N_ERROR,
  .error_strings = avf_input_error_strings,
  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
};

/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */