/*
 *------------------------------------------------------------------
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <net/if.h>
#include <linux/if_tun.h>
#include <sys/ioctl.h>
#include <sys/eventfd.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/ip/ip6_packet.h>
#include <vnet/udp/udp_packet.h>
#include <vnet/devices/virtio/virtio.h>

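/*
 * RX error counters: each _(SYMBOL, "string") entry expands into a
 * VIRTIO_INPUT_ERROR_<SYMBOL> enum value and the matching counter
 * string in virtio_input_error_strings.
 */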
#define foreach_virtio_input_error \
  _(UNKNOWN, "unknown")

typedef enum
{
#define _(f,s) VIRTIO_INPUT_ERROR_##f,
  foreach_virtio_input_error
#undef _
    VIRTIO_INPUT_N_ERROR,
} virtio_input_error_t;

static char *virtio_input_error_strings[] = {
#define _(n,s) s,
  foreach_virtio_input_error
#undef _
};

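/*
 * Per-packet trace record: the chosen next node, receiving hardware
 * interface, vring index, packet length and a copy of the virtio net
 * header, printed by format_virtio_input_trace.
 */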
typedef struct
{
  u32 next_index;
  u32 hw_if_index;
  u16 ring;
  u16 len;
  struct virtio_net_hdr_v1 hdr;
} virtio_input_trace_t;

static u8 *
format_virtio_input_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  virtio_input_trace_t *t = va_arg (*args, virtio_input_trace_t *);
  u32 indent = format_get_indent (s);

  s = format (s, "virtio: hw_if_index %d next-index %d vring %u len %u",
	      t->hw_if_index, t->next_index, t->ring, t->len);
  s = format (s, "\n%Uhdr: flags 0x%02x gso_type 0x%02x hdr_len %u "
	      "gso_size %u csum_start %u csum_offset %u num_buffers %u",
	      format_white_space, indent + 2,
	      t->hdr.flags, t->hdr.gso_type, t->hdr.hdr_len, t->hdr.gso_size,
	      t->hdr.csum_start, t->hdr.csum_offset, t->hdr.num_buffers);
  return s;
}

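/*
 * Refill the RX vring: allocate buffers in chunks of up to 64 and post
 * them as device-writable descriptors on the avail ring, looping until
 * less than 1/8 of the ring remains free. Each buffer is rewound by
 * hdr_sz so the device can write the virtio net header in front of the
 * packet data; PCI devices get physical addresses, tap/tun get virtual
 * ones. The ring size is a power of two, so "index & mask" wraps the
 * free-running 16-bit indices (e.g. with size 256, avail index 260
 * maps to slot 4). A store barrier publishes the descriptors before
 * avail->idx, and the device is kicked unless it masked notifications.
 */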
static_always_inline void
virtio_refill_vring (vlib_main_t * vm, virtio_if_t * vif,
		     virtio_vring_t * vring, const int hdr_sz)
{
  u16 used, next, avail, n_slots;
  u16 sz = vring->size;
  u16 mask = sz - 1;

more:
  used = vring->desc_in_use;

  if (sz - used < sz / 8)
    return;

  /* deliver free buffers in chunks of 64 */
  n_slots = clib_min (sz - used, 64);

  next = vring->desc_next;
  avail = vring->avail->idx;
  n_slots =
    vlib_buffer_alloc_to_ring_from_pool (vm, vring->buffers, next,
					 vring->size, n_slots,
					 vring->buffer_pool_index);

  if (n_slots == 0)
    return;

  while (n_slots)
    {
      struct vring_desc *d = &vring->desc[next];
      vlib_buffer_t *b = vlib_get_buffer (vm, vring->buffers[next]);
      /*
       * current_data may carry a stale offset from the buffer's
       * previous use, so reset it to 0 before rewinding for the
       * virtio header.
       */
      b->current_data = 0;
      b->current_data -= hdr_sz;
      memset (vlib_buffer_get_current (b), 0, hdr_sz);
      d->addr =
	((vif->type == VIRTIO_IF_TYPE_PCI) ? vlib_buffer_get_current_pa (vm,
									 b) :
	 pointer_to_uword (vlib_buffer_get_current (b)));
      d->len = vlib_buffer_get_default_data_size (vm) + hdr_sz;
      d->flags = VRING_DESC_F_WRITE;
      vring->avail->ring[avail & mask] = next;
      avail++;
      next = (next + 1) & mask;
      n_slots--;
      used++;
    }
  CLIB_MEMORY_STORE_BARRIER ();
  vring->avail->idx = avail;
  vring->desc_next = next;
  vring->desc_in_use = used;

  if ((vring->used->flags & VIRTIO_RING_FLAG_MASK_INT) == 0)
    {
      virtio_kick (vm, vring, vif);
    }
  goto more;
}

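/*
 * When the device sets VIRTIO_NET_HDR_F_NEEDS_CSUM, the packet arrives
 * with its L4 checksum unfinished. Parse the ethernet header (including
 * up to two VLAN tags), record the l2/l3/l4 header offsets, and set the
 * offload flags so the checksum is completed later in the graph. The L4
 * protocol and header size are returned for fill_gso_buffer_flags.
 */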
static_always_inline void
virtio_needs_csum (vlib_buffer_t * b0, struct virtio_net_hdr_v1 *hdr,
		   u8 * l4_proto, u8 * l4_hdr_sz)
{
  if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
    {
      ethernet_header_t *eh =
	(ethernet_header_t *) vlib_buffer_get_current (b0);
      u16 ethertype = clib_net_to_host_u16 (eh->type);
      u16 l2hdr_sz = sizeof (ethernet_header_t);

      if (ethernet_frame_is_tagged (ethertype))
	{
	  ethernet_vlan_header_t *vlan = (ethernet_vlan_header_t *) (eh + 1);

	  ethertype = clib_net_to_host_u16 (vlan->type);
	  l2hdr_sz += sizeof (*vlan);
	  if (ethertype == ETHERNET_TYPE_VLAN)
	    {
	      vlan++;
	      ethertype = clib_net_to_host_u16 (vlan->type);
	      l2hdr_sz += sizeof (*vlan);
	    }
	}

      vnet_buffer (b0)->l2_hdr_offset = 0;
      vnet_buffer (b0)->l3_hdr_offset = l2hdr_sz;
      if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP4))
	{
	  ip4_header_t *ip4 =
	    (ip4_header_t *) (vlib_buffer_get_current (b0) + l2hdr_sz);
	  vnet_buffer (b0)->l4_hdr_offset = l2hdr_sz + ip4_header_bytes (ip4);
	  *l4_proto = ip4->protocol;
	  b0->flags |=
	    (VNET_BUFFER_F_IS_IP4 | VNET_BUFFER_F_OFFLOAD_IP_CKSUM);
	  b0->flags |=
	    (VNET_BUFFER_F_L2_HDR_OFFSET_VALID
	     | VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
	     VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
	}
      else if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP6))
	{
	  ip6_header_t *ip6 =
	    (ip6_header_t *) (vlib_buffer_get_current (b0) + l2hdr_sz);
	  vnet_buffer (b0)->l4_hdr_offset = l2hdr_sz + sizeof (ip6_header_t);
	  /* FIXME IPv6 EH traversal */
	  *l4_proto = ip6->protocol;
	  b0->flags |= (VNET_BUFFER_F_IS_IP6 |
			VNET_BUFFER_F_L2_HDR_OFFSET_VALID
			| VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
			VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
	}
      if (*l4_proto == IP_PROTOCOL_TCP)
	{
	  b0->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
	  tcp_header_t *tcp = (tcp_header_t *) (vlib_buffer_get_current (b0) +
						vnet_buffer
						(b0)->l4_hdr_offset);
	  *l4_hdr_sz = tcp_header_bytes (tcp);
	  tcp->checksum = 0;
	}
      else if (*l4_proto == IP_PROTOCOL_UDP)
	{
	  b0->flags |= VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
	  udp_header_t *udp = (udp_header_t *) (vlib_buffer_get_current (b0) +
						vnet_buffer
						(b0)->l4_hdr_offset);
	  *l4_hdr_sz = sizeof (*udp);
	  udp->checksum = 0;
	}
    }
}

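/*
 * Translate the device-supplied GSO type into VPP GSO metadata so the
 * packet can be segmented on output. Only TCPv4/TCPv6 coalesced packets
 * are handled; the code assumes NEEDS_CSUM accompanies any GSO type,
 * hence the asserts.
 */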
static_always_inline void
fill_gso_buffer_flags (vlib_buffer_t * b0, struct virtio_net_hdr_v1 *hdr,
		       u8 l4_proto, u8 l4_hdr_sz)
{
  if (hdr->gso_type == VIRTIO_NET_HDR_GSO_TCPV4)
    {
      ASSERT (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM);
      vnet_buffer2 (b0)->gso_size = hdr->gso_size;
      vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
      b0->flags |= VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP4;
    }
  else if (hdr->gso_type == VIRTIO_NET_HDR_GSO_TCPV6)
    {
      ASSERT (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM);
      vnet_buffer2 (b0)->gso_size = hdr->gso_size;
      vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
      b0->flags |= VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP6;
    }
}

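/*
 * Per-queue RX path, specialized at compile time on gso_enabled and
 * checksum_offload_enabled so those branches are constant in the fast
 * path. Drains the used ring, strips the virtio net header, chains
 * mergeable buffers (num_buffers > 1) into a single vlib buffer chain,
 * applies offload metadata, and enqueues to ethernet-input (or a
 * redirected/feature next node) before updating RX counters and
 * refilling the ring.
 */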
static_always_inline uword
virtio_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
			    vlib_frame_t * frame, virtio_if_t * vif, u16 qid,
			    int gso_enabled, int checksum_offload_enabled)
{
  vnet_main_t *vnm = vnet_get_main ();
  u32 thread_index = vm->thread_index;
  uword n_trace = vlib_get_trace_count (vm, node);
  virtio_vring_t *vring = vec_elt_at_index (vif->rxq_vrings, qid);
  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  const int hdr_sz = vif->virtio_net_hdr_sz;
  u32 *to_next = 0;
  u32 n_rx_packets = 0;
  u32 n_rx_bytes = 0;
  u16 mask = vring->size - 1;
  u16 last = vring->last_used_idx;
  u16 n_left = vring->used->idx - last;

  if ((vring->used->flags & VIRTIO_RING_FLAG_MASK_INT) == 0 &&
      vring->last_kick_avail_idx != vring->avail->idx)
    virtio_kick (vm, vring, vif);

  if (n_left == 0)
    goto refill;

  while (n_left)
    {
      u32 n_left_to_next;
      u32 next0 = next_index;
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left && n_left_to_next)
	{
	  u8 l4_proto = 0, l4_hdr_sz = 0;
	  u16 num_buffers = 1;
	  struct vring_used_elem *e = &vring->used->ring[last & mask];
	  struct virtio_net_hdr_v1 *hdr;
	  u16 slot = e->id;
	  u16 len = e->len - hdr_sz;
	  u32 bi0 = vring->buffers[slot];
	  vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
	  hdr = vlib_buffer_get_current (b0);
	  if (hdr_sz == sizeof (struct virtio_net_hdr_v1))
	    num_buffers = hdr->num_buffers;

	  b0->current_data += hdr_sz;
	  b0->current_length = len;
	  b0->total_length_not_including_first_buffer = 0;
	  b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;

	  if (checksum_offload_enabled)
	    virtio_needs_csum (b0, hdr, &l4_proto, &l4_hdr_sz);

	  if (gso_enabled)
	    fill_gso_buffer_flags (b0, hdr, l4_proto, l4_hdr_sz);

	  vnet_buffer (b0)->sw_if_index[VLIB_RX] = vif->sw_if_index;
	  vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;

	  /* if multisegment packet */
	  if (PREDICT_FALSE (num_buffers > 1))
	    {
	      vlib_buffer_t *pb, *cb;
	      pb = b0;
	      while (num_buffers > 1)
		{
		  last++;
		  e = &vring->used->ring[last & mask];
		  u32 cbi = vring->buffers[e->id];
		  cb = vlib_get_buffer (vm, cbi);

		  /* current buffer */
		  cb->current_length = e->len;

		  /* previous buffer */
		  pb->next_buffer = cbi;
		  pb->flags |= VLIB_BUFFER_NEXT_PRESENT;

		  /* first buffer */
		  b0->total_length_not_including_first_buffer += e->len;

		  pb = cb;
		  vring->desc_in_use--;
		  num_buffers--;
		  n_left--;
		}
	    }

	  if (PREDICT_FALSE (vif->per_interface_next_index != ~0))
	    next0 = vif->per_interface_next_index;

	  /* redirect if feature path enabled */
	  vnet_feature_start_device_input_x1 (vif->sw_if_index, &next0, b0);

	  /* trace */
	  VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);

	  if (PREDICT_FALSE (n_trace > 0))
	    {
	      virtio_input_trace_t *tr;
	      vlib_trace_buffer (vm, node, next0, b0,
				 /* follow_chain */ 1);
	      vlib_set_trace_count (vm, node, --n_trace);
	      tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
	      tr->next_index = next0;
	      tr->hw_if_index = vif->hw_if_index;
	      tr->ring = qid;
	      tr->len = len + b0->total_length_not_including_first_buffer;
	      clib_memcpy_fast (&tr->hdr, hdr, hdr_sz);
	    }

	  /* enqueue buffer */
	  to_next[0] = bi0;
	  vring->desc_in_use--;
	  to_next += 1;
	  n_left_to_next--;
	  n_left--;
	  last++;

	  /* enqueue */
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
					   n_left_to_next, bi0, next0);

	  /* next packet */
	  n_rx_packets++;
	  n_rx_bytes += (len + b0->total_length_not_including_first_buffer);
	}
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vring->last_used_idx = last;

  vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters
				   + VNET_INTERFACE_COUNTER_RX, thread_index,
				   vif->sw_if_index, n_rx_packets,
				   n_rx_bytes);

refill:
  virtio_refill_vring (vm, vif, vring, hdr_sz);

  return n_rx_packets;
}

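/*
 * Input node dispatch: for each admin-up interface/queue pair, call the
 * inline variant matching the interface's GSO and checksum offload
 * configuration.
 */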
VLIB_NODE_FN (virtio_input_node) (vlib_main_t * vm,
				  vlib_node_runtime_t * node,
				  vlib_frame_t * frame)
{
  u32 n_rx = 0;
  virtio_main_t *nm = &virtio_main;
  vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
  vnet_device_and_queue_t *dq;

  foreach_device_and_queue (dq, rt->devices_and_queues)
  {
    virtio_if_t *vif;
    vif = vec_elt_at_index (nm->interfaces, dq->dev_instance);
    if (vif->flags & VIRTIO_IF_FLAG_ADMIN_UP)
      {
	if (vif->gso_enabled)
	  n_rx += virtio_device_input_inline (vm, node, frame, vif,
					      dq->queue_id, 1, 1);
	else if (vif->csum_offload_enabled)
	  n_rx += virtio_device_input_inline (vm, node, frame, vif,
					      dq->queue_id, 0, 1);
	else
	  n_rx += virtio_device_input_inline (vm, node, frame, vif,
					      dq->queue_id, 0, 0);
      }
  }

  return n_rx;
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (virtio_input_node) = {
  .name = "virtio-input",
  .sibling_of = "device-input",
  .format_trace = format_virtio_input_trace,
  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_INTERRUPT,
  .n_errors = VIRTIO_INPUT_N_ERROR,
  .error_strings = virtio_input_error_strings,
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */