/*
 *------------------------------------------------------------------
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/gso/gso.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/ip/ip6_packet.h>
#include <vnet/tcp/tcp_packet.h>
#include <vnet/udp/udp_packet.h>
#include <vnet/devices/virtio/virtio.h>

#define foreach_virtio_tx_func_error	       \
_(NO_FREE_SLOTS, "no free tx slots")           \
_(TRUNC_PACKET, "packet > buffer size -- truncated in tx ring") \
_(PENDING_MSGS, "pending msgs in tx ring") \
_(NO_TX_QUEUES, "no tx queues")

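/*
 * X-macro pattern: each _(f,s) entry above expands once into the error
 * enum below and once into the parallel string table, e.g.
 *   VIRTIO_TX_ERROR_NO_FREE_SLOTS  <->  "no free tx slots"
 * so counter ids and their descriptions stay in sync.
 */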
typedef enum
{
#define _(f,s) VIRTIO_TX_ERROR_##f,
  foreach_virtio_tx_func_error
#undef _
    VIRTIO_TX_N_ERROR,
} virtio_tx_func_error_t;

static char *virtio_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_virtio_tx_func_error
#undef _
};

static u8 *
format_virtio_device (u8 * s, va_list * args)
{
  u32 dev_instance = va_arg (*args, u32);
  int verbose = va_arg (*args, int);
  u32 indent = format_get_indent (s);

  s = format (s, "VIRTIO interface");
  if (verbose)
    {
      s = format (s, "\n%U instance %u", format_white_space, indent + 2,
		  dev_instance);
    }
  return s;
}

static u8 *
format_virtio_tx_trace (u8 * s, va_list * args)
{
  s = format (s, "Unimplemented...");
  return s;
}

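/*
 * Reclaim tx buffers the device has finished with. The device reports
 * completions by copying descriptor head ids into the used ring and
 * advancing used->idx; everything between last_used_idx and used->idx
 * is ready to be freed.
 */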
static_always_inline void
virtio_free_used_device_desc (vlib_main_t * vm, virtio_vring_t * vring)
{
  u16 used = vring->desc_in_use;
  u16 sz = vring->size;
  u16 mask = sz - 1;
  u16 last = vring->last_used_idx;
  u16 n_left = vring->used->idx - last;

  if (n_left == 0)
    return;

  while (n_left)
    {
      struct vring_used_elem *e = &vring->used->ring[last & mask];
      u16 slot, n_buffers;
      slot = n_buffers = e->id;

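      /* walk a run of consecutive buffer ids so the whole run can be
       * freed in one batch below */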
      while (e->id == n_buffers)
	{
	  n_left--;
	  last++;
	  n_buffers++;
	  if (n_left == 0)
	    break;
	  e = &vring->used->ring[last & mask];
	}
      vlib_buffer_free_from_ring (vm, vring->buffers, slot,
				  sz, (n_buffers - slot));
      used -= (n_buffers - slot);

      if (n_left > 0)
	{
	  slot = e->id;

	  vlib_buffer_free (vm, &vring->buffers[slot], 1);
	  used--;
	  last++;
	  n_left--;
	}
    }
  vring->desc_in_use = used;
  vring->last_used_idx = last;
}

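/*
 * Per the virtio spec, when VIRTIO_NET_HDR_F_NEEDS_CSUM is set the device
 * checksums the packet from csum_start onwards and stores the result at
 * csum_start + csum_offset; csum_start is the l4 header offset and
 * csum_offset locates the checksum field inside the TCP/UDP header.
 */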
static_always_inline void
set_checksum_offsets (vlib_main_t * vm, virtio_if_t * vif, vlib_buffer_t * b,
		      struct virtio_net_hdr_v1 *hdr)
{
  if (b->flags & VNET_BUFFER_F_IS_IP4)
    {
      ip4_header_t *ip4;
      gso_header_offset_t gho = vnet_gso_header_offset_parser (b, 0);
      hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
      hdr->csum_start = gho.l4_hdr_offset;	/* e.g. 0x22 = 14B l2 + 20B ip4 */
      if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
	hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
      else if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
	hdr->csum_offset = STRUCT_OFFSET_OF (udp_header_t, checksum);

      /*
       * virtio devices do not support IP4 header checksum offload,
       * so the driver computes it here at tx.
       */
      ip4 =
	(ip4_header_t *) (vlib_buffer_get_current (b) + gho.l3_hdr_offset);
      if (b->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM)
	ip4->checksum = ip4_header_checksum (ip4);
    }
  else if (b->flags & VNET_BUFFER_F_IS_IP6)
    {
      gso_header_offset_t gho = vnet_gso_header_offset_parser (b, 1);
      hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
      hdr->csum_start = gho.l4_hdr_offset;	/* e.g. 0x36 = 14B l2 + 40B ip6 */
      if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
	hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
      else if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
	hdr->csum_offset = STRUCT_OFFSET_OF (udp_header_t, checksum);
    }
}

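/*
 * Fill one avail-ring slot for buffer bi. The virtio_net_hdr is written
 * into the buffer's headroom, directly in front of the packet, so a
 * single-segment packet needs only one descriptor covering header + data.
 * Chained buffers are flattened into an indirect descriptor table.
 */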
static_always_inline u16
add_buffer_to_slot (vlib_main_t * vm, virtio_if_t * vif,
		    virtio_vring_t * vring, u32 bi, u16 avail, u16 next,
		    u16 mask, int do_gso, int csum_offload)
{
  u16 n_added = 0;
  int hdr_sz = vif->virtio_net_hdr_sz;
  struct vring_desc *d;
  d = &vring->desc[next];
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  struct virtio_net_hdr_v1 *hdr = vlib_buffer_get_current (b) - hdr_sz;

  clib_memset (hdr, 0, hdr_sz);

  if (do_gso && (b->flags & VNET_BUFFER_F_GSO))
    {
      if (b->flags & VNET_BUFFER_F_IS_IP4)
	{
	  ip4_header_t *ip4;
	  gso_header_offset_t gho = vnet_gso_header_offset_parser (b, 0);
	  hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
	  hdr->gso_size = vnet_buffer2 (b)->gso_size;
	  hdr->hdr_len = gho.l4_hdr_offset + gho.l4_hdr_sz;
	  hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
	  hdr->csum_start = gho.l4_hdr_offset;	/* e.g. 0x22 = 14B l2 + 20B ip4 */
	  hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
	  ip4 =
	    (ip4_header_t *) (vlib_buffer_get_current (b) +
			      gho.l3_hdr_offset);
	  /*
	   * virtio devices do not support IP4 header checksum offload,
	   * so the driver computes it here at tx.
	   */
	  if (b->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM)
	    ip4->checksum = ip4_header_checksum (ip4);
	}
      else if (b->flags & VNET_BUFFER_F_IS_IP6)
	{
	  gso_header_offset_t gho = vnet_gso_header_offset_parser (b, 1);
	  hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
	  hdr->gso_size = vnet_buffer2 (b)->gso_size;
	  hdr->hdr_len = gho.l4_hdr_offset + gho.l4_hdr_sz;
	  hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
	  hdr->csum_start = gho.l4_hdr_offset;	/* e.g. 0x36 = 14B l2 + 40B ip6 */
	  hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
	}
    }
  else if (csum_offload
	   && (b->flags & (VNET_BUFFER_F_OFFLOAD_TCP_CKSUM |
			   VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)))
    {
      set_checksum_offsets (vm, vif, b, hdr);
    }

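  /* fast path: single-segment packet, one descriptor covers header + data */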
  if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0))
    {
      d->addr =
	((vif->type == VIRTIO_IF_TYPE_PCI) ? vlib_buffer_get_current_pa (vm,
									 b) :
	 pointer_to_uword (vlib_buffer_get_current (b))) - hdr_sz;
      d->len = b->current_length + hdr_sz;
      d->flags = 0;
    }
  else
    {
      /*
       * Chained packet: build an indirect descriptor table inside a single
       * vlib_buffer_t. A descriptor is 16 bytes and a vlib_buffer_t has
       * 2048 bytes of data space, so the table holds at most 128
       * (2048 / 16) indirect descriptors, enough for a 65535-byte jumbo
       * frame as long as each data buffer is at least 512 bytes.
       */
      u32 indirect_buffer = 0;
      if (PREDICT_FALSE (vlib_buffer_alloc (vm, &indirect_buffer, 1) == 0))
	return n_added;

      vlib_buffer_t *indirect_desc = vlib_get_buffer (vm, indirect_buffer);
      indirect_desc->current_data = 0;
      indirect_desc->flags |= VLIB_BUFFER_NEXT_PRESENT;
      indirect_desc->next_buffer = bi;
      bi = indirect_buffer;

      struct vring_desc *id =
	(struct vring_desc *) vlib_buffer_get_current (indirect_desc);
      u32 count = 1;
      if (vif->type == VIRTIO_IF_TYPE_PCI)
	{
	  d->addr = vlib_physmem_get_pa (vm, id);
	  id->addr = vlib_buffer_get_current_pa (vm, b) - hdr_sz;

	  /*
	   * If VIRTIO_F_ANY_LAYOUT is not negotiated, the virtio_net_hdr
	   * must sit in its own descriptor and the data starts in the
	   * next one.
	   */
	  if (PREDICT_TRUE
	      (vif->features & VIRTIO_FEATURE (VIRTIO_F_ANY_LAYOUT)))
	    id->len = b->current_length + hdr_sz;
	  else
	    {
	      id->len = hdr_sz;
	      id->flags = VRING_DESC_F_NEXT;
	      id->next = count;
	      count++;
	      id++;
	      id->addr = vlib_buffer_get_current_pa (vm, b);
	      id->len = b->current_length;
	    }
	  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
	    {
	      id->flags = VRING_DESC_F_NEXT;
	      id->next = count;
	      count++;
	      id++;
	      b = vlib_get_buffer (vm, b->next_buffer);
	      id->addr = vlib_buffer_get_current_pa (vm, b);
	      id->len = b->current_length;
	    }
	}
      else			/* VIRTIO_IF_TYPE_TAP */
	{
	  d->addr = pointer_to_uword (id);
	  /* first buffer in chain */
	  id->addr = pointer_to_uword (vlib_buffer_get_current (b)) - hdr_sz;
	  id->len = b->current_length + hdr_sz;

	  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
	    {
	      id->flags = VRING_DESC_F_NEXT;
	      id->next = count;
	      count++;
	      id++;
	      b = vlib_get_buffer (vm, b->next_buffer);
	      id->addr = pointer_to_uword (vlib_buffer_get_current (b));
	      id->len = b->current_length;
	    }
	}
      id->flags = 0;
      id->next = 0;
      d->len = count * sizeof (struct vring_desc);
      d->flags = VRING_DESC_F_INDIRECT;
    }
  vring->buffers[next] = bi;
  vring->avail->ring[avail & mask] = next;
  n_added++;
  return n_added;
}

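/*
 * TX ring bookkeeping: desc_next and the avail ring wrap with (sz - 1) as
 * mask, while avail->idx and used->idx are free-running u16 counters that
 * only the device masks. A tx queue is picked per thread
 * (thread_index % num_txqs); the per-vring spinlock is taken only when it
 * was initialized, i.e. when a queue can be shared between threads.
 */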
static_always_inline uword
virtio_interface_tx_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
			    vlib_frame_t * frame, virtio_if_t * vif,
			    int do_gso, int csum_offload)
{
  u16 n_left = frame->n_vectors;
  virtio_vring_t *vring;
  u16 qid = vm->thread_index % vif->num_txqs;
  vring = vec_elt_at_index (vif->txq_vrings, qid);
  u16 used, next, avail;
  u16 sz = vring->size;
  u16 mask = sz - 1;
  u32 *buffers = vlib_frame_vector_args (frame);

  clib_spinlock_lock_if_init (&vring->lockp);

  if ((vring->used->flags & VIRTIO_RING_FLAG_MASK_INT) == 0 &&
      (vring->last_kick_avail_idx != vring->avail->idx))
    virtio_kick (vm, vring, vif);

  /* free consumed buffers */
  virtio_free_used_device_desc (vm, vring);

  used = vring->desc_in_use;
  next = vring->desc_next;
  avail = vring->avail->idx;

  while (n_left && used < sz)
    {
      u16 n_added = 0;
      n_added =
	add_buffer_to_slot (vm, vif, vring, buffers[0], avail, next, mask,
			    do_gso, csum_offload);
      if (!n_added)
	break;
      avail += n_added;
      next = (next + n_added) & mask;
      used += n_added;
      buffers++;
      n_left--;
    }

  if (n_left != frame->n_vectors)
    {
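      /* make descriptor and ring writes visible to the device before
       * publishing the new avail index */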
      CLIB_MEMORY_STORE_BARRIER ();
      vring->avail->idx = avail;
      vring->desc_next = next;
      vring->desc_in_use = used;
      if ((vring->used->flags & VIRTIO_RING_FLAG_MASK_INT) == 0)
	virtio_kick (vm, vring, vif);
    }

  if (n_left)
    {
      vlib_error_count (vm, node->node_index, VIRTIO_TX_ERROR_NO_FREE_SLOTS,
			n_left);
      vlib_buffer_free (vm, buffers, n_left);
    }

  clib_spinlock_unlock_if_init (&vring->lockp);

  return frame->n_vectors - n_left;
}

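/*
 * Passing do_gso / csum_offload as compile-time constants to the inline
 * lets the compiler emit three specialized tx functions with the offload
 * branches resolved, instead of testing interface flags per packet.
 */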
VNET_DEVICE_CLASS_TX_FN (virtio_device_class) (vlib_main_t * vm,
					       vlib_node_runtime_t * node,
					       vlib_frame_t * frame)
{
  vnet_main_t *vnm = vnet_get_main ();
  virtio_main_t *nm = &virtio_main;
  vnet_interface_output_runtime_t *rund = (void *) node->runtime_data;
  virtio_if_t *vif = pool_elt_at_index (nm->interfaces, rund->dev_instance);
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, vif->hw_if_index);

  if (hw->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO)
    return virtio_interface_tx_inline (vm, node, frame, vif, 1 /* do_gso */ ,
				       1);
  else if (hw->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_TX_L4_CKSUM_OFFLOAD)
    return virtio_interface_tx_inline (vm, node, frame, vif,
				       0 /* no do_gso */ , 1);
  else
    return virtio_interface_tx_inline (vm, node, frame, vif,
				       0 /* no do_gso */ , 0);
}

static void
virtio_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
				u32 node_index)
{
  virtio_main_t *apm = &virtio_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  virtio_if_t *vif = pool_elt_at_index (apm->interfaces, hw->dev_instance);

  /* Shut off redirection */
  if (node_index == ~0)
    {
      vif->per_interface_next_index = node_index;
      return;
    }

  vif->per_interface_next_index =
    vlib_node_add_next (vlib_get_main (), virtio_input_node.index,
			node_index);
}

static void
virtio_clear_hw_interface_counters (u32 instance)
{
  /* Nothing for now */
}

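/*
 * VIRTIO_RING_FLAG_MASK_INT in avail->flags asks the device not to send
 * interrupts for the rx queue: set it for polling mode, clear it for
 * interrupt mode.
 */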
static clib_error_t *
virtio_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index, u32 qid,
				 vnet_hw_interface_rx_mode mode)
{
  virtio_main_t *mm = &virtio_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  virtio_if_t *vif = pool_elt_at_index (mm->interfaces, hw->dev_instance);
  virtio_vring_t *vring = vec_elt_at_index (vif->rxq_vrings, qid);

  if (vif->type == VIRTIO_IF_TYPE_PCI && !(vif->support_int_mode))
    {
      vring->avail->flags |= VIRTIO_RING_FLAG_MASK_INT;
      return clib_error_return (0, "interrupt mode is not supported");
    }

  if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
    vring->avail->flags |= VIRTIO_RING_FLAG_MASK_INT;
  else
    vring->avail->flags &= ~VIRTIO_RING_FLAG_MASK_INT;

  return 0;
}

static clib_error_t *
virtio_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
{
  virtio_main_t *mm = &virtio_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  virtio_if_t *vif = pool_elt_at_index (mm->interfaces, hw->dev_instance);

  if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
    vif->flags |= VIRTIO_IF_FLAG_ADMIN_UP;
  else
    vif->flags &= ~VIRTIO_IF_FLAG_ADMIN_UP;

  return 0;
}

static clib_error_t *
virtio_subif_add_del_function (vnet_main_t * vnm,
			       u32 hw_if_index,
			       struct vnet_sw_interface_t *st, int is_add)
{
  /* Nothing for now */
  return 0;
}

/* *INDENT-OFF* */
VNET_DEVICE_CLASS (virtio_device_class) = {
  .name = "virtio",
  .format_device_name = format_virtio_device_name,
  .format_device = format_virtio_device,
  .format_tx_trace = format_virtio_tx_trace,
  .tx_function_n_errors = VIRTIO_TX_N_ERROR,
  .tx_function_error_strings = virtio_tx_func_error_strings,
  .rx_redirect_to_node = virtio_set_interface_next_node,
  .clear_counters = virtio_clear_hw_interface_counters,
  .admin_up_down_function = virtio_interface_admin_up_down,
  .subif_add_del_function = virtio_subif_add_del_function,
  .rx_mode_change_function = virtio_interface_rx_mode_change,
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */