/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __VIRTIO_VHOST_USER_INLINE_H__
#define __VIRTIO_VHOST_USER_INLINE_H__
/* vhost-user inline functions */
#include <vppinfra/elog.h>

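/*
 * Translate a guest physical address into a pointer in this process,
 * using the interface's mmap'ed memory regions. *hint caches the index
 * of the last matching region, so the common case is a single range
 * check; on a miss the region table is scanned (with SSE4.2 or NEON
 * compares where available, a scalar loop otherwise). Returns NULL and
 * records an event log entry if no region covers the address.
 */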
static_always_inline void *
map_guest_mem (vhost_user_intf_t * vui, uword addr, u32 * hint)
{
  int i = *hint;
  if (PREDICT_TRUE ((vui->regions[i].guest_phys_addr <= addr) &&
		    ((vui->regions[i].guest_phys_addr +
		      vui->regions[i].memory_size) > addr)))
    {
      return (void *) (vui->region_mmap_addr[i] + addr -
		       vui->regions[i].guest_phys_addr);
    }
#if __SSE4_2__
  __m128i rl, rh, al, ah, r;
  al = _mm_set1_epi64x (addr + 1);
  ah = _mm_set1_epi64x (addr);

  rl = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_lo[0]);
  rl = _mm_cmpgt_epi64 (al, rl);
  rh = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_hi[0]);
  rh = _mm_cmpgt_epi64 (rh, ah);
  r = _mm_and_si128 (rl, rh);

  rl = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_lo[2]);
  rl = _mm_cmpgt_epi64 (al, rl);
  rh = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_hi[2]);
  rh = _mm_cmpgt_epi64 (rh, ah);
  r = _mm_blend_epi16 (r, _mm_and_si128 (rl, rh), 0x22);

  rl = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_lo[4]);
  rl = _mm_cmpgt_epi64 (al, rl);
  rh = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_hi[4]);
  rh = _mm_cmpgt_epi64 (rh, ah);
  r = _mm_blend_epi16 (r, _mm_and_si128 (rl, rh), 0x44);

  rl = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_lo[6]);
  rl = _mm_cmpgt_epi64 (al, rl);
  rh = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_hi[6]);
  rh = _mm_cmpgt_epi64 (rh, ah);
  r = _mm_blend_epi16 (r, _mm_and_si128 (rl, rh), 0x88);

  r = _mm_shuffle_epi8 (r, _mm_set_epi64x (0, 0x0e060c040a020800));
  i = count_trailing_zeros (_mm_movemask_epi8 (r) |
			    (1 << VHOST_MEMORY_MAX_NREGIONS));

  if (i < vui->nregions)
    {
      *hint = i;
      return (void *) (vui->region_mmap_addr[i] + addr -
		       vui->regions[i].guest_phys_addr);
    }
#elif __aarch64__ && __ARM_NEON
  uint64x2_t al, ah, rl, rh, r;
  uint32_t u32 = 0;

  al = vdupq_n_u64 (addr + 1);
  ah = vdupq_n_u64 (addr);

  /* First iteration: regions 0 and 1 */
  rl = vld1q_u64 (&vui->region_guest_addr_lo[0]);
  rl = vcgtq_u64 (al, rl);
  rh = vld1q_u64 (&vui->region_guest_addr_hi[0]);
  rh = vcgtq_u64 (rh, ah);
  r = vandq_u64 (rl, rh);
  u32 |= (vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 0) & 0x1);
  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 8) & 0x1) << 1);

  if (u32)
    {
      i = count_trailing_zeros (u32);
      goto vhost_map_guest_mem_done;
    }

  /* Second iteration: regions 2 and 3 */
  rl = vld1q_u64 (&vui->region_guest_addr_lo[2]);
  rl = vcgtq_u64 (al, rl);
  rh = vld1q_u64 (&vui->region_guest_addr_hi[2]);
  rh = vcgtq_u64 (rh, ah);
  r = vandq_u64 (rl, rh);
  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 0) & 0x1) << 2);
  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 8) & 0x1) << 3);

  if (u32)
    {
      i = count_trailing_zeros (u32);
      goto vhost_map_guest_mem_done;
    }

  /* Third iteration: regions 4 and 5 */
  rl = vld1q_u64 (&vui->region_guest_addr_lo[4]);
  rl = vcgtq_u64 (al, rl);
  rh = vld1q_u64 (&vui->region_guest_addr_hi[4]);
  rh = vcgtq_u64 (rh, ah);
  r = vandq_u64 (rl, rh);
  /* the bit position must equal the region index for the
   * count_trailing_zeros () lookup below */
  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 0) & 0x1) << 4);
  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 8) & 0x1) << 5);

  i = count_trailing_zeros (u32 | (1 << VHOST_MEMORY_MAX_NREGIONS));

vhost_map_guest_mem_done:
  if (i < vui->nregions)
    {
      *hint = i;
      return (void *) (vui->region_mmap_addr[i] + addr -
		       vui->regions[i].guest_phys_addr);
    }
#else
  for (i = 0; i < vui->nregions; i++)
    {
      if ((vui->regions[i].guest_phys_addr <= addr) &&
	  ((vui->regions[i].guest_phys_addr + vui->regions[i].memory_size) >
	   addr))
	{
	  *hint = i;
	  return (void *) (vui->region_mmap_addr[i] + addr -
			   vui->regions[i].guest_phys_addr);
	}
    }
#endif
  /* *INDENT-OFF* */
  ELOG_TYPE_DECLARE (el) =
  {
    .format = "failed to map guest mem addr %lx",
    .format_args = "i8",
  };
  /* *INDENT-ON* */
  struct
  {
    uword addr;
  } *ed;
  ed = ELOG_DATA (&vlib_global_main.elog_main, el);
  ed->addr = addr;
  *hint = 0;
  return 0;
}

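/*
 * Translate an address in the vhost-user master's (e.g. QEMU's) virtual
 * address space into a pointer in this process, by linear search of the
 * shared memory regions. Returns NULL if no region covers the address.
 */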
static_always_inline void *
map_user_mem (vhost_user_intf_t * vui, uword addr)
{
  int i;
  for (i = 0; i < vui->nregions; i++)
    {
      if ((vui->regions[i].userspace_addr <= addr) &&
	  ((vui->regions[i].userspace_addr + vui->regions[i].memory_size) >
	   addr))
	{
	  return (void *) (vui->region_mmap_addr[i] + addr -
			   vui->regions[i].userspace_addr);
	}
    }
  return 0;
}

#define VHOST_LOG_PAGE 0x1000

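/*
 * Mark [addr, addr + len) dirty in the migration log shared with the
 * vhost-user master: one bit per VHOST_LOG_PAGE (4 KiB) page. Logging
 * only happens when a log base has been set and FEAT_VHOST_F_LOG_ALL
 * has been negotiated. When is_host_address is set, addr is first
 * translated from the master's virtual address space with
 * map_user_mem().
 */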
static_always_inline void
vhost_user_log_dirty_pages_2 (vhost_user_intf_t * vui,
			      u64 addr, u64 len, u8 is_host_address)
{
  if (PREDICT_TRUE (vui->log_base_addr == 0
		    || !(vui->features & (1 << FEAT_VHOST_F_LOG_ALL))))
    {
      return;
    }
  if (is_host_address)
    {
      addr = pointer_to_uword (map_user_mem (vui, (uword) addr));
    }
  if (PREDICT_FALSE ((addr + len - 1) / VHOST_LOG_PAGE / 8 >= vui->log_size))
    {
      vu_log_debug (vui, "vhost_user_log_dirty_pages(): out of range\n");
      return;
    }

  CLIB_MEMORY_BARRIER ();
  u64 page = addr / VHOST_LOG_PAGE;
  while (page * VHOST_LOG_PAGE < addr + len)
    {
      ((u8 *) vui->log_base_addr)[page / 8] |= 1 << page % 8;
      page++;
    }
}

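/*
 * Log the used-ring field 'member' as dirty, using the logging address
 * supplied for this vring (log_guest_addr). Does nothing unless logging
 * was requested for the vring (log_used).
 */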
#define vhost_user_log_dirty_ring(vui, vq, member) \
  if (PREDICT_FALSE(vq->log_used)) { \
    vhost_user_log_dirty_pages_2(vui, vq->log_guest_addr + STRUCT_OFFSET_OF(vring_used_t, member), \
                             sizeof(vq->used->member), 0); \
  }

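/*
 * Format a vhost_trace_t entry for packet tracing: interface name and
 * queue id, the per-packet virtio ring flags, and the virtio_net_hdr
 * fields captured for the first descriptor.
 */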
static_always_inline u8 *
format_vhost_trace (u8 * s, va_list * va)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
  CLIB_UNUSED (vnet_main_t * vnm) = vnet_get_main ();
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_trace_t *t = va_arg (*va, vhost_trace_t *);
  vhost_user_intf_t *vui = vum->vhost_user_interfaces + t->device_index;
  vnet_sw_interface_t *sw;
  u32 indent;

  if (pool_is_free (vum->vhost_user_interfaces, vui))
    {
      s = format (s, "vhost-user interface is deleted");
      return s;
    }
  sw = vnet_get_sw_interface (vnm, vui->sw_if_index);
  indent = format_get_indent (s);
  s = format (s, "%U %U queue %d\n", format_white_space, indent,
	      format_vnet_sw_interface_name, vnm, sw, t->qid);

  s = format (s, "%U virtio flags:\n", format_white_space, indent);
#define _(n,i,st) \
          if (t->virtio_ring_flags & (1 << VIRTIO_TRACE_F_##n)) \
            s = format (s, "%U  %s %s\n", format_white_space, indent, #n, st);
  foreach_virtio_trace_flags
#undef _
    s = format (s, "%U virtio_net_hdr first_desc_len %u\n",
		format_white_space, indent, t->first_desc_len);

  s = format (s, "%U   flags 0x%02x gso_type %u\n",
	      format_white_space, indent,
	      t->hdr.hdr.flags, t->hdr.hdr.gso_type);

  if (vui->virtio_net_hdr_sz == 12)
    s = format (s, "%U   num_buff %u",
		format_white_space, indent, t->hdr.num_buffers);

  return s;
}

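/*
 * Interrupt the driver by writing to the vring's call eventfd, then
 * reset the interrupt coalescing counter and deadline.
 */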
static_always_inline void
vhost_user_send_call (vlib_main_t * vm, vhost_user_vring_t * vq)
{
  vhost_user_main_t *vum = &vhost_user_main;
  u64 x = 1;
  int fd = UNIX_GET_FD (vq->callfd_idx);
  int rv;

  rv = write (fd, &x, sizeof (x));
  if (rv <= 0)
    {
      clib_unix_warning
	("Error: Could not write to unix socket for callfd %d", fd);
      return;
    }

  vq->n_since_last_int = 0;
  vq->int_deadline = vlib_time_now (vm) + vum->coalesce_time;
}

static_always_inline u8
vui_is_link_up (vhost_user_intf_t * vui)
{
  return vui->admin_up && vui->is_ready;
}

static_always_inline void
vhost_user_update_gso_interface_count (vhost_user_intf_t * vui, u8 add)
{
  vhost_user_main_t *vum = &vhost_user_main;

  if (vui->enable_gso)
    {
      if (add)
	{
	  vum->gso_count++;
	}
      else
	{
	  ASSERT (vum->gso_count > 0);
	  vum->gso_count--;
	}
    }
}

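/*
 * Packed-ring helpers: a descriptor is available to the device when its
 * VIRTQ_DESC_F_AVAIL flag matches the locally tracked avail wrap
 * counter. The wrap counters are toggled whenever last_avail_idx or
 * last_used_idx wraps around the ring.
 */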
static_always_inline u8
vhost_user_packed_desc_available (vhost_user_vring_t * vring, u16 idx)
{
  return (((vring->packed_desc[idx].flags & VIRTQ_DESC_F_AVAIL) ==
	   vring->avail_wrap_counter));
}

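/*
 * Advance last_avail_idx by one descriptor, toggling the avail wrap
 * counter when the index wraps. The *_table_idx variant first walks to
 * the end of a chained descriptor (VIRTQ_DESC_F_NEXT) before advancing
 * past it.
 */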
static_always_inline void
vhost_user_advance_last_avail_idx (vhost_user_vring_t * vring)
{
  vring->last_avail_idx++;
  if (PREDICT_FALSE ((vring->last_avail_idx & vring->qsz_mask) == 0))
    vring->avail_wrap_counter ^= VIRTQ_DESC_F_AVAIL;
}

static_always_inline void
vhost_user_advance_last_avail_table_idx (vhost_user_intf_t * vui,
					 vhost_user_vring_t * vring,
					 u8 chained)
{
  if (chained)
    {
      vring_packed_desc_t *desc_table = vring->packed_desc;

      /* skip over the chained descriptors so we land on the last one */
      while (desc_table[vring->last_avail_idx & vring->qsz_mask].flags &
	     VIRTQ_DESC_F_NEXT)
	vhost_user_advance_last_avail_idx (vring);
    }

  vhost_user_advance_last_avail_idx (vring);
}

static_always_inline void
vhost_user_undo_advanced_last_avail_idx (vhost_user_vring_t * vring)
{
  if (PREDICT_FALSE ((vring->last_avail_idx & vring->qsz_mask) == 0))
    vring->avail_wrap_counter ^= VIRTQ_DESC_F_AVAIL;
  vring->last_avail_idx--;
}

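/*
 * Roll back last_avail_idx: vhost_user_dequeue_descs undoes the extra
 * buffers recorded in a merged-rx virtio header (and adjusts the
 * processed-descriptor count accordingly), while
 * vhost_user_dequeue_chained_descs undoes every descriptor counted in
 * n_descs_processed.
 */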
static_always_inline void
vhost_user_dequeue_descs (vhost_user_vring_t * rxvq,
			  virtio_net_hdr_mrg_rxbuf_t * hdr,
			  u16 * n_descs_processed)
{
  u16 i;

  *n_descs_processed -= (hdr->num_buffers - 1);
  for (i = 0; i < hdr->num_buffers - 1; i++)
    vhost_user_undo_advanced_last_avail_idx (rxvq);
}

static_always_inline void
vhost_user_dequeue_chained_descs (vhost_user_vring_t * rxvq,
				  u16 * n_descs_processed)
{
  while (*n_descs_processed)
    {
      vhost_user_undo_advanced_last_avail_idx (rxvq);
      (*n_descs_processed)--;
    }
}

static_always_inline void
vhost_user_advance_last_used_idx (vhost_user_vring_t * vring)
{
  vring->last_used_idx++;
  if (PREDICT_FALSE ((vring->last_used_idx & vring->qsz_mask) == 0))
    vring->used_wrap_counter ^= 1;
}

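/* Non-zero when FEAT_VIRTIO_F_RING_PACKED was negotiated for this
 * interface. */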
static_always_inline u64
vhost_user_is_packed_ring_supported (vhost_user_intf_t * vui)
{
  return (vui->features & (1ULL << FEAT_VIRTIO_F_RING_PACKED));
}

#endif

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */