eal_vfio.c revision 39157ec0
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <errno.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#include <rte_log.h>
#include <rte_memory.h>
#include <rte_eal_memconfig.h>

#include "eal_filesystem.h"
#include "eal_vfio.h"
#include "eal_private.h"

#ifdef VFIO_PRESENT

/* per-process VFIO config */
static struct vfio_config vfio_cfg;

static int vfio_type1_dma_map(int);
static int vfio_spapr_dma_map(int);
static int vfio_noiommu_dma_map(int);

/* IOMMU types we support */
static const struct vfio_iommu_type iommu_types[] = {
	/* x86 IOMMU, otherwise known as type 1 */
	{ RTE_VFIO_TYPE1, "Type 1", &vfio_type1_dma_map},
	/* ppc64 IOMMU, otherwise known as spapr */
	{ RTE_VFIO_SPAPR, "sPAPR", &vfio_spapr_dma_map},
	/* IOMMU-less mode */
	{ RTE_VFIO_NOIOMMU, "No-IOMMU", &vfio_noiommu_dma_map},
};

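/*
 * get the group fd for an IOMMU group number. in the primary process, open
 * the group node (VFIO_GROUP_FMT, falling back to VFIO_NOIOMMU_GROUP_FMT)
 * directly; in a secondary process, request the fd from the primary over
 * the mp sync socket. returns the fd, 0 if the group does not exist, or -1
 * on error.
 */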
int
vfio_get_group_fd(int iommu_group_no)
{
	int i;
	int vfio_group_fd;
	char filename[PATH_MAX];

	/* check if we already have the group descriptor open */
	for (i = 0; i < vfio_cfg.vfio_group_idx; i++)
		if (vfio_cfg.vfio_groups[i].group_no == iommu_group_no)
			return vfio_cfg.vfio_groups[i].fd;

	/* if primary, try to open the group */
	if (internal_config.process_type == RTE_PROC_PRIMARY) {
		/* try regular group format */
		snprintf(filename, sizeof(filename),
				 VFIO_GROUP_FMT, iommu_group_no);
		vfio_group_fd = open(filename, O_RDWR);
		if (vfio_group_fd < 0) {
			/* if file not found, it's not an error */
			if (errno != ENOENT) {
				RTE_LOG(ERR, EAL, "Cannot open %s: %s\n", filename,
						strerror(errno));
				return -1;
			}

			/* special case: try no-IOMMU path as well */
			snprintf(filename, sizeof(filename),
					VFIO_NOIOMMU_GROUP_FMT, iommu_group_no);
			vfio_group_fd = open(filename, O_RDWR);
			if (vfio_group_fd < 0) {
				if (errno != ENOENT) {
					RTE_LOG(ERR, EAL, "Cannot open %s: %s\n", filename,
							strerror(errno));
					return -1;
				}
				return 0;
			}
			/* noiommu group found */
		}

		/* if the fd is valid, create a new group for it */
		if (vfio_cfg.vfio_group_idx == VFIO_MAX_GROUPS) {
			RTE_LOG(ERR, EAL, "Maximum number of VFIO groups reached!\n");
			close(vfio_group_fd);
			return -1;
		}
		vfio_cfg.vfio_groups[vfio_cfg.vfio_group_idx].group_no = iommu_group_no;
		vfio_cfg.vfio_groups[vfio_cfg.vfio_group_idx].fd = vfio_group_fd;
		return vfio_group_fd;
	}
	/* if we're in a secondary process, request group fd from the primary
	 * process via our socket
	 */
	else {
		int socket_fd, ret;

		socket_fd = vfio_mp_sync_connect_to_primary();

		if (socket_fd < 0) {
			RTE_LOG(ERR, EAL, "  cannot connect to primary process!\n");
			return -1;
		}
		if (vfio_mp_sync_send_request(socket_fd, SOCKET_REQ_GROUP) < 0) {
			RTE_LOG(ERR, EAL, "  cannot request group fd!\n");
			close(socket_fd);
			return -1;
		}
		if (vfio_mp_sync_send_request(socket_fd, iommu_group_no) < 0) {
			RTE_LOG(ERR, EAL, "  cannot send group number!\n");
			close(socket_fd);
			return -1;
		}
		ret = vfio_mp_sync_receive_request(socket_fd);
		switch (ret) {
		case SOCKET_NO_FD:
			close(socket_fd);
			return 0;
		case SOCKET_OK:
			vfio_group_fd = vfio_mp_sync_receive_fd(socket_fd);
			/* if we got the fd, return it */
			if (vfio_group_fd > 0) {
				close(socket_fd);
				return vfio_group_fd;
			}
			/* fall-through on error */
		default:
			RTE_LOG(ERR, EAL, "  cannot get group fd!\n");
			close(socket_fd);
			return -1;
		}
	}
	return -1;
}

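/*
 * reset the group slot we are currently setting up (vfio_group_idx is only
 * incremented once setup succeeds), so a failed setup does not leave a
 * stale entry behind.
 */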
static void
clear_current_group(void)
{
	vfio_cfg.vfio_groups[vfio_cfg.vfio_group_idx].group_no = 0;
	vfio_cfg.vfio_groups[vfio_cfg.vfio_group_idx].fd = -1;
}

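/*
 * set up a device for VFIO use: attach its IOMMU group to the per-process
 * container, set up container DMA mappings if needed, and get a device fd
 * and device info for it. returns 0 on success, 1 if the device is not
 * managed by VFIO (caller should skip it), or -1 on error.
 *
 * minimal usage sketch (hypothetical caller, e.g. a PCI driver; the sysfs
 * base and device address below are illustrative only):
 *
 *	struct vfio_device_info info = { .argsz = sizeof(info) };
 *	int dev_fd, ret;
 *	ret = vfio_setup_device("/sys/bus/pci/devices", "0000:00:01.0",
 *			&dev_fd, &info);
 *	if (ret == 0)
 *		... dev_fd is ready for region/irq ioctls ...
 */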
int vfio_setup_device(const char *sysfs_base, const char *dev_addr,
		int *vfio_dev_fd, struct vfio_device_info *device_info)
{
	struct vfio_group_status group_status = {
			.argsz = sizeof(group_status)
	};
	int vfio_group_fd;
	int iommu_group_no;
	int ret;

	/* get group number */
	ret = vfio_get_group_no(sysfs_base, dev_addr, &iommu_group_no);
	if (ret == 0) {
		RTE_LOG(WARNING, EAL, "  %s not managed by VFIO driver, skipping\n",
			dev_addr);
		return 1;
	}

	/* if negative, something failed */
	if (ret < 0)
		return -1;

	/* get the actual group fd */
	vfio_group_fd = vfio_get_group_fd(iommu_group_no);
	if (vfio_group_fd < 0)
		return -1;

	/* store group fd */
	vfio_cfg.vfio_groups[vfio_cfg.vfio_group_idx].group_no = iommu_group_no;
	vfio_cfg.vfio_groups[vfio_cfg.vfio_group_idx].fd = vfio_group_fd;

	/* if group_fd == 0, that means the device isn't managed by VFIO */
	if (vfio_group_fd == 0) {
		RTE_LOG(WARNING, EAL, "  %s not managed by VFIO driver, skipping\n",
				dev_addr);
		/* we store 0 as group fd to distinguish between existing but
		 * unbound VFIO groups, and groups that don't exist at all.
		 */
		vfio_cfg.vfio_group_idx++;
		return 1;
	}

	/* check if the group is viable */
	ret = ioctl(vfio_group_fd, VFIO_GROUP_GET_STATUS, &group_status);
	if (ret) {
		RTE_LOG(ERR, EAL, "  %s cannot get group status, "
				"error %i (%s)\n", dev_addr, errno, strerror(errno));
		close(vfio_group_fd);
		clear_current_group();
		return -1;
	} else if (!(group_status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
		RTE_LOG(ERR, EAL, "  %s VFIO group is not viable!\n", dev_addr);
		close(vfio_group_fd);
		clear_current_group();
		return -1;
	}

	/*
	 * at this point, we know that this group is viable (meaning, all devices
	 * in it are either bound to VFIO or not bound to anything)
	 */

	/* check if group does not have a container yet */
	if (!(group_status.flags & VFIO_GROUP_FLAGS_CONTAINER_SET)) {
		/* add group to a container */
		ret = ioctl(vfio_group_fd, VFIO_GROUP_SET_CONTAINER,
				&vfio_cfg.vfio_container_fd);
		if (ret) {
			RTE_LOG(ERR, EAL, "  %s cannot add VFIO group to container, "
					"error %i (%s)\n", dev_addr, errno, strerror(errno));
			close(vfio_group_fd);
			clear_current_group();
			return -1;
		}
		/*
		 * at this point we know that this group has been successfully
		 * initialized, so we increment vfio_group_idx to indicate that we can
		 * add new groups.
		 */
		vfio_cfg.vfio_group_idx++;
	}

	/*
	 * pick an IOMMU type and set up DMA mappings for container
	 *
	 * needs to be done only once, only when at least one group is assigned to
	 * a container and only in primary process
	 */
	if (internal_config.process_type == RTE_PROC_PRIMARY &&
			vfio_cfg.vfio_container_has_dma == 0) {
		/* select an IOMMU type which we will be using */
		const struct vfio_iommu_type *t =
				vfio_set_iommu_type(vfio_cfg.vfio_container_fd);
		if (!t) {
			RTE_LOG(ERR, EAL, "  %s failed to select IOMMU type\n", dev_addr);
			return -1;
		}
		ret = t->dma_map_func(vfio_cfg.vfio_container_fd);
		if (ret) {
			RTE_LOG(ERR, EAL, "  %s DMA remapping failed, "
					"error %i (%s)\n", dev_addr, errno, strerror(errno));
			return -1;
		}
		vfio_cfg.vfio_container_has_dma = 1;
	}

	/* get a file descriptor for the device */
	*vfio_dev_fd = ioctl(vfio_group_fd, VFIO_GROUP_GET_DEVICE_FD, dev_addr);
	if (*vfio_dev_fd < 0) {
		/* if we cannot get a device fd, this simply means that this
		 * particular port is not bound to VFIO
		 */
		RTE_LOG(WARNING, EAL, "  %s not managed by VFIO driver, skipping\n",
				dev_addr);
		return 1;
	}

	/* test and setup the device */
	ret = ioctl(*vfio_dev_fd, VFIO_DEVICE_GET_INFO, device_info);
	if (ret) {
		RTE_LOG(ERR, EAL, "  %s cannot get device info, "
				"error %i (%s)\n", dev_addr, errno, strerror(errno));
		close(*vfio_dev_fd);
		return -1;
	}

	return 0;
}

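/*
 * probe for VFIO support: check that the kernel module named by modname is
 * loaded, and if so, open the VFIO container. returns -1 only if module
 * information could not be read; otherwise returns 0, with
 * vfio_cfg.vfio_enabled recording whether initialization succeeded.
 */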
int
vfio_enable(const char *modname)
{
	int i;
	int vfio_available;

	/* initialize group list */
	for (i = 0; i < VFIO_MAX_GROUPS; i++) {
		vfio_cfg.vfio_groups[i].fd = -1;
		vfio_cfg.vfio_groups[i].group_no = -1;
	}

	/* inform the user that we are probing for VFIO */
	RTE_LOG(INFO, EAL, "Probing VFIO support...\n");

	/* check if vfio-pci module is loaded */
	vfio_available = rte_eal_check_module(modname);

	/* return error directly */
	if (vfio_available == -1) {
		RTE_LOG(INFO, EAL, "Could not get loaded module details!\n");
		return -1;
	}

	/* return 0 if VFIO modules not loaded */
	if (vfio_available == 0) {
		RTE_LOG(DEBUG, EAL, "VFIO modules not loaded, "
			"skipping VFIO support...\n");
		return 0;
	}

	vfio_cfg.vfio_container_fd = vfio_get_container_fd();

	/* check if we have VFIO driver enabled */
	if (vfio_cfg.vfio_container_fd != -1) {
		RTE_LOG(NOTICE, EAL, "VFIO support initialized\n");
		vfio_cfg.vfio_enabled = 1;
	} else {
		RTE_LOG(NOTICE, EAL, "VFIO support could not be initialized\n");
	}

	return 0;
}

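/* report whether VFIO was enabled at probe time and modname is still loaded */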
int
vfio_is_enabled(const char *modname)
{
	const int mod_available = rte_eal_check_module(modname) > 0;
	return vfio_cfg.vfio_enabled && mod_available;
}

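/*
 * try each IOMMU type we support on the container, in the order listed in
 * iommu_types, and return the first one the kernel accepts (NULL if none).
 */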
const struct vfio_iommu_type *
vfio_set_iommu_type(int vfio_container_fd)
{
	unsigned idx;
	for (idx = 0; idx < RTE_DIM(iommu_types); idx++) {
		const struct vfio_iommu_type *t = &iommu_types[idx];

		int ret = ioctl(vfio_container_fd, VFIO_SET_IOMMU,
				t->type_id);
		if (!ret) {
			RTE_LOG(NOTICE, EAL, "  using IOMMU type %d (%s)\n",
					t->type_id, t->name);
			return t;
		}
		/* not an error, there may be more supported IOMMU types */
		RTE_LOG(DEBUG, EAL, "  set IOMMU type %d (%s) failed, "
				"error %i (%s)\n", t->type_id, t->name, errno,
				strerror(errno));
	}
	/* if we didn't find a suitable IOMMU type, fail */
	return NULL;
}

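/*
 * check which of our IOMMU types the container supports, via
 * VFIO_CHECK_EXTENSION. returns 0 if at least one type is supported, -1
 * otherwise. note that the container fd is closed on failure.
 */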
int
vfio_has_supported_extensions(int vfio_container_fd)
{
	int ret;
	unsigned idx, n_extensions = 0;
	for (idx = 0; idx < RTE_DIM(iommu_types); idx++) {
		const struct vfio_iommu_type *t = &iommu_types[idx];

		ret = ioctl(vfio_container_fd, VFIO_CHECK_EXTENSION,
				t->type_id);
		if (ret < 0) {
			RTE_LOG(ERR, EAL, "  could not get IOMMU type, "
				"error %i (%s)\n", errno,
				strerror(errno));
			close(vfio_container_fd);
			return -1;
		} else if (ret == 1) {
			/* we found a supported extension */
			n_extensions++;
		}
		RTE_LOG(DEBUG, EAL, "  IOMMU type %d (%s) is %s\n",
				t->type_id, t->name,
				ret ? "supported" : "not supported");
	}

	/* if we didn't find any supported IOMMU types, fail */
	if (!n_extensions) {
		close(vfio_container_fd);
		return -1;
	}

	return 0;
}

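/*
 * open a VFIO container and validate its API version and IOMMU extensions
 * (primary process), or request the container fd from the primary over the
 * mp sync socket (secondary process). returns the fd or -1 on error.
 */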
int
vfio_get_container_fd(void)
{
	int ret, vfio_container_fd;

	/* if we're in a primary process, try to open the container */
	if (internal_config.process_type == RTE_PROC_PRIMARY) {
		vfio_container_fd = open(VFIO_CONTAINER_PATH, O_RDWR);
		if (vfio_container_fd < 0) {
			RTE_LOG(ERR, EAL, "  cannot open VFIO container, "
					"error %i (%s)\n", errno, strerror(errno));
			return -1;
		}

		/* check VFIO API version */
		ret = ioctl(vfio_container_fd, VFIO_GET_API_VERSION);
		if (ret != VFIO_API_VERSION) {
			if (ret < 0)
				RTE_LOG(ERR, EAL, "  could not get VFIO API version, "
						"error %i (%s)\n", errno, strerror(errno));
			else
				RTE_LOG(ERR, EAL, "  unsupported VFIO API version!\n");
			close(vfio_container_fd);
			return -1;
		}

		ret = vfio_has_supported_extensions(vfio_container_fd);
		if (ret) {
			RTE_LOG(ERR, EAL, "  no supported IOMMU "
					"extensions found!\n");
			return -1;
		}

		return vfio_container_fd;
	} else {
		/*
		 * if we're in a secondary process, request container fd from the
		 * primary process via our socket
		 */
		int socket_fd;

		socket_fd = vfio_mp_sync_connect_to_primary();
		if (socket_fd < 0) {
			RTE_LOG(ERR, EAL, "  cannot connect to primary process!\n");
			return -1;
		}
		if (vfio_mp_sync_send_request(socket_fd, SOCKET_REQ_CONTAINER) < 0) {
			RTE_LOG(ERR, EAL, "  cannot request container fd!\n");
			close(socket_fd);
			return -1;
		}
		vfio_container_fd = vfio_mp_sync_receive_fd(socket_fd);
		if (vfio_container_fd < 0) {
			RTE_LOG(ERR, EAL, "  cannot get container fd!\n");
			close(socket_fd);
			return -1;
		}
		close(socket_fd);
		return vfio_container_fd;
	}

	return -1;
}

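/*
 * resolve a device's IOMMU group number from its sysfs iommu_group symlink.
 * returns 1 on success, 0 if the device has no IOMMU group (i.e. no VFIO
 * for this device), or -1 on parse errors.
 */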
int
vfio_get_group_no(const char *sysfs_base,
		const char *dev_addr, int *iommu_group_no)
{
	char linkname[PATH_MAX];
	char filename[PATH_MAX];
	char *tok[16], *group_tok, *end;
	int ret;

	memset(linkname, 0, sizeof(linkname));
	memset(filename, 0, sizeof(filename));

	/* try to find out IOMMU group for this device */
	snprintf(linkname, sizeof(linkname),
			 "%s/%s/iommu_group", sysfs_base, dev_addr);

	/* leave room for the terminating NUL the buffer was zeroed with */
	ret = readlink(linkname, filename, sizeof(filename) - 1);

	/* if the link doesn't exist, no VFIO for us */
	if (ret < 0)
		return 0;

	ret = rte_strsplit(filename, sizeof(filename),
			tok, RTE_DIM(tok), '/');

	if (ret <= 0) {
		RTE_LOG(ERR, EAL, "  %s cannot get IOMMU group\n", dev_addr);
		return -1;
	}

	/* IOMMU group is always the last token */
	errno = 0;
	group_tok = tok[ret - 1];
	end = group_tok;
	*iommu_group_no = strtol(group_tok, &end, 10);
	/* reject the token if nothing was parsed or trailing garbage remains */
	if (end == group_tok || *end != '\0' || errno != 0) {
		RTE_LOG(ERR, EAL, "  %s error parsing IOMMU number!\n", dev_addr);
		return -1;
	}

	return 1;
}

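/*
 * DMA mapping callback for type 1 (x86) IOMMUs: map every DPDK memory
 * segment for DMA with a 1:1 physical address to IOVA mapping.
 */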
static int
vfio_type1_dma_map(int vfio_container_fd)
{
	const struct rte_memseg *ms = rte_eal_get_physmem_layout();
	int i, ret;

	/* map all DPDK segments for DMA. use 1:1 PA to IOVA mapping */
	for (i = 0; i < RTE_MAX_MEMSEG; i++) {
		struct vfio_iommu_type1_dma_map dma_map;

		if (ms[i].addr == NULL)
			break;

		memset(&dma_map, 0, sizeof(dma_map));
		dma_map.argsz = sizeof(struct vfio_iommu_type1_dma_map);
		dma_map.vaddr = ms[i].addr_64;
		dma_map.size = ms[i].len;
		dma_map.iova = ms[i].phys_addr;
		dma_map.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;

		ret = ioctl(vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dma_map);

		if (ret) {
			RTE_LOG(ERR, EAL, "  cannot set up DMA remapping, "
					"error %i (%s)\n", errno, strerror(errno));
			return -1;
		}
	}

	return 0;
}

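/*
 * DMA mapping callback for sPAPR (ppc64) IOMMUs: replace the default 32-bit
 * DMA window with one sized for all hugepage memory, then register and map
 * every DPDK memory segment, again 1:1 PA to IOVA.
 */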
static int
vfio_spapr_dma_map(int vfio_container_fd)
{
	const struct rte_memseg *ms = rte_eal_get_physmem_layout();
	int i, ret;

	struct vfio_iommu_spapr_register_memory reg = {
		.argsz = sizeof(reg),
		.flags = 0
	};
	struct vfio_iommu_spapr_tce_info info = {
		.argsz = sizeof(info),
	};
	struct vfio_iommu_spapr_tce_create create = {
		.argsz = sizeof(create),
	};
	struct vfio_iommu_spapr_tce_remove remove = {
		.argsz = sizeof(remove),
	};

	/* query spapr iommu info */
	ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
	if (ret) {
		RTE_LOG(ERR, EAL, "  cannot get iommu info, "
				"error %i (%s)\n", errno, strerror(errno));
		return -1;
	}

	/* remove default DMA of 32 bit window */
	remove.start_addr = info.dma32_window_start;
	ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_REMOVE, &remove);
	if (ret) {
		RTE_LOG(ERR, EAL, "  cannot remove default DMA window, "
				"error %i (%s)\n", errno, strerror(errno));
		return -1;
	}

	/* calculate window size based on number of hugepages configured */
	create.window_size = rte_eal_get_physmem_size();
	create.page_shift = __builtin_ctzll(ms->hugepage_sz);
	create.levels = 2;

	ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
	if (ret) {
		RTE_LOG(ERR, EAL, "  cannot create new DMA window, "
				"error %i (%s)\n", errno, strerror(errno));
		return -1;
	}

	/* map all DPDK segments for DMA. use 1:1 PA to IOVA mapping */
	for (i = 0; i < RTE_MAX_MEMSEG; i++) {
		struct vfio_iommu_type1_dma_map dma_map;

		if (ms[i].addr == NULL)
			break;

		reg.vaddr = (uintptr_t) ms[i].addr;
		reg.size = ms[i].len;
		ret = ioctl(vfio_container_fd,
			VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);
		if (ret) {
			RTE_LOG(ERR, EAL, "  cannot register vaddr for IOMMU, "
				"error %i (%s)\n", errno, strerror(errno));
			return -1;
		}

		memset(&dma_map, 0, sizeof(dma_map));
		dma_map.argsz = sizeof(struct vfio_iommu_type1_dma_map);
		dma_map.vaddr = ms[i].addr_64;
		dma_map.size = ms[i].len;
		dma_map.iova = ms[i].phys_addr;
		dma_map.flags = VFIO_DMA_MAP_FLAG_READ |
				 VFIO_DMA_MAP_FLAG_WRITE;

		ret = ioctl(vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dma_map);

		if (ret) {
			RTE_LOG(ERR, EAL, "  cannot set up DMA remapping, "
				"error %i (%s)\n", errno, strerror(errno));
			return -1;
		}
	}

	return 0;
}

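/* DMA mapping callback for No-IOMMU mode: nothing needs to be mapped */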
static int
vfio_noiommu_dma_map(int __rte_unused vfio_container_fd)
{
	/* No-IOMMU mode does not need DMA mapping */
	return 0;
}

#endif /* VFIO_PRESENT */