test_common.cpp revision aa97dd1c
1/*
2 * Copyright (c) 2016  Intel Corporation.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 *     http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16#include "test_common.h"
17
18int
19port_init(uint8_t port, struct rte_mempool *mbuf_pool)
20{
21	struct rte_eth_conf port_conf;
22	const uint16_t rx_rings = 1, tx_rings = 1;
23	uint16_t q;
24	int retval;
25	int socket_id;
26
27	if (port >= rte_eth_dev_count())
28		return -1;
29
30	socket_id = rte_eth_dev_socket_id(port);
31
32	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
33	port_conf.rxmode.max_rx_pkt_len = ETHER_MAX_LEN;
34
35	/* Configure the Ethernet device. */
36	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
37	if (retval != 0)
38		return retval;
39
40	/* Allocate and set up 1 RX queue per Ethernet port. */
41	for (q = 0; q < rx_rings; q++) {
42		retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE,
43				socket_id, NULL, mbuf_pool);
44		if (retval < 0)
45			return retval;
46	}
47
48	/* Allocate and set up 1 TX queue per Ethernet port. */
49	for (q = 0; q < tx_rings; q++) {
50		retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE,
51				socket_id, NULL);
52		if (retval < 0)
53			return retval;
54	}
55
56	/* Start the Ethernet port. */
57	retval = rte_eth_dev_start(port);
58	if (retval < 0)
59		return retval;
60
61	/* Enable RX in promiscuous mode for the Ethernet device. */
62	rte_eth_promiscuous_enable(port);
63
64	return 0;
65}
66
/* TODO: Shameless rip of examples/udpfwd/pkt.c below. Sorry! Would like to
 * move these functions into a separate lib so all apps created in the
 * future can re-use this code.
 */
71void
72fill_pkt_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t l3, uint32_t l4)
73{
74	m->l2_len = l2;
75	m->l3_len = l3;
76	m->l4_len = l4;
77	m->tso_segsz = 0;
78	m->outer_l2_len = 0;
79	m->outer_l3_len = 0;
80}
81
82int
83is_ipv4_frag(const struct ipv4_hdr *iph)
84{
85	const uint16_t mask = rte_cpu_to_be_16(~IPV4_HDR_DF_FLAG);
86
87	return ((mask & iph->fragment_offset) != 0);
88}
89
/*
 * Parse the IPv4 header that starts at offset l2 and fill in the mbuf's
 * header-length fields.
 * proto - expected L4 protocol (pass a value > IPPROTO_MAX to skip the check);
 * frag  - non-zero to detect fragments and mark them RTE_PTYPE_L4_FRAG.
 * On a header that doesn't fit the first segment or a protocol mismatch the
 * packet_type is reset to RTE_PTYPE_UNKNOWN.
 */
void
fill_ipv4_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t proto,
	uint32_t frag)
{
	const struct ipv4_hdr *iph;
	int32_t dlen, len;

	/* bytes available for the L3 header in the first segment. */
	dlen = rte_pktmbuf_data_len(m);
	dlen -= l2 + sizeof(struct udp_hdr);

	iph = rte_pktmbuf_mtod_offset(m, const struct ipv4_hdr *, l2);
	/* IHL is in 4-byte words; len is the actual IPv4 header length. */
	len = (iph->version_ihl & IPV4_HDR_IHL_MASK) * IPV4_IHL_MULTIPLIER;

	if (frag != 0 && is_ipv4_frag(iph)) {
		m->packet_type &= ~RTE_PTYPE_L4_MASK;
		m->packet_type |= RTE_PTYPE_L4_FRAG;
	}

	/* header truncated, or L4 protocol differs from the expected one. */
	if (len > dlen || (proto <= IPPROTO_MAX && iph->next_proto_id != proto))
		m->packet_type = RTE_PTYPE_UNKNOWN;
	else
		fill_pkt_hdr_len(m, l2, len, sizeof(struct udp_hdr));
}
113
114int
115ipv6x_hdr(uint32_t proto)
116{
117	return (proto == IPPROTO_HOPOPTS ||
118		proto == IPPROTO_ROUTING ||
119		proto == IPPROTO_FRAGMENT ||
120		proto == IPPROTO_AH ||
121		proto == IPPROTO_NONE ||
122		proto == IPPROTO_DSTOPTS);
123}
124
125uint16_t
126ipv4x_cksum(const void *iph, size_t len)
127{
128        uint16_t cksum;
129
130        cksum = rte_raw_cksum(iph, len);
131        return (cksum == 0xffff) ? cksum : ~cksum;
132}
133
/*
 * Walk the chain of IPv6 extension headers starting right after the fixed
 * IPv6 header at offset l2, accumulating the total L3 header length.
 * nproto - next-header value taken from the fixed IPv6 header;
 * fproto - the expected final (upper-layer) protocol.
 * On success the mbuf header-length fields are filled in; an unrecognised
 * chain or one that doesn't fit the first segment resets packet_type to
 * RTE_PTYPE_UNKNOWN.
 */
void
fill_ipv6x_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t nproto,
	uint32_t fproto)
{
	const struct ip6_ext *ipx;
	int32_t dlen, len, ofs;

	len = sizeof(struct ipv6_hdr);

	/* bytes available for L3 headers in the first segment. */
	dlen = rte_pktmbuf_data_len(m);
	dlen -= l2 + sizeof(struct udp_hdr);

	ofs = l2 + len;
	ipx = rte_pktmbuf_mtod_offset(m, const struct ip6_ext *, ofs);

	/* ofs doubles as a "keep going" flag: 0 stops the walk. */
	while (ofs > 0 && len < dlen) {

		switch (nproto) {
		case IPPROTO_HOPOPTS:
		case IPPROTO_ROUTING:
		case IPPROTO_DSTOPTS:
			/* ip6e_len counts 8-byte units beyond the first 8. */
			ofs = (ipx->ip6e_len + 1) << 3;
			break;
		case IPPROTO_AH:
			/* AH length is in 4-byte units, excluding first 8B. */
			ofs = (ipx->ip6e_len + 2) << 2;
			break;
		case IPPROTO_FRAGMENT:
			/*
			 * tso_segsz is not used by RX, so use it as temporary
			 * buffer to store the fragment offset.
			 */
			m->tso_segsz = ofs;
			ofs = sizeof(struct ip6_frag);
			m->packet_type &= ~RTE_PTYPE_L4_MASK;
			m->packet_type |= RTE_PTYPE_L4_FRAG;
			break;
		default:
			/* not an extension header; terminates the walk. */
			ofs = 0;
		}

		if (ofs > 0) {
			/* advance to the next header in the chain. */
			nproto = ipx->ip6e_nxt;
			len += ofs;
			ipx += ofs / sizeof(*ipx);
		}
	}

	/* unrecognised or invalid packet. */
	if ((ofs == 0 && nproto != fproto) || len > dlen)
		m->packet_type = RTE_PTYPE_UNKNOWN;
	else
		fill_pkt_hdr_len(m, l2, len, sizeof(struct udp_hdr));
}
187
188void
189fill_ipv6_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t fproto)
190{
191	const struct ipv6_hdr *iph;
192
193	iph = rte_pktmbuf_mtod_offset(m, const struct ipv6_hdr *,
194		sizeof(struct ether_hdr));
195
196	if (iph->proto == fproto)
197		fill_pkt_hdr_len(m, l2, sizeof(struct ipv6_hdr),
198			sizeof(struct udp_hdr));
199	else if (ipv6x_hdr(iph->proto) != 0)
200		fill_ipv6x_hdr_len(m, l2, iph->proto, fproto);
201}
202
203void
204fill_eth_hdr_len(struct rte_mbuf *m)
205{
206	uint32_t dlen, l2;
207	uint16_t etp;
208	const struct ether_hdr *eth;
209
210	dlen = rte_pktmbuf_data_len(m);
211
212	/* check that first segment is at least 42B long. */
213	if (dlen < sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) +
214			sizeof(struct udp_hdr)) {
215		m->packet_type = RTE_PTYPE_UNKNOWN;
216		return;
217	}
218
219	l2 = sizeof(*eth);
220
221	eth = rte_pktmbuf_mtod(m, const struct ether_hdr *);
222	etp = eth->ether_type;
223	if (etp == rte_be_to_cpu_16(ETHER_TYPE_VLAN))
224		l2 += sizeof(struct vlan_hdr);
225
226	if (etp == rte_be_to_cpu_16(ETHER_TYPE_IPv4)) {
227		m->packet_type = RTE_PTYPE_L4_UDP |
228			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
229			RTE_PTYPE_L2_ETHER;
230		fill_ipv4_hdr_len(m, l2, IPPROTO_UDP, 1);
231	} else if (etp == rte_be_to_cpu_16(ETHER_TYPE_IPv6) &&
232			dlen >= l2 + sizeof(struct ipv6_hdr) +
233			sizeof(struct udp_hdr)) {
234		m->packet_type = RTE_PTYPE_L4_UDP |
235			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
236			RTE_PTYPE_L2_ETHER;
237			fill_ipv6_hdr_len(m, l2, IPPROTO_UDP);
238	} else
239		m->packet_type = RTE_PTYPE_UNKNOWN;
240}
241
242/*
243 * generic, assumes HW doesn't recognise any packet type.
244 */
245uint16_t
246typen_rx_callback(uint8_t port, __rte_unused uint16_t queue,
247	struct rte_mbuf *pkt[], uint16_t nb_pkts,
248	__rte_unused uint16_t max_pkts, void *user_param)
249{
250	uint32_t j;
251
252	for (j = 0; j != nb_pkts; j++) {
253		fill_eth_hdr_len(pkt[j]);
254
255	}
256
257	return nb_pkts;
258}
259
260int
261dummy_lookup4(void *opaque, const struct in_addr *addr, struct tle_dest *res)
262{
263	RTE_SET_USED(opaque);
264	RTE_SET_USED(addr);
265	RTE_SET_USED(res);
266	return -ENOENT;
267}
268
269int
270dummy_lookup6(void *opaque, const struct in6_addr *addr, struct tle_dest *res)
271{
272	RTE_SET_USED(opaque);
273	RTE_SET_USED(addr);
274	RTE_SET_USED(res);
275	return -ENOENT;
276}
277