/*
 * Copyright (c) 2016  Intel Corporation.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "test_common.h"

int
port_init(dpdk_port_t port, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_conf port_conf;
	const uint16_t rx_rings = 1, tx_rings = 1;
	uint16_t q;
	int retval;
	int socket_id;

	socket_id = rte_eth_dev_socket_id(port);

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	port_conf.rxmode.max_rx_pkt_len = ETHER_MAX_LEN;

	/* Configure the Ethernet device. */
	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
	if (retval != 0)
		return retval;

	/* Allocate and set up 1 RX queue per Ethernet port. */
	for (q = 0; q < rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE,
				socket_id, NULL, mbuf_pool);
		if (retval < 0)
			return retval;
	}

	/* Allocate and set up 1 TX queue per Ethernet port. */
	for (q = 0; q < tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE,
				socket_id, NULL);
		if (retval < 0)
			return retval;
	}

	/* Start the Ethernet port. */
	retval = rte_eth_dev_start(port);
	if (retval < 0)
		return retval;

	/* Enable RX in promiscuous mode for the Ethernet device. */
	rte_eth_promiscuous_enable(port);

	return 0;
}
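
/*
 * Usage example (a minimal sketch, not part of the original test code):
 * how port_init() above is typically driven.  NB_MBUF, MBUF_CACHE_SIZE
 * and the use of port 0 are illustrative assumptions, not values defined
 * by this test suite.
 */
#ifdef TEST_COMMON_USAGE_EXAMPLES

#define NB_MBUF		8192
#define MBUF_CACHE_SIZE	256

static int
port_init_example(void)
{
	struct rte_mempool *mp;

	/* one pool for all RX mbufs, allocated on the local NUMA socket. */
	mp = rte_pktmbuf_pool_create("MBUF_POOL", NB_MBUF, MBUF_CACHE_SIZE,
		0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
	if (mp == NULL)
		return -ENOMEM;

	/* configure and start port 0 with one RX and one TX queue. */
	return port_init(0, mp);
}

#endif /* TEST_COMMON_USAGE_EXAMPLES */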

/* TODO: The functions below are a shameless rip of examples/udpfwd/pkt.c.
 * It would be better to move them into a separate library so that all
 * future apps can re-use this code.
 */
void
fill_pkt_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t l3, uint32_t l4)
{
	m->l2_len = l2;
	m->l3_len = l3;
	m->l4_len = l4;
	m->tso_segsz = 0;
	m->outer_l2_len = 0;
	m->outer_l3_len = 0;
}

int
is_ipv4_frag(const struct ipv4_hdr *iph)
{
	const uint16_t mask = rte_cpu_to_be_16(~IPV4_HDR_DF_FLAG);

	return ((mask & iph->fragment_offset) != 0);
}

void
fill_ipv4_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t proto,
	uint32_t frag)
{
	const struct ipv4_hdr *iph;
	int32_t dlen, len;

	dlen = rte_pktmbuf_data_len(m);
	dlen -= l2 + sizeof(struct udp_hdr);

	iph = rte_pktmbuf_mtod_offset(m, const struct ipv4_hdr *, l2);
	len = (iph->version_ihl & IPV4_HDR_IHL_MASK) * IPV4_IHL_MULTIPLIER;

	if (frag != 0 && is_ipv4_frag(iph)) {
		m->packet_type &= ~RTE_PTYPE_L4_MASK;
		m->packet_type |= RTE_PTYPE_L4_FRAG;
	}

	if (len > dlen || (proto <= IPPROTO_MAX && iph->next_proto_id != proto))
		m->packet_type = RTE_PTYPE_UNKNOWN;
	else
		fill_pkt_hdr_len(m, l2, len, sizeof(struct udp_hdr));
}

int
ipv6x_hdr(uint32_t proto)
{
	return (proto == IPPROTO_HOPOPTS ||
		proto == IPPROTO_ROUTING ||
		proto == IPPROTO_FRAGMENT ||
		proto == IPPROTO_AH ||
		proto == IPPROTO_NONE ||
		proto == IPPROTO_DSTOPTS);
}

uint16_t
ipv4x_cksum(const void *iph, size_t len)
{
	uint16_t cksum;

	cksum = rte_raw_cksum(iph, len);
	return (cksum == 0xffff) ? cksum : ~cksum;
}
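
/*
 * Usage example (an illustrative sketch, guarded out of the build):
 * ipv4x_cksum() computes the one's complement sum over the IPv4 header;
 * the checksum field itself must be zero while the sum is taken.
 */
#ifdef TEST_COMMON_USAGE_EXAMPLES

static void
set_ipv4_cksum_example(struct ipv4_hdr *iph)
{
	size_t len;

	/* header length in bytes, as encoded in the IHL field. */
	len = (iph->version_ihl & IPV4_HDR_IHL_MASK) * IPV4_IHL_MULTIPLIER;

	iph->hdr_checksum = 0;
	iph->hdr_checksum = ipv4x_cksum(iph, len);
}

#endif /* TEST_COMMON_USAGE_EXAMPLES */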

void
fill_ipv6x_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t nproto,
	uint32_t fproto)
{
	const struct ip6_ext *ipx;
	int32_t dlen, len, ofs;

	len = sizeof(struct ipv6_hdr);

	dlen = rte_pktmbuf_data_len(m);
	dlen -= l2 + sizeof(struct udp_hdr);

	ofs = l2 + len;
	ipx = rte_pktmbuf_mtod_offset(m, const struct ip6_ext *, ofs);

	while (ofs > 0 && len < dlen) {

		switch (nproto) {
		case IPPROTO_HOPOPTS:
		case IPPROTO_ROUTING:
		case IPPROTO_DSTOPTS:
			ofs = (ipx->ip6e_len + 1) << 3;
			break;
		case IPPROTO_AH:
			ofs = (ipx->ip6e_len + 2) << 2;
			break;
		case IPPROTO_FRAGMENT:
			/*
			 * tso_segsz is not used by RX, so use it as a
			 * temporary buffer to store the fragment offset.
			 */
			m->tso_segsz = ofs;
			ofs = sizeof(struct ip6_frag);
			m->packet_type &= ~RTE_PTYPE_L4_MASK;
			m->packet_type |= RTE_PTYPE_L4_FRAG;
			break;
		default:
			ofs = 0;
		}

		if (ofs > 0) {
			nproto = ipx->ip6e_nxt;
			len += ofs;
			ipx += ofs / sizeof(*ipx);
		}
	}

	/* unrecognised or invalid packet. */
	if ((ofs == 0 && nproto != fproto) || len > dlen)
		m->packet_type = RTE_PTYPE_UNKNOWN;
	else
		fill_pkt_hdr_len(m, l2, len, sizeof(struct udp_hdr));
}

void
fill_ipv6_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t fproto)
{
	const struct ipv6_hdr *iph;

	/* use the caller-supplied L2 length, so VLAN-tagged frames work. */
	iph = rte_pktmbuf_mtod_offset(m, const struct ipv6_hdr *, l2);

	if (iph->proto == fproto)
		fill_pkt_hdr_len(m, l2, sizeof(struct ipv6_hdr),
			sizeof(struct udp_hdr));
	else if (ipv6x_hdr(iph->proto) != 0)
		fill_ipv6x_hdr_len(m, l2, iph->proto, fproto);
}

void
fill_eth_hdr_len(struct rte_mbuf *m)
{
	uint32_t dlen, l2;
	uint16_t etp;
	const struct ether_hdr *eth;

	dlen = rte_pktmbuf_data_len(m);

	/* check that the first segment is at least 42B long. */
	if (dlen < sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) +
			sizeof(struct udp_hdr)) {
		m->packet_type = RTE_PTYPE_UNKNOWN;
		return;
	}

	l2 = sizeof(*eth);

	eth = rte_pktmbuf_mtod(m, const struct ether_hdr *);
	etp = eth->ether_type;
	if (etp == rte_be_to_cpu_16(ETHER_TYPE_VLAN)) {
		/* use the inner ethertype for the L3 checks below. */
		const struct vlan_hdr *vh;

		vh = rte_pktmbuf_mtod_offset(m, const struct vlan_hdr *, l2);
		etp = vh->eth_proto;
		l2 += sizeof(struct vlan_hdr);
	}

	if (etp == rte_be_to_cpu_16(ETHER_TYPE_IPv4)) {
		m->packet_type = RTE_PTYPE_L4_UDP |
			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L2_ETHER;
		fill_ipv4_hdr_len(m, l2, IPPROTO_UDP, 1);
	} else if (etp == rte_be_to_cpu_16(ETHER_TYPE_IPv6) &&
			dlen >= l2 + sizeof(struct ipv6_hdr) +
			sizeof(struct udp_hdr)) {
		m->packet_type = RTE_PTYPE_L4_UDP |
			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L2_ETHER;
		fill_ipv6_hdr_len(m, l2, IPPROTO_UDP);
	} else
		m->packet_type = RTE_PTYPE_UNKNOWN;
}

/*
 * Generic RX callback: assumes the HW doesn't recognise any packet type,
 * so the packet type is reconstructed in software for every mbuf.
 */
uint16_t
typen_rx_callback(dpdk_port_t port, __rte_unused uint16_t queue,
	struct rte_mbuf *pkt[], uint16_t nb_pkts,
	__rte_unused uint16_t max_pkts, void *user_param)
{
	uint32_t j;

	RTE_SET_USED(port);
	RTE_SET_USED(user_param);

	for (j = 0; j != nb_pkts; j++)
		fill_eth_hdr_len(pkt[j]);

	return nb_pkts;
}
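
/*
 * Usage example (a sketch, assuming the stock DPDK rte_eth_add_rx_callback()
 * API): install typen_rx_callback() on a queue so packet types are filled
 * in by software right after each RX burst.
 */
#ifdef TEST_COMMON_USAGE_EXAMPLES

static int
install_typen_callback_example(dpdk_port_t port, uint16_t queue)
{
	if (rte_eth_add_rx_callback(port, queue, typen_rx_callback,
			NULL) == NULL)
		return -1;
	return 0;
}

#endif /* TEST_COMMON_USAGE_EXAMPLES */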

int
dummy_lookup4(void *opaque, const struct in_addr *addr, struct tle_dest *res)
{
	RTE_SET_USED(opaque);
	RTE_SET_USED(addr);
	RTE_SET_USED(res);
	return -ENOENT;
}

int
dummy_lookup6(void *opaque, const struct in6_addr *addr, struct tle_dest *res)
{
	RTE_SET_USED(opaque);
	RTE_SET_USED(addr);
	RTE_SET_USED(res);
	return -ENOENT;
}
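
/*
 * Usage example (a sketch only; the tle_ctx_param fields are assumed from
 * the TLDK public headers and should be checked against tle_ctx.h): the
 * dummy lookup callbacks above satisfy the mandatory route-lookup hooks
 * for tests that create a context but never need a real route.
 */
#ifdef TEST_COMMON_USAGE_EXAMPLES

static struct tle_ctx *
create_dummy_ctx_example(void)
{
	struct tle_ctx_param prm;

	memset(&prm, 0, sizeof(prm));
	prm.socket_id = SOCKET_ID_ANY;
	prm.proto = TLE_PROTO_UDP;
	prm.max_streams = 1;
	prm.max_stream_rbufs = 1;
	prm.max_stream_sbufs = 1;
	prm.lookup4 = dummy_lookup4;
	prm.lookup6 = dummy_lookup6;

	return tle_ctx_create(&prm);
}

#endif /* TEST_COMMON_USAGE_EXAMPLES */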