/* pkt.c revision aa97dd1c */
/*
 * Copyright (c) 2016  Intel Corporation.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <netinet/ip6.h>
#include <rte_arp.h>

#include "netbe.h"

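/*
 * Helper to compose the mbuf tx_offload field in a single store.
 * The shift amounts below (7, 16, 24, 40, 49) follow the bit-field layout
 * of rte_mbuf.tx_offload: l2_len:7, l3_len:9, l4_len:8, tso_segsz:16,
 * outer_l3_len:9, outer_l2_len:7.
 */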
static inline uint64_t
_mbuf_tx_offload(uint64_t il2, uint64_t il3, uint64_t il4, uint64_t tso,
	uint64_t ol3, uint64_t ol2)
{
	return il2 | il3 << 7 | il4 << 16 | tso << 24 | ol3 << 40 | ol2 << 49;
}

static inline void
fill_pkt_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t l3, uint32_t l4)
{
	m->tx_offload = _mbuf_tx_offload(l2, l3, l4, 0, 0, 0);
}

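/*
 * A packet is a fragment if, with the DF bit masked out, the
 * fragment_offset field is still non-zero (MF flag set or offset != 0).
 */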
static inline int
is_ipv4_frag(const struct ipv4_hdr *iph)
{
	const uint16_t mask = rte_cpu_to_be_16(~IPV4_HDR_DF_FLAG);

	return ((mask & iph->fragment_offset) != 0);
}

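/*
 * The upper four bits of tcp_hdr.data_off hold the header length in
 * 32-bit words, so the byte length is (data_off >> 4) * 4.
 */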
static inline uint32_t
get_tcp_header_size(struct rte_mbuf *m, uint32_t l2_len, uint32_t l3_len)
{
	const struct tcp_hdr *tcp;

	tcp = rte_pktmbuf_mtod_offset(m, struct tcp_hdr *, l2_len + l3_len);
	return (tcp->data_off >> 4) * 4;
}

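/*
 * Trim anything beyond the length reported by the IP header (for example,
 * padding added to reach the minimum Ethernet frame size), so that pkt_len
 * reflects the real L3 datagram. An IPv6 variant follows below.
 */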
static inline void
adjust_ipv4_pktlen(struct rte_mbuf *m, uint32_t l2_len)
{
	uint32_t plen, trim;
	const struct ipv4_hdr *iph;

	iph = rte_pktmbuf_mtod_offset(m, const struct ipv4_hdr *, l2_len);
	plen = rte_be_to_cpu_16(iph->total_length) + l2_len;
	if (plen < m->pkt_len) {
		trim = m->pkt_len - plen;
		rte_pktmbuf_trim(m, trim);
	}
}

static inline void
adjust_ipv6_pktlen(struct rte_mbuf *m, uint32_t l2_len)
{
	uint32_t plen, trim;
	const struct ipv6_hdr *iph;

	iph = rte_pktmbuf_mtod_offset(m, const struct ipv6_hdr *, l2_len);
	plen = rte_be_to_cpu_16(iph->payload_len) + sizeof(*iph) + l2_len;
	if (plen < m->pkt_len) {
		trim = m->pkt_len - plen;
		rte_pktmbuf_trim(m, trim);
	}
}

static inline void
tcp_stat_update(struct netbe_lcore *lc, const struct rte_mbuf *m,
	uint32_t l2_len, uint32_t l3_len)
{
	const struct tcp_hdr *th;

	th = rte_pktmbuf_mtod_offset(m, struct tcp_hdr *, l2_len + l3_len);
	lc->tcp_stat.flags[th->tcp_flags]++;
}

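/*
 * Returns the IPv4 header length taken from the IHL field.
 * When "frag" is non-zero, fragmented packets get their packet type
 * switched to L4_FRAG; passing a "proto" value above IPPROTO_MAX
 * (e.g. UINT32_MAX) skips the next_proto_id check.
 */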
static inline uint32_t
get_ipv4_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t proto, uint32_t frag)
{
	const struct ipv4_hdr *iph;
	int32_t dlen, len;

	dlen = rte_pktmbuf_data_len(m);
	dlen -= l2;

	iph = rte_pktmbuf_mtod_offset(m, const struct ipv4_hdr *, l2);
	len = (iph->version_ihl & IPV4_HDR_IHL_MASK) * IPV4_IHL_MULTIPLIER;

	if (frag != 0 && is_ipv4_frag(iph)) {
		m->packet_type &= ~RTE_PTYPE_L4_MASK;
		m->packet_type |= RTE_PTYPE_L4_FRAG;
	}

	if (len > dlen || (proto <= IPPROTO_MAX && iph->next_proto_id != proto))
		m->packet_type = RTE_PTYPE_UNKNOWN;

	return len;
}

static inline void
fill_ipv4_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t proto,
	uint32_t frag, uint32_t l4_len)
{
	uint32_t len;

	len = get_ipv4_hdr_len(m, l2, proto, frag);
	fill_pkt_hdr_len(m, l2, len, l4_len);
	adjust_ipv4_pktlen(m, l2);
}

static inline int
ipv6x_hdr(uint32_t proto)
{
	return (proto == IPPROTO_HOPOPTS ||
		proto == IPPROTO_ROUTING ||
		proto == IPPROTO_FRAGMENT ||
		proto == IPPROTO_AH ||
		proto == IPPROTO_NONE ||
		proto == IPPROTO_DSTOPTS);
}

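/*
 * Walk the chain of IPv6 extension headers until the expected L4 protocol
 * (fproto) is found. Hop-by-hop, routing and destination-options headers
 * are (ip6e_len + 1) * 8 bytes long, AH is (ip6e_len + 2) * 4 bytes, and
 * the fragment header has a fixed size of 8 bytes.
 */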
static inline uint32_t
get_ipv6x_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t nproto,
	uint32_t fproto)
{
	const struct ip6_ext *ipx;
	int32_t dlen, len, ofs;

	len = sizeof(struct ipv6_hdr);

	dlen = rte_pktmbuf_data_len(m);
	dlen -= l2;

	ofs = l2 + len;
	ipx = rte_pktmbuf_mtod_offset(m, const struct ip6_ext *, ofs);

	while (ofs > 0 && len < dlen) {

		switch (nproto) {
		case IPPROTO_HOPOPTS:
		case IPPROTO_ROUTING:
		case IPPROTO_DSTOPTS:
			ofs = (ipx->ip6e_len + 1) << 3;
			break;
		case IPPROTO_AH:
			ofs = (ipx->ip6e_len + 2) << 2;
			break;
		case IPPROTO_FRAGMENT:
			/*
			 * tso_segsz is not used on the RX path, so use it as
			 * a temporary buffer to store the fragment header
			 * offset.
			 */
			m->tso_segsz = ofs;
			ofs = sizeof(struct ip6_frag);
			m->packet_type &= ~RTE_PTYPE_L4_MASK;
			m->packet_type |= RTE_PTYPE_L4_FRAG;
			break;
		default:
			ofs = 0;
		}

		if (ofs > 0) {
			nproto = ipx->ip6e_nxt;
			len += ofs;
			ipx += ofs / sizeof(*ipx);
		}
	}

	/* unrecognized or invalid packet. */
	if ((ofs == 0 && nproto != fproto) || len > dlen)
		m->packet_type = RTE_PTYPE_UNKNOWN;

	return len;
}

static inline uint32_t
get_ipv6_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t fproto)
{
	const struct ipv6_hdr *iph;

	iph = rte_pktmbuf_mtod_offset(m, const struct ipv6_hdr *, l2);

	if (iph->proto == fproto)
		return sizeof(struct ipv6_hdr);
	else if (ipv6x_hdr(iph->proto) != 0)
		return get_ipv6x_hdr_len(m, l2, iph->proto, fproto);

	m->packet_type = RTE_PTYPE_UNKNOWN;
	return 0;
}

static inline void
fill_ipv6_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t fproto,
	uint32_t l4_len)
{
	uint32_t len;

	len = get_ipv6_hdr_len(m, l2, fproto);
	fill_pkt_hdr_len(m, l2, len, l4_len);
	adjust_ipv6_pktlen(m, l2);
}

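/*
 * Divert Ethernet/IPv4 ARP requests into the per-port arp_buf so the
 * control path can answer them later; anything else (or an overflowing
 * buffer) is handed back to the caller. A NULL return means ownership of
 * the mbuf has moved to arp_buf.
 */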
static inline struct rte_mbuf *
handle_arp(struct rte_mbuf *m, struct netbe_lcore *lc, uint8_t port,
	uint32_t l2len)
{
	const struct arp_hdr *ahdr;
	struct pkt_buf *abuf;

	ahdr = rte_pktmbuf_mtod_offset(m, const struct arp_hdr *, l2len);

	if (ahdr->arp_hrd != rte_be_to_cpu_16(ARP_HRD_ETHER) ||
		ahdr->arp_pro != rte_be_to_cpu_16(ETHER_TYPE_IPv4) ||
		ahdr->arp_op != rte_be_to_cpu_16(ARP_OP_REQUEST)) {

		m->packet_type = RTE_PTYPE_UNKNOWN;
		return m;
	}

	m->l2_len = l2len;
	abuf = &lc->prtq[port].arp_buf;
	if (abuf->num >= RTE_DIM(abuf->pkt))
		return m;

	abuf->pkt[abuf->num++] = m;

	return NULL;
}

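/*
 * Software parsing of the Ethernet/IP/TCP headers for NICs that do not
 * classify packet types; ARP requests are filtered out via handle_arp().
 */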
static inline struct rte_mbuf *
fill_eth_tcp_arp_hdr_len(struct rte_mbuf *m, struct netbe_lcore *lc,
	uint8_t port)
{
	uint32_t dlen, l2_len, l3_len, l4_len;
	uint16_t etp;
	const struct ether_hdr *eth;

	dlen = rte_pktmbuf_data_len(m);

	/* check that first segment is at least 54B long. */
	if (dlen < sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) +
			sizeof(struct tcp_hdr)) {
		m->packet_type = RTE_PTYPE_UNKNOWN;
		return m;
	}

	l2_len = sizeof(*eth);

	eth = rte_pktmbuf_mtod(m, const struct ether_hdr *);
	etp = eth->ether_type;
	if (etp == rte_be_to_cpu_16(ETHER_TYPE_VLAN))
		l2_len += sizeof(struct vlan_hdr);

	if (etp == rte_be_to_cpu_16(ETHER_TYPE_ARP))
		return handle_arp(m, lc, port, l2_len);

	if (etp == rte_be_to_cpu_16(ETHER_TYPE_IPv4)) {
		m->packet_type = RTE_PTYPE_L4_TCP |
			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L2_ETHER;
		l3_len = get_ipv4_hdr_len(m, l2_len, IPPROTO_TCP, 1);
		l4_len = get_tcp_header_size(m, l2_len, l3_len);
		fill_pkt_hdr_len(m, l2_len, l3_len, l4_len);
		adjust_ipv4_pktlen(m, l2_len);
	} else if (etp == rte_be_to_cpu_16(ETHER_TYPE_IPv6) &&
			dlen >= l2_len + sizeof(struct ipv6_hdr) +
			sizeof(struct tcp_hdr)) {
		m->packet_type = RTE_PTYPE_L4_TCP |
			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L2_ETHER;
		l3_len = get_ipv6_hdr_len(m, l2_len, IPPROTO_TCP);
		l4_len = get_tcp_header_size(m, l2_len, l3_len);
		fill_pkt_hdr_len(m, l2_len, l3_len, l4_len);
		adjust_ipv6_pktlen(m, l2_len);
	} else
		m->packet_type = RTE_PTYPE_UNKNOWN;

	return m;
}

static inline void
fill_eth_tcp_hdr_len(struct rte_mbuf *m)
{
	uint32_t dlen, l2_len, l3_len, l4_len;
	uint16_t etp;
	const struct ether_hdr *eth;

	dlen = rte_pktmbuf_data_len(m);

	/* check that first segment is at least 54B long. */
	if (dlen < sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) +
			sizeof(struct tcp_hdr)) {
		m->packet_type = RTE_PTYPE_UNKNOWN;
		return;
	}

	l2_len = sizeof(*eth);

	eth = rte_pktmbuf_mtod(m, const struct ether_hdr *);
	etp = eth->ether_type;
	if (etp == rte_be_to_cpu_16(ETHER_TYPE_VLAN))
		l2_len += sizeof(struct vlan_hdr);

	if (etp == rte_be_to_cpu_16(ETHER_TYPE_IPv4)) {
		m->packet_type = RTE_PTYPE_L4_TCP |
			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L2_ETHER;
		l3_len = get_ipv4_hdr_len(m, l2_len, IPPROTO_TCP, 1);
		l4_len = get_tcp_header_size(m, l2_len, l3_len);
		fill_pkt_hdr_len(m, l2_len, l3_len, l4_len);
		adjust_ipv4_pktlen(m, l2_len);
	} else if (etp == rte_be_to_cpu_16(ETHER_TYPE_IPv6) &&
			dlen >= l2_len + sizeof(struct ipv6_hdr) +
			sizeof(struct tcp_hdr)) {
		m->packet_type = RTE_PTYPE_L4_TCP |
			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L2_ETHER;
		l3_len = get_ipv6_hdr_len(m, l2_len, IPPROTO_TCP);
		l4_len = get_tcp_header_size(m, l2_len, l3_len);
		fill_pkt_hdr_len(m, l2_len, l3_len, l4_len);
		adjust_ipv6_pktlen(m, l2_len);
	} else
		m->packet_type = RTE_PTYPE_UNKNOWN;
}

static inline void
fill_eth_udp_hdr_len(struct rte_mbuf *m)
{
	uint32_t dlen, l2_len;
	uint16_t etp;
	const struct ether_hdr *eth;

	dlen = rte_pktmbuf_data_len(m);

	/* check that first segment is at least 42B long. */
	if (dlen < sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) +
			sizeof(struct udp_hdr)) {
		m->packet_type = RTE_PTYPE_UNKNOWN;
		return;
	}

	l2_len = sizeof(*eth);

	eth = rte_pktmbuf_mtod(m, const struct ether_hdr *);
	etp = eth->ether_type;
	if (etp == rte_be_to_cpu_16(ETHER_TYPE_VLAN))
		l2_len += sizeof(struct vlan_hdr);

	if (etp == rte_be_to_cpu_16(ETHER_TYPE_IPv4)) {
		m->packet_type = RTE_PTYPE_L4_UDP |
			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L2_ETHER;
		fill_ipv4_hdr_len(m, l2_len, IPPROTO_UDP, 1,
			sizeof(struct udp_hdr));
	} else if (etp == rte_be_to_cpu_16(ETHER_TYPE_IPv6) &&
			dlen >= l2_len + sizeof(struct ipv6_hdr) +
			sizeof(struct udp_hdr)) {
		m->packet_type = RTE_PTYPE_L4_UDP |
			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L2_ETHER;
		fill_ipv6_hdr_len(m, l2_len, IPPROTO_UDP,
			sizeof(struct udp_hdr));
	} else
		m->packet_type = RTE_PTYPE_UNKNOWN;
}

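/*
 * One's-complement checksum over an IPv4 header of arbitrary length
 * (i.e. including options); same complement trick as rte_ipv4_cksum(),
 * but not limited to sizeof(struct ipv4_hdr).
 */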
static inline uint16_t
ipv4x_cksum(const void *iph, size_t len)
{
	uint16_t cksum;

	cksum = rte_raw_cksum(iph, len);
	return (cksum == 0xffff) ? cksum : ~cksum;
}

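/*
 * Post-reassembly fixups: restore the real L4 packet type, drop the
 * PKT_TX_IP_CKSUM flag set by the IPv4 reassembly code, shrink l3_len by
 * the removed IPv6 fragment header and, when the port has no HW IPv4
 * checksum offload (hwcsum == 0), recompute the IPv4 header checksum in
 * software (reassembly updates total_length and leaves the checksum stale).
 */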
static inline void
fix_reassembled(struct rte_mbuf *m, int32_t hwcsum, uint32_t proto)
{
	struct ipv4_hdr *iph;

	/* update packet type. */
	m->packet_type &= ~RTE_PTYPE_L4_MASK;

	if (proto == IPPROTO_TCP)
		m->packet_type |= RTE_PTYPE_L4_TCP;
	else
		m->packet_type |= RTE_PTYPE_L4_UDP;

	/* clear the TX flag set by the reassembly code. */
	m->ol_flags &= ~PKT_TX_IP_CKSUM;

	/* fix l3_len after reassembly. */
	if (RTE_ETH_IS_IPV6_HDR(m->packet_type))
		m->l3_len = m->l3_len - sizeof(struct ipv6_extension_fragment);

	/* recalculate the ipv4 cksum after reassembly. */
	else if (hwcsum == 0 && RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
		iph = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
		iph->hdr_checksum = ipv4x_cksum(iph, m->l3_len);
	}
}

static struct rte_mbuf *
reassemble(struct rte_mbuf *m, struct netbe_lcore *lc, uint64_t tms,
	uint8_t port, uint32_t proto)
{
	uint32_t l3cs;
	struct rte_ip_frag_tbl *tbl;
	struct rte_ip_frag_death_row *dr;

	tbl = lc->ftbl;
	dr = &lc->death_row;
	l3cs = lc->prtq[port].port.rx_offload & DEV_RX_OFFLOAD_IPV4_CKSUM;

	if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {

		struct ipv4_hdr *iph;

		iph = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);

		/* process this fragment. */
		m = rte_ipv4_frag_reassemble_packet(tbl, dr, m, tms, iph);

	} else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {

		struct ipv6_hdr *iph;
		struct ipv6_extension_fragment *fhdr;

		iph = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *, m->l2_len);

		/*
		 * the fragment header offset was temporarily stored in
		 * tso_segsz, just to avoid another scan of the ipv6 header.
		 */
		fhdr = rte_pktmbuf_mtod_offset(m,
			struct ipv6_extension_fragment *, m->tso_segsz);
		m->tso_segsz = 0;

		/* process this fragment. */
		m = rte_ipv6_frag_reassemble_packet(tbl, dr, m, tms, iph, fhdr);

	} else {
		rte_pktmbuf_free(m);
		m = NULL;
	}

	/* got reassembled packet. */
	if (m != NULL)
		fix_reassembled(m, l3cs, proto);

	return m;
}

/* exclude NULLs from the final list of packets. */
static inline uint32_t
compress_pkt_list(struct rte_mbuf *pkt[], uint32_t nb_pkt, uint32_t nb_zero)
{
	uint32_t i, j, k, l;

	for (j = nb_pkt; nb_zero != 0 && j-- != 0; ) {

		/* found a hole. */
		if (pkt[j] == NULL) {

			/* find how big it is. */
			for (i = j; i-- != 0 && pkt[i] == NULL; )
				;
			/* fill the hole. */
			for (k = j + 1, l = i + 1; k != nb_pkt; k++, l++)
				pkt[l] = pkt[k];

			nb_pkt -= j - i;
			nb_zero -= j - i;
			j = i + 1;
		}
	}

	return nb_pkt;
}

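/*
 * Example: pkt[] = {A, NULL, NULL, B, C} with nb_pkt = 5 and nb_zero = 2
 * becomes {A, B, C} and compress_pkt_list() returns 3.
 */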
/*
 * if it is a fragment, try to reassemble it,
 * if for some reason that cannot be done, then
 * set the pkt[] entry to NULL.
 */
#define DO_REASSEMBLE(proto) \
do { \
	if ((pkt[j]->packet_type & RTE_PTYPE_L4_MASK) == \
			RTE_PTYPE_L4_FRAG) { \
		cts = (cts == 0) ? rte_rdtsc() : cts; \
		pkt[j] = reassemble(pkt[j], lc, cts, port, (proto)); \
		x += (pkt[j] == NULL); \
	} \
} while (0)

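/*
 * Note: the macro relies on pkt[], j, lc, port, cts and x being defined in
 * the calling scope; cts lazily caches rte_rdtsc() so the timestamp is taken
 * at most once per burst, and x counts the entries that became NULL.
 */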
/*
 * HW can recognize L2/L3 with/without extensions/L4 (ixgbe/igb/fm10k)
 */
static uint16_t
type0_tcp_rx_callback(__rte_unused uint8_t port, __rte_unused uint16_t queue,
	struct rte_mbuf *pkt[], uint16_t nb_pkts,
	__rte_unused uint16_t max_pkts, void *user_param)
{
	uint32_t j, tp;
	struct netbe_lcore *lc;
	uint32_t l4_len, l3_len, l2_len;
	const struct ether_hdr *eth;

	lc = user_param;
	l2_len = sizeof(*eth);

	RTE_SET_USED(lc);

	for (j = 0; j != nb_pkts; j++) {

		NETBE_PKT_DUMP(pkt[j]);

		tp = pkt[j]->packet_type & (RTE_PTYPE_L4_MASK |
			RTE_PTYPE_L3_MASK | RTE_PTYPE_L2_MASK);

		switch (tp) {
		/* non fragmented tcp packets. */
		case (RTE_PTYPE_L4_TCP | RTE_PTYPE_L3_IPV4 |
				RTE_PTYPE_L2_ETHER):
			l4_len = get_tcp_header_size(pkt[j], l2_len,
				sizeof(struct ipv4_hdr));
			fill_pkt_hdr_len(pkt[j], l2_len,
				sizeof(struct ipv4_hdr), l4_len);
			adjust_ipv4_pktlen(pkt[j], l2_len);
			break;
		case (RTE_PTYPE_L4_TCP | RTE_PTYPE_L3_IPV6 |
				RTE_PTYPE_L2_ETHER):
			l4_len = get_tcp_header_size(pkt[j], l2_len,
				sizeof(struct ipv6_hdr));
			fill_pkt_hdr_len(pkt[j], l2_len,
				sizeof(struct ipv6_hdr), l4_len);
			adjust_ipv6_pktlen(pkt[j], l2_len);
			break;
		case (RTE_PTYPE_L4_TCP | RTE_PTYPE_L3_IPV4_EXT |
				RTE_PTYPE_L2_ETHER):
			l3_len = get_ipv4_hdr_len(pkt[j], l2_len,
				IPPROTO_TCP, 0);
			l4_len = get_tcp_header_size(pkt[j], l2_len, l3_len);
			fill_pkt_hdr_len(pkt[j], l2_len, l3_len, l4_len);
			adjust_ipv4_pktlen(pkt[j], l2_len);
			break;
		case (RTE_PTYPE_L4_TCP | RTE_PTYPE_L3_IPV6_EXT |
				RTE_PTYPE_L2_ETHER):
			l3_len = get_ipv6_hdr_len(pkt[j], l2_len, IPPROTO_TCP);
			l4_len = get_tcp_header_size(pkt[j], l2_len, l3_len);
			fill_pkt_hdr_len(pkt[j], l2_len, l3_len, l4_len);
			adjust_ipv6_pktlen(pkt[j], l2_len);
			break;
		default:
			/* treat packet types as invalid. */
			pkt[j]->packet_type = RTE_PTYPE_UNKNOWN;
			break;
		}
	}

	return nb_pkts;
}

/*
 * HW can recognize L2/L3 with/without extensions/L4 (ixgbe/igb/fm10k)
 */
static uint16_t
type0_udp_rx_callback(uint8_t port, __rte_unused uint16_t queue,
	struct rte_mbuf *pkt[], uint16_t nb_pkts,
	__rte_unused uint16_t max_pkts, void *user_param)
{
	uint32_t j, tp, x;
	uint64_t cts;
	struct netbe_lcore *lc;
	uint32_t l2_len;
	const struct ether_hdr *eth;

	lc = user_param;
	cts = 0;
	l2_len = sizeof(*eth);

	x = 0;
	for (j = 0; j != nb_pkts; j++) {

		NETBE_PKT_DUMP(pkt[j]);

		tp = pkt[j]->packet_type & (RTE_PTYPE_L4_MASK |
			RTE_PTYPE_L3_MASK | RTE_PTYPE_L2_MASK);

		switch (tp) {
		/* non fragmented udp packets. */
		case (RTE_PTYPE_L4_UDP | RTE_PTYPE_L3_IPV4 |
				RTE_PTYPE_L2_ETHER):
			fill_pkt_hdr_len(pkt[j], l2_len,
				sizeof(struct ipv4_hdr),
				sizeof(struct udp_hdr));
			adjust_ipv4_pktlen(pkt[j], l2_len);
			break;
		case (RTE_PTYPE_L4_UDP | RTE_PTYPE_L3_IPV6 |
				RTE_PTYPE_L2_ETHER):
			fill_pkt_hdr_len(pkt[j], l2_len,
				sizeof(struct ipv6_hdr),
				sizeof(struct udp_hdr));
			adjust_ipv6_pktlen(pkt[j], l2_len);
			break;
		case (RTE_PTYPE_L4_UDP | RTE_PTYPE_L3_IPV4_EXT |
				RTE_PTYPE_L2_ETHER):
			fill_ipv4_hdr_len(pkt[j], l2_len,
				UINT32_MAX, 0, sizeof(struct udp_hdr));
			break;
		case (RTE_PTYPE_L4_UDP | RTE_PTYPE_L3_IPV6_EXT |
				RTE_PTYPE_L2_ETHER):
			fill_ipv6_hdr_len(pkt[j], l2_len,
				IPPROTO_UDP, sizeof(struct udp_hdr));
			break;
		/* possibly fragmented udp packets. */
		case (RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L2_ETHER):
		case (RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L2_ETHER):
			fill_ipv4_hdr_len(pkt[j], l2_len,
				IPPROTO_UDP, 1, sizeof(struct udp_hdr));
			break;
		case (RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L2_ETHER):
		case (RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L2_ETHER):
			fill_ipv6_hdr_len(pkt[j], l2_len,
				IPPROTO_UDP, sizeof(struct udp_hdr));
			break;
		default:
			/* treat packet types as invalid. */
			pkt[j]->packet_type = RTE_PTYPE_UNKNOWN;
			break;
		}

		DO_REASSEMBLE(IPPROTO_UDP);
	}

	/* reassembly was invoked, clean up its death row. */
	if (cts != 0)
		rte_ip_frag_free_death_row(&lc->death_row, 0);

	if (x == 0)
		return nb_pkts;

	NETBE_TRACE("%s(port=%u, queue=%u, nb_pkts=%u): "
		"%u non-reassembled fragments;\n",
		__func__, port, queue, nb_pkts, x);

	return compress_pkt_list(pkt, nb_pkts, x);
}

/*
 * HW can recognize L2/L3/L4 and fragments (i40e).
 */
static uint16_t
type1_tcp_rx_callback(__rte_unused uint8_t port, __rte_unused uint16_t queue,
	struct rte_mbuf *pkt[], uint16_t nb_pkts,
	__rte_unused uint16_t max_pkts, void *user_param)
{
	uint32_t j, tp;
	struct netbe_lcore *lc;
	uint32_t l4_len, l3_len, l2_len;
	const struct ether_hdr *eth;

	lc = user_param;
	l2_len = sizeof(*eth);

	RTE_SET_USED(lc);

	for (j = 0; j != nb_pkts; j++) {

		NETBE_PKT_DUMP(pkt[j]);

		tp = pkt[j]->packet_type & (RTE_PTYPE_L4_MASK |
			RTE_PTYPE_L3_MASK | RTE_PTYPE_L2_MASK);

		switch (tp) {
		case (RTE_PTYPE_L4_TCP | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
				RTE_PTYPE_L2_ETHER):
			l3_len = get_ipv4_hdr_len(pkt[j], l2_len,
				IPPROTO_TCP, 0);
			l4_len = get_tcp_header_size(pkt[j], l2_len, l3_len);
			fill_pkt_hdr_len(pkt[j], l2_len, l3_len, l4_len);
			adjust_ipv4_pktlen(pkt[j], l2_len);
			tcp_stat_update(lc, pkt[j], l2_len, l3_len);
			break;
		case (RTE_PTYPE_L4_TCP | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
				RTE_PTYPE_L2_ETHER):
			l3_len = get_ipv6_hdr_len(pkt[j], l2_len, IPPROTO_TCP);
			l4_len = get_tcp_header_size(pkt[j], l2_len, l3_len);
			fill_pkt_hdr_len(pkt[j], l2_len, l3_len, l4_len);
			adjust_ipv6_pktlen(pkt[j], l2_len);
			tcp_stat_update(lc, pkt[j], l2_len, l3_len);
			break;
		default:
			/* treat packet types as invalid. */
			pkt[j]->packet_type = RTE_PTYPE_UNKNOWN;
			break;
		}

	}

	return nb_pkts;
}

/*
 * HW can recognize L2/L3/L4 and fragments (i40e).
 */
static uint16_t
type1_udp_rx_callback(uint8_t port, __rte_unused uint16_t queue,
	struct rte_mbuf *pkt[], uint16_t nb_pkts,
	__rte_unused uint16_t max_pkts, void *user_param)
{
	uint32_t j, tp, x;
	uint64_t cts;
	struct netbe_lcore *lc;
	uint32_t l2_len;
	const struct ether_hdr *eth;

	lc = user_param;
	cts = 0;
	l2_len = sizeof(*eth);

	x = 0;
	for (j = 0; j != nb_pkts; j++) {

		NETBE_PKT_DUMP(pkt[j]);

		tp = pkt[j]->packet_type & (RTE_PTYPE_L4_MASK |
			RTE_PTYPE_L3_MASK | RTE_PTYPE_L2_MASK);

		switch (tp) {
		case (RTE_PTYPE_L4_UDP | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
				RTE_PTYPE_L2_ETHER):
			fill_ipv4_hdr_len(pkt[j], l2_len,
				UINT32_MAX, 0, sizeof(struct udp_hdr));
			break;
		case (RTE_PTYPE_L4_UDP | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
				RTE_PTYPE_L2_ETHER):
			fill_ipv6_hdr_len(pkt[j], l2_len,
				IPPROTO_UDP, sizeof(struct udp_hdr));
			break;
		case (RTE_PTYPE_L4_FRAG | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
				RTE_PTYPE_L2_ETHER):
			fill_ipv4_hdr_len(pkt[j], l2_len,
				IPPROTO_UDP, 0, sizeof(struct udp_hdr));
			break;
		case (RTE_PTYPE_L4_FRAG | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
				RTE_PTYPE_L2_ETHER):
			fill_ipv6_hdr_len(pkt[j], l2_len,
				IPPROTO_UDP, sizeof(struct udp_hdr));
			break;
		default:
			/* treat packet types as invalid. */
			pkt[j]->packet_type = RTE_PTYPE_UNKNOWN;
			break;
		}

		DO_REASSEMBLE(IPPROTO_UDP);
	}

	/* reassembly was invoked, clean up its death row. */
	if (cts != 0)
		rte_ip_frag_free_death_row(&lc->death_row, 0);

	if (x == 0)
		return nb_pkts;

	NETBE_TRACE("%s(port=%u, queue=%u, nb_pkts=%u): "
		"%u non-reassembled fragments;\n",
		__func__, port, queue, nb_pkts, x);

	return compress_pkt_list(pkt, nb_pkts, x);
}

/*
 * generic, assumes HW doesn't recognize any packet type.
 */
static uint16_t
typen_tcp_arp_rx_callback(uint8_t port, uint16_t queue, struct rte_mbuf *pkt[],
	uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
{
	uint32_t j, x;
	struct netbe_lcore *lc;

	lc = user_param;

	RTE_SET_USED(queue);
	RTE_SET_USED(max_pkts);

	x = 0;
	for (j = 0; j != nb_pkts; j++) {

		NETBE_PKT_DUMP(pkt[j]);
		pkt[j] = fill_eth_tcp_arp_hdr_len(pkt[j], lc, port);
		x += (pkt[j] == NULL);
	}

	if (x == 0)
		return nb_pkts;

	return compress_pkt_list(pkt, nb_pkts, x);
}

static uint16_t
typen_tcp_rx_callback(__rte_unused uint8_t port, __rte_unused uint16_t queue,
	struct rte_mbuf *pkt[], uint16_t nb_pkts,
	__rte_unused uint16_t max_pkts, void *user_param)
{
	uint32_t j;
	struct netbe_lcore *lc;

	lc = user_param;

	RTE_SET_USED(lc);

	for (j = 0; j != nb_pkts; j++) {

		NETBE_PKT_DUMP(pkt[j]);
		fill_eth_tcp_hdr_len(pkt[j]);
	}

	return nb_pkts;
}

static uint16_t
typen_udp_rx_callback(uint8_t port, __rte_unused uint16_t queue,
	struct rte_mbuf *pkt[], uint16_t nb_pkts,
	__rte_unused uint16_t max_pkts, void *user_param)
{
	uint32_t j, x;
	uint64_t cts;
	struct netbe_lcore *lc;

	lc = user_param;
	cts = 0;

	x = 0;
	for (j = 0; j != nb_pkts; j++) {

		NETBE_PKT_DUMP(pkt[j]);
		fill_eth_udp_hdr_len(pkt[j]);

		DO_REASSEMBLE(IPPROTO_UDP);
	}

	/* reassembly was invoked, clean up its death row. */
	if (cts != 0)
		rte_ip_frag_free_death_row(&lc->death_row, 0);

	if (x == 0)
		return nb_pkts;

	NETBE_TRACE("%s(port=%u, queue=%u, nb_pkts=%u): "
		"%u non-reassembled fragments;\n",
		__func__, port, queue, nb_pkts, x);

	return compress_pkt_list(pkt, nb_pkts, x);
}

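/*
 * pkt_dpdk_legacy.h is expected to supply the DPDK-version specific glue
 * (presumably the code that installs the callbacks above as RX callbacks);
 * it is included last so it can see all the static definitions in this file.
 */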
#include "pkt_dpdk_legacy.h"