/*
 * Copyright (c) 2017  Intel Corporation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <assert.h>
#include <netinet/ip6.h>

#include <ngx_config.h>
#include <ngx_core.h>

#include "be.h"
#include <rte_version.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_errno.h>
#include <rte_lpm6.h>
#include <rte_lpm.h>
#include <rte_ip.h>
#include <rte_tcp.h>

#include <tle_tcp.h>

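/*
 * DPDK 17.11 widened ethdev port ids from 8 to 16 bits;
 * pick the matching type so the RX callback prototypes below line up.
 */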
#if RTE_VERSION_NUM(17, 11, 0, 0) <= RTE_VERSION
typedef uint16_t dpdk_port_t;
#else
typedef uint8_t dpdk_port_t;
#endif

#define RX_RING_SIZE    0x400
#define TX_RING_SIZE    0x800
#define MAX_RULES       0x100
#define MAX_TBL8        0x800

#define MPOOL_CACHE_SIZE        0x100
#define MPOOL_NB_BUF            0x20000

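/*
 * Mbufs from the "frag" pool only ever hold prepended protocol headers
 * (they become tle_dest.head_mp in fill_dst()), so headroom plus the
 * maximum header size is a sufficient buffer size.
 */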
#define FRAG_MBUF_BUF_SIZE      (RTE_PKTMBUF_HEADROOM + TLE_DST_MAX_HDR)

#define RX_CSUM_OFFLOAD (DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM)

#define TCP_MAX_PROCESS 0x20

static const struct rte_eth_conf port_conf_default = {
	.rxmode = {
		.offloads = DEV_RX_OFFLOAD_VLAN_STRIP,
	},
};

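/* Maps a set of HW-recognized packet types to the RX callback that relies on them. */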
struct ptype2cb {
	uint32_t mask;
	const char *name;
	rte_rx_callback_fn fn;
};

enum {
	ETHER_PTYPE = 0x1,
	IPV4_PTYPE = 0x2,
	IPV4_EXT_PTYPE = 0x4,
	IPV6_PTYPE = 0x8,
	IPV6_EXT_PTYPE = 0x10,
	TCP_PTYPE = 0x20,
	UDP_PTYPE = 0x40,
};

int
be_lcore_lpm_init(struct tldk_ctx *tcx, uint32_t sid,
		const struct tldk_ctx_conf *cf)
{
	ngx_uint_t worker = cf->worker;
	uint32_t lcore = cf->lcore;
	char str[RTE_LPM_NAMESIZE];

	const struct rte_lpm_config lpm4_cfg = {
		.max_rules = MAX_RULES,
		.number_tbl8s = MAX_TBL8,
	};

	const struct rte_lpm6_config lpm6_cfg = {
		.max_rules = MAX_RULES,
		.number_tbl8s = MAX_TBL8,
	};

	snprintf(str, sizeof(str), "LPM4%lu-%u", worker, lcore);
	tcx->lpm4 = rte_lpm_create(str, sid, &lpm4_cfg);
	RTE_LOG(NOTICE, USER1, "%s(worker=%lu, lcore=%u): lpm4=%p;\n",
		__func__, worker, lcore, tcx->lpm4);
	if (tcx->lpm4 == NULL)
		return -ENOMEM;

	snprintf(str, sizeof(str), "LPM6%lu-%u", worker, lcore);
	tcx->lpm6 = rte_lpm6_create(str, sid, &lpm6_cfg);
	RTE_LOG(NOTICE, USER1, "%s(worker=%lu, lcore=%u): lpm6=%p;\n",
		__func__, worker, lcore, tcx->lpm6);
	if (tcx->lpm6 == NULL) {
		rte_lpm_free(tcx->lpm4);
		return -ENOMEM;
	}

	return 0;
}

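/*
 * LPM destination lookup helpers: on a hit, copy only the fixed tle_dest
 * fields plus the valid (l2_len + l3_len) bytes of the prebuilt header.
 */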
int
be_lpm4_dst_lookup(void *data, const struct in_addr *addr,
		struct tle_dest *res)
{
	int32_t rc;
	uint32_t idx;
	struct tldk_ctx *tcx;
	struct tle_dest *dst;

	tcx = data;
	rc = rte_lpm_lookup(tcx->lpm4, rte_be_to_cpu_32(addr->s_addr), &idx);
	if (rc == 0) {
		dst = &tcx->dst4[idx];
		memcpy(res, dst, dst->l2_len + dst->l3_len +
				offsetof(struct tle_dest, hdr));
	}

	return rc;
}

int
be_lpm6_dst_lookup(void *data, const struct in6_addr *addr,
	struct tle_dest *res)
{
	int32_t rc;
	struct tldk_ctx *tcx;
	struct tle_dest *dst;
	uintptr_t p;
#if RTE_VERSION_NUM(17, 5, 0, 0) <= RTE_VERSION
	uint32_t idx;
#else
	uint8_t idx;
#endif

	tcx = data;
	p = (uintptr_t)addr->s6_addr;
	rc = rte_lpm6_lookup(tcx->lpm6, (uint8_t *)p, &idx);
	if (rc == 0) {
		dst = &tcx->dst6[idx];
		memcpy(res, dst, dst->l2_len + dst->l3_len +
				offsetof(struct tle_dest, hdr));
	}

	return rc;
}

/*
 * Initialise DPDK port.
 */
static int
port_init(const struct tldk_port_conf *pcf)
{
	int32_t rc;
	struct rte_eth_conf port_conf;
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(pcf->id, &dev_info);

	if ((dev_info.rx_offload_capa & pcf->rx_offload) != pcf->rx_offload) {
		RTE_LOG(ERR, USER1,
			"port#%u supported/requested RX offloads don't match, "
			"supported: %#" PRIx64 ", requested: %#" PRIx64 ";\n",
			pcf->id, (uint64_t)dev_info.rx_offload_capa,
			pcf->rx_offload);
		return NGX_ERROR;
	}
	if ((dev_info.tx_offload_capa & pcf->tx_offload) != pcf->tx_offload) {
		RTE_LOG(ERR, USER1,
			"port#%u supported/requested TX offloads don't match, "
			"supported: %#" PRIx64 ", requested: %#" PRIx64 ";\n",
			pcf->id, (uint64_t)dev_info.tx_offload_capa,
			pcf->tx_offload);
		return NGX_ERROR;
	}

	port_conf = port_conf_default;

	if ((pcf->rx_offload & RX_CSUM_OFFLOAD) != 0) {
		RTE_LOG(NOTICE, USER1, "%s(%u): enabling RX csum offload;\n",
			__func__, pcf->id);
		port_conf.rxmode.offloads |= pcf->rx_offload & RX_CSUM_OFFLOAD;
	}

	port_conf.rxmode.max_rx_pkt_len = pcf->mtu + ETHER_CRC_LEN;
	if (port_conf.rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
	port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
	port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP | ETH_RSS_TCP;
	port_conf.rx_adv_conf.rss_conf.rss_hf &=
		dev_info.flow_type_rss_offloads;

	port_conf.txmode.offloads = pcf->tx_offload;

	rc = rte_eth_dev_configure(pcf->id, pcf->nb_queues, pcf->nb_queues,
			&port_conf);
	RTE_LOG(NOTICE, USER1,
		"%s: rte_eth_dev_configure(prt_id=%u, nb_rxq=%u, nb_txq=%u) "
		"returns %d;\n", __func__, pcf->id, pcf->nb_queues,
		pcf->nb_queues, rc);

	if (rc != 0)
		return NGX_ERROR;

	return NGX_OK;
}

/*
 * Check that the lcore is enabled and not already in use
 * (the master lcore is exempt from the in-use check).
 */
int
be_check_lcore(uint32_t lid)
{
	if (rte_lcore_is_enabled(lid) == 0) {
		RTE_LOG(ERR, USER1, "lcore %u is not enabled\n", lid);
		return -EINVAL;
	}

	if (rte_get_master_lcore() != lid &&
		rte_eal_get_lcore_state(lid) == RUNNING) {
		RTE_LOG(ERR, USER1, "lcore %u already running %p\n",
			lid, lcore_config[lid].f);
		return -EINVAL;
	}

	return 0;
}

int
be_mpool_init(struct tldk_ctx *tcx)
{
	int32_t rc;
	uint32_t nmb, sid;
	struct rte_mempool *mp;
	char name[RTE_MEMPOOL_NAMESIZE];

	ngx_uint_t worker = tcx->cf->worker;
	uint32_t lcore = tcx->cf->lcore;

	sid = rte_lcore_to_socket_id(tcx->cf->lcore);
	nmb = (tcx->cf->nb_mbuf == 0) ? MPOOL_NB_BUF : tcx->cf->nb_mbuf;

	snprintf(name, sizeof(name), "MP%lu-%u", worker, lcore);
	mp = rte_pktmbuf_pool_create(name, nmb, MPOOL_CACHE_SIZE, 0,
			RTE_MBUF_DEFAULT_BUF_SIZE, sid);
	if (mp == NULL) {
		rc = -rte_errno;
		RTE_LOG(ERR, USER1, "%s:Mempool creation failed for "
			"ctx:wrk(%lu)-ctx:lcore(%u) with error code: %d\n",
			__func__, worker, lcore, rc);
		return rc;
	}

	tcx->mpool = mp;

	snprintf(name, sizeof(name), "frag_MP%lu-%u",
			worker, lcore);
	mp = rte_pktmbuf_pool_create(name, nmb,
			MPOOL_CACHE_SIZE, 0, FRAG_MBUF_BUF_SIZE, sid);
	if (mp == NULL) {
		rc = -rte_errno;
		RTE_LOG(ERR, USER1, "%s:Frag mempool creation failed for "
			"ctx:wrk(%lu)-ctx:lcore(%u) with error code: %d\n",
			__func__, worker, lcore, rc);
		return rc;
	}

	tcx->frag_mpool = mp;

	return 0;
}

int
be_queue_init(struct tldk_ctx *tcx, const tldk_conf_t *cf)
{
	int32_t socket, rc;
	uint16_t queue_id;
	uint32_t port_id, i, nb_rxd, nb_txd;
	struct rte_eth_dev_info dev_info;
	const struct tldk_ctx_conf *ctx;

	ctx = tcx->cf;
	for (i = 0; i < ctx->nb_dev; i++) {
		port_id = ctx->dev[i].port;
		queue_id = ctx->dev[i].queue;

		rte_eth_dev_info_get(port_id, &dev_info);

		dev_info.default_rxconf.rx_drop_en = 1;

		nb_rxd = RTE_MIN(RX_RING_SIZE, dev_info.rx_desc_lim.nb_max);
		nb_txd = RTE_MIN(TX_RING_SIZE, dev_info.tx_desc_lim.nb_max);
		dev_info.default_txconf.tx_free_thresh = nb_txd / 2;

		socket = rte_eth_dev_socket_id(port_id);

		rc = rte_eth_rx_queue_setup(port_id, queue_id, nb_rxd,
				socket, &dev_info.default_rxconf, tcx->mpool);
		if (rc < 0) {
			RTE_LOG(ERR, USER1,
				"%s: rx queue=%u setup failed with error "
				"code: %d\n", __func__, queue_id, rc);
			return rc;
		}

		rc = rte_eth_tx_queue_setup(port_id, queue_id, nb_txd,
				socket, &dev_info.default_txconf);
		if (rc < 0) {
			RTE_LOG(ERR, USER1,
				"%s: tx queue=%u setup failed with error "
				"code: %d\n", __func__, queue_id, rc);
			return rc;
		}
	}

	return 0;
}

/*
 * Setup all enabled ports.
 */
int
be_port_init(tldk_conf_t *cf)
{
	int32_t rc;
	uint32_t i;
	struct tldk_port_conf *dpf;

	for (i = 0; i != cf->nb_port; i++) {
		dpf = &cf->port[i];
		rc = port_init(dpf);
		if (rc != 0) {
			RTE_LOG(ERR, USER1,
				"%s: port=%u init failed with error code: %d\n",
				__func__, dpf->id, rc);
			return NGX_ERROR;
		}
		rte_eth_macaddr_get(dpf->id, &dpf->mac);
		rte_eth_promiscuous_enable(dpf->id);
	}

	return NGX_OK;
}

static int
be_add_ipv4_route(struct tldk_ctx *tcx, const struct tldk_dest_conf *dcf,
	uint8_t idx)
{
	int32_t rc;
	uint32_t addr, depth;
	char str[INET_ADDRSTRLEN];

	depth = dcf->prfx;
	addr = rte_be_to_cpu_32(dcf->ipv4.s_addr);

	inet_ntop(AF_INET, &dcf->ipv4, str, sizeof(str));
	rc = rte_lpm_add(tcx->lpm4, addr, depth, idx);
	RTE_LOG(NOTICE, USER1, "%s(lcore=%u,dev_id=%u,dev=%p,"
			"ipv4=%s/%u,mtu=%u,"
			"mac=%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx) "
			"returns %d;\n",
			__func__, tcx->cf->lcore, dcf->dev, tcx->dst4[idx].dev,
			str, depth, tcx->dst4[idx].mtu,
			dcf->mac.addr_bytes[0], dcf->mac.addr_bytes[1],
			dcf->mac.addr_bytes[2], dcf->mac.addr_bytes[3],
			dcf->mac.addr_bytes[4], dcf->mac.addr_bytes[5],
			rc);

	return rc;
}

static int
be_add_ipv6_route(struct tldk_ctx *tcx, const struct tldk_dest_conf *dcf,
	uint8_t idx)
{
	int32_t rc;
	uint32_t depth;
	char str[INET6_ADDRSTRLEN];

	depth = dcf->prfx;

	rc = rte_lpm6_add(tcx->lpm6, (uint8_t *)(uintptr_t)dcf->ipv6.s6_addr,
			depth, idx);

	inet_ntop(AF_INET6, &dcf->ipv6, str, sizeof(str));
	RTE_LOG(NOTICE, USER1, "%s(lcore=%u,dev_id=%u,dev=%p,"
		"ipv6=%s/%u,mtu=%u,"
		"mac=%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx) "
		"returns %d;\n",
		__func__, tcx->cf->lcore, dcf->dev, tcx->dst6[idx].dev,
		str, depth, tcx->dst6[idx].mtu,
		dcf->mac.addr_bytes[0], dcf->mac.addr_bytes[1],
		dcf->mac.addr_bytes[2], dcf->mac.addr_bytes[3],
		dcf->mac.addr_bytes[4], dcf->mac.addr_bytes[5],
		rc);

	return rc;
}

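/*
 * Pre-build the L2/L3 header template for a destination: an Ethernet header
 * with the port and peer MAC addresses followed by a skeleton IPv4 or IPv6
 * header, used by the library when building outgoing packets for this route.
 */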
static void
fill_dst(struct tle_dest *dst, const struct tldk_dev *td,
	const struct tldk_port_conf *pcf, const struct tldk_dest_conf *dest,
	uint16_t l3_type, struct rte_mempool *mp)
{
	struct ether_hdr *eth;
	struct ipv4_hdr *ip4h;
	struct ipv6_hdr *ip6h;

	dst->dev = td->dev;
	dst->head_mp = mp;
	dst->mtu = RTE_MIN(dest->mtu, pcf->mtu);
	dst->l2_len = sizeof(*eth);

	eth = (struct ether_hdr *)dst->hdr;

	ether_addr_copy(&pcf->mac, &eth->s_addr);
	ether_addr_copy(&dest->mac, &eth->d_addr);
	eth->ether_type = rte_cpu_to_be_16(l3_type);

	if (l3_type == ETHER_TYPE_IPv4) {
		dst->l3_len = sizeof(*ip4h);
		ip4h = (struct ipv4_hdr *)(eth + 1);
		ip4h->version_ihl = 4 << 4 |
			sizeof(*ip4h) / IPV4_IHL_MULTIPLIER;
		ip4h->time_to_live = 64;
		ip4h->next_proto_id = IPPROTO_TCP;
	} else if (l3_type == ETHER_TYPE_IPv6) {
		dst->l3_len = sizeof(*ip6h);
		ip6h = (struct ipv6_hdr *)(eth + 1);
		ip6h->vtc_flow = rte_cpu_to_be_32(6 << 28);
		ip6h->proto = IPPROTO_TCP;
		ip6h->hop_limits = 64;
	}
}

static int
be_add_dest(const struct tldk_dest_conf *dcf, struct tldk_ctx *tcx,
	uint32_t dev_idx, const struct tldk_port_conf *pcf, uint32_t family,
	uint32_t dnum)
{
	struct tle_dest *dp;
	uint32_t i, n, m;
	uint16_t l3_type;
	int32_t rc = 0;

	if (family == AF_INET) {
		n = tcx->dst4_num;
		dp = tcx->dst4 + n;
		m = RTE_DIM(tcx->dst4);
		l3_type = ETHER_TYPE_IPv4;
	} else {
		n = tcx->dst6_num;
		dp = tcx->dst6 + n;
		m = RTE_DIM(tcx->dst6);
		l3_type = ETHER_TYPE_IPv6;
	}

	if (n + dnum >= m) {
		RTE_LOG(ERR, USER1, "%s(lcore=%u, family=%u, dnum=%u) exceeds "
			"maximum allowed number of destinations(%u);\n",
			__func__, tcx->cf->lcore, family, dnum, m);
		return -ENOSPC;
	}

	for (i = 0; i != dnum && rc == 0; i++) {
		fill_dst(dp + i, &tcx->dev[dev_idx], pcf, dcf,
			l3_type, tcx->frag_mpool);
		if (family == AF_INET)
			rc = be_add_ipv4_route(tcx, dcf, n + i);
		else
			rc = be_add_ipv6_route(tcx, dcf, n + i);
	}

	if (family == AF_INET)
		tcx->dst4_num = n + i;
	else
		tcx->dst6_num = n + i;

	return rc;
}

int
be_dst_init(struct tldk_ctx *tcx, const tldk_conf_t *cf)
{
	uint32_t i, f, d, l, port_id;
	const struct tldk_ctx_conf *ctx_cf = tcx->cf;
	const struct tldk_dest_conf *dcf;
	const struct tldk_port_conf *pcf;
	int32_t rc = 0;

	for (i = 0; i < ctx_cf->nb_dest; i++) {
		dcf = &ctx_cf->dest[i];
		f = dcf->family;
		d = dcf->dev;
		for (l = 0; l != tcx->nb_dev; l++) {
			if (tcx->dev[l].cf.id == d) {
				/* fetch the port conf for the port
				 * associated with device
				 */
				port_id = tcx->dev[l].cf.port;
				pcf = &cf->port[port_id];
				rc = be_add_dest(dcf, tcx, l, pcf, f, 1);
				if (rc != 0) {
					RTE_LOG(ERR, USER1,
						"%s(tcx=%u, family=%u) "
						"could not add "
						"destinations(%u)\n",
						__func__, ctx_cf->lcore, f, i);
					return -ENOSPC;
				}
				break;
			}
		}
	}

	return rc;
}

int
be_add_dev(struct tldk_ctx *tcx, const tldk_conf_t *cf)
{
	int32_t rc = 0;
	uint32_t i, port_id;
	struct tle_dev_param dprm;
	const struct tldk_port_conf *pcf;

	memset(&dprm, 0, sizeof(dprm));

	/* add the tle_dev on all applicable ports of the context */
	for (i = 0; i != tcx->cf->nb_dev; i++) {

		/* get the port id associated with the device */
		port_id = tcx->cf->dev[i].port;

		/* get the port config by port id */
		pcf = &cf->port[port_id];

		/* populate the tle_dev_param struct */
		dprm.rx_offload = pcf->rx_offload;
		dprm.tx_offload = pcf->tx_offload;
		dprm.local_addr4.s_addr = pcf->ipv4;

		memcpy(&dprm.local_addr6, &pcf->ipv6,
			sizeof(pcf->ipv6));

		/* add the tle_dev */
		tcx->dev[i].dev = tle_add_dev(tcx->ctx, &dprm);

		RTE_LOG(NOTICE, USER1, "%s(port=%u), dev: %p\n",
			__func__, port_id,
			tcx->dev[i].dev);

		if (tcx->dev[i].dev == NULL)
			rc = -rte_errno;

		if (rc != 0)
			return rc;

		tcx->nb_dev++;
		tcx->dev[i].cf = tcx->cf->dev[i];
	}

	return rc;
}

static uint32_t
get_ptypes(const struct tldk_dev *td)
{
	uint32_t smask;
	int32_t i, rc;
	const uint32_t pmask = RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK |
		RTE_PTYPE_L4_MASK;

	smask = 0;
	rc = rte_eth_dev_get_supported_ptypes(td->cf.port, pmask, NULL, 0);
	if (rc < 0) {
		RTE_LOG(ERR, USER1,
			"%s(port=%u) failed to get supported ptypes;\n",
			__func__, td->cf.port);
		return smask;
	}

	uint32_t ptype[rc];
	rc = rte_eth_dev_get_supported_ptypes(td->cf.port, pmask, ptype, rc);

	for (i = 0; i != rc; i++) {
		switch (ptype[i]) {
		case RTE_PTYPE_L2_ETHER:
			smask |= ETHER_PTYPE;
			break;
		case RTE_PTYPE_L3_IPV4:
		case RTE_PTYPE_L3_IPV4_EXT_UNKNOWN:
			smask |= IPV4_PTYPE;
			break;
		case RTE_PTYPE_L3_IPV4_EXT:
			smask |= IPV4_EXT_PTYPE;
			break;
		case RTE_PTYPE_L3_IPV6:
		case RTE_PTYPE_L3_IPV6_EXT_UNKNOWN:
			smask |= IPV6_PTYPE;
			break;
		case RTE_PTYPE_L3_IPV6_EXT:
			smask |= IPV6_EXT_PTYPE;
			break;
		case RTE_PTYPE_L4_TCP:
			smask |= TCP_PTYPE;
			break;
		case RTE_PTYPE_L4_UDP:
			smask |= UDP_PTYPE;
			break;
		}
	}

	return smask;
}

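/*
 * Pack header lengths into the rte_mbuf tx_offload bit-field layout:
 * l2_len:7 | l3_len:9 | l4_len:8 | tso_segsz:16 | outer_l3_len:9 | outer_l2_len:7.
 */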
static inline uint64_t
_mbuf_tx_offload(uint64_t il2, uint64_t il3, uint64_t il4, uint64_t tso,
	uint64_t ol3, uint64_t ol2)
{
	return il2 | il3 << 7 | il4 << 16 | tso << 24 | ol3 << 40 | ol2 << 49;
}

static inline void
fill_pkt_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t l3, uint32_t l4)
{
	m->tx_offload = _mbuf_tx_offload(l2, l3, l4, 0, 0, 0);
}

static inline int
is_ipv4_frag(const struct ipv4_hdr *iph)
{
	const uint16_t mask = rte_cpu_to_be_16(~IPV4_HDR_DF_FLAG);

	return ((mask & iph->fragment_offset) != 0);
}

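/* TCP header length: the upper 4 bits of data_off give the size in 32-bit words. */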
static inline uint32_t
get_tcp_header_size(struct rte_mbuf *m, uint32_t l2_len, uint32_t l3_len)
{
	const struct tcp_hdr *tcp;

	tcp = rte_pktmbuf_mtod_offset(m, struct tcp_hdr *, l2_len + l3_len);
	return (tcp->data_off >> 4) * 4;
}

static inline void
adjust_ipv4_pktlen(struct rte_mbuf *m, uint32_t l2_len)
{
	uint32_t plen, trim;
	const struct ipv4_hdr *iph;

	iph = rte_pktmbuf_mtod_offset(m, const struct ipv4_hdr *, l2_len);
	plen = rte_be_to_cpu_16(iph->total_length) + l2_len;
	if (plen < m->pkt_len) {
		trim = m->pkt_len - plen;
		rte_pktmbuf_trim(m, trim);
	}
}

static inline void
adjust_ipv6_pktlen(struct rte_mbuf *m, uint32_t l2_len)
{
	uint32_t plen, trim;
	const struct ipv6_hdr *iph;

	iph = rte_pktmbuf_mtod_offset(m, const struct ipv6_hdr *, l2_len);
	plen = rte_be_to_cpu_16(iph->payload_len) + sizeof(*iph) + l2_len;
	if (plen < m->pkt_len) {
		trim = m->pkt_len - plen;
		rte_pktmbuf_trim(m, trim);
	}
}

static inline void
tcp_stat_update(struct tldk_ctx *lc, const struct rte_mbuf *m,
	uint32_t l2_len, uint32_t l3_len)
{
	const struct tcp_hdr *th;

	th = rte_pktmbuf_mtod_offset(m, struct tcp_hdr *, l2_len + l3_len);
	lc->tcp_stat.flags[th->tcp_flags]++;
}

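/*
 * IPv4 header length from IHL; also marks fragments as RTE_PTYPE_L4_FRAG and
 * resets the packet type to UNKNOWN on truncated headers or protocol mismatch.
 */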
static inline uint32_t
get_ipv4_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t proto, uint32_t frag)
{
	const struct ipv4_hdr *iph;
	int32_t dlen, len;

	dlen = rte_pktmbuf_data_len(m);
	dlen -= l2;

	iph = rte_pktmbuf_mtod_offset(m, const struct ipv4_hdr *, l2);
	len = (iph->version_ihl & IPV4_HDR_IHL_MASK) * IPV4_IHL_MULTIPLIER;

	if (frag != 0 && is_ipv4_frag(iph)) {
		m->packet_type &= ~RTE_PTYPE_L4_MASK;
		m->packet_type |= RTE_PTYPE_L4_FRAG;
	}

	if (len > dlen || (proto <= IPPROTO_MAX && iph->next_proto_id != proto))
		m->packet_type = RTE_PTYPE_UNKNOWN;

	return len;
}

static inline int
ipv6x_hdr(uint32_t proto)
{
	return (proto == IPPROTO_HOPOPTS ||
		proto == IPPROTO_ROUTING ||
		proto == IPPROTO_FRAGMENT ||
		proto == IPPROTO_AH ||
		proto == IPPROTO_NONE ||
		proto == IPPROTO_DSTOPTS);
}

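/*
 * Walk the IPv6 extension header chain (hop-by-hop, routing, fragment, AH,
 * destination options), accumulating the total L3 header length until the
 * expected L4 protocol is reached.
 */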
static inline uint32_t
get_ipv6x_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t nproto,
	uint32_t fproto)
{
	const struct ip6_ext *ipx;
	int32_t dlen, len, ofs;

	len = sizeof(struct ipv6_hdr);

	dlen = rte_pktmbuf_data_len(m);
	dlen -= l2;

	ofs = l2 + len;
	ipx = rte_pktmbuf_mtod_offset(m, const struct ip6_ext *, ofs);

	while (ofs > 0 && len < dlen) {

		switch (nproto) {
		case IPPROTO_HOPOPTS:
		case IPPROTO_ROUTING:
		case IPPROTO_DSTOPTS:
			ofs = (ipx->ip6e_len + 1) << 3;
			break;
		case IPPROTO_AH:
			ofs = (ipx->ip6e_len + 2) << 2;
			break;
		case IPPROTO_FRAGMENT:
			/*
			 * tso_segsz is not used by RX, so use it as temporary
			 * buffer to store the fragment offset.
			 */
			m->tso_segsz = ofs;
			ofs = sizeof(struct ip6_frag);
			m->packet_type &= ~RTE_PTYPE_L4_MASK;
			m->packet_type |= RTE_PTYPE_L4_FRAG;
			break;
		default:
			ofs = 0;
		}

		if (ofs > 0) {
			nproto = ipx->ip6e_nxt;
			len += ofs;
			ipx += ofs / sizeof(*ipx);
		}
	}

	/* unrecognized or invalid packet. */
	if ((ofs == 0 && nproto != fproto) || len > dlen)
		m->packet_type = RTE_PTYPE_UNKNOWN;

	return len;
}

static inline uint32_t
get_ipv6_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t fproto)
{
	const struct ipv6_hdr *iph;

	iph = rte_pktmbuf_mtod_offset(m, const struct ipv6_hdr *, l2);

	if (iph->proto == fproto)
		return sizeof(struct ipv6_hdr);
	else if (ipv6x_hdr(iph->proto) != 0)
		return get_ipv6x_hdr_len(m, l2, iph->proto, fproto);

	m->packet_type = RTE_PTYPE_UNKNOWN;
	return 0;
}

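/*
 * Software header parsing path, used when the NIC reports no usable ptypes:
 * classify the packet and fill the l2/l3/l4 lengths from the headers themselves.
 */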
static inline void
fill_eth_tcp_hdr_len(struct rte_mbuf *m)
{
	uint32_t dlen, l2_len, l3_len, l4_len;
	uint16_t etp;
	const struct ether_hdr *eth;

	dlen = rte_pktmbuf_data_len(m);

	/* check that first segment is at least 54B long. */
	if (dlen < sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) +
			sizeof(struct tcp_hdr)) {
		m->packet_type = RTE_PTYPE_UNKNOWN;
		return;
	}

	l2_len = sizeof(*eth);

	eth = rte_pktmbuf_mtod(m, const struct ether_hdr *);
	etp = eth->ether_type;
	if (etp == rte_be_to_cpu_16(ETHER_TYPE_VLAN))
		l2_len += sizeof(struct vlan_hdr);

	if (etp == rte_be_to_cpu_16(ETHER_TYPE_IPv4)) {
		m->packet_type = RTE_PTYPE_L4_TCP |
			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L2_ETHER;
		l3_len = get_ipv4_hdr_len(m, l2_len, IPPROTO_TCP, 1);
		l4_len = get_tcp_header_size(m, l2_len, l3_len);
		fill_pkt_hdr_len(m, l2_len, l3_len, l4_len);
		adjust_ipv4_pktlen(m, l2_len);
	} else if (etp == rte_be_to_cpu_16(ETHER_TYPE_IPv6) &&
			dlen >= l2_len + sizeof(struct ipv6_hdr) +
			sizeof(struct tcp_hdr)) {
		m->packet_type = RTE_PTYPE_L4_TCP |
			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L2_ETHER;
		l3_len = get_ipv6_hdr_len(m, l2_len, IPPROTO_TCP);
		l4_len = get_tcp_header_size(m, l2_len, l3_len);
		fill_pkt_hdr_len(m, l2_len, l3_len, l4_len);
		adjust_ipv6_pktlen(m, l2_len);
	} else
		m->packet_type = RTE_PTYPE_UNKNOWN;
}

/*
 * HW can recognize L2/L3 with/without extensions/L4 (ixgbe/igb/fm10k)
 */
static uint16_t
type0_tcp_rx_callback(__rte_unused dpdk_port_t port,
	__rte_unused uint16_t queue,
	struct rte_mbuf *pkt[], uint16_t nb_pkts,
	__rte_unused uint16_t max_pkts, __rte_unused void *user_param)
{
	uint32_t j, tp;
	uint32_t l4_len, l3_len, l2_len;
	const struct ether_hdr *eth;

	l2_len = sizeof(*eth);

	for (j = 0; j != nb_pkts; j++) {

		BE_PKT_DUMP(pkt[j]);

		tp = pkt[j]->packet_type & (RTE_PTYPE_L4_MASK |
			RTE_PTYPE_L3_MASK | RTE_PTYPE_L2_MASK);

		switch (tp) {
		/* non fragmented tcp packets. */
		case (RTE_PTYPE_L4_TCP | RTE_PTYPE_L3_IPV4 |
				RTE_PTYPE_L2_ETHER):
			l4_len = get_tcp_header_size(pkt[j], l2_len,
				sizeof(struct ipv4_hdr));
			fill_pkt_hdr_len(pkt[j], l2_len,
				sizeof(struct ipv4_hdr), l4_len);
			adjust_ipv4_pktlen(pkt[j], l2_len);
			break;
		case (RTE_PTYPE_L4_TCP | RTE_PTYPE_L3_IPV6 |
				RTE_PTYPE_L2_ETHER):
			l4_len = get_tcp_header_size(pkt[j], l2_len,
				sizeof(struct ipv6_hdr));
			fill_pkt_hdr_len(pkt[j], l2_len,
				sizeof(struct ipv6_hdr), l4_len);
			adjust_ipv6_pktlen(pkt[j], l2_len);
			break;
		case (RTE_PTYPE_L4_TCP | RTE_PTYPE_L3_IPV4_EXT |
				RTE_PTYPE_L2_ETHER):
			l3_len = get_ipv4_hdr_len(pkt[j], l2_len,
				IPPROTO_TCP, 0);
			l4_len = get_tcp_header_size(pkt[j], l2_len, l3_len);
			fill_pkt_hdr_len(pkt[j], l2_len, l3_len, l4_len);
			adjust_ipv4_pktlen(pkt[j], l2_len);
			break;
		case (RTE_PTYPE_L4_TCP | RTE_PTYPE_L3_IPV6_EXT |
				RTE_PTYPE_L2_ETHER):
			l3_len = get_ipv6_hdr_len(pkt[j], l2_len, IPPROTO_TCP);
			l4_len = get_tcp_header_size(pkt[j], l2_len, l3_len);
			fill_pkt_hdr_len(pkt[j], l2_len, l3_len, l4_len);
			adjust_ipv6_pktlen(pkt[j], l2_len);
			break;
		default:
			/* treat packet types as invalid. */
			pkt[j]->packet_type = RTE_PTYPE_UNKNOWN;
			break;
		}
	}

	return nb_pkts;
}

/*
 * HW can recognize L2/L3/L4 and fragments (i40e).
 */
static uint16_t
type1_tcp_rx_callback(__rte_unused dpdk_port_t port,
	__rte_unused uint16_t queue,
	struct rte_mbuf *pkt[], uint16_t nb_pkts,
	__rte_unused uint16_t max_pkts, void *user_param)
{
	uint32_t j, tp;
	struct tldk_ctx *tcx;
	uint32_t l4_len, l3_len, l2_len;
	const struct ether_hdr *eth;

	tcx = user_param;
	l2_len = sizeof(*eth);

	for (j = 0; j != nb_pkts; j++) {

		BE_PKT_DUMP(pkt[j]);

		tp = pkt[j]->packet_type & (RTE_PTYPE_L4_MASK |
			RTE_PTYPE_L3_MASK | RTE_PTYPE_L2_MASK);

		switch (tp) {
		case (RTE_PTYPE_L4_TCP | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
				RTE_PTYPE_L2_ETHER):
			l3_len = get_ipv4_hdr_len(pkt[j], l2_len,
				IPPROTO_TCP, 0);
			l4_len = get_tcp_header_size(pkt[j], l2_len, l3_len);
			fill_pkt_hdr_len(pkt[j], l2_len, l3_len, l4_len);
			adjust_ipv4_pktlen(pkt[j], l2_len);
			tcp_stat_update(tcx, pkt[j], l2_len, l3_len);
			break;
		case (RTE_PTYPE_L4_TCP | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
				RTE_PTYPE_L2_ETHER):
			l3_len = get_ipv6_hdr_len(pkt[j], l2_len, IPPROTO_TCP);
			l4_len = get_tcp_header_size(pkt[j], l2_len, l3_len);
			fill_pkt_hdr_len(pkt[j], l2_len, l3_len, l4_len);
			adjust_ipv6_pktlen(pkt[j], l2_len);
			tcp_stat_update(tcx, pkt[j], l2_len, l3_len);
			break;
		default:
			/* treat packet types as invalid. */
			pkt[j]->packet_type = RTE_PTYPE_UNKNOWN;
			break;
		}

	}

	return nb_pkts;
}

static uint16_t
typen_tcp_rx_callback(__rte_unused dpdk_port_t port,
	__rte_unused uint16_t queue,
	struct rte_mbuf *pkt[], uint16_t nb_pkts,
	__rte_unused uint16_t max_pkts, __rte_unused void *user_param)
{
	uint32_t j;

	for (j = 0; j != nb_pkts; j++) {

		BE_PKT_DUMP(pkt[j]);
		fill_eth_tcp_hdr_len(pkt[j]);
	}

	return nb_pkts;
}

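/*
 * Pick the most specific RX callback the port's reported ptype support allows,
 * falling back to full software parsing when no ptypes are recognized.
 */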
int
setup_rx_cb(const struct tldk_dev *td, struct tldk_ctx *tcx)
{
	int32_t rc;
	uint32_t i, n, smask;
	const void *cb;
	const struct ptype2cb *ptype2cb;

	static const struct ptype2cb tcp_ptype2cb[] = {
		{
			.mask = ETHER_PTYPE | IPV4_PTYPE | IPV4_EXT_PTYPE |
				IPV6_PTYPE | IPV6_EXT_PTYPE | TCP_PTYPE,
			.name = "HW l2/l3x/l4-tcp ptype",
			.fn = type0_tcp_rx_callback,
		},
		{
			.mask = ETHER_PTYPE | IPV4_PTYPE | IPV6_PTYPE |
				TCP_PTYPE,
			.name = "HW l2/l3/l4-tcp ptype",
			.fn = type1_tcp_rx_callback,
		},
		{
			.mask = 0,
			.name = "tcp no HW ptype",
			.fn = typen_tcp_rx_callback,
		},
	};

	smask = get_ptypes(td);

	ptype2cb = tcp_ptype2cb;
	n = RTE_DIM(tcp_ptype2cb);

	for (i = 0; i != n; i++) {
		if ((smask & ptype2cb[i].mask) == ptype2cb[i].mask) {
			cb = rte_eth_add_rx_callback(td->cf.port, td->cf.queue,
				ptype2cb[i].fn, tcx);
			rc = -rte_errno;
			RTE_LOG(ERR, USER1,
				"%s(port=%u), setup RX callback \"%s\" "
				"returns %p;\n",
				__func__, td->cf.port, ptype2cb[i].name, cb);
			return ((cb == NULL) ? rc : 0);
		}
	}

	/* no proper callback found. */
	RTE_LOG(ERR, USER1,
		"%s(port=%u) failed to find an appropriate callback;\n",
		__func__, td->cf.port);
	return -ENOENT;
}

int
be_lcore_setup(struct tldk_ctx *tcx)
{
	uint32_t i;
	int32_t rc;

	RTE_LOG(NOTICE, USER1, "%s:(lcore=%u, ctx=%p) start\n",
		__func__, tcx->cf->lcore, tcx->ctx);

	rc = 0;
	for (i = 0; i != tcx->nb_dev && rc == 0; i++) {
		RTE_LOG(NOTICE, USER1, "%s:%u(port=%u, q=%u)\n",
			__func__, i, tcx->dev[i].cf.port, tcx->dev[i].cf.queue);

		rc = setup_rx_cb(&tcx->dev[i], tcx);
		if (rc < 0)
			return rc;
	}

	return rc;
}

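/*
 * RX path: receive a burst from the device queue, hand it to
 * tle_tcp_rx_bulk() and free any mbufs the library rejected.
 */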
static inline void
be_rx(struct tldk_dev *dev)
{
	uint32_t j, k, n;
	struct rte_mbuf *pkt[MAX_PKT_BURST];
	struct rte_mbuf *rp[MAX_PKT_BURST];
	int32_t rc[MAX_PKT_BURST];

	n = rte_eth_rx_burst(dev->cf.port,
		dev->cf.queue, pkt, RTE_DIM(pkt));

	if (n != 0) {
		dev->rx_stat.in += n;
		BE_TRACE("%s(%u): rte_eth_rx_burst(%u, %u) returns %u\n",
			__func__, dev->cf.id, dev->cf.port,
			dev->cf.queue, n);

		k = tle_tcp_rx_bulk(dev->dev, pkt, rp, rc, n);

		dev->rx_stat.up += k;
		dev->rx_stat.drop += n - k;
		BE_TRACE("%s: tle_tcp_rx_bulk(%p, %u) returns %u\n",
			__func__, dev->dev, n, k);

		for (j = 0; j != n - k; j++) {
			BE_TRACE("%s:%d(port=%u) rp[%u]={%p, %d};\n",
				__func__, __LINE__, dev->cf.port,
				j, rp[j], rc[j]);
			rte_pktmbuf_free(rp[j]);
		}
	}
}

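/*
 * TX path: top up the per-device TX buffer from tle_tcp_tx_bulk() when it is
 * at least half empty, transmit what we can and shift the unsent mbufs to the
 * front of the buffer for the next call.
 */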
static inline void
be_tx(struct tldk_dev *dev)
{
	uint32_t j = 0, k, n;
	struct rte_mbuf **mb;

	n = dev->tx_buf.num;
	k = RTE_DIM(dev->tx_buf.pkt) - n;
	mb = dev->tx_buf.pkt;

	if (k >= RTE_DIM(dev->tx_buf.pkt) / 2) {
		j = tle_tcp_tx_bulk(dev->dev, mb + n, k);
		n += j;
		dev->tx_stat.down += j;
	}

	if (n == 0)
		return;

	BE_TRACE("%s: tle_tcp_tx_bulk(%p) returns %u,\n"
		"total pkts to send: %u\n",
		__func__, dev->dev, j, n);

	for (j = 0; j != n; j++)
		BE_PKT_DUMP(mb[j]);

	k = rte_eth_tx_burst(dev->cf.port,
			dev->cf.queue, mb, n);

	dev->tx_stat.out += k;
	dev->tx_stat.drop += n - k;
	BE_TRACE("%s: rte_eth_tx_burst(%u, %u, %u) returns %u\n",
		__func__, dev->cf.port,
		dev->cf.queue, n, k);

	dev->tx_buf.num = n - k;
	if (k != 0)
		for (j = k; j != n; j++)
			mb[j - k] = mb[j];
}

void
be_lcore_tcp(struct tldk_ctx *tcx)
{
	uint32_t i;

	if (tcx == NULL)
		return;

	for (i = 0; i != tcx->nb_dev; i++) {
		be_rx(&tcx->dev[i]);
		be_tx(&tcx->dev[i]);
	}
	tle_tcp_process(tcx->ctx, TCP_MAX_PROCESS);
}

void
be_lcore_clear(struct tldk_ctx *tcx)
{
	uint32_t i, j;

	if (tcx == NULL)
		return;

	RTE_LOG(NOTICE, USER1, "%s(lcore=%u, ctx: %p) finish\n",
		__func__, tcx->cf->lcore, tcx->ctx);
	for (i = 0; i != tcx->nb_dev; i++) {
		RTE_LOG(NOTICE, USER1, "%s:%u(port=%u, q=%u, lcore=%u, dev=%p) "
			"rx_stats={"
			"in=%" PRIu64 ",up=%" PRIu64 ",drop=%" PRIu64 "}, "
			"tx_stats={"
			"down=%" PRIu64 ",out=%" PRIu64 ",drop=%" PRIu64 "};\n",
			__func__, i, tcx->dev[i].cf.port, tcx->dev[i].cf.queue,
			tcx->cf->lcore,
			tcx->dev[i].dev,
			tcx->dev[i].rx_stat.in,
			tcx->dev[i].rx_stat.up,
			tcx->dev[i].rx_stat.drop,
			tcx->dev[i].tx_stat.down,
			tcx->dev[i].tx_stat.out,
			tcx->dev[i].tx_stat.drop);
	}

	RTE_LOG(NOTICE, USER1, "tcp_stat={\n");
	for (i = 0; i != RTE_DIM(tcx->tcp_stat.flags); i++) {
		if (tcx->tcp_stat.flags[i] != 0)
			RTE_LOG(NOTICE, USER1, "[flag=%#x]==%" PRIu64 ";\n",
				i, tcx->tcp_stat.flags[i]);
	}
	RTE_LOG(NOTICE, USER1, "};\n");

	for (i = 0; i != tcx->nb_dev; i++)
		for (j = 0; j != tcx->dev[i].tx_buf.num; j++)
			rte_pktmbuf_free(tcx->dev[i].tx_buf.pkt[j]);
}

void
be_stop_port(uint32_t port)
{
	struct rte_eth_stats stats;

	RTE_LOG(NOTICE, USER1, "%s: stopping port %u\n", __func__, port);

	rte_eth_stats_get(port, &stats);
	RTE_LOG(NOTICE, USER1, "port %u stats={\n"
		"ipackets=%" PRIu64 ";"
		"ibytes=%" PRIu64 ";"
		"ierrors=%" PRIu64 ";"
		"imissed=%" PRIu64 ";\n"
		"opackets=%" PRIu64 ";"
		"obytes=%" PRIu64 ";"
		"oerrors=%" PRIu64 ";\n"
		"}\n",
		port,
		stats.ipackets,
		stats.ibytes,
		stats.ierrors,
		stats.imissed,
		stats.opackets,
		stats.obytes,
		stats.oerrors);
	rte_eth_dev_stop(port);
}

int
be_lcore_main(void *arg)
{
	int32_t rc;
	uint32_t lid, i;
	struct tldk_ctx *tcx;
	struct lcore_ctxs_list *lc_ctx;

	lc_ctx = arg;
	lid = rte_lcore_id();

	RTE_LOG(NOTICE, USER1, "%s(lcore=%u) start\n", __func__, lid);

	rc = 0;
	while (force_quit == 0) {
		for (i = 0; i < lc_ctx->nb_ctxs; i++) {
			tcx = lc_ctx->ctxs[i];
			be_lcore_tcp(tcx);
		}
	}

	RTE_LOG(NOTICE, USER1, "%s(lcore=%u) finish\n", __func__, lid);

	return rc;
}