/*
 * Copyright (c) 2017  Intel Corporation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <assert.h>
#include <netinet/ip6.h>

#include <ngx_config.h>
#include <ngx_core.h>

#include "be.h"
#include <rte_version.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_errno.h>
#include <rte_lpm6.h>
#include <rte_lpm.h>
#include <rte_ip.h>
#include <rte_tcp.h>

#include <tle_tcp.h>

#if RTE_VERSION_NUM(17, 11, 0, 0) <= RTE_VERSION
typedef uint16_t dpdk_port_t;
#else
typedef uint8_t dpdk_port_t;
#endif

#define RX_RING_SIZE    0x400
#define TX_RING_SIZE    0x800
#define MAX_RULES       0x100
#define MAX_TBL8        0x800

#define MPOOL_CACHE_SIZE        0x100
#define MPOOL_NB_BUF            0x20000

#define FRAG_MBUF_BUF_SIZE      (RTE_PKTMBUF_HEADROOM + TLE_DST_MAX_HDR)

#define RX_CSUM_OFFLOAD (DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM)

#define TCP_MAX_PROCESS 0x20

static const struct rte_eth_conf port_conf_default = {
	.rxmode = {
		.hw_vlan_strip = 1,
	},
};

struct ptype2cb {
	uint32_t mask;
	const char *name;
	rte_rx_callback_fn fn;
};

enum {
	ETHER_PTYPE = 0x1,
	IPV4_PTYPE = 0x2,
	IPV4_EXT_PTYPE = 0x4,
	IPV6_PTYPE = 0x8,
	IPV6_EXT_PTYPE = 0x10,
	TCP_PTYPE = 0x20,
	UDP_PTYPE = 0x40,
};

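/*
 * Create the per-context LPM tables used for IPv4/IPv6 route lookups.
 * Both tables are named after the worker/lcore pair and allocated on
 * the given socket.
 */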
int
be_lcore_lpm_init(struct tldk_ctx *tcx, uint32_t sid,
		const struct tldk_ctx_conf *cf)
{
	ngx_uint_t worker = cf->worker;
	uint32_t lcore = cf->lcore;
	char str[RTE_LPM_NAMESIZE];

	const struct rte_lpm_config lpm4_cfg = {
		.max_rules = MAX_RULES,
		.number_tbl8s = MAX_TBL8,
	};

	const struct rte_lpm6_config lpm6_cfg = {
		.max_rules = MAX_RULES,
		.number_tbl8s = MAX_TBL8,
	};

	snprintf(str, sizeof(str), "LPM4%lu-%u", worker, lcore);
	tcx->lpm4 = rte_lpm_create(str, sid, &lpm4_cfg);
	RTE_LOG(NOTICE, USER1, "%s(worker=%lu, lcore=%u): lpm4=%p;\n",
		__func__, worker, lcore, tcx->lpm4);
	if (tcx->lpm4 == NULL)
		return -ENOMEM;

	snprintf(str, sizeof(str), "LPM6%lu-%u", worker, lcore);
	tcx->lpm6 = rte_lpm6_create(str, sid, &lpm6_cfg);
	RTE_LOG(NOTICE, USER1, "%s(worker=%lu, lcore=%u): lpm6=%p;\n",
		__func__, worker, lcore, tcx->lpm6);
	if (tcx->lpm6 == NULL) {
		rte_lpm_free(tcx->lpm4);
		return -ENOMEM;
	}

	return 0;
}

int
be_lpm4_dst_lookup(void *data, const struct in_addr *addr,
		struct tle_dest *res)
{
	int32_t rc;
	uint32_t idx;
	struct tldk_ctx *tcx;
	struct tle_dest *dst;

	tcx = data;
	rc = rte_lpm_lookup(tcx->lpm4, rte_be_to_cpu_32(addr->s_addr), &idx);
	if (rc == 0) {
		dst = &tcx->dst4[idx];
		memcpy(res, dst, dst->l2_len + dst->l3_len +
				offsetof(struct tle_dest, hdr));
	}

	return rc;
}

int
be_lpm6_dst_lookup(void *data, const struct in6_addr *addr,
	struct tle_dest *res)
{
	int32_t rc;
	struct tldk_ctx *tcx;
	struct tle_dest *dst;
	uintptr_t p;
#if RTE_VERSION_NUM(17, 5, 0, 0) <= RTE_VERSION
	uint32_t idx;
#else
	uint8_t idx;
#endif

	tcx = data;
	p = (uintptr_t)addr->s6_addr;
	rc = rte_lpm6_lookup(tcx->lpm6, (uint8_t *)p, &idx);
	if (rc == 0) {
		dst = &tcx->dst6[idx];
		memcpy(res, dst, dst->l2_len + dst->l3_len +
				offsetof(struct tle_dest, hdr));
	}

	return rc;
}

/*
 * Initialise DPDK port.
 */
static int
port_init(const struct tldk_port_conf *pcf)
{
	int32_t rc;
	struct rte_eth_conf port_conf;
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(pcf->id, &dev_info);

	if ((dev_info.rx_offload_capa & pcf->rx_offload) != pcf->rx_offload) {
		RTE_LOG(ERR, USER1,
			"port#%u supported/requested RX offloads don't match, "
			"supported: %#" PRIx64 ", requested: %#" PRIx64 ";\n",
			pcf->id, (uint64_t)dev_info.rx_offload_capa,
			pcf->rx_offload);
		return NGX_ERROR;
	}
	if ((dev_info.tx_offload_capa & pcf->tx_offload) != pcf->tx_offload) {
		RTE_LOG(ERR, USER1,
			"port#%u supported/requested TX offloads don't match, "
			"supported: %#" PRIx64 ", requested: %#" PRIx64 ";\n",
			pcf->id, (uint64_t)dev_info.tx_offload_capa,
			pcf->tx_offload);
		return NGX_ERROR;
	}

	port_conf = port_conf_default;

	if ((pcf->rx_offload & RX_CSUM_OFFLOAD) != 0) {
		RTE_LOG(NOTICE, USER1, "%s(%u): enabling RX csum offload;\n",
			__func__, pcf->id);
		port_conf.rxmode.hw_ip_checksum = 1;
	}

	port_conf.rxmode.max_rx_pkt_len = pcf->mtu + ETHER_CRC_LEN;
	if (port_conf.rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
		port_conf.rxmode.jumbo_frame = 1;
	port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
	port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP | ETH_RSS_TCP;

	rc = rte_eth_dev_configure(pcf->id, pcf->nb_queues, pcf->nb_queues,
			&port_conf);
	RTE_LOG(NOTICE, USER1,
		"%s: rte_eth_dev_configure(prt_id=%u, nb_rxq=%u, nb_txq=%u) "
		"returns %d;\n", __func__, pcf->id, pcf->nb_queues,
		pcf->nb_queues, rc);

	if (rc != 0)
		return NGX_ERROR;

	return NGX_OK;
}

/*
 * Check that lcore is enabled, not master, and not in use already.
 */
int
be_check_lcore(uint32_t lid)
{
	if (rte_lcore_is_enabled(lid) == 0) {
		RTE_LOG(ERR, USER1, "lcore %u is not enabled\n", lid);
		return -EINVAL;
	}

	if (rte_get_master_lcore() != lid &&
		rte_eal_get_lcore_state(lid) == RUNNING) {
		RTE_LOG(ERR, USER1, "lcore %u already running %p\n",
			lid, lcore_config[lid].f);
		return -EINVAL;
	}

	return 0;
}

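/*
 * Create the per-context mbuf mempools: one for the packets themselves
 * and a small-buffer pool that is used as the header mempool
 * (tle_dest.head_mp) for outgoing packets.
 */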
int
be_mpool_init(struct tldk_ctx *tcx)
{
	int32_t rc;
	uint32_t nmb, sid;
	struct rte_mempool *mp;
	char name[RTE_MEMPOOL_NAMESIZE];

	ngx_uint_t worker = tcx->cf->worker;
	uint32_t lcore = tcx->cf->lcore;

	sid = rte_lcore_to_socket_id(tcx->cf->lcore);
	nmb = (tcx->cf->nb_mbuf == 0) ? MPOOL_NB_BUF : tcx->cf->nb_mbuf;

	snprintf(name, sizeof(name), "MP%lu-%u", worker, lcore);
	mp = rte_pktmbuf_pool_create(name, nmb, MPOOL_CACHE_SIZE, 0,
			RTE_MBUF_DEFAULT_BUF_SIZE, sid);
	if (mp == NULL) {
		rc = -rte_errno;
		RTE_LOG(ERR, USER1, "%s:Mempool creation failed for "
			"ctx:wrk(%lu)-ctx:lcore(%u) with error code: %d\n",
			__func__, worker, lcore, rc);
		return rc;
	}

	tcx->mpool = mp;

	snprintf(name, sizeof(name), "frag_MP%lu-%u",
			worker, lcore);
	mp = rte_pktmbuf_pool_create(name, nmb,
			MPOOL_CACHE_SIZE, 0, FRAG_MBUF_BUF_SIZE, sid);
	if (mp == NULL) {
		rc = -rte_errno;
		RTE_LOG(ERR, USER1, "%s:Frag mempool creation failed for "
			"ctx:wrk(%lu)-ctx:lcore(%u) with error code: %d\n",
			__func__, worker, lcore, rc);
		return rc;
	}

	tcx->frag_mpool = mp;

	return 0;
}

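/*
 * Set up one RX and one TX queue for every device assigned to the
 * context, using the defaults reported by the PMD for that port.
 */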
int
be_queue_init(struct tldk_ctx *tcx, const tldk_conf_t *cf)
{
	int32_t socket, rc;
	uint16_t queue_id;
	uint32_t port_id, i, nb_rxd, nb_txd;
	struct rte_eth_dev_info dev_info;
	const struct tldk_ctx_conf *ctx;
	const struct tldk_port_conf *pcf;

	ctx = tcx->cf;
	for (i = 0; i < ctx->nb_dev; i++) {
		port_id = ctx->dev[i].port;
		queue_id = ctx->dev[i].queue;
		pcf = &cf->port[port_id];

		rte_eth_dev_info_get(port_id, &dev_info);

		dev_info.default_rxconf.rx_drop_en = 1;

		nb_rxd = RTE_MIN(RX_RING_SIZE, dev_info.rx_desc_lim.nb_max);
		nb_txd = RTE_MIN(TX_RING_SIZE, dev_info.tx_desc_lim.nb_max);
		dev_info.default_txconf.tx_free_thresh = nb_txd / 2;

		if (pcf->tx_offload != 0) {
			RTE_LOG(NOTICE, USER1,
				"%s(port=%u): enabling full featured TX;\n",
				__func__, port_id);
			dev_info.default_txconf.txq_flags = 0;
		}

		socket = rte_eth_dev_socket_id(port_id);

		rc = rte_eth_rx_queue_setup(port_id, queue_id, nb_rxd,
				socket, &dev_info.default_rxconf, tcx->mpool);
		if (rc < 0) {
			RTE_LOG(ERR, USER1,
				"%s: rx queue=%u setup failed with error "
				"code: %d\n", __func__, queue_id, rc);
			return rc;
		}

		rc = rte_eth_tx_queue_setup(port_id, queue_id, nb_txd,
				socket, &dev_info.default_txconf);
		if (rc < 0) {
			RTE_LOG(ERR, USER1,
				"%s: tx queue=%u setup failed with error "
				"code: %d\n", __func__, queue_id, rc);
			return rc;
		}
	}

	return 0;
}

/*
 * Setup all enabled ports.
 */
int
be_port_init(tldk_conf_t *cf)
{
	int32_t rc;
	uint32_t i;
	struct tldk_port_conf *dpf;

	for (i = 0; i != cf->nb_port; i++) {
		dpf = &cf->port[i];
		rc = port_init(dpf);
		if (rc != 0) {
			RTE_LOG(ERR, USER1,
				"%s: port=%u init failed with error code: %d\n",
				__func__, dpf->id, rc);
			return NGX_ERROR;
		}
		rte_eth_macaddr_get(dpf->id, &dpf->mac);
		rte_eth_promiscuous_enable(dpf->id);
	}

	return NGX_OK;
}

static int
be_add_ipv4_route(struct tldk_ctx *tcx, const struct tldk_dest_conf *dcf,
	uint8_t idx)
{
	int32_t rc;
	uint32_t addr, depth;
	char str[INET_ADDRSTRLEN];

	depth = dcf->prfx;
	addr = rte_be_to_cpu_32(dcf->ipv4.s_addr);

	inet_ntop(AF_INET, &dcf->ipv4, str, sizeof(str));
	rc = rte_lpm_add(tcx->lpm4, addr, depth, idx);
	RTE_LOG(NOTICE, USER1, "%s(lcore=%u,dev_id=%u,dev=%p,"
			"ipv4=%s/%u,mtu=%u,"
			"mac=%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx) "
			"returns %d;\n",
			__func__, tcx->cf->lcore, dcf->dev, tcx->dst4[idx].dev,
			str, depth, tcx->dst4[idx].mtu,
			dcf->mac.addr_bytes[0], dcf->mac.addr_bytes[1],
			dcf->mac.addr_bytes[2], dcf->mac.addr_bytes[3],
			dcf->mac.addr_bytes[4], dcf->mac.addr_bytes[5],
			rc);

	return rc;
}

static int
be_add_ipv6_route(struct tldk_ctx *tcx, const struct tldk_dest_conf *dcf,
	uint8_t idx)
{
	int32_t rc;
	uint32_t depth;
	char str[INET6_ADDRSTRLEN];

	depth = dcf->prfx;

	rc = rte_lpm6_add(tcx->lpm6, (uint8_t *)(uintptr_t)dcf->ipv6.s6_addr,
			depth, idx);

	inet_ntop(AF_INET6, &dcf->ipv6, str, sizeof(str));
	RTE_LOG(NOTICE, USER1, "%s(lcore=%u,dev_id=%u,dev=%p,"
		"ipv6=%s/%u,mtu=%u,"
		"mac=%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx) "
		"returns %d;\n",
		__func__, tcx->cf->lcore, dcf->dev, tcx->dst6[idx].dev,
		str, depth, tcx->dst6[idx].mtu,
		dcf->mac.addr_bytes[0], dcf->mac.addr_bytes[1],
		dcf->mac.addr_bytes[2], dcf->mac.addr_bytes[3],
		dcf->mac.addr_bytes[4], dcf->mac.addr_bytes[5],
		rc);

	return rc;
}

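/*
 * Pre-build the L2/L3 header template kept in the tle_dest entry;
 * TLDK prepends it to every packet sent towards that destination.
 */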
static void
fill_dst(struct tle_dest *dst, const struct tldk_dev *td,
	const struct tldk_port_conf *pcf, const struct tldk_dest_conf *dest,
	uint16_t l3_type, struct rte_mempool *mp)
{
	struct ether_hdr *eth;
	struct ipv4_hdr *ip4h;
	struct ipv6_hdr *ip6h;

	dst->dev = td->dev;
	dst->head_mp = mp;
	dst->mtu = RTE_MIN(dest->mtu, pcf->mtu);
	dst->l2_len = sizeof(*eth);

	eth = (struct ether_hdr *)dst->hdr;

	ether_addr_copy(&pcf->mac, &eth->s_addr);
	ether_addr_copy(&dest->mac, &eth->d_addr);
	eth->ether_type = rte_cpu_to_be_16(l3_type);

	if (l3_type == ETHER_TYPE_IPv4) {
		dst->l3_len = sizeof(*ip4h);
		ip4h = (struct ipv4_hdr *)(eth + 1);
		ip4h->version_ihl = 4 << 4 |
			sizeof(*ip4h) / IPV4_IHL_MULTIPLIER;
		ip4h->time_to_live = 64;
		ip4h->next_proto_id = IPPROTO_TCP;
	} else if (l3_type == ETHER_TYPE_IPv6) {
		dst->l3_len = sizeof(*ip6h);
		ip6h = (struct ipv6_hdr *)(eth + 1);
		ip6h->vtc_flow = 6 << 4;
		ip6h->proto = IPPROTO_TCP;
		ip6h->hop_limits = 64;
	}
}

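/*
 * Fill the next free tle_dest entries for the given device and install
 * the corresponding IPv4/IPv6 LPM routes pointing at them.
 */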
static int
be_add_dest(const struct tldk_dest_conf *dcf, struct tldk_ctx *tcx,
	uint32_t dev_idx, const struct tldk_port_conf *pcf, uint32_t family,
	uint32_t dnum)
{
	struct tle_dest *dp;
	uint32_t i, n, m;
	uint16_t l3_type;
	int32_t rc = 0;

	if (family == AF_INET) {
		n = tcx->dst4_num;
		dp = tcx->dst4 + n;
		m = RTE_DIM(tcx->dst4);
		l3_type = ETHER_TYPE_IPv4;
	} else {
		n = tcx->dst6_num;
		dp = tcx->dst6 + n;
		m = RTE_DIM(tcx->dst6);
		l3_type = ETHER_TYPE_IPv6;
	}

	if (n + dnum >= m) {
		RTE_LOG(ERR, USER1, "%s(lcore=%u, family=%u, dnum=%u) exceeds "
			"maximum allowed number of destinations(%u);\n",
			__func__, tcx->cf->lcore, family, dnum, m);
		return -ENOSPC;
	}

	for (i = 0; i != dnum && rc == 0; i++) {
		fill_dst(dp + i, &tcx->dev[dev_idx], pcf, dcf,
			l3_type, tcx->frag_mpool);
		if (family == AF_INET)
			rc = be_add_ipv4_route(tcx, dcf, n + i);
		else
			rc = be_add_ipv6_route(tcx, dcf, n + i);
	}

	if (family == AF_INET)
		tcx->dst4_num = n + i;
	else
		tcx->dst6_num = n + i;

	return rc;
}

int
be_dst_init(struct tldk_ctx *tcx, const tldk_conf_t *cf)
{
	uint32_t i, f, d, l, port_id;
	const struct tldk_ctx_conf *ctx_cf = tcx->cf;
	const struct tldk_dest_conf *dcf;
	const struct tldk_port_conf *pcf;
	int32_t rc = 0;

	for (i = 0; i < ctx_cf->nb_dest; i++) {
		dcf = &ctx_cf->dest[i];
		f = dcf->family;
		d = dcf->dev;
		for (l = 0; l != tcx->nb_dev; l++) {
			if (tcx->dev[l].cf.id == d) {
				/* fetch the port conf for the port
				 * associated with the device
				 */
				port_id = tcx->dev[l].cf.port;
				pcf = &cf->port[port_id];
				rc = be_add_dest(dcf, tcx, l, pcf, f, 1);
				if (rc != 0) {
					RTE_LOG(ERR, USER1,
						"%s(lcore=%u, family=%u) "
						"could not add "
						"destinations(%u)\n",
						__func__, ctx_cf->lcore, f, i);
					return -ENOSPC;
				}
				break;
			}
		}
	}

	return rc;
}

int
be_add_dev(struct tldk_ctx *tcx, const tldk_conf_t *cf)
{
	int32_t rc = 0;
	uint32_t i, port_id;
	struct tle_dev_param dprm;
	const struct tldk_port_conf *pcf;

	memset(&dprm, 0, sizeof(dprm));

	/* add the tle_dev on all applicable ports of the context */
	for (i = 0; i != tcx->cf->nb_dev; i++) {

		/* get the port id associated with the device */
		port_id = tcx->cf->dev[i].port;

		/* get the port config by port id */
		pcf = &cf->port[port_id];

		/* populate the tle_dev_param struct */
		dprm.rx_offload = pcf->rx_offload;
		dprm.tx_offload = pcf->tx_offload;
		dprm.local_addr4.s_addr = pcf->ipv4;

		memcpy(&dprm.local_addr6, &pcf->ipv6,
			sizeof(pcf->ipv6));

		/* add the tle_dev */
		tcx->dev[i].dev = tle_add_dev(tcx->ctx, &dprm);

		RTE_LOG(NOTICE, USER1, "%s(port=%u), dev: %p\n",
			__func__, port_id,
			tcx->dev[i].dev);

		if (tcx->dev[i].dev == NULL)
			rc = -rte_errno;

		if (rc != 0)
			return rc;

		tcx->nb_dev++;
		tcx->dev[i].cf = tcx->cf->dev[i];
	}

	return rc;
}

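/*
 * Query which packet types the port can classify in HW and translate
 * the result into the *_PTYPE bitmask used to select an RX callback.
 */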
static uint32_t
get_ptypes(const struct tldk_dev *td)
{
	uint32_t smask;
	int32_t i, rc;
	const uint32_t pmask = RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK |
		RTE_PTYPE_L4_MASK;

	smask = 0;
	rc = rte_eth_dev_get_supported_ptypes(td->cf.port, pmask, NULL, 0);
	if (rc < 0) {
		RTE_LOG(ERR, USER1,
			"%s(port=%u) failed to get supported ptypes;\n",
			__func__, td->cf.port);
		return smask;
	}

	uint32_t ptype[rc];
	rc = rte_eth_dev_get_supported_ptypes(td->cf.port, pmask, ptype, rc);

	for (i = 0; i != rc; i++) {
		switch (ptype[i]) {
		case RTE_PTYPE_L2_ETHER:
			smask |= ETHER_PTYPE;
			break;
		case RTE_PTYPE_L3_IPV4:
		case RTE_PTYPE_L3_IPV4_EXT_UNKNOWN:
			smask |= IPV4_PTYPE;
			break;
		case RTE_PTYPE_L3_IPV4_EXT:
			smask |= IPV4_EXT_PTYPE;
			break;
		case RTE_PTYPE_L3_IPV6:
		case RTE_PTYPE_L3_IPV6_EXT_UNKNOWN:
			smask |= IPV6_PTYPE;
			break;
		case RTE_PTYPE_L3_IPV6_EXT:
			smask |= IPV6_EXT_PTYPE;
			break;
		case RTE_PTYPE_L4_TCP:
			smask |= TCP_PTYPE;
			break;
		case RTE_PTYPE_L4_UDP:
			smask |= UDP_PTYPE;
			break;
		}
	}

	return smask;
}

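/*
 * Pack header lengths into the rte_mbuf tx_offload bit-field layout:
 * l2_len:7 | l3_len:9 | l4_len:8 | tso_segsz:16 | outer_l3_len:9 |
 * outer_l2_len:7.
 */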
static inline uint64_t
_mbuf_tx_offload(uint64_t il2, uint64_t il3, uint64_t il4, uint64_t tso,
	uint64_t ol3, uint64_t ol2)
{
	return il2 | il3 << 7 | il4 << 16 | tso << 24 | ol3 << 40 | ol2 << 49;
}

static inline void
fill_pkt_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t l3, uint32_t l4)
{
	m->tx_offload = _mbuf_tx_offload(l2, l3, l4, 0, 0, 0);
}

static inline int
is_ipv4_frag(const struct ipv4_hdr *iph)
{
	const uint16_t mask = rte_cpu_to_be_16(~IPV4_HDR_DF_FLAG);

	return ((mask & iph->fragment_offset) != 0);
}

static inline uint32_t
get_tcp_header_size(struct rte_mbuf *m, uint32_t l2_len, uint32_t l3_len)
{
	const struct tcp_hdr *tcp;

	tcp = rte_pktmbuf_mtod_offset(m, struct tcp_hdr *, l2_len + l3_len);
	return (tcp->data_off >> 4) * 4;
}

static inline void
adjust_ipv4_pktlen(struct rte_mbuf *m, uint32_t l2_len)
{
	uint32_t plen, trim;
	const struct ipv4_hdr *iph;

	iph = rte_pktmbuf_mtod_offset(m, const struct ipv4_hdr *, l2_len);
	plen = rte_be_to_cpu_16(iph->total_length) + l2_len;
	if (plen < m->pkt_len) {
		trim = m->pkt_len - plen;
		rte_pktmbuf_trim(m, trim);
	}
}

static inline void
adjust_ipv6_pktlen(struct rte_mbuf *m, uint32_t l2_len)
{
	uint32_t plen, trim;
	const struct ipv6_hdr *iph;

	iph = rte_pktmbuf_mtod_offset(m, const struct ipv6_hdr *, l2_len);
	plen = rte_be_to_cpu_16(iph->payload_len) + sizeof(*iph) + l2_len;
	if (plen < m->pkt_len) {
		trim = m->pkt_len - plen;
		rte_pktmbuf_trim(m, trim);
	}
}

static inline void
tcp_stat_update(struct tldk_ctx *lc, const struct rte_mbuf *m,
	uint32_t l2_len, uint32_t l3_len)
{
	const struct tcp_hdr *th;

	th = rte_pktmbuf_mtod_offset(m, struct tcp_hdr *, l2_len + l3_len);
	lc->tcp_stat.flags[th->tcp_flags]++;
}

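/*
 * Return the IPv4 header length (including options); flag fragments
 * and mark the packet as unknown if the header is truncated or the
 * L4 protocol is not the expected one.
 */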
static inline uint32_t
get_ipv4_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t proto, uint32_t frag)
{
	const struct ipv4_hdr *iph;
	int32_t dlen, len;

	dlen = rte_pktmbuf_data_len(m);
	dlen -= l2;

	iph = rte_pktmbuf_mtod_offset(m, const struct ipv4_hdr *, l2);
	len = (iph->version_ihl & IPV4_HDR_IHL_MASK) * IPV4_IHL_MULTIPLIER;

	if (frag != 0 && is_ipv4_frag(iph)) {
		m->packet_type &= ~RTE_PTYPE_L4_MASK;
		m->packet_type |= RTE_PTYPE_L4_FRAG;
	}

	if (len > dlen || (proto <= IPPROTO_MAX && iph->next_proto_id != proto))
		m->packet_type = RTE_PTYPE_UNKNOWN;

	return len;
}

static inline int
ipv6x_hdr(uint32_t proto)
{
	return (proto == IPPROTO_HOPOPTS ||
		proto == IPPROTO_ROUTING ||
		proto == IPPROTO_FRAGMENT ||
		proto == IPPROTO_AH ||
		proto == IPPROTO_NONE ||
		proto == IPPROTO_DSTOPTS);
}

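/*
 * Walk the IPv6 extension header chain until the expected L4 protocol
 * (fproto) is found and return the total L3 header length.
 */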
static inline uint32_t
get_ipv6x_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t nproto,
	uint32_t fproto)
{
	const struct ip6_ext *ipx;
	int32_t dlen, len, ofs;

	len = sizeof(struct ipv6_hdr);

	dlen = rte_pktmbuf_data_len(m);
	dlen -= l2;

	ofs = l2 + len;
	ipx = rte_pktmbuf_mtod_offset(m, const struct ip6_ext *, ofs);

	while (ofs > 0 && len < dlen) {

		switch (nproto) {
		case IPPROTO_HOPOPTS:
		case IPPROTO_ROUTING:
		case IPPROTO_DSTOPTS:
			ofs = (ipx->ip6e_len + 1) << 3;
			break;
		case IPPROTO_AH:
			ofs = (ipx->ip6e_len + 2) << 2;
			break;
		case IPPROTO_FRAGMENT:
			/*
			 * tso_segsz is not used by RX, so use it as temporary
			 * buffer to store the fragment offset.
			 */
			m->tso_segsz = ofs;
			ofs = sizeof(struct ip6_frag);
			m->packet_type &= ~RTE_PTYPE_L4_MASK;
			m->packet_type |= RTE_PTYPE_L4_FRAG;
			break;
		default:
			ofs = 0;
		}

		if (ofs > 0) {
			nproto = ipx->ip6e_nxt;
			len += ofs;
			ipx += ofs / sizeof(*ipx);
		}
	}

	/* unrecognized or invalid packet. */
	if ((ofs == 0 && nproto != fproto) || len > dlen)
		m->packet_type = RTE_PTYPE_UNKNOWN;

	return len;
}

static inline uint32_t
get_ipv6_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t fproto)
{
	const struct ipv6_hdr *iph;

	iph = rte_pktmbuf_mtod_offset(m, const struct ipv6_hdr *, l2);

	if (iph->proto == fproto)
		return sizeof(struct ipv6_hdr);
	else if (ipv6x_hdr(iph->proto) != 0)
		return get_ipv6x_hdr_len(m, l2, iph->proto, fproto);

	m->packet_type = RTE_PTYPE_UNKNOWN;
	return 0;
}

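/*
 * SW fallback for NICs without ptype classification: parse Ethernet,
 * IPv4/IPv6 and TCP headers, fill packet_type and the header lengths
 * in the mbuf, and trim any padding beyond the IP payload.
 */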
static inline void
fill_eth_tcp_hdr_len(struct rte_mbuf *m)
{
	uint32_t dlen, l2_len, l3_len, l4_len;
	uint16_t etp;
	const struct ether_hdr *eth;

	dlen = rte_pktmbuf_data_len(m);

	/* check that first segment is at least 54B long. */
	if (dlen < sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) +
			sizeof(struct tcp_hdr)) {
		m->packet_type = RTE_PTYPE_UNKNOWN;
		return;
	}

	l2_len = sizeof(*eth);

	eth = rte_pktmbuf_mtod(m, const struct ether_hdr *);
	etp = eth->ether_type;
	if (etp == rte_be_to_cpu_16(ETHER_TYPE_VLAN)) {
		/* use the ethertype encapsulated behind the VLAN tag */
		etp = rte_pktmbuf_mtod_offset(m, const struct vlan_hdr *,
			l2_len)->eth_proto;
		l2_len += sizeof(struct vlan_hdr);
	}

	if (etp == rte_be_to_cpu_16(ETHER_TYPE_IPv4)) {
		m->packet_type = RTE_PTYPE_L4_TCP |
			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L2_ETHER;
		l3_len = get_ipv4_hdr_len(m, l2_len, IPPROTO_TCP, 1);
		l4_len = get_tcp_header_size(m, l2_len, l3_len);
		fill_pkt_hdr_len(m, l2_len, l3_len, l4_len);
		adjust_ipv4_pktlen(m, l2_len);
	} else if (etp == rte_be_to_cpu_16(ETHER_TYPE_IPv6) &&
			dlen >= l2_len + sizeof(struct ipv6_hdr) +
			sizeof(struct tcp_hdr)) {
		m->packet_type = RTE_PTYPE_L4_TCP |
			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L2_ETHER;
		l3_len = get_ipv6_hdr_len(m, l2_len, IPPROTO_TCP);
		l4_len = get_tcp_header_size(m, l2_len, l3_len);
		fill_pkt_hdr_len(m, l2_len, l3_len, l4_len);
		adjust_ipv6_pktlen(m, l2_len);
	} else
		m->packet_type = RTE_PTYPE_UNKNOWN;
}

/*
 * HW can recognize L2/L3 with/without extensions/L4 (ixgbe/igb/fm10k)
 */
static uint16_t
type0_tcp_rx_callback(__rte_unused dpdk_port_t port,
	__rte_unused uint16_t queue,
	struct rte_mbuf *pkt[], uint16_t nb_pkts,
	__rte_unused uint16_t max_pkts, __rte_unused void *user_param)
{
	uint32_t j, tp;
	uint32_t l4_len, l3_len, l2_len;
	const struct ether_hdr *eth;

	l2_len = sizeof(*eth);

	for (j = 0; j != nb_pkts; j++) {

		BE_PKT_DUMP(pkt[j]);

		tp = pkt[j]->packet_type & (RTE_PTYPE_L4_MASK |
			RTE_PTYPE_L3_MASK | RTE_PTYPE_L2_MASK);

		switch (tp) {
		/* non fragmented tcp packets. */
		case (RTE_PTYPE_L4_TCP | RTE_PTYPE_L3_IPV4 |
				RTE_PTYPE_L2_ETHER):
			l4_len = get_tcp_header_size(pkt[j], l2_len,
				sizeof(struct ipv4_hdr));
			fill_pkt_hdr_len(pkt[j], l2_len,
				sizeof(struct ipv4_hdr), l4_len);
			adjust_ipv4_pktlen(pkt[j], l2_len);
			break;
		case (RTE_PTYPE_L4_TCP | RTE_PTYPE_L3_IPV6 |
				RTE_PTYPE_L2_ETHER):
			l4_len = get_tcp_header_size(pkt[j], l2_len,
				sizeof(struct ipv6_hdr));
			fill_pkt_hdr_len(pkt[j], l2_len,
				sizeof(struct ipv6_hdr), l4_len);
			adjust_ipv6_pktlen(pkt[j], l2_len);
			break;
		case (RTE_PTYPE_L4_TCP | RTE_PTYPE_L3_IPV4_EXT |
				RTE_PTYPE_L2_ETHER):
			l3_len = get_ipv4_hdr_len(pkt[j], l2_len,
				IPPROTO_TCP, 0);
			l4_len = get_tcp_header_size(pkt[j], l2_len, l3_len);
			fill_pkt_hdr_len(pkt[j], l2_len, l3_len, l4_len);
			adjust_ipv4_pktlen(pkt[j], l2_len);
			break;
		case (RTE_PTYPE_L4_TCP | RTE_PTYPE_L3_IPV6_EXT |
				RTE_PTYPE_L2_ETHER):
			l3_len = get_ipv6_hdr_len(pkt[j], l2_len, IPPROTO_TCP);
			l4_len = get_tcp_header_size(pkt[j], l2_len, l3_len);
			fill_pkt_hdr_len(pkt[j], l2_len, l3_len, l4_len);
			adjust_ipv6_pktlen(pkt[j], l2_len);
			break;
		default:
			/* treat packet types as invalid. */
			pkt[j]->packet_type = RTE_PTYPE_UNKNOWN;
			break;
		}
	}

	return nb_pkts;
}

/*
 * HW can recognize L2/L3/L4 and fragments (i40e).
 */
static uint16_t
type1_tcp_rx_callback(__rte_unused dpdk_port_t port,
	__rte_unused uint16_t queue,
	struct rte_mbuf *pkt[], uint16_t nb_pkts,
	__rte_unused uint16_t max_pkts, void *user_param)
{
	uint32_t j, tp;
	struct tldk_ctx *tcx;
	uint32_t l4_len, l3_len, l2_len;
	const struct ether_hdr *eth;

	tcx = user_param;
	l2_len = sizeof(*eth);

	for (j = 0; j != nb_pkts; j++) {

		BE_PKT_DUMP(pkt[j]);

		tp = pkt[j]->packet_type & (RTE_PTYPE_L4_MASK |
			RTE_PTYPE_L3_MASK | RTE_PTYPE_L2_MASK);

		switch (tp) {
		case (RTE_PTYPE_L4_TCP | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
				RTE_PTYPE_L2_ETHER):
			l3_len = get_ipv4_hdr_len(pkt[j], l2_len,
				IPPROTO_TCP, 0);
			l4_len = get_tcp_header_size(pkt[j], l2_len, l3_len);
			fill_pkt_hdr_len(pkt[j], l2_len, l3_len, l4_len);
			adjust_ipv4_pktlen(pkt[j], l2_len);
			tcp_stat_update(tcx, pkt[j], l2_len, l3_len);
			break;
		case (RTE_PTYPE_L4_TCP | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
				RTE_PTYPE_L2_ETHER):
			l3_len = get_ipv6_hdr_len(pkt[j], l2_len, IPPROTO_TCP);
			l4_len = get_tcp_header_size(pkt[j], l2_len, l3_len);
			fill_pkt_hdr_len(pkt[j], l2_len, l3_len, l4_len);
			adjust_ipv6_pktlen(pkt[j], l2_len);
			tcp_stat_update(tcx, pkt[j], l2_len, l3_len);
			break;
		default:
			/* treat packet types as invalid. */
			pkt[j]->packet_type = RTE_PTYPE_UNKNOWN;
			break;
		}

	}

	return nb_pkts;
}

static uint16_t
typen_tcp_rx_callback(__rte_unused dpdk_port_t port,
	__rte_unused uint16_t queue,
	struct rte_mbuf *pkt[], uint16_t nb_pkts,
	__rte_unused uint16_t max_pkts, __rte_unused void *user_param)
{
	uint32_t j;

	for (j = 0; j != nb_pkts; j++) {

		BE_PKT_DUMP(pkt[j]);
		fill_eth_tcp_hdr_len(pkt[j]);
	}

	return nb_pkts;
}

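/*
 * Select the RX callback matching the packet types the port can
 * classify in HW and install it on the device's RX queue.
 */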
int
setup_rx_cb(const struct tldk_dev *td, struct tldk_ctx *tcx)
{
	int32_t rc;
	uint32_t i, n, smask;
	void *cb;
	const struct ptype2cb *ptype2cb;

	static const struct ptype2cb tcp_ptype2cb[] = {
		{
			.mask = ETHER_PTYPE | IPV4_PTYPE | IPV4_EXT_PTYPE |
				IPV6_PTYPE | IPV6_EXT_PTYPE | TCP_PTYPE,
			.name = "HW l2/l3x/l4-tcp ptype",
			.fn = type0_tcp_rx_callback,
		},
		{
			.mask = ETHER_PTYPE | IPV4_PTYPE | IPV6_PTYPE |
				TCP_PTYPE,
			.name = "HW l2/l3/l4-tcp ptype",
			.fn = type1_tcp_rx_callback,
		},
		{
			.mask = 0,
			.name = "tcp no HW ptype",
			.fn = typen_tcp_rx_callback,
		},
	};

	smask = get_ptypes(td);

	ptype2cb = tcp_ptype2cb;
	n = RTE_DIM(tcp_ptype2cb);

	for (i = 0; i != n; i++) {
		if ((smask & ptype2cb[i].mask) == ptype2cb[i].mask) {
			cb = rte_eth_add_rx_callback(td->cf.port, td->cf.queue,
				ptype2cb[i].fn, tcx);
			rc = -rte_errno;
			RTE_LOG(ERR, USER1,
				"%s(port=%u), setup RX callback \"%s\" "
				"returns %p;\n",
				__func__, td->cf.port,  ptype2cb[i].name, cb);
			return ((cb == NULL) ? rc : 0);
		}
	}

	/* no proper callback found. */
	RTE_LOG(ERR, USER1,
		"%s(port=%u) failed to find an appropriate callback;\n",
		__func__, td->cf.port);
	return -ENOENT;
}

int
be_lcore_setup(struct tldk_ctx *tcx)
{
	uint32_t i;
	int32_t rc;

	RTE_LOG(NOTICE, USER1, "%s:(lcore=%u, ctx=%p) start\n",
		__func__, tcx->cf->lcore, tcx->ctx);

	rc = 0;
	for (i = 0; i != tcx->nb_dev && rc == 0; i++) {
		RTE_LOG(NOTICE, USER1, "%s:%u(port=%u, q=%u)\n",
			__func__, i, tcx->dev[i].cf.port, tcx->dev[i].cf.queue);

		rc = setup_rx_cb(&tcx->dev[i], tcx);
		if (rc < 0)
			return rc;
	}

	return rc;
}

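/*
 * Receive a burst of packets from the device queue, hand them to the
 * TLDK TCP layer and free the ones it rejects.
 */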
static inline void
be_rx(struct tldk_dev *dev)
{
	uint32_t j, k, n;
	struct rte_mbuf *pkt[MAX_PKT_BURST];
	struct rte_mbuf *rp[MAX_PKT_BURST];
	int32_t rc[MAX_PKT_BURST];

	n = rte_eth_rx_burst(dev->cf.port,
		dev->cf.queue, pkt, RTE_DIM(pkt));

	if (n != 0) {
		dev->rx_stat.in += n;
		BE_TRACE("%s(%u): rte_eth_rx_burst(%u, %u) returns %u\n",
			__func__, dev->cf.id, dev->cf.port,
			dev->cf.queue, n);

		k = tle_tcp_rx_bulk(dev->dev, pkt, rp, rc, n);

		dev->rx_stat.up += k;
		dev->rx_stat.drop += n - k;
		BE_TRACE("%s: tle_tcp_rx_bulk(%p, %u) returns %u\n",
			__func__, dev->dev, n, k);

		for (j = 0; j != n - k; j++) {
			BE_TRACE("%s:%d(port=%u) rp[%u]={%p, %d};\n",
				__func__, __LINE__, dev->cf.port,
				j, rp[j], rc[j]);
			rte_pktmbuf_free(rp[j]);
		}
	}
}

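/*
 * Pull packets from the TLDK TCP layer into the device TX buffer
 * (only when at least half of it is free), transmit them, and keep
 * any unsent packets at the head of the buffer for the next call.
 */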
static inline void
be_tx(struct tldk_dev *dev)
{
	uint32_t j = 0, k, n;
	struct rte_mbuf **mb;

	n = dev->tx_buf.num;
	k = RTE_DIM(dev->tx_buf.pkt) - n;
	mb = dev->tx_buf.pkt;

	if (k >= RTE_DIM(dev->tx_buf.pkt) / 2) {
		j = tle_tcp_tx_bulk(dev->dev, mb + n, k);
		n += j;
		dev->tx_stat.down += j;
	}

	if (n == 0)
		return;

	BE_TRACE("%s: tle_tcp_tx_bulk(%p) returns %u,\n"
		"total pkts to send: %u\n",
		__func__, dev->dev, j, n);

	for (j = 0; j != n; j++)
		BE_PKT_DUMP(mb[j]);

	k = rte_eth_tx_burst(dev->cf.port,
			dev->cf.queue, mb, n);

	dev->tx_stat.out += k;
	dev->tx_stat.drop += n - k;
	BE_TRACE("%s: rte_eth_tx_burst(%u, %u, %u) returns %u\n",
		__func__, dev->cf.port,
		dev->cf.queue, n, k);

	dev->tx_buf.num = n - k;
	if (k != 0)
		for (j = k; j != n; j++)
			mb[j - k] = mb[j];
}

void
be_lcore_tcp(struct tldk_ctx *tcx)
{
	uint32_t i;

	if (tcx == NULL)
		return;

	for (i = 0; i != tcx->nb_dev; i++) {
		be_rx(&tcx->dev[i]);
		be_tx(&tcx->dev[i]);
	}
	tle_tcp_process(tcx->ctx, TCP_MAX_PROCESS);
}

void
be_lcore_clear(struct tldk_ctx *tcx)
{
	uint32_t i, j;

	if (tcx == NULL)
		return;

	RTE_LOG(NOTICE, USER1, "%s(lcore=%u, ctx: %p) finish\n",
		__func__, tcx->cf->lcore, tcx->ctx);
	for (i = 0; i != tcx->nb_dev; i++) {
		RTE_LOG(NOTICE, USER1, "%s:%u(port=%u, q=%u, lcore=%u, dev=%p) "
			"rx_stats={"
			"in=%" PRIu64 ",up=%" PRIu64 ",drop=%" PRIu64 "}, "
			"tx_stats={"
			"down=%" PRIu64 ",out=%" PRIu64 ",drop=%" PRIu64 "};\n",
			__func__, i, tcx->dev[i].cf.port, tcx->dev[i].cf.queue,
			tcx->cf->lcore,
			tcx->dev[i].dev,
			tcx->dev[i].rx_stat.in,
			tcx->dev[i].rx_stat.up,
			tcx->dev[i].rx_stat.drop,
			tcx->dev[i].tx_stat.down,
			tcx->dev[i].tx_stat.out,
			tcx->dev[i].tx_stat.drop);
	}

	RTE_LOG(NOTICE, USER1, "tcp_stat={\n");
	for (i = 0; i != RTE_DIM(tcx->tcp_stat.flags); i++) {
		if (tcx->tcp_stat.flags[i] != 0)
			RTE_LOG(NOTICE, USER1, "[flag=%#x]==%" PRIu64 ";\n",
				i, tcx->tcp_stat.flags[i]);
	}
	RTE_LOG(NOTICE, USER1, "};\n");

	for (i = 0; i != tcx->nb_dev; i++)
		for (j = 0; j != tcx->dev[i].tx_buf.num; j++)
			rte_pktmbuf_free(tcx->dev[i].tx_buf.pkt[j]);

}

void
be_stop_port(uint32_t port)
{
	struct rte_eth_stats stats;

	RTE_LOG(NOTICE, USER1, "%s: stopping port %u\n", __func__, port);

	rte_eth_stats_get(port, &stats);
	RTE_LOG(NOTICE, USER1, "port %u stats={\n"
		"ipackets=%" PRIu64 ";"
		"ibytes=%" PRIu64 ";"
		"ierrors=%" PRIu64 ";"
		"imissed=%" PRIu64 ";\n"
		"opackets=%" PRIu64 ";"
		"obytes=%" PRIu64 ";"
		"oerrors=%" PRIu64 ";\n"
		"}\n",
		port,
		stats.ipackets,
		stats.ibytes,
		stats.ierrors,
		stats.imissed,
		stats.opackets,
		stats.obytes,
		stats.oerrors);
	rte_eth_dev_stop(port);
}

int
be_lcore_main(void *arg)
{
	int32_t rc;
	uint32_t lid, i;
	struct tldk_ctx *tcx;
	struct lcore_ctxs_list *lc_ctx;

	lc_ctx = arg;
	lid = rte_lcore_id();

	RTE_LOG(NOTICE, USER1, "%s(lcore=%u) start\n", __func__, lid);

	rc = 0;
	while (force_quit == 0) {
		for (i = 0; i < lc_ctx->nb_ctxs; i++) {
			tcx = lc_ctx->ctxs[i];
			be_lcore_tcp(tcx);
		}
	}

	RTE_LOG(NOTICE, USER1, "%s(lcore=%u) finish\n", __func__, lid);

	return rc;
}
