/* tcp.h revision aa97dd1c */
1/*
2 * Copyright (c) 2016  Intel Corporation.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 *     http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16#ifndef TCP_H_
17#define TCP_H_
18
19#define	TCP_MAX_PROCESS	0x20
20
21static inline void
22netfe_stream_term_tcp(struct netfe_lcore *fe, struct netfe_stream *fes)
23{
24	fes->s = NULL;
25	fes->fwds = NULL;
26	memset(&fes->stat, 0, sizeof(fes->stat));
27	netfe_put_stream(fe, &fe->free, fes);
28}
29
30static inline void
31netfe_stream_close_tcp(struct netfe_lcore *fe, struct netfe_stream *fes)
32{
33	tle_tcp_stream_close(fes->s);
34	netfe_stream_term_tcp(fe, fes);
35}
36
/*
 * helper function: opens a single TCP stream (IPv4 or IPv6, depending
 * on the local address in sprm) for the selected port.
 * On success the stream is moved to the lcore 'use' list and returned;
 * on failure NULL is returned and rte_errno is set.
 */
static struct netfe_stream *
netfe_stream_open_tcp(struct netfe_lcore *fe, struct netfe_sprm *sprm,
	uint32_t lcore, uint16_t op, uint32_t bidx, uint8_t server_mode)
{
	int32_t rc;
	struct netfe_stream *fes;
	struct sockaddr_in *l4;
	struct sockaddr_in6 *l6;
	uint16_t errport;
	struct tle_tcp_stream_param tprm;

	/* grab a stream object from the lcore free list. */
	fes = netfe_get_stream(&fe->free);
	if (fes == NULL) {
		rte_errno = ENOBUFS;
		return NULL;
	}

	/*
	 * server (listen) streams deliver incoming SYN requests through
	 * the dedicated SYN event queue, so re-home the rx event there.
	 */
	if (server_mode != 0) {
		tle_event_free(fes->rxev);
		fes->rxev = tle_event_alloc(fe->syneq, fes);
	}

	if (fes->rxev == NULL) {
		netfe_stream_close_tcp(fe, fes);
		rte_errno = ENOMEM;
		return NULL;
	}

	/* activate rx, tx and err events for the stream */
	if (op == TXONLY || op == FWD) {
		tle_event_active(fes->txev, TLE_SEV_DOWN);
		fes->stat.txev[TLE_SEV_DOWN]++;
	}

	if (op != TXONLY || server_mode != 0) {
		tle_event_active(fes->rxev, TLE_SEV_DOWN);
		fes->stat.rxev[TLE_SEV_DOWN]++;
	}
	tle_event_active(fes->erev, TLE_SEV_DOWN);
	fes->stat.erev[TLE_SEV_DOWN]++;

	memset(&tprm, 0, sizeof(tprm));
	tprm.addr.local = sprm->local_addr;
	tprm.addr.remote = sprm->remote_addr;
	tprm.cfg.err_ev = fes->erev;
	tprm.cfg.recv_ev = fes->rxev;
	/* FWD streams do not register a send event at open time. */
	if (op != FWD)
		tprm.cfg.send_ev = fes->txev;

	fes->s = tle_tcp_stream_open(becfg.cpu[bidx].ctx, &tprm);

	if (fes->s == NULL) {
		/* preserve the open error code across the cleanup call. */
		rc = rte_errno;
		netfe_stream_close_tcp(fe, fes);
		rte_errno = rc;

		/* extract the local port number for the error message. */
		if (sprm->local_addr.ss_family == AF_INET) {
			l4 = (struct sockaddr_in *) &sprm->local_addr;
			errport = ntohs(l4->sin_port);
		} else {
			l6 = (struct sockaddr_in6 *) &sprm->local_addr;
			errport = ntohs(l6->sin6_port);
		}

		RTE_LOG(ERR, USER1, "stream open failed for port %u with error "
			"code=%u, bidx=%u, lc=%u\n",
			errport, rc, bidx, becfg.cpu[bidx].id);
		return NULL;
	}

	RTE_LOG(NOTICE, USER1,
		"%s(%u)={s=%p, op=%hu, proto=%s, rxev=%p, txev=%p}, belc=%u\n",
		__func__, lcore, fes->s, op, proto_name[becfg.proto],
		fes->rxev, fes->txev, becfg.cpu[bidx].id);

	/* record stream attributes and move it to the in-use list. */
	fes->op = op;
	fes->proto = becfg.proto;
	fes->family = sprm->local_addr.ss_family;
	fes->laddr = sprm->local_addr;
	netfe_put_stream(fe, &fe->use, fes);

	return fes;
}
123
124static int
125netfe_lcore_init_tcp(const struct netfe_lcore_prm *prm)
126{
127	size_t sz;
128	int32_t rc;
129	uint32_t i, lcore, snum;
130	struct netfe_lcore *fe;
131	struct tle_evq_param eprm;
132	struct netfe_stream *fes;
133	struct netfe_sprm *sprm;
134
135	lcore = rte_lcore_id();
136
137	snum = prm->max_streams;
138	RTE_LOG(NOTICE, USER1, "%s(lcore=%u, nb_streams=%u, max_streams=%u)\n",
139		__func__, lcore, prm->nb_streams, snum);
140
141	memset(&eprm, 0, sizeof(eprm));
142	eprm.socket_id = rte_lcore_to_socket_id(lcore);
143	eprm.max_events = snum;
144
145	sz = sizeof(*fe) + snum * sizeof(struct netfe_stream);
146	fe = rte_zmalloc_socket(NULL, sz, RTE_CACHE_LINE_SIZE,
147		rte_lcore_to_socket_id(lcore));
148
149	if (fe == NULL) {
150		RTE_LOG(ERR, USER1, "%s:%d failed to allocate %zu bytes\n",
151			__func__, __LINE__, sz);
152		return -ENOMEM;
153	}
154
155	RTE_PER_LCORE(_fe) = fe;
156
157	fe->snum = snum;
158	/* initialize the stream pool */
159	LIST_INIT(&fe->free.head);
160	LIST_INIT(&fe->use.head);
161
162	/* allocate the event queues */
163	fe->syneq = tle_evq_create(&eprm);
164	fe->ereq = tle_evq_create(&eprm);
165	fe->rxeq = tle_evq_create(&eprm);
166	fe->txeq = tle_evq_create(&eprm);
167
168	RTE_LOG(INFO, USER1, "%s(%u) synevq=%p, erevq=%p, rxevq=%p, txevq=%p\n",
169		__func__, lcore, fe->syneq, fe->ereq, fe->rxeq, fe->txeq);
170	if (fe->syneq == NULL || fe->ereq == NULL || fe->rxeq == NULL ||
171		fe->txeq == NULL)
172		return -ENOMEM;
173
174	fes = (struct netfe_stream *)(fe + 1);
175	for (i = 0; i != snum; i++) {
176		fes[i].rxev = tle_event_alloc(fe->rxeq, fes + i);
177		fes[i].txev = tle_event_alloc(fe->txeq, fes + i);
178		fes[i].erev = tle_event_alloc(fe->ereq, fes + i);
179		netfe_put_stream(fe, &fe->free, fes + i);
180	}
181
182
183	/* open all requested streams. */
184	for (i = 0; i != prm->nb_streams; i++) {
185		sprm = &prm->stream[i].sprm;
186		fes = netfe_stream_open_tcp(fe, sprm, lcore, prm->stream[i].op,
187			sprm->bidx, becfg.server);
188		if (fes == NULL) {
189			rc = -rte_errno;
190			break;
191		}
192
193		netfe_stream_dump(fes, &sprm->local_addr, &sprm->remote_addr);
194
195		if (prm->stream[i].op == FWD) {
196			fes->fwdprm = prm->stream[i].fprm;
197		} else if (prm->stream[i].op == TXONLY) {
198			fes->txlen = prm->stream[i].txlen;
199			fes->raddr = prm->stream[i].sprm.remote_addr;
200		}
201
202		if (becfg.server == 1) {
203			rc = tle_tcp_stream_listen(fes->s);
204			RTE_LOG(INFO, USER1,
205				"%s(%u) tle_tcp_stream_listen(stream=%p) "
206				"returns %d\n",
207				__func__, lcore, fes->s, rc);
208			if (rc != 0)
209				break;
210		} else {
211			rc = tle_tcp_stream_connect(fes->s,
212				(const struct sockaddr *)&sprm->remote_addr);
213			RTE_LOG(INFO, USER1,
214				"%s(%u) tle_tcp_stream_connect(stream=%p) "
215				"returns %d\n",
216				__func__, lcore, fes->s, rc);
217			if (rc != 0)
218				break;
219		}
220	}
221
222	return rc;
223}
224
225static inline struct netfe_stream *
226netfe_create_fwd_stream(struct netfe_lcore *fe, struct netfe_stream *fes,
227	uint32_t lcore, uint32_t bidx)
228{
229	uint32_t rc;
230	struct netfe_stream *fws;
231
232	fws = netfe_stream_open_tcp(fe, &fes->fwdprm, lcore, FWD, bidx, 0);
233	if (fws != NULL) {
234		rc = tle_tcp_stream_connect(fws->s,
235			(const struct sockaddr *)&fes->fwdprm.remote_addr);
236		NETFE_TRACE("%s(lc=%u, fes=%p): tle_tcp_stream_connect() "
237			"returns %d;\n",
238			__func__, rte_lcore_id(), fes, rc);
239
240		if (rc != 0) {
241			netfe_stream_term_tcp(fe, fws);
242			fws = NULL;
243		}
244	}
245
246	if (fws == NULL)
247		RTE_LOG(ERR, USER1, "%s(lc=%u fes=%p) failed to open "
248			"forwarding stream;\n",
249			__func__, rte_lcore_id(), fes);
250
251	return fws;
252}
253
/*
 * Drain the pending packet buffer of 'fes' into its paired forwarding
 * stream (fes->fwds). Unsent packets are compacted to the front of
 * the buffer and the TX event is raised to retry later; if no peer
 * stream exists, all pending packets are freed and counted as drops.
 */
static inline void
netfe_fwd_tcp(uint32_t lcore, struct netfe_stream *fes)
{
	uint32_t i, k, n;
	struct rte_mbuf **pkt;
	struct netfe_stream *fed;

	/* lcore is referenced only by NETFE_TRACE, which may compile out. */
	RTE_SET_USED(lcore);

	n = fes->pbuf.num;
	pkt = fes->pbuf.pkt;

	if (n == 0)
		return;

	fed = fes->fwds;

	if (fed != NULL) {

		/* k = number of packets actually accepted by the peer. */
		k = tle_tcp_stream_send(fed->s, pkt, n);

		NETFE_TRACE("%s(%u): tle_%s_stream_send(%p, %u) "
				"returns %u\n",
				__func__, lcore, proto_name[fes->proto],
				fed->s, n, k);

			fed->stat.txp += k;
			fed->stat.drops += n - k;
			fes->stat.fwp += k;

	} else {
		/* no peer: free everything; loop leaves k == n, so the
		 * compaction below becomes a no-op. */
		NETFE_TRACE("%s(%u, %p): no fwd stream for %u pkts;\n",
			__func__, lcore, fes->s, n);
		for (k = 0; k != n; k++) {
			NETFE_TRACE("%s(%u, %p): free(%p);\n",
			__func__, lcore, fes->s, pkt[k]);
			rte_pktmbuf_free(pkt[k]);
		}
		fes->stat.drops += n;
	}

	/* copy unforwarded mbufs. */
	for (i = 0; i != n - k; i++)
		pkt[i] = pkt[i + k];

	/* i now holds the number of packets still pending. */
	fes->pbuf.num = i;

	if (i != 0) {
		tle_event_raise(fes->txev);
		fes->stat.txev[TLE_SEV_UP]++;
	}

	/* buffer was full before sending: re-enable RX for this stream. */
	if (n == RTE_DIM(fes->pbuf.pkt)) {
		tle_event_active(fes->rxev, TLE_SEV_UP);
		fes->stat.rxev[TLE_SEV_UP]++;
	}
}
311
/*
 * Service pending connection requests on listen stream 'fes':
 * fetch up to MAX_PKT_BURST SYN requests, accept as many as there are
 * free frontend stream objects, reject the remainder, and configure
 * each accepted stream according to the listen stream's mode
 * (RXTX / TXONLY / FWD).
 */
static inline void
netfe_new_conn_tcp(struct netfe_lcore *fe, __rte_unused uint32_t lcore,
	struct netfe_stream *fes)
{
	uint32_t i, k, n, rc;
	struct tle_tcp_stream_cfg *prm;
	struct tle_tcp_accept_param acpt_prm[MAX_PKT_BURST];
	struct tle_stream *rs[MAX_PKT_BURST];
	struct tle_syn_req syn_reqs[MAX_PKT_BURST];
	struct netfe_stream *ts;
	struct netfe_stream *fs[MAX_PKT_BURST];

	/* empty callback set: accepted streams are event-driven only. */
	static const struct tle_stream_cb zcb = {.func = NULL, .data = NULL};

	/* check if any syn requests are waiting */
	n = tle_tcp_stream_synreqs(fes->s, syn_reqs, RTE_DIM(syn_reqs));
	if (n == 0)
		return;

	NETFE_TRACE("%s(%u): tle_tcp_stream_synreqs(%p, %u) returns %u\n",
		__func__, lcore, fes->s, MAX_PKT_BURST, n);

	/* get n free streams (may return fewer: k <= n) */
	k = netfe_get_streams(&fe->free, fs, n);

	/* fill accept params to accept k connection requests*/
	for (i = 0; i != k; i++) {
		acpt_prm[i].syn = syn_reqs[i];
		prm = &acpt_prm[i].cfg;
		prm->nb_retries = 0;
		prm->recv_ev = fs[i]->rxev;
		prm->send_ev = fs[i]->txev;
		prm->err_ev = fs[i]->erev;
		tle_event_active(fs[i]->erev, TLE_SEV_DOWN);
		prm->err_cb = zcb;
		prm->recv_cb = zcb;
		prm->send_cb = zcb;
	}

	/* accept k new connections; rc = number actually accepted */
	rc = tle_tcp_stream_accept(fes->s, acpt_prm, rs, k);

	NETFE_TRACE("%s(%u): tle_tcp_stream_accept(%p, %u) returns %u\n",
		__func__, lcore, fes->s, k, rc);

	if (rc != n) {
		/* n - rc connections could not be accepted */
		tle_tcp_reject(fes->s, syn_reqs + rc, n - rc);

		/* put back k - rc streams free list */
		netfe_put_streams(fe, &fe->free, fs + rc, k - rc);
	}

	/* update the params for accepted streams */
	for (i = 0; i != rc; i++) {

		ts = fs[i];

		/* inherit mode and addressing attributes from the listener. */
		ts->s = rs[i];
		ts->op = fes->op;
		ts->proto = fes->proto;
		ts->family = fes->family;
		ts->txlen = fes->txlen;

		if (fes->op == TXONLY) {
			tle_event_active(ts->txev, TLE_SEV_UP);
			ts->stat.txev[TLE_SEV_UP]++;
		} else {
			tle_event_active(ts->rxev, TLE_SEV_DOWN);
			ts->stat.rxev[TLE_SEV_DOWN]++;
		}

		netfe_put_stream(fe, &fe->use, ts);
		NETFE_TRACE("%s(%u) accept (stream=%p, s=%p)\n",
			__func__, lcore, ts, rs[i]);

		/* create a new fwd stream if needed */
		if (fes->op == FWD) {
			tle_event_active(ts->txev, TLE_SEV_DOWN);
			ts->stat.txev[TLE_SEV_DOWN]++;

			/* pair the two streams so each forwards to the other. */
			ts->fwds = netfe_create_fwd_stream(fe, fes, lcore,
				fes->fwdprm.bidx);
			if (ts->fwds != NULL)
				ts->fwds->fwds = ts;
		}
	}
	fe->tcp_stat.acc += rc;
	fe->tcp_stat.rej += n - rc;
}
402
403static inline void
404netfe_lcore_tcp_req(void)
405{
406	struct netfe_lcore *fe;
407	uint32_t j, n, lcore;
408	struct netfe_stream *fs[MAX_PKT_BURST];
409
410	fe = RTE_PER_LCORE(_fe);
411	if (fe == NULL)
412		return;
413
414	/* look for syn events */
415	n = tle_evq_get(fe->syneq, (const void **)(uintptr_t)fs, RTE_DIM(fs));
416	if (n == 0)
417		return;
418
419	lcore = rte_lcore_id();
420
421	NETFE_TRACE("%s(%u): tle_evq_get(synevq=%p) returns %u\n",
422		__func__, lcore, fe->syneq, n);
423
424	for (j = 0; j != n; j++)
425		netfe_new_conn_tcp(fe, lcore, fs[j]);
426}
427
/*
 * Handle error/RST events: collect every stream with a pending error
 * event, idle its rx/tx/err events, close the underlying TCP streams
 * in bulk and recycle the frontend stream objects. For forwarding
 * pairs, unsent packets are flushed to the peer first and the peer's
 * error event is raised so it terminates on a later pass too.
 */
static inline void
netfe_lcore_tcp_rst(void)
{
	struct netfe_lcore *fe;
	struct netfe_stream *fwds;
	uint32_t j, n;
	struct tle_stream *s[MAX_PKT_BURST];
	struct netfe_stream *fs[MAX_PKT_BURST];
	struct tle_event *rv[MAX_PKT_BURST];
	struct tle_event *tv[MAX_PKT_BURST];
	struct tle_event *ev[MAX_PKT_BURST];

	fe = RTE_PER_LCORE(_fe);
	if (fe == NULL)
		return;

	/* look for err events */
	n = tle_evq_get(fe->ereq, (const void **)(uintptr_t)fs, RTE_DIM(fs));
	if (n == 0)
		return;

	NETFE_TRACE("%s(%u): tle_evq_get(errevq=%p) returns %u\n",
		__func__, rte_lcore_id(), fe->ereq, n);

	/* gather the streams and their events for bulk processing. */
	for (j = 0; j != n; j++) {
		if (verbose > VERBOSE_NONE) {
			struct tle_tcp_stream_addr addr;
			tle_tcp_stream_get_addr(fs[j]->s, &addr);
			netfe_stream_dump(fs[j], &addr.local, &addr.remote);
		}
		s[j] = fs[j]->s;
		rv[j] = fs[j]->rxev;
		tv[j] = fs[j]->txev;
		ev[j] = fs[j]->erev;
	}

	/* quiesce all events before closing the streams. */
	tle_evq_idle(fe->rxeq, rv, n);
	tle_evq_idle(fe->txeq, tv, n);
	tle_evq_idle(fe->ereq, ev, n);

	tle_tcp_stream_close_bulk(s, n);

	for (j = 0; j != n; j++) {

		/*
		 * if forwarding mode, send unsent packets and
		 * signal peer stream to terminate too.
		 */
		fwds = fs[j]->fwds;
		if (fwds != NULL && fwds->s != NULL) {

			/* forward all unsent packets */
			netfe_fwd_tcp(rte_lcore_id(), fs[j]);

			/* unlink the pair in both directions. */
			fwds->fwds = NULL;
			tle_event_raise(fwds->erev);
			fs[j]->fwds = NULL;
		}

		/* now terminate the stream receiving rst event*/
		netfe_rem_stream(&fe->use, fs[j]);
		netfe_stream_term_tcp(fe, fs[j]);
		fe->tcp_stat.ter++;
	}
}
493
494static inline void
495netfe_rxtx_process_tcp(__rte_unused uint32_t lcore, struct netfe_stream *fes)
496{
497	uint32_t i, k, n;
498	struct rte_mbuf **pkt;
499
500	n = fes->pbuf.num;
501	pkt = fes->pbuf.pkt;
502
503	/* there is nothing to send. */
504	if (n == 0) {
505		tle_event_idle(fes->txev);
506		fes->stat.txev[TLE_SEV_IDLE]++;
507		return;
508	}
509
510
511	k = tle_tcp_stream_send(fes->s, pkt, n);
512
513	NETFE_TRACE("%s(%u): tle_%s_stream_send(%p, %u) returns %u\n",
514		__func__, lcore, proto_name[fes->proto],
515	fes->s, n, k);
516	fes->stat.txp += k;
517	fes->stat.drops += n - k;
518
519	/* not able to send anything. */
520	if (k == 0)
521		return;
522
523	if (n == RTE_DIM(fes->pbuf.pkt)) {
524		/* mark stream as readable */
525		tle_event_active(fes->rxev, TLE_SEV_UP);
526		fes->stat.rxev[TLE_SEV_UP]++;
527	}
528
529	/* adjust pbuf array. */
530	fes->pbuf.num = n - k;
531	for (i = 0; i != n - k; i++)
532		pkt[i] = pkt[i + k];
533}
534
535static inline void
536netfe_tx_process_tcp(uint32_t lcore, struct netfe_stream *fes)
537{
538	uint32_t i, k, n;
539
540	/* refill with new mbufs. */
541	pkt_buf_fill(lcore, &fes->pbuf, fes->txlen);
542
543	n = fes->pbuf.num;
544	if (n == 0)
545		return;
546
547	/**
548	 * TODO: cannot use function pointers for unequal param num.
549	 */
550	k = tle_tcp_stream_send(fes->s, fes->pbuf.pkt, n);
551
552	NETFE_TRACE("%s(%u): tle_%s_stream_send(%p, %u) returns %u\n",
553		__func__, lcore, proto_name[fes->proto], fes->s, n, k);
554	fes->stat.txp += k;
555	fes->stat.drops += n - k;
556
557	if (k == 0)
558		return;
559
560	/* adjust pbuf array. */
561	fes->pbuf.num = n - k;
562	for (i = k; i != n; i++)
563		fes->pbuf.pkt[i - k] = fes->pbuf.pkt[i];
564}
565
566static inline void
567netfe_lcore_tcp(void)
568{
569	struct netfe_lcore *fe;
570	uint32_t j, n, lcore;
571	struct netfe_stream *fs[MAX_PKT_BURST];
572
573	fe = RTE_PER_LCORE(_fe);
574	if (fe == NULL)
575		return;
576
577	lcore = rte_lcore_id();
578
579	/* look for rx events */
580	n = tle_evq_get(fe->rxeq, (const void **)(uintptr_t)fs, RTE_DIM(fs));
581
582	if (n != 0) {
583		NETFE_TRACE("%s(%u): tle_evq_get(rxevq=%p) returns %u\n",
584			__func__, lcore, fe->rxeq, n);
585		for (j = 0; j != n; j++)
586			netfe_rx_process(lcore, fs[j]);
587	}
588
589	/* look for tx events */
590	n = tle_evq_get(fe->txeq, (const void **)(uintptr_t)fs, RTE_DIM(fs));
591
592	if (n != 0) {
593		NETFE_TRACE("%s(%u): tle_evq_get(txevq=%p) returns %u\n",
594			__func__, lcore, fe->txeq, n);
595		for (j = 0; j != n; j++) {
596			if (fs[j]->op == RXTX)
597				netfe_rxtx_process_tcp(lcore, fs[j]);
598			else if (fs[j]->op == FWD)
599				netfe_fwd_tcp(lcore, fs[j]);
600			else if (fs[j]->op == TXONLY)
601				netfe_tx_process_tcp(lcore, fs[j]);
602		}
603	}
604}
605
606static void
607netfe_lcore_fini_tcp(void)
608{
609	struct netfe_lcore *fe;
610	uint32_t i, snum;
611	struct tle_tcp_stream_addr addr;
612	struct netfe_stream *fes;
613	uint32_t acc, rej, ter;
614
615	fe = RTE_PER_LCORE(_fe);
616	if (fe == NULL)
617		return;
618
619	snum = fe->use.num;
620	for (i = 0; i != snum; i++) {
621		fes = netfe_get_stream(&fe->use);
622		tle_tcp_stream_get_addr(fes->s, &addr);
623		netfe_stream_dump(fes, &addr.local, &addr.remote);
624		netfe_stream_close(fe, fes);
625	}
626
627	acc = fe->tcp_stat.acc;
628	rej = fe->tcp_stat.rej;
629	ter = fe->tcp_stat.ter;
630	RTE_LOG(NOTICE, USER1,
631		"tcp_stats={con_acc=%u,con_rej=%u,con_ter=%u};\n",
632		acc, rej, ter);
633
634	tle_evq_destroy(fe->txeq);
635	tle_evq_destroy(fe->rxeq);
636	tle_evq_destroy(fe->ereq);
637	tle_evq_destroy(fe->syneq);
638	RTE_PER_LCORE(_fe) = NULL;
639	rte_free(fe);
640}
641
642static inline void
643netbe_lcore_tcp(void)
644{
645	uint32_t i;
646	struct netbe_lcore *lc;
647
648	lc = RTE_PER_LCORE(_be);
649	if (lc == NULL)
650		return;
651
652	for (i = 0; i != lc->prtq_num; i++) {
653		netbe_rx(lc, i);
654		tle_tcp_process(lc->ctx, TCP_MAX_PROCESS);
655		netbe_tx(lc, i);
656	}
657}
658
659static int
660lcore_main_tcp(void *arg)
661{
662	int32_t rc;
663	uint32_t lcore;
664	struct lcore_prm *prm;
665
666	prm = arg;
667	lcore = rte_lcore_id();
668
669	RTE_LOG(NOTICE, USER1, "%s(lcore=%u) start\n",
670		__func__, lcore);
671
672	rc = 0;
673
674	/* lcore FE init. */
675	if (prm->fe.max_streams != 0)
676		rc = netfe_lcore_init_tcp(&prm->fe);
677
678	/* lcore FE init. */
679	if (rc == 0 && prm->be.lc != NULL)
680		rc = netbe_lcore_setup(prm->be.lc);
681
682	if (rc != 0)
683		sig_handle(SIGQUIT);
684
685	while (force_quit == 0) {
686		netfe_lcore_tcp_req();
687		netfe_lcore_tcp_rst();
688		netfe_lcore_tcp();
689		netbe_lcore_tcp();
690	}
691
692	RTE_LOG(NOTICE, USER1, "%s(lcore=%u) finish\n",
693		__func__, lcore);
694
695	netfe_lcore_fini_tcp();
696	netbe_lcore_clear();
697
698	return rc;
699}
700
701#endif /* TCP_H_ */
702