/*
 * Copyright (c) 2016  Intel Corporation.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef TCP_H_
#define TCP_H_

#define	TCP_MAX_PROCESS	0x20

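/*
 * helper function: resets the stream state, drops any buffered mbufs
 * and returns the stream to the per-lcore free list.
 */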
static inline void
netfe_stream_term_tcp(struct netfe_lcore *fe, struct netfe_stream *fes)
{
	fes->s = NULL;
	fes->fwds = NULL;
	fes->posterr = 0;
	memset(&fes->stat, 0, sizeof(fes->stat));
	pkt_buf_empty(&fes->pbuf);
	netfe_put_stream(fe, &fe->free, fes);
}

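/*
 * helper function: closes the underlying TCP stream and releases
 * the front-end stream.
 */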
static inline void
netfe_stream_close_tcp(struct netfe_lcore *fe, struct netfe_stream *fes)
{
	tle_tcp_stream_close(fes->s);
	netfe_stream_term_tcp(fe, fes);
}

/*
 * helper function: opens an IPv4 or IPv6 TCP stream for the selected port.
 */
static struct netfe_stream *
netfe_stream_open_tcp(struct netfe_lcore *fe, struct netfe_sprm *sprm,
	uint32_t lcore, uint16_t op, uint32_t bidx, uint8_t server_mode)
{
	int32_t rc;
	struct netfe_stream *fes;
	struct sockaddr_in *l4;
	struct sockaddr_in6 *l6;
	uint16_t errport;
	struct tle_tcp_stream_param tprm;

	fes = netfe_get_stream(&fe->free);
	if (fes == NULL) {
		rte_errno = ENOBUFS;
		return NULL;
	}

	if (server_mode != 0) {
		tle_event_free(fes->rxev);
		fes->rxev = tle_event_alloc(fe->syneq, fes);
	}

	if (fes->rxev == NULL) {
		netfe_stream_close_tcp(fe, fes);
		rte_errno = ENOMEM;
		return NULL;
	}

	/* activate rx, tx and err events for the stream */
	if (op == TXONLY || op == FWD) {
		tle_event_active(fes->txev, TLE_SEV_DOWN);
		fes->stat.txev[TLE_SEV_DOWN]++;
	}

	if (op != TXONLY || server_mode != 0) {
		tle_event_active(fes->rxev, TLE_SEV_DOWN);
		fes->stat.rxev[TLE_SEV_DOWN]++;
	}
	tle_event_active(fes->erev, TLE_SEV_DOWN);
	fes->stat.erev[TLE_SEV_DOWN]++;

	memset(&tprm, 0, sizeof(tprm));
	tprm.addr.local = sprm->local_addr;
	tprm.addr.remote = sprm->remote_addr;
	tprm.cfg.err_ev = fes->erev;
	tprm.cfg.recv_ev = fes->rxev;
	if (op != FWD)
		tprm.cfg.send_ev = fes->txev;

	fes->s = tle_tcp_stream_open(becfg.cpu[bidx].ctx, &tprm);

	if (fes->s == NULL) {
		rc = rte_errno;
		netfe_stream_close_tcp(fe, fes);
		rte_errno = rc;

		if (sprm->local_addr.ss_family == AF_INET) {
			l4 = (struct sockaddr_in *) &sprm->local_addr;
			errport = ntohs(l4->sin_port);
		} else {
			l6 = (struct sockaddr_in6 *) &sprm->local_addr;
			errport = ntohs(l6->sin6_port);
		}

		RTE_LOG(ERR, USER1, "stream open failed for port %u with error "
			"code=%u, bidx=%u, lc=%u\n",
			errport, rc, bidx, becfg.cpu[bidx].id);
		return NULL;
	}

	RTE_LOG(NOTICE, USER1,
		"%s(%u)={s=%p, op=%hu, proto=%s, rxev=%p, txev=%p}, belc=%u\n",
		__func__, lcore, fes->s, op, proto_name[becfg.proto],
		fes->rxev, fes->txev, becfg.cpu[bidx].id);

	fes->op = op;
	fes->proto = becfg.proto;
	fes->family = sprm->local_addr.ss_family;
	fes->laddr = sprm->local_addr;
	netfe_put_stream(fe, &fe->use, fes);

	return fes;
}

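/*
 * per-lcore front-end init: allocates the netfe_lcore structure and
 * stream array, creates the syn/err/rx/tx event queues, puts all streams
 * on the free list, then opens the configured streams and starts them
 * listening (server mode) or connecting (client mode).
 */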
static int
netfe_lcore_init_tcp(const struct netfe_lcore_prm *prm)
{
	size_t sz;
	int32_t rc;
	uint32_t i, lcore, snum;
	struct netfe_lcore *fe;
	struct tle_evq_param eprm;
	struct netfe_stream *fes;
	struct netfe_sprm *sprm;

	lcore = rte_lcore_id();

	snum = prm->max_streams;
	RTE_LOG(NOTICE, USER1, "%s(lcore=%u, nb_streams=%u, max_streams=%u)\n",
		__func__, lcore, prm->nb_streams, snum);

	memset(&eprm, 0, sizeof(eprm));
	eprm.socket_id = rte_lcore_to_socket_id(lcore);
	eprm.max_events = snum;

	sz = sizeof(*fe) + snum * sizeof(struct netfe_stream);
	fe = rte_zmalloc_socket(NULL, sz, RTE_CACHE_LINE_SIZE,
		rte_lcore_to_socket_id(lcore));

	if (fe == NULL) {
		RTE_LOG(ERR, USER1, "%s:%d failed to allocate %zu bytes\n",
			__func__, __LINE__, sz);
		return -ENOMEM;
	}

	RTE_PER_LCORE(_fe) = fe;

	fe->snum = snum;
	/* initialize the stream pool */
	LIST_INIT(&fe->free.head);
	LIST_INIT(&fe->use.head);

	/* allocate the event queues */
	fe->syneq = tle_evq_create(&eprm);
	fe->ereq = tle_evq_create(&eprm);
	fe->rxeq = tle_evq_create(&eprm);
	fe->txeq = tle_evq_create(&eprm);

	RTE_LOG(INFO, USER1, "%s(%u) synevq=%p, erevq=%p, rxevq=%p, txevq=%p\n",
		__func__, lcore, fe->syneq, fe->ereq, fe->rxeq, fe->txeq);
	if (fe->syneq == NULL || fe->ereq == NULL || fe->rxeq == NULL ||
		fe->txeq == NULL)
		return -ENOMEM;

	fes = (struct netfe_stream *)(fe + 1);
	for (i = 0; i != snum; i++) {
		fes[i].rxev = tle_event_alloc(fe->rxeq, fes + i);
		fes[i].txev = tle_event_alloc(fe->txeq, fes + i);
		fes[i].erev = tle_event_alloc(fe->ereq, fes + i);
		netfe_put_stream(fe, &fe->free, fes + i);
	}

	/* initialize rc so the function returns success if no streams
	 * are configured for this lcore. */
	rc = 0;

	/* open all requested streams. */
	for (i = 0; i != prm->nb_streams; i++) {
		sprm = &prm->stream[i].sprm;
		fes = netfe_stream_open_tcp(fe, sprm, lcore, prm->stream[i].op,
			sprm->bidx, becfg.server);
		if (fes == NULL) {
			rc = -rte_errno;
			break;
		}

		netfe_stream_dump(fes, &sprm->local_addr, &sprm->remote_addr);

		if (prm->stream[i].op == FWD) {
			fes->fwdprm = prm->stream[i].fprm;
		} else if (prm->stream[i].op == TXONLY) {
			fes->txlen = prm->stream[i].txlen;
			fes->raddr = prm->stream[i].sprm.remote_addr;
		}

		if (becfg.server == 1) {
			rc = tle_tcp_stream_listen(fes->s);
			RTE_LOG(INFO, USER1,
				"%s(%u) tle_tcp_stream_listen(stream=%p) "
				"returns %d\n",
				__func__, lcore, fes->s, rc);
			if (rc != 0)
				break;
		} else {
			rc = tle_tcp_stream_connect(fes->s,
				(const struct sockaddr *)&sprm->remote_addr);
			RTE_LOG(INFO, USER1,
				"%s(%u) tle_tcp_stream_connect(stream=%p) "
				"returns %d\n",
				__func__, lcore, fes->s, rc);
			if (rc != 0)
				break;
		}
	}

	return rc;
}

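/*
 * helper function: opens and connects the second (forwarding) stream
 * for an accepted connection; returns NULL on failure.
 */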
static inline struct netfe_stream *
netfe_create_fwd_stream(struct netfe_lcore *fe, struct netfe_stream *fes,
	uint32_t lcore, uint32_t bidx)
{
	uint32_t rc;
	struct netfe_stream *fws;

	fws = netfe_stream_open_tcp(fe, &fes->fwdprm, lcore, FWD, bidx, 0);
	if (fws != NULL) {
		rc = tle_tcp_stream_connect(fws->s,
			(const struct sockaddr *)&fes->fwdprm.remote_addr);
		NETFE_TRACE("%s(lc=%u, fes=%p): tle_tcp_stream_connect() "
			"returns %d;\n",
			__func__, rte_lcore_id(), fes, rc);

		if (rc != 0) {
			netfe_stream_term_tcp(fe, fws);
			fws = NULL;
		}
	}

	if (fws == NULL)
		RTE_LOG(ERR, USER1, "%s(lc=%u fes=%p) failed to open "
			"forwarding stream;\n",
			__func__, rte_lcore_id(), fes);

	return fws;
}

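/*
 * forwards mbufs buffered on the stream to its peer (fwds) stream;
 * frees the packets if no peer is attached, then compacts the buffer
 * and re-arms rx/tx events as needed.
 */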
static inline int
netfe_fwd_tcp(uint32_t lcore, struct netfe_stream *fes)
{
	uint32_t i, k, n;
	struct rte_mbuf **pkt;
	struct netfe_stream *fed;

	RTE_SET_USED(lcore);

	n = fes->pbuf.num;
	pkt = fes->pbuf.pkt;

	if (n == 0)
		return 0;

	fed = fes->fwds;

	if (fed != NULL) {

		k = tle_tcp_stream_send(fed->s, pkt, n);

		NETFE_TRACE("%s(%u): tle_%s_stream_send(%p, %u) "
				"returns %u\n",
				__func__, lcore, proto_name[fes->proto],
				fed->s, n, k);

		fed->stat.txp += k;
		fed->stat.drops += n - k;
		fes->stat.fwp += k;

	} else {
		NETFE_TRACE("%s(%u, %p): no fwd stream for %u pkts;\n",
			__func__, lcore, fes->s, n);
		for (k = 0; k != n; k++) {
			NETFE_TRACE("%s(%u, %p): free(%p);\n",
			__func__, lcore, fes->s, pkt[k]);
			rte_pktmbuf_free(pkt[k]);
		}
		fes->stat.drops += n;
	}

	/* copy unforwarded mbufs. */
	for (i = 0; i != n - k; i++)
		pkt[i] = pkt[i + k];

	fes->pbuf.num = i;

	if (i != 0) {
		tle_event_raise(fes->txev);
		fes->stat.txev[TLE_SEV_UP]++;
	}

	if (n == RTE_DIM(fes->pbuf.pkt)) {
		tle_event_active(fes->rxev, TLE_SEV_UP);
		fes->stat.rxev[TLE_SEV_UP]++;
	}

	return (fed == NULL) ? 0 : k;
}

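/*
 * accepts a burst of pending connections on a listening stream,
 * binds each accepted stream to a free front-end stream, updates its
 * event configuration and, in FWD mode, opens the peer forwarding stream.
 * Connections that cannot be serviced are closed straight away.
 */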
static inline void
netfe_new_conn_tcp(struct netfe_lcore *fe, uint32_t lcore,
	struct netfe_stream *fes)
{
	uint32_t i, k, n;
	struct netfe_stream *ts;
	struct tle_stream *rs[MAX_PKT_BURST];
	struct netfe_stream *fs[MAX_PKT_BURST];
	struct tle_tcp_stream_cfg prm[MAX_PKT_BURST];

	/* check if any syn requests are waiting */
	n = tle_tcp_stream_accept(fes->s, rs, RTE_DIM(rs));
	if (n == 0)
		return;

	NETFE_TRACE("%s(%u): tle_tcp_stream_accept(%p, %u) returns %u\n",
		__func__, lcore, fes->s, MAX_PKT_BURST, n);

	/* get n free streams */
	k = netfe_get_streams(&fe->free, fs, n);
	if (n != k)
		RTE_LOG(ERR, USER1,
			"%s(lc=%u): not enough FE resources to handle %u new "
			"TCP streams;\n",
			__func__, lcore, n - k);

	/* fill accept params to accept k connection requests */
	for (i = 0; i != k; i++) {

		ts = fs[i];
		ts->s = rs[i];
		ts->op = fes->op;
		ts->proto = fes->proto;
		ts->family = fes->family;
		ts->txlen = fes->txlen;

		tle_event_active(ts->erev, TLE_SEV_DOWN);
		if (fes->op == TXONLY || fes->op == FWD) {
			tle_event_active(ts->txev, TLE_SEV_UP);
			ts->stat.txev[TLE_SEV_UP]++;
		}
		if (fes->op != TXONLY) {
			tle_event_active(ts->rxev, TLE_SEV_DOWN);
			ts->stat.rxev[TLE_SEV_DOWN]++;
		}

		netfe_put_stream(fe, &fe->use, ts);

		memset(&prm[i], 0, sizeof(prm[i]));
		prm[i].recv_ev = ts->rxev;
		prm[i].send_ev = ts->txev;
		prm[i].err_ev = ts->erev;
	}

	tle_tcp_stream_update_cfg(rs, prm, k);

	tle_tcp_stream_close_bulk(rs + k, n - k);

	/* for the forwarding mode, open the second one */
	if (fes->op == FWD) {
		for (i = 0; i != k; i++) {

			ts = fs[i];

			ts->fwds = netfe_create_fwd_stream(fe, fes, lcore,
				fes->fwdprm.bidx);
			if (ts->fwds != NULL)
				ts->fwds->fwds = ts;
		}
	}

	fe->tcp_stat.acc += k;
	fe->tcp_stat.rej += n - k;
}

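/*
 * drains the SYN event queue and processes new connection requests
 * on each signalled listening stream.
 */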
static inline void
netfe_lcore_tcp_req(void)
{
	struct netfe_lcore *fe;
	uint32_t j, n, lcore;
	struct netfe_stream *fs[MAX_PKT_BURST];

	fe = RTE_PER_LCORE(_fe);
	if (fe == NULL)
		return;

	/* look for syn events */
	n = tle_evq_get(fe->syneq, (const void **)(uintptr_t)fs, RTE_DIM(fs));
	if (n == 0)
		return;

	lcore = rte_lcore_id();

	NETFE_TRACE("%s(%u): tle_evq_get(synevq=%p) returns %u\n",
		__func__, lcore, fe->syneq, n);

	for (j = 0; j != n; j++)
		netfe_new_conn_tcp(fe, lcore, fs[j]);
}

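/*
 * drains the error event queue: streams that still have pending rx/tx
 * activity are postponed (posterr is set), the rest have their events
 * idled, are closed in bulk and terminated; for forwarding pairs the
 * peer stream is signalled to terminate as well.
 */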
static inline void
netfe_lcore_tcp_rst(void)
{
	struct netfe_lcore *fe;
	struct netfe_stream *fwds;
	uint32_t j, k, n;
	struct tle_stream *s[MAX_PKT_BURST];
	struct netfe_stream *fs[MAX_PKT_BURST];
	struct tle_event *rv[MAX_PKT_BURST];
	struct tle_event *tv[MAX_PKT_BURST];
	struct tle_event *ev[MAX_PKT_BURST];

	fe = RTE_PER_LCORE(_fe);
	if (fe == NULL)
		return;

	/* look for err events */
	n = tle_evq_get(fe->ereq, (const void **)(uintptr_t)fs, RTE_DIM(fs));
	if (n == 0)
		return;

	NETFE_TRACE("%s(%u): tle_evq_get(errevq=%p) returns %u\n",
		__func__, rte_lcore_id(), fe->ereq, n);

	k = 0;
	for (j = 0; j != n; j++) {
		if (verbose > VERBOSE_NONE) {
			struct tle_tcp_stream_addr addr;
			tle_tcp_stream_get_addr(fs[j]->s, &addr);
			netfe_stream_dump(fs[j], &addr.local, &addr.remote);
		}

		/* check whether we still have something to send/recv */
		if (fs[j]->posterr == 0 &&
				(tle_event_state(fs[j]->rxev) == TLE_SEV_UP ||
				tle_event_state(fs[j]->txev) == TLE_SEV_UP)) {
			fs[j]->posterr++;
		} else {
			s[k] = fs[j]->s;
			rv[k] = fs[j]->rxev;
			tv[k] = fs[j]->txev;
			ev[k] = fs[j]->erev;
			fs[k] = fs[j];
			k++;
		}
	}

	if (k == 0)
		return;

	tle_evq_idle(fe->rxeq, rv, k);
	tle_evq_idle(fe->txeq, tv, k);
	tle_evq_idle(fe->ereq, ev, k);

	tle_tcp_stream_close_bulk(s, k);

	for (j = 0; j != k; j++) {

		/* if forwarding mode, signal peer stream to terminate too. */
		fwds = fs[j]->fwds;
		if (fwds != NULL && fwds->s != NULL) {

			fwds->fwds = NULL;
			tle_event_raise(fwds->erev);
			fs[j]->fwds = NULL;
		}

		/* now terminate the stream receiving rst event */
		netfe_rem_stream(&fe->use, fs[j]);
		netfe_stream_term_tcp(fe, fs[j]);
		fe->tcp_stat.ter++;
	}
}

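/*
 * echo (RXTX) mode: sends the buffered mbufs back on the same stream,
 * re-arms the rx event if the buffer was full and compacts whatever
 * could not be sent.
 */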
static inline int
netfe_rxtx_process_tcp(__rte_unused uint32_t lcore, struct netfe_stream *fes)
{
	uint32_t i, k, n;
	struct rte_mbuf **pkt;

	n = fes->pbuf.num;
	pkt = fes->pbuf.pkt;

	/* there is nothing to send. */
	if (n == 0) {
		tle_event_idle(fes->txev);
		fes->stat.txev[TLE_SEV_IDLE]++;
		return 0;
	}

	k = tle_tcp_stream_send(fes->s, pkt, n);

	NETFE_TRACE("%s(%u): tle_%s_stream_send(%p, %u) returns %u\n",
		__func__, lcore, proto_name[fes->proto],
		fes->s, n, k);
	fes->stat.txp += k;
	fes->stat.drops += n - k;

	/* not able to send anything. */
	if (k == 0)
		return 0;

	if (n == RTE_DIM(fes->pbuf.pkt)) {
		/* mark stream as readable */
		tle_event_active(fes->rxev, TLE_SEV_UP);
		fes->stat.rxev[TLE_SEV_UP]++;
	}

	/* adjust pbuf array. */
	fes->pbuf.num = n - k;
	for (i = 0; i != n - k; i++)
		pkt[i] = pkt[i + k];

	return k;
}

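/*
 * TXONLY mode: refills the stream buffer with generated mbufs (unless
 * an error is pending) and sends them, compacting whatever could not
 * be sent.
 */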
static inline int
netfe_tx_process_tcp(uint32_t lcore, struct netfe_stream *fes)
{
	uint32_t i, k, n;

	/* refill with new mbufs. */
	if (fes->posterr == 0)
		pkt_buf_fill(lcore, &fes->pbuf, fes->txlen);

	n = fes->pbuf.num;
	if (n == 0)
		return 0;

	/**
	 * TODO: cannot use function pointers for unequal param num.
	 */
	k = tle_tcp_stream_send(fes->s, fes->pbuf.pkt, n);

	NETFE_TRACE("%s(%u): tle_%s_stream_send(%p, %u) returns %u\n",
		__func__, lcore, proto_name[fes->proto], fes->s, n, k);
	fes->stat.txp += k;
	fes->stat.drops += n - k;

	if (k == 0)
		return 0;

	/* adjust pbuf array. */
	fes->pbuf.num = n - k;
	for (i = k; i != n; i++)
		fes->pbuf.pkt[i - k] = fes->pbuf.pkt[i];

	return k;
}

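/*
 * one front-end loop iteration: handles rx events, then tx events,
 * dispatching each stream according to its op (RXTX/FWD/TXONLY) and
 * raising the error event once a stream with a pending error has
 * nothing left to process.
 */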
static inline void
netfe_lcore_tcp(void)
{
	int32_t rc;
	uint32_t j, n, lcore;
	struct netfe_lcore *fe;
	struct netfe_stream *fs[MAX_PKT_BURST];

	fe = RTE_PER_LCORE(_fe);
	if (fe == NULL)
		return;

	lcore = rte_lcore_id();

	/* look for rx events */
	n = tle_evq_get(fe->rxeq, (const void **)(uintptr_t)fs, RTE_DIM(fs));

	if (n != 0) {

		NETFE_TRACE("%s(%u): tle_evq_get(rxevq=%p) returns %u\n",
			__func__, lcore, fe->rxeq, n);

		for (j = 0; j != n; j++) {

			rc = netfe_rx_process(lcore, fs[j]);

			/* we are ok to close the stream */
			if (rc == 0 && fs[j]->posterr != 0)
				tle_event_raise(fs[j]->erev);
		}
	}

	/* look for tx events */
	n = tle_evq_get(fe->txeq, (const void **)(uintptr_t)fs, RTE_DIM(fs));

	if (n != 0) {

		NETFE_TRACE("%s(%u): tle_evq_get(txevq=%p) returns %u\n",
			__func__, lcore, fe->txeq, n);

		for (j = 0; j != n; j++) {

			rc = 0;

			if (fs[j]->op == RXTX)
				rc = netfe_rxtx_process_tcp(lcore, fs[j]);
			else if (fs[j]->op == FWD)
				rc = netfe_fwd_tcp(lcore, fs[j]);
			else if (fs[j]->op == TXONLY)
				rc = netfe_tx_process_tcp(lcore, fs[j]);

			/* we are ok to close the stream */
			if (rc == 0 && fs[j]->posterr != 0)
				tle_event_raise(fs[j]->erev);
		}
	}
}

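/*
 * per-lcore front-end cleanup: closes all in-use streams, reports the
 * accumulated TCP connection statistics, destroys the event queues and
 * frees the per-lcore front-end structure.
 */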
static void
netfe_lcore_fini_tcp(void)
{
	struct netfe_lcore *fe;
	uint32_t i, snum;
	struct tle_tcp_stream_addr addr;
	struct netfe_stream *fes;
	uint32_t acc, rej, ter;

	fe = RTE_PER_LCORE(_fe);
	if (fe == NULL)
		return;

	snum = fe->use.num;
	for (i = 0; i != snum; i++) {
		fes = netfe_get_stream(&fe->use);
		tle_tcp_stream_get_addr(fes->s, &addr);
		netfe_stream_dump(fes, &addr.local, &addr.remote);
		netfe_stream_close(fe, fes);
	}

	acc = fe->tcp_stat.acc;
	rej = fe->tcp_stat.rej;
	ter = fe->tcp_stat.ter;
	RTE_LOG(NOTICE, USER1,
		"tcp_stats={con_acc=%u,con_rej=%u,con_ter=%u};\n",
		acc, rej, ter);

	tle_evq_destroy(fe->txeq);
	tle_evq_destroy(fe->rxeq);
	tle_evq_destroy(fe->ereq);
	tle_evq_destroy(fe->syneq);
	RTE_PER_LCORE(_fe) = NULL;
	rte_free(fe);
}

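/*
 * one back-end loop iteration: for every port/queue pair do rx,
 * run TLDK TCP protocol processing, then tx.
 */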
static inline void
netbe_lcore_tcp(void)
{
	uint32_t i;
	struct netbe_lcore *lc;

	lc = RTE_PER_LCORE(_be);
	if (lc == NULL)
		return;

	for (i = 0; i != lc->prtq_num; i++) {
		netbe_rx(lc, i);
		tle_tcp_process(lc->ctx, TCP_MAX_PROCESS);
		netbe_tx(lc, i);
	}
}

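/*
 * lcore entry point for TCP mode: initializes the front-end and
 * back-end for this lcore, loops until termination is requested,
 * then releases the per-lcore resources.
 */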
static int
lcore_main_tcp(void *arg)
{
	int32_t rc;
	uint32_t lcore;
	struct lcore_prm *prm;

	prm = arg;
	lcore = rte_lcore_id();

	RTE_LOG(NOTICE, USER1, "%s(lcore=%u) start\n",
		__func__, lcore);

	rc = 0;

	/* lcore FE init. */
	if (prm->fe.max_streams != 0)
		rc = netfe_lcore_init_tcp(&prm->fe);

	/* lcore BE init. */
	if (rc == 0 && prm->be.lc != NULL)
		rc = netbe_lcore_setup(prm->be.lc);

	if (rc != 0)
		sig_handle(SIGQUIT);

	while (force_quit == 0) {
		netfe_lcore_tcp_req();
		netfe_lcore_tcp_rst();
		netfe_lcore_tcp();
		netbe_lcore_tcp();
	}

	RTE_LOG(NOTICE, USER1, "%s(lcore=%u) finish\n",
		__func__, lcore);

	netfe_lcore_fini_tcp();
	netbe_lcore_clear();

	return rc;
}

#endif /* TCP_H_ */