lcore.h revision aa97dd1c
/*
 * Copyright (c) 2016  Intel Corporation.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef LCORE_H_
#define LCORE_H_

#include "dpdk_legacy.h"

/*
 * IPv6 destination lookup callback.
 */
static int
lpm6_dst_lookup(void *data, const struct in6_addr *addr,
	struct tle_dest *res)
{
	int32_t rc;
	uint8_t idx;
	struct netbe_lcore *lc;
	struct tle_dest *dst;
	uintptr_t p;

	lc = data;
	p = (uintptr_t)addr->s6_addr;

	rc = rte_lpm6_lookup(lc->lpm6, (uint8_t *)p, &idx);
	if (rc == 0) {
		dst = &lc->dst6[idx];
		rte_memcpy(res, dst, dst->l2_len + dst->l3_len +
			offsetof(struct tle_dest, hdr));
	}
	return rc;
}
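
/*
 * Usage sketch (illustrative only; assumes an initialised struct
 * netbe_lcore *lc with populated lpm6/dst6 tables):
 *
 *	struct in6_addr a;
 *	struct tle_dest d;
 *
 *	inet_pton(AF_INET6, "2001:db8::1", &a);
 *	if (lpm6_dst_lookup(lc, &a, &d) == 0) {
 *		// d now holds the destination metadata plus the used part
 *		// of the prebuilt L2/L3 header for that route.
 *	}
 */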

/*
 * Create the TLE context, LPM tables and fragmentation table
 * for the given BE lcore (done once per lcore).
 */
static int
create_context(struct netbe_lcore *lc, const struct tle_ctx_param *ctx_prm)
{
	int32_t rc = 0;
	uint32_t sid;
	uint64_t frag_cycles;
	struct tle_ctx_param cprm;

	if (lc->ctx == NULL) {
		sid = rte_lcore_to_socket_id(lc->id);

		rc = lcore_lpm_init(lc);
		if (rc != 0)
			return rc;

		cprm = *ctx_prm;
		cprm.socket_id = sid;
		cprm.proto = lc->proto;
		cprm.lookup4 = lpm4_dst_lookup;
		cprm.lookup4_data = lc;
		cprm.lookup6 = lpm6_dst_lookup;
		cprm.lookup6_data = lc;

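		/*
		 * Fragment-table entry TTL expressed in TSC cycles:
		 * ceil(tsc_hz / MS_PER_S) gives cycles per millisecond and
		 * FRAG_TTL is assumed to be a TTL in milliseconds.
		 * Illustrative numbers: with a 2 GHz TSC and FRAG_TTL == 1000
		 * this yields 2000000 * 1000 == 2000000000 cycles,
		 * i.e. one second.
		 */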
		frag_cycles = (rte_get_tsc_hz() + MS_PER_S - 1) /
						MS_PER_S * FRAG_TTL;

		lc->ftbl = rte_ip_frag_table_create(cprm.max_streams,
			FRAG_TBL_BUCKET_ENTRIES, cprm.max_streams,
			frag_cycles, sid);

		RTE_LOG(NOTICE, USER1, "%s(lcore=%u): frag_tbl=%p;\n",
			__func__, lc->id, lc->ftbl);

		lc->ctx = tle_ctx_create(&cprm);

		RTE_LOG(NOTICE, USER1, "%s(lcore=%u): proto=%s, ctx=%p;\n",
			__func__, lc->id, proto_name[lc->proto], lc->ctx);

		if (lc->ctx == NULL || lc->ftbl == NULL)
			rc = ENOMEM;
	}

	return rc;
}

/*
 * BE lcore setup routine.
 */
static int
lcore_init(struct netbe_lcore *lc, const struct tle_ctx_param *ctx_prm,
	const uint32_t prtqid, const uint16_t *bl_ports, uint32_t nb_bl_ports)
{
	int32_t rc = 0;
	struct tle_dev_param dprm;

	rc = create_context(lc, ctx_prm);

	if (rc == 0 && lc->ctx != NULL) {
		memset(&dprm, 0, sizeof(dprm));
		dprm.rx_offload = lc->prtq[prtqid].port.rx_offload;
		dprm.tx_offload = lc->prtq[prtqid].port.tx_offload;
		dprm.local_addr4.s_addr = lc->prtq[prtqid].port.ipv4;
		memcpy(&dprm.local_addr6, &lc->prtq[prtqid].port.ipv6,
			sizeof(lc->prtq[prtqid].port.ipv6));
		dprm.bl4.nb_port = nb_bl_ports;
		dprm.bl4.port = bl_ports;
		dprm.bl6.nb_port = nb_bl_ports;
		dprm.bl6.port = bl_ports;

		lc->prtq[prtqid].dev = tle_add_dev(lc->ctx, &dprm);

		RTE_LOG(NOTICE, USER1,
			"%s(lcore=%u, port=%u, qid=%u), dev: %p\n",
			__func__, lc->id, lc->prtq[prtqid].port.id,
			lc->prtq[prtqid].rxqid, lc->prtq[prtqid].dev);

		if (lc->prtq[prtqid].dev == NULL)
			rc = -rte_errno;

		if (rc != 0) {
			RTE_LOG(ERR, USER1,
				"%s(lcore=%u) failed with error code: %d\n",
				__func__, lc->id, rc);
			tle_ctx_destroy(lc->ctx);
			rte_ip_frag_table_destroy(lc->ftbl);
			rte_lpm_free(lc->lpm4);
			rte_lpm6_free(lc->lpm6);
			rte_free(lc->prtq[prtqid].port.lcore_id);
			lc->prtq[prtqid].port.nb_lcore = 0;
			rte_free(lc->prtq);
			lc->prtq_num = 0;
			return rc;
		}
	}

	return rc;
}

/*
 * Build the list of destination ports that queue q should NOT handle:
 * every port whose computed queue ((port % align_nb_q) % nb_lcore)
 * differs from q is added to bl_ports[]. Returns the number of
 * blocklisted ports.
 */
static uint16_t
create_blocklist(const struct netbe_port *beprt, uint16_t *bl_ports,
	uint32_t q)
{
	uint32_t i, j, qid, align_nb_q;

	align_nb_q = rte_align32pow2(beprt->nb_lcore);
	for (i = 0, j = 0; i < (UINT16_MAX + 1); i++) {
		qid = (i % align_nb_q) % beprt->nb_lcore;
		if (qid != q)
			bl_ports[j++] = i;
	}

	return j;
}
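
/*
 * Worked example (illustrative numbers): with beprt->nb_lcore == 3 the
 * aligned queue count is 4, so port p maps to queue (p % 4) % 3.
 * For q == 1 only ports with p % 4 == 1 (1, 5, 9, ...) stay open;
 * create_blocklist() then returns the remaining 49152 ports in bl_ports[].
 */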

static int
netbe_lcore_init(struct netbe_cfg *cfg, const struct tle_ctx_param *ctx_prm)
{
	int32_t rc;
	uint32_t i, j, nb_bl_ports = 0, sz;
	struct netbe_lcore *lc;
	uint16_t *bl_ports;

	/* Create the context and attached queue for each lcore. */
	rc = 0;
	sz = sizeof(uint16_t) * UINT16_MAX;
	bl_ports = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
	if (bl_ports == NULL) {
		RTE_LOG(ERR, USER1, "%s: failed to allocate %u bytes\n",
			__func__, sz);
		return -ENOMEM;
	}
	for (i = 0; i < cfg->cpu_num; i++) {
		lc = &cfg->cpu[i];
		for (j = 0; j < lc->prtq_num; j++) {
			memset((uint8_t *)bl_ports, 0, sz);
			/* create list of blocked ports based on q */
			nb_bl_ports = create_blocklist(&lc->prtq[j].port,
				bl_ports, lc->prtq[j].rxqid);
			RTE_LOG(NOTICE, USER1,
				"lc=%u, q=%u, nb_bl_ports=%u\n",
				lc->id, lc->prtq[j].rxqid, nb_bl_ports);

			rc = lcore_init(lc, ctx_prm, j, bl_ports, nb_bl_ports);
			if (rc != 0) {
				RTE_LOG(ERR, USER1,
					"%s: failed with error code: %d\n",
					__func__, rc);
				rte_free(bl_ports);
				return rc;
			}
		}
	}
	rte_free(bl_ports);

	return 0;
}

/*
 * qsort() comparator: order FE stream parameters by front-end lcore id.
 */
static int
netfe_lcore_cmp(const void *s1, const void *s2)
{
	const struct netfe_stream_prm *p1, *p2;

	p1 = s1;
	p2 = s2;
	return p1->lcore - p2->lcore;
}

/*
 * Find the index of the BE lcore that should serve a stream with the
 * given local/remote IPv6 addresses (or the explicitly requested BE lcore).
 */
static int
netbe_find6(const struct in6_addr *laddr, uint16_t lport,
	const struct in6_addr *raddr, uint32_t belc)
{
	uint32_t i, j;
	uint8_t idx;
	struct netbe_lcore *bc;

	/* we have exactly one BE, use it for all traffic */
	if (becfg.cpu_num == 1)
		return 0;

	/* search by provided be_lcore */
	if (belc != LCORE_ID_ANY) {
		for (i = 0; i != becfg.cpu_num; i++) {
			bc = becfg.cpu + i;
			if (belc == bc->id)
				return i;
		}
		RTE_LOG(NOTICE, USER1, "%s: no BE with lcore=%u\n",
			__func__, belc);
		return -ENOENT;
	}

	/* search by local address */
	if (memcmp(laddr, &in6addr_any, sizeof(*laddr)) != 0) {
		for (i = 0; i != becfg.cpu_num; i++) {
			bc = becfg.cpu + i;
			/* search by queue for the local port */
			for (j = 0; j != bc->prtq_num; j++) {
				if (memcmp(laddr, &bc->prtq[j].port.ipv6,
						sizeof(*laddr)) == 0) {

					if (lport == 0)
						return i;

					if (verify_queue_for_port(bc->prtq + j,
							lport) != 0)
						return i;
				}
			}
		}
	}

	/* search by remote address */
	if (memcmp(raddr, &in6addr_any, sizeof(*raddr)) != 0) {
		for (i = 0; i != becfg.cpu_num; i++) {
			bc = becfg.cpu + i;
			if (rte_lpm6_lookup(bc->lpm6,
					(uint8_t *)(uintptr_t)raddr->s6_addr,
					&idx) == 0) {

				if (lport == 0)
					return i;

				/* search by queue for the local port */
				for (j = 0; j != bc->prtq_num; j++)
					if (verify_queue_for_port(bc->prtq + j,
							lport) != 0)
						return i;
			}
		}
	}

	return -ENOENT;
}

static int
netbe_find(const struct sockaddr_storage *la,
	const struct sockaddr_storage *ra,
	uint32_t belc)
{
	const struct sockaddr_in *l4, *r4;
	const struct sockaddr_in6 *l6, *r6;

	if (la->ss_family == AF_INET) {
		l4 = (const struct sockaddr_in *)la;
		r4 = (const struct sockaddr_in *)ra;
		return netbe_find4(&l4->sin_addr, ntohs(l4->sin_port),
				&r4->sin_addr, belc);
	} else if (la->ss_family == AF_INET6) {
		l6 = (const struct sockaddr_in6 *)la;
		r6 = (const struct sockaddr_in6 *)ra;
		return netbe_find6(&l6->sin6_addr, ntohs(l6->sin6_port),
				&r6->sin6_addr, belc);
	}
	return -EINVAL;
}
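
/*
 * Usage sketch (illustrative only): pick the BE index for a stream bound
 * to local 192.0.2.1:8080 with a wildcard remote address and no explicit
 * BE lcore.
 *
 *	struct sockaddr_storage la, ra;
 *	struct sockaddr_in *l4 = (struct sockaddr_in *)&la;
 *	int bidx;
 *
 *	memset(&la, 0, sizeof(la));
 *	memset(&ra, 0, sizeof(ra));
 *	la.ss_family = AF_INET;
 *	ra.ss_family = AF_INET;
 *	l4->sin_addr.s_addr = inet_addr("192.0.2.1");
 *	l4->sin_port = htons(8080);
 *	bidx = netbe_find(&la, &ra, LCORE_ID_ANY);
 *	// bidx is an index into becfg.cpu[], or negative on error.
 */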

static int
netfe_sprm_flll_be(struct netfe_sprm *sp, uint32_t line, uint32_t belc)
{
	int32_t bidx;

	bidx = netbe_find(&sp->local_addr, &sp->remote_addr, belc);

	if (bidx < 0) {
		RTE_LOG(ERR, USER1, "%s(line=%u): no BE for that stream\n",
			__func__, line);
		return -EINVAL;
	}
	sp->bidx = bidx;
	return 0;
}

/*
 * Fill per-lcore FE parameters: resolve the BE for each stream,
 * then group the streams by their front-end lcore.
 */
static int
netfe_lcore_fill(struct lcore_prm prm[RTE_MAX_LCORE],
	struct netfe_lcore_prm *lprm)
{
	uint32_t belc;
	uint32_t i, j, lc, ln;
	struct netfe_stream_prm *s;

	/* determine on which BE each stream should be opened. */
	for (i = 0; i != lprm->nb_streams; i++) {
		s = lprm->stream + i;
		ln = s->line;
		belc = s->belcore;
		if (netfe_sprm_flll_be(&s->sprm, ln, belc) != 0 ||
				(s->op == FWD &&
				netfe_sprm_flll_be(&s->fprm, ln, belc) != 0))
			return -EINVAL;
	}

	/* group all FE parameters by lcore. */

	qsort(lprm->stream, lprm->nb_streams, sizeof(lprm->stream[0]),
		netfe_lcore_cmp);

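	/*
	 * After the sort all streams that belong to the same FE lcore are
	 * contiguous; each pass of the loop below claims one such group
	 * [i, j) and records it in that lcore's parameters.
	 */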
	for (i = 0; i != lprm->nb_streams; i = j) {

		lc = lprm->stream[i].lcore;
		ln = lprm->stream[i].line;

		if (rte_lcore_is_enabled(lc) == 0) {
			RTE_LOG(ERR, USER1,
				"%s(line=%u): lcore %u is not enabled\n",
				__func__, ln, lc);
			return -EINVAL;
		}

		if (rte_get_master_lcore() != lc &&
				rte_eal_get_lcore_state(lc) == RUNNING) {
			RTE_LOG(ERR, USER1,
				"%s(line=%u): lcore %u already in use\n",
				__func__, ln, lc);
			return -EINVAL;
		}

		for (j = i + 1; j != lprm->nb_streams &&
				lc == lprm->stream[j].lcore;
				j++)
			;

		prm[lc].fe.max_streams = lprm->max_streams;
		prm[lc].fe.nb_streams = j - i;
		prm[lc].fe.stream = lprm->stream + i;
	}

	return 0;
}
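
/*
 * Consumption sketch (illustrative only; assumes a populated struct
 * netfe_lcore_prm feprm and a per-lcore entry point lcore_main() taking a
 * struct lcore_prm *, neither of which is defined in this header):
 *
 *	struct lcore_prm prm[RTE_MAX_LCORE];
 *	uint32_t lc;
 *
 *	memset(prm, 0, sizeof(prm));
 *	if (netfe_lcore_fill(prm, &feprm) == 0)
 *		RTE_LCORE_FOREACH_SLAVE(lc)
 *			rte_eal_remote_launch(lcore_main, &prm[lc], lc);
 */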

#endif /* LCORE_H_ */