/* lcore.h revision fbba0a3b */
1/*
2 * Copyright (c) 2016  Intel Corporation.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 *     http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16#ifndef LCORE_H_
17#define LCORE_H_
18
19#include <rte_random.h>
20
21#include "dpdk_legacy.h"
22
23/*
24 * IPv4 destination lookup callback.
25 */
26static int
27lpm4_dst_lookup(void *data, const struct in_addr *addr,
28	struct tle_dest *res)
29{
30	int32_t rc;
31	uint32_t idx;
32	struct netbe_lcore *lc;
33	struct tle_dest *dst;
34
35	lc = data;
36
37	rc = rte_lpm_lookup(lc->lpm4, rte_be_to_cpu_32(addr->s_addr), &idx);
38	if (rc == 0) {
39		dst = &lc->dst4[idx];
40		rte_memcpy(res, dst, dst->l2_len + dst->l3_len +
41			offsetof(struct tle_dest, hdr));
42	}
43	return rc;
44}
45
46static int
47lcore_lpm_init(struct netbe_lcore *lc)
48{
49	int32_t sid;
50	char str[RTE_LPM_NAMESIZE];
51	const struct rte_lpm_config lpm4_cfg = {
52		.max_rules = MAX_RULES,
53		.number_tbl8s = MAX_TBL8,
54	};
55	const struct rte_lpm6_config lpm6_cfg = {
56		.max_rules = MAX_RULES,
57		.number_tbl8s = MAX_TBL8,
58	};
59
60	sid = rte_lcore_to_socket_id(lc->id);
61
62	snprintf(str, sizeof(str), "LPM4%u\n", lc->id);
63	lc->lpm4 = rte_lpm_create(str, sid, &lpm4_cfg);
64	RTE_LOG(NOTICE, USER1, "%s(lcore=%u): lpm4=%p;\n",
65		__func__, lc->id, lc->lpm4);
66	if (lc->lpm4 == NULL)
67		return -ENOMEM;
68
69	snprintf(str, sizeof(str), "LPM6%u\n", lc->id);
70	lc->lpm6 = rte_lpm6_create(str, sid, &lpm6_cfg);
71	RTE_LOG(NOTICE, USER1, "%s(lcore=%u): lpm6=%p;\n",
72		__func__, lc->id, lc->lpm6);
73	if (lc->lpm6 == NULL)
74		return -ENOMEM;
75
76	return 0;
77}
78
79/*
80 * Helper functions, finds BE by given local and remote addresses.
81 */
82static int
83netbe_find4(const struct in_addr *laddr, const uint16_t lport,
84	const struct in_addr *raddr, const uint32_t belc)
85{
86	uint32_t i, j;
87	uint32_t idx;
88	struct netbe_lcore *bc;
89
90	/* we have exactly one BE, use it for all traffic */
91	if (becfg.cpu_num == 1)
92		return 0;
93
94	/* search by provided be_lcore */
95	if (belc != LCORE_ID_ANY) {
96		for (i = 0; i != becfg.cpu_num; i++) {
97			bc = becfg.cpu + i;
98			if (belc == bc->id)
99				return i;
100		}
101		RTE_LOG(NOTICE, USER1, "%s: no stream with be_lcore=%u\n",
102			__func__, belc);
103		return -ENOENT;
104	}
105
106	/* search by local address */
107	if (laddr->s_addr != INADDR_ANY) {
108		for (i = 0; i != becfg.cpu_num; i++) {
109			bc = becfg.cpu + i;
110			/* search by queue for the local port */
111			for (j = 0; j != bc->prtq_num; j++) {
112				if (laddr->s_addr == bc->prtq[j].port.ipv4) {
113
114					if (lport == 0)
115						return i;
116
117					if (verify_queue_for_port(bc->prtq + j,
118							lport) != 0)
119						return i;
120				}
121			}
122		}
123	}
124
125	/* search by remote address */
126	if (raddr->s_addr != INADDR_ANY) {
127		for (i = 0; i != becfg.cpu_num; i++) {
128			bc = becfg.cpu + i;
129			if (rte_lpm_lookup(bc->lpm4,
130					rte_be_to_cpu_32(raddr->s_addr),
131					&idx) == 0) {
132
133				if (lport == 0)
134					return i;
135
136				/* search by queue for the local port */
137				for (j = 0; j != bc->prtq_num; j++)
138					if (verify_queue_for_port(bc->prtq + j,
139							lport) != 0)
140						return i;
141			}
142		}
143	}
144
145	return -ENOENT;
146}
147
148static int
149create_context(struct netbe_lcore *lc, const struct tle_ctx_param *ctx_prm)
150{
151	uint32_t rc = 0, sid;
152	uint64_t frag_cycles;
153	struct tle_ctx_param cprm;
154
155	if (lc->ctx == NULL) {
156		sid = rte_lcore_to_socket_id(lc->id);
157
158		rc = lcore_lpm_init(lc);
159		if (rc != 0)
160			return rc;
161
162		cprm = *ctx_prm;
163		cprm.socket_id = sid;
164		cprm.proto = lc->proto;
165		cprm.lookup4 = lpm4_dst_lookup;
166		cprm.lookup4_data = lc;
167		cprm.lookup6 = lpm6_dst_lookup;
168		cprm.lookup6_data = lc;
169		if (cprm.secret_key.u64[0] == 0 &&
170			cprm.secret_key.u64[1] == 0) {
171			cprm.secret_key.u64[0] = rte_rand();
172			cprm.secret_key.u64[1] = rte_rand();
173		}
174
175		frag_cycles = (rte_get_tsc_hz() + MS_PER_S - 1) /
176						MS_PER_S * FRAG_TTL;
177
178		lc->ftbl = rte_ip_frag_table_create(cprm.max_streams,
179			FRAG_TBL_BUCKET_ENTRIES, cprm.max_streams,
180			frag_cycles, sid);
181
182		RTE_LOG(NOTICE, USER1, "%s(lcore=%u): frag_tbl=%p;\n",
183			__func__, lc->id, lc->ftbl);
184
185		lc->ctx = tle_ctx_create(&cprm);
186
187		RTE_LOG(NOTICE, USER1, "%s(lcore=%u): proto=%s, ctx=%p;\n",
188			__func__, lc->id, proto_name[lc->proto], lc->ctx);
189
190		if (lc->ctx == NULL || lc->ftbl == NULL)
191			rc = ENOMEM;
192	}
193
194	return rc;
195}
196
/*
 * BE lcore setup routine.
 * Creates the lcore TLE context (if not created yet) and registers the
 * port/queue pair prtqid as a TLE device, applying the same port
 * blocklist to both the IPv4 and IPv6 sides.  On device-add failure the
 * lcore's ctx, frag table, LPM tables and prtq array are all torn down.
 * Returns 0 on success, negative errno-style value otherwise.
 */
static int
lcore_init(struct netbe_lcore *lc, const struct tle_ctx_param *ctx_prm,
	const uint32_t prtqid, const uint16_t *bl_ports, uint32_t nb_bl_ports)
{
	int32_t rc = 0;
	struct tle_dev_param dprm;

	rc = create_context(lc, ctx_prm);

	if (rc == 0 && lc->ctx != NULL) {
		memset(&dprm, 0, sizeof(dprm));
		dprm.rx_offload = lc->prtq[prtqid].port.rx_offload;
		dprm.tx_offload = lc->prtq[prtqid].port.tx_offload;
		dprm.local_addr4.s_addr = lc->prtq[prtqid].port.ipv4;
		memcpy(&dprm.local_addr6,  &lc->prtq[prtqid].port.ipv6,
			sizeof(lc->prtq[prtqid].port.ipv6));
		/* same blocklist for the IPv4 and IPv6 device sides */
		dprm.bl4.nb_port = nb_bl_ports;
		dprm.bl4.port = bl_ports;
		dprm.bl6.nb_port = nb_bl_ports;
		dprm.bl6.port = bl_ports;

		lc->prtq[prtqid].dev = tle_add_dev(lc->ctx, &dprm);

		RTE_LOG(NOTICE, USER1,
			"%s(lcore=%u, port=%u, qid=%u), dev: %p\n",
			__func__, lc->id, lc->prtq[prtqid].port.id,
			lc->prtq[prtqid].rxqid, lc->prtq[prtqid].dev);

		if (lc->prtq[prtqid].dev == NULL)
			rc = -rte_errno;

		if (rc != 0) {
			/* undo everything create_context() built for lc */
			RTE_LOG(ERR, USER1,
				"%s(lcore=%u) failed with error code: %d\n",
				__func__, lc->id, rc);
			tle_ctx_destroy(lc->ctx);
			rte_ip_frag_table_destroy(lc->ftbl);
			rte_lpm_free(lc->lpm4);
			rte_lpm6_free(lc->lpm6);
			rte_free(lc->prtq[prtqid].port.lcore_id);
			lc->prtq[prtqid].port.nb_lcore = 0;
			rte_free(lc->prtq);
			lc->prtq_num = 0;
			return rc;
		}
	}

	return rc;
}
249
250static uint16_t
251create_blocklist(const struct netbe_port *beprt, uint16_t *bl_ports,
252	uint32_t q)
253{
254	uint32_t i, j, qid, align_nb_q;
255
256	align_nb_q = rte_align32pow2(beprt->nb_lcore);
257	for (i = 0, j = 0; i < (UINT16_MAX + 1); i++) {
258		qid = (i % align_nb_q) % beprt->nb_lcore;
259		if (qid != q)
260			bl_ports[j++] = i;
261	}
262
263	return j;
264}
265
266static int
267netbe_lcore_init(struct netbe_cfg *cfg, const struct tle_ctx_param *ctx_prm)
268{
269	int32_t rc;
270	uint32_t i, j, nb_bl_ports = 0, sz;
271	struct netbe_lcore *lc;
272	static uint16_t *bl_ports;
273
274	/* Create the context and attached queue for each lcore. */
275	rc = 0;
276	sz = sizeof(uint16_t) * UINT16_MAX;
277	bl_ports = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
278	for (i = 0; i < cfg->cpu_num; i++) {
279		lc = &cfg->cpu[i];
280		for (j = 0; j < lc->prtq_num; j++) {
281			memset((uint8_t *)bl_ports, 0, sz);
282			/* create list of blocked ports based on q */
283			nb_bl_ports = create_blocklist(&lc->prtq[j].port,
284				bl_ports, lc->prtq[j].rxqid);
285			RTE_LOG(NOTICE, USER1,
286				"lc=%u, q=%u, nb_bl_ports=%u\n",
287				lc->id, lc->prtq[j].rxqid, nb_bl_ports);
288
289			rc = lcore_init(lc, ctx_prm, j, bl_ports, nb_bl_ports);
290			if (rc != 0) {
291				RTE_LOG(ERR, USER1,
292					"%s: failed with error code: %d\n",
293					__func__, rc);
294				rte_free(bl_ports);
295				return rc;
296			}
297		}
298	}
299	rte_free(bl_ports);
300
301	return 0;
302}
303
304static int
305netfe_lcore_cmp(const void *s1, const void *s2)
306{
307	const struct netfe_stream_prm *p1, *p2;
308
309	p1 = s1;
310	p2 = s2;
311	return p1->lcore - p2->lcore;
312}
313
/*
 * Dispatch the BE lookup by address family of the local address.
 * Returns the BE index, -ENOENT when no BE matches, or -EINVAL for an
 * unsupported address family.
 */
static int
netbe_find(const struct sockaddr_storage *la,
	const struct sockaddr_storage *ra,
	uint32_t belc)
{
	switch (la->ss_family) {
	case AF_INET: {
		const struct sockaddr_in *l4 =
			(const struct sockaddr_in *)la;
		const struct sockaddr_in *r4 =
			(const struct sockaddr_in *)ra;
		return netbe_find4(&l4->sin_addr, ntohs(l4->sin_port),
				&r4->sin_addr, belc);
	}
	case AF_INET6: {
		const struct sockaddr_in6 *l6 =
			(const struct sockaddr_in6 *)la;
		const struct sockaddr_in6 *r6 =
			(const struct sockaddr_in6 *)ra;
		return netbe_find6(&l6->sin6_addr, ntohs(l6->sin6_port),
				&r6->sin6_addr, belc);
	}
	default:
		return -EINVAL;
	}
}
335
336static int
337netfe_sprm_flll_be(struct netfe_sprm *sp, uint32_t line, uint32_t belc)
338{
339	int32_t bidx;
340
341	bidx = netbe_find(&sp->local_addr, &sp->remote_addr, belc);
342
343	if (bidx < 0) {
344		RTE_LOG(ERR, USER1, "%s(line=%u): no BE for that stream\n",
345			__func__, line);
346		return -EINVAL;
347	}
348	sp->bidx = bidx;
349	return 0;
350}
351
/*
 * start front-end processing.
 * Binds every configured stream to a BE, then groups the streams by FE
 * lcore (via qsort) and fills each lcore's prm[] slot with its run of
 * streams.  Returns 0 on success, -EINVAL on any invalid configuration.
 */
static int
netfe_lcore_fill(struct lcore_prm prm[RTE_MAX_LCORE],
	struct netfe_lcore_prm *lprm)
{
	uint32_t belc;
	uint32_t i, j, lc, ln;
	struct netfe_stream_prm *s;

	/* determine on what BE each stream should be open. */
	for (i = 0; i != lprm->nb_streams; i++) {
		s = lprm->stream + i;
		ln = s->line;
		belc = s->belcore;
		/* FWD streams need a BE for the forwarding leg too */
		if (netfe_sprm_flll_be(&s->sprm, ln, belc) != 0 ||
				(s->op == FWD &&
				netfe_sprm_flll_be(&s->fprm, ln, belc) != 0))
			return -EINVAL;
	}

	/* group all fe parameters by lcore. */

	qsort(lprm->stream, lprm->nb_streams, sizeof(lprm->stream[0]),
		netfe_lcore_cmp);

	/* after sorting, each lcore owns one contiguous run [i, j) */
	for (i = 0; i != lprm->nb_streams; i = j) {

		lc = lprm->stream[i].lcore;
		ln = lprm->stream[i].line;

		if (rte_lcore_is_enabled(lc) == 0) {
			RTE_LOG(ERR, USER1,
				"%s(line=%u): lcore %u is not enabled\n",
				__func__, ln, lc);
			return -EINVAL;
		}

		/* the master lcore may double as an FE lcore */
		if (rte_get_master_lcore() != lc &&
				rte_eal_get_lcore_state(lc) == RUNNING) {
			RTE_LOG(ERR, USER1,
				"%s(line=%u): lcore %u already in use\n",
				__func__, ln, lc);
			return -EINVAL;
		}

		/* advance j to the end of this lcore's run */
		for (j = i + 1; j != lprm->nb_streams &&
				lc == lprm->stream[j].lcore;
				j++)
			;

		prm[lc].fe.max_streams = lprm->max_streams;
		prm[lc].fe.nb_streams = j - i;
		prm[lc].fe.stream = lprm->stream + i;
	}

	return 0;
}
409
410#endif /* LCORE_H_ */
411