lcore.h revision 9fa82a63
1/*
2 * Copyright (c) 2016  Intel Corporation.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 *     http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16#ifndef LCORE_H_
17#define LCORE_H_
18
19#include <rte_random.h>
20
21#include "dpdk_legacy.h"
22
23/*
24 * IPv6 destination lookup callback.
25 */
26static int
27lpm6_dst_lookup(void *data, const struct in6_addr *addr,
28	struct tle_dest *res)
29{
30	int32_t rc;
31	uint8_t idx;
32	struct netbe_lcore *lc;
33	struct tle_dest *dst;
34	uintptr_t p;
35
36	lc = data;
37	p = (uintptr_t)addr->s6_addr;
38
39	rc = rte_lpm6_lookup(lc->lpm6, (uint8_t *)p, &idx);
40	if (rc == 0) {
41		dst = &lc->dst6[idx];
42		rte_memcpy(res, dst, dst->l2_len + dst->l3_len +
43			offsetof(struct tle_dest, hdr));
44	}
45	return rc;
46}
47
48static int
49create_context(struct netbe_lcore *lc, const struct tle_ctx_param *ctx_prm)
50{
51	uint32_t rc = 0, sid;
52	uint64_t frag_cycles;
53	struct tle_ctx_param cprm;
54
55	if (lc->ctx == NULL) {
56		sid = rte_lcore_to_socket_id(lc->id);
57
58		rc = lcore_lpm_init(lc);
59		if (rc != 0)
60			return rc;
61
62		cprm = *ctx_prm;
63		cprm.socket_id = sid;
64		cprm.proto = lc->proto;
65		cprm.lookup4 = lpm4_dst_lookup;
66		cprm.lookup4_data = lc;
67		cprm.lookup6 = lpm6_dst_lookup;
68		cprm.lookup6_data = lc;
69		if (cprm.secret_key.u64[0] == 0 &&
70			cprm.secret_key.u64[1] == 0) {
71			cprm.secret_key.u64[0] = rte_rand();
72			cprm.secret_key.u64[1] = rte_rand();
73		}
74
75		frag_cycles = (rte_get_tsc_hz() + MS_PER_S - 1) /
76						MS_PER_S * FRAG_TTL;
77
78		lc->ftbl = rte_ip_frag_table_create(cprm.max_streams,
79			FRAG_TBL_BUCKET_ENTRIES, cprm.max_streams,
80			frag_cycles, sid);
81
82		RTE_LOG(NOTICE, USER1, "%s(lcore=%u): frag_tbl=%p;\n",
83			__func__, lc->id, lc->ftbl);
84
85		lc->ctx = tle_ctx_create(&cprm);
86
87		RTE_LOG(NOTICE, USER1, "%s(lcore=%u): proto=%s, ctx=%p;\n",
88			__func__, lc->id, proto_name[lc->proto], lc->ctx);
89
90		if (lc->ctx == NULL || lc->ftbl == NULL)
91			rc = ENOMEM;
92	}
93
94	return rc;
95}
96
97/*
98 * BE lcore setup routine.
99 */
100static int
101lcore_init(struct netbe_lcore *lc, const struct tle_ctx_param *ctx_prm,
102	const uint32_t prtqid, const uint16_t *bl_ports, uint32_t nb_bl_ports)
103{
104	int32_t rc = 0;
105	struct tle_dev_param dprm;
106
107	rc = create_context(lc, ctx_prm);
108
109	if (rc == 0 && lc->ctx != NULL) {
110		memset(&dprm, 0, sizeof(dprm));
111		dprm.rx_offload = lc->prtq[prtqid].port.rx_offload;
112		dprm.tx_offload = lc->prtq[prtqid].port.tx_offload;
113		dprm.local_addr4.s_addr = lc->prtq[prtqid].port.ipv4;
114		memcpy(&dprm.local_addr6,  &lc->prtq[prtqid].port.ipv6,
115			sizeof(lc->prtq[prtqid].port.ipv6));
116		dprm.bl4.nb_port = nb_bl_ports;
117		dprm.bl4.port = bl_ports;
118		dprm.bl6.nb_port = nb_bl_ports;
119		dprm.bl6.port = bl_ports;
120
121		lc->prtq[prtqid].dev = tle_add_dev(lc->ctx, &dprm);
122
123		RTE_LOG(NOTICE, USER1,
124			"%s(lcore=%u, port=%u, qid=%u), dev: %p\n",
125			__func__, lc->id, lc->prtq[prtqid].port.id,
126			lc->prtq[prtqid].rxqid, lc->prtq[prtqid].dev);
127
128		if (lc->prtq[prtqid].dev == NULL)
129			rc = -rte_errno;
130
131		if (rc != 0) {
132			RTE_LOG(ERR, USER1,
133				"%s(lcore=%u) failed with error code: %d\n",
134				__func__, lc->id, rc);
135			tle_ctx_destroy(lc->ctx);
136			rte_ip_frag_table_destroy(lc->ftbl);
137			rte_lpm_free(lc->lpm4);
138			rte_lpm6_free(lc->lpm6);
139			rte_free(lc->prtq[prtqid].port.lcore_id);
140			lc->prtq[prtqid].port.nb_lcore = 0;
141			rte_free(lc->prtq);
142			lc->prtq_num = 0;
143			return rc;
144		}
145	}
146
147	return rc;
148}
149
150static uint16_t
151create_blocklist(const struct netbe_port *beprt, uint16_t *bl_ports,
152	uint32_t q)
153{
154	uint32_t i, j, qid, align_nb_q;
155
156	align_nb_q = rte_align32pow2(beprt->nb_lcore);
157	for (i = 0, j = 0; i < (UINT16_MAX + 1); i++) {
158		qid = (i % align_nb_q) % beprt->nb_lcore;
159		if (qid != q)
160			bl_ports[j++] = i;
161	}
162
163	return j;
164}
165
166static int
167netbe_lcore_init(struct netbe_cfg *cfg, const struct tle_ctx_param *ctx_prm)
168{
169	int32_t rc;
170	uint32_t i, j, nb_bl_ports = 0, sz;
171	struct netbe_lcore *lc;
172	static uint16_t *bl_ports;
173
174	/* Create the context and attached queue for each lcore. */
175	rc = 0;
176	sz = sizeof(uint16_t) * UINT16_MAX;
177	bl_ports = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
178	for (i = 0; i < cfg->cpu_num; i++) {
179		lc = &cfg->cpu[i];
180		for (j = 0; j < lc->prtq_num; j++) {
181			memset((uint8_t *)bl_ports, 0, sz);
182			/* create list of blocked ports based on q */
183			nb_bl_ports = create_blocklist(&lc->prtq[j].port,
184				bl_ports, lc->prtq[j].rxqid);
185			RTE_LOG(NOTICE, USER1,
186				"lc=%u, q=%u, nb_bl_ports=%u\n",
187				lc->id, lc->prtq[j].rxqid, nb_bl_ports);
188
189			rc = lcore_init(lc, ctx_prm, j, bl_ports, nb_bl_ports);
190			if (rc != 0) {
191				RTE_LOG(ERR, USER1,
192					"%s: failed with error code: %d\n",
193					__func__, rc);
194				rte_free(bl_ports);
195				return rc;
196			}
197		}
198	}
199	rte_free(bl_ports);
200
201	return 0;
202}
203
204static int
205netfe_lcore_cmp(const void *s1, const void *s2)
206{
207	const struct netfe_stream_prm *p1, *p2;
208
209	p1 = s1;
210	p2 = s2;
211	return p1->lcore - p2->lcore;
212}
213
214static int
215netbe_find6(const struct in6_addr *laddr, uint16_t lport,
216	const struct in6_addr *raddr, uint32_t belc)
217{
218	uint32_t i, j;
219	uint8_t idx;
220	struct netbe_lcore *bc;
221
222	/* we have exactly one BE, use it for all traffic */
223	if (becfg.cpu_num == 1)
224		return 0;
225
226	/* search by provided be_lcore */
227	if (belc != LCORE_ID_ANY) {
228		for (i = 0; i != becfg.cpu_num; i++) {
229			bc = becfg.cpu + i;
230			if (belc == bc->id)
231				return i;
232		}
233		RTE_LOG(NOTICE, USER1, "%s: no stream with belcore=%u\n",
234			__func__, belc);
235		return -ENOENT;
236	}
237
238	/* search by local address */
239	if (memcmp(laddr, &in6addr_any, sizeof(*laddr)) != 0) {
240		for (i = 0; i != becfg.cpu_num; i++) {
241			bc = becfg.cpu + i;
242			/* search by queue for the local port */
243			for (j = 0; j != bc->prtq_num; j++) {
244				if (memcmp(laddr, &bc->prtq[j].port.ipv6,
245						sizeof(*laddr)) == 0) {
246
247					if (lport == 0)
248						return i;
249
250					if (verify_queue_for_port(bc->prtq + j,
251							lport) != 0)
252						return i;
253				}
254			}
255		}
256	}
257
258	/* search by remote address */
259	if (memcmp(raddr, &in6addr_any, sizeof(*raddr)) == 0) {
260		for (i = 0; i != becfg.cpu_num; i++) {
261			bc = becfg.cpu + i;
262			if (rte_lpm6_lookup(bc->lpm6,
263					(uint8_t *)(uintptr_t)raddr->s6_addr,
264					&idx) == 0) {
265
266				if (lport == 0)
267					return i;
268
269				/* search by queue for the local port */
270				for (j = 0; j != bc->prtq_num; j++)
271					if (verify_queue_for_port(bc->prtq + j,
272							lport) != 0)
273						return i;
274			}
275		}
276	}
277
278	return -ENOENT;
279}
280
/*
 * Address-family dispatcher for the BE lookup: routes IPv4 sockaddrs
 * to netbe_find4() and IPv6 ones to netbe_find6().
 * Returns the BE index, or -EINVAL for an unsupported family.
 */
static int
netbe_find(const struct sockaddr_storage *la,
	const struct sockaddr_storage *ra,
	uint32_t belc)
{
	switch (la->ss_family) {
	case AF_INET: {
		const struct sockaddr_in *l4 =
			(const struct sockaddr_in *)la;
		const struct sockaddr_in *r4 =
			(const struct sockaddr_in *)ra;

		return netbe_find4(&l4->sin_addr, ntohs(l4->sin_port),
				&r4->sin_addr, belc);
	}
	case AF_INET6: {
		const struct sockaddr_in6 *l6 =
			(const struct sockaddr_in6 *)la;
		const struct sockaddr_in6 *r6 =
			(const struct sockaddr_in6 *)ra;

		return netbe_find6(&l6->sin6_addr, ntohs(l6->sin6_port),
				&r6->sin6_addr, belc);
	}
	default:
		return -EINVAL;
	}
}
302
303static int
304netfe_sprm_flll_be(struct netfe_sprm *sp, uint32_t line, uint32_t belc)
305{
306	int32_t bidx;
307
308	bidx = netbe_find(&sp->local_addr, &sp->remote_addr, belc);
309
310	if (bidx < 0) {
311		RTE_LOG(ERR, USER1, "%s(line=%u): no BE for that stream\n",
312			__func__, line);
313		return -EINVAL;
314	}
315	sp->bidx = bidx;
316	return 0;
317}
318
319/* start front-end processing. */
320static int
321netfe_lcore_fill(struct lcore_prm prm[RTE_MAX_LCORE],
322	struct netfe_lcore_prm *lprm)
323{
324	uint32_t belc;
325	uint32_t i, j, lc, ln;
326	struct netfe_stream_prm *s;
327
328	/* determine on what BE each stream should be open. */
329	for (i = 0; i != lprm->nb_streams; i++) {
330		s = lprm->stream + i;
331		ln = s->line;
332		belc = s->belcore;
333		if (netfe_sprm_flll_be(&s->sprm, ln, belc) != 0 ||
334				(s->op == FWD &&
335				netfe_sprm_flll_be(&s->fprm, ln, belc) != 0))
336			return -EINVAL;
337	}
338
339	/* group all fe parameters by lcore. */
340
341	qsort(lprm->stream, lprm->nb_streams, sizeof(lprm->stream[0]),
342		netfe_lcore_cmp);
343
344	for (i = 0; i != lprm->nb_streams; i = j) {
345
346		lc = lprm->stream[i].lcore;
347		ln = lprm->stream[i].line;
348
349		if (rte_lcore_is_enabled(lc) == 0) {
350			RTE_LOG(ERR, USER1,
351				"%s(line=%u): lcore %u is not enabled\n",
352				__func__, ln, lc);
353			return -EINVAL;
354		}
355
356		if (rte_get_master_lcore() != lc &&
357				rte_eal_get_lcore_state(lc) == RUNNING) {
358			RTE_LOG(ERR, USER1,
359				"%s(line=%u): lcore %u already in use\n",
360				__func__, ln, lc);
361			return -EINVAL;
362		}
363
364		for (j = i + 1; j != lprm->nb_streams &&
365				lc == lprm->stream[j].lcore;
366				j++)
367			;
368
369		prm[lc].fe.max_streams = lprm->max_streams;
370		prm[lc].fe.nb_streams = j - i;
371		prm[lc].fe.stream = lprm->stream + i;
372	}
373
374	return 0;
375}
376
377#endif /* LCORE_H_ */
378