enic_clsf.c revision 9ca4a157
/*
 * Copyright 2008-2014 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * Copyright (c) 2014, Cisco Systems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <libgen.h>

#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_hash.h>
#include <rte_byteorder.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_sctp.h>
#include <rte_eth_ctrl.h>

#include "enic_compat.h"
#include "enic.h"
#include "wq_enet_desc.h"
#include "rq_enet_desc.h"
#include "cq_enet_desc.h"
#include "vnic_enet.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_nic.h"

#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
#include <rte_hash_crc.h>
#define DEFAULT_HASH_FUNC       rte_hash_crc
#else
#include <rte_jhash.h>
#define DEFAULT_HASH_FUNC       rte_jhash
#endif

#define ENICPMD_CLSF_HASH_ENTRIES       ENICPMD_FDIR_MAX

void enic_fdir_stats_get(struct enic *enic, struct rte_eth_fdir_stats *stats)
{
	*stats = enic->fdir.stats;
}

void enic_fdir_info_get(struct enic *enic, struct rte_eth_fdir_info *info)
{
	info->mode = (enum rte_fdir_mode)enic->fdir.modes;
	info->flow_types_mask[0] = enic->fdir.types_mask;
}

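/* Set up the flow director mode and the set of supported flow types.
 * Adapters with advanced filter support can match additional IPv4 and
 * IPv6 flow types and use the generic (v2) filter copy routine.
 */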
void enic_fdir_info(struct enic *enic)
{
	enic->fdir.modes = (u32)RTE_FDIR_MODE_PERFECT;
	enic->fdir.types_mask = 1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP |
				1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
	if (enic->adv_filters) {
		enic->fdir.types_mask |= 1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER |
					 1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP |
					 1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP |
					 1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP |
					 1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP |
					 1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
		enic->fdir.copy_fltr_fn = copy_fltr_v2;
	} else {
		enic->fdir.copy_fltr_fn = copy_fltr_v1;
	}
}

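/* Write one header layer (mask and value) into a VIC generic filter
 * and record the corresponding match flag.
 */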
static void
enic_set_layer(struct filter_generic_1 *gp, unsigned int flag,
	       enum filter_generic_1_layer layer, void *mask, void *val,
	       unsigned int len)
{
	gp->mask_flags |= flag;
	gp->val_flags |= gp->mask_flags;
	memcpy(gp->layer[layer].mask, mask, len);
	memcpy(gp->layer[layer].val, val, len);
}

/* Copy Flow Director filter to a VIC ipv4 filter (for Cisco VICs
 * without advanced filter support).
 */
void
copy_fltr_v1(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
	     __rte_unused struct rte_eth_fdir_masks *masks)
{
	fltr->type = FILTER_IPV4_5TUPLE;
	fltr->u.ipv4.src_addr = rte_be_to_cpu_32(
		input->flow.ip4_flow.src_ip);
	fltr->u.ipv4.dst_addr = rte_be_to_cpu_32(
		input->flow.ip4_flow.dst_ip);
	fltr->u.ipv4.src_port = rte_be_to_cpu_16(
		input->flow.udp4_flow.src_port);
	fltr->u.ipv4.dst_port = rte_be_to_cpu_16(
		input->flow.udp4_flow.dst_port);

	if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP)
		fltr->u.ipv4.protocol = PROTO_TCP;
	else
		fltr->u.ipv4.protocol = PROTO_UDP;

	fltr->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
}

/* Copy Flow Director filter to a VIC generic filter (requires advanced
 * filter support).
 */
void
copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
	     struct rte_eth_fdir_masks *masks)
{
	struct filter_generic_1 *gp = &fltr->u.generic_1;
	int i;

	fltr->type = FILTER_DPDK_1;
	memset(gp, 0, sizeof(*gp));

	if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
		struct udp_hdr udp_mask, udp_val;
		memset(&udp_mask, 0, sizeof(udp_mask));
		memset(&udp_val, 0, sizeof(udp_val));

		if (input->flow.udp4_flow.src_port) {
			udp_mask.src_port = masks->src_port_mask;
			udp_val.src_port = input->flow.udp4_flow.src_port;
		}
		if (input->flow.udp4_flow.dst_port) {
			udp_mask.dst_port = masks->dst_port_mask;
			udp_val.dst_port = input->flow.udp4_flow.dst_port;
		}

		enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
			       &udp_mask, &udp_val, sizeof(struct udp_hdr));
	} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) {
		struct tcp_hdr tcp_mask, tcp_val;
		memset(&tcp_mask, 0, sizeof(tcp_mask));
		memset(&tcp_val, 0, sizeof(tcp_val));

		if (input->flow.tcp4_flow.src_port) {
			tcp_mask.src_port = masks->src_port_mask;
			tcp_val.src_port = input->flow.tcp4_flow.src_port;
		}
		if (input->flow.tcp4_flow.dst_port) {
			tcp_mask.dst_port = masks->dst_port_mask;
			tcp_val.dst_port = input->flow.tcp4_flow.dst_port;
		}

		enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4,
			       &tcp_mask, &tcp_val, sizeof(struct tcp_hdr));
	} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) {
		struct sctp_hdr sctp_mask, sctp_val;
		memset(&sctp_mask, 0, sizeof(sctp_mask));
		memset(&sctp_val, 0, sizeof(sctp_val));

		if (input->flow.sctp4_flow.src_port) {
			sctp_mask.src_port = masks->src_port_mask;
			sctp_val.src_port = input->flow.sctp4_flow.src_port;
		}
		if (input->flow.sctp4_flow.dst_port) {
			sctp_mask.dst_port = masks->dst_port_mask;
			sctp_val.dst_port = input->flow.sctp4_flow.dst_port;
		}
		if (input->flow.sctp4_flow.verify_tag) {
			sctp_mask.tag = 0xffffffff;
			sctp_val.tag = input->flow.sctp4_flow.verify_tag;
		}

		/* v4 proto should be 132, override ip4_flow.proto */
		input->flow.ip4_flow.proto = 132;

		enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
			       &sctp_val, sizeof(struct sctp_hdr));
	}

	if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP ||
	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP ||
	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP ||
	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) {
		struct ipv4_hdr ip4_mask, ip4_val;
		memset(&ip4_mask, 0, sizeof(struct ipv4_hdr));
		memset(&ip4_val, 0, sizeof(struct ipv4_hdr));

		if (input->flow.ip4_flow.tos) {
			ip4_mask.type_of_service = 0xff;
			ip4_val.type_of_service = input->flow.ip4_flow.tos;
		}
		if (input->flow.ip4_flow.ttl) {
			ip4_mask.time_to_live = 0xff;
			ip4_val.time_to_live = input->flow.ip4_flow.ttl;
		}
		if (input->flow.ip4_flow.proto) {
			ip4_mask.next_proto_id = 0xff;
			ip4_val.next_proto_id = input->flow.ip4_flow.proto;
		}
		if (input->flow.ip4_flow.src_ip) {
			ip4_mask.src_addr = masks->ipv4_mask.src_ip;
			ip4_val.src_addr = input->flow.ip4_flow.src_ip;
		}
		if (input->flow.ip4_flow.dst_ip) {
			ip4_mask.dst_addr = masks->ipv4_mask.dst_ip;
			ip4_val.dst_addr = input->flow.ip4_flow.dst_ip;
		}

		enic_set_layer(gp, FILTER_GENERIC_1_IPV4, FILTER_GENERIC_1_L3,
			       &ip4_mask, &ip4_val, sizeof(struct ipv4_hdr));
	}

	if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
		struct udp_hdr udp_mask, udp_val;
		memset(&udp_mask, 0, sizeof(udp_mask));
		memset(&udp_val, 0, sizeof(udp_val));

		if (input->flow.udp6_flow.src_port) {
			udp_mask.src_port = masks->src_port_mask;
			udp_val.src_port = input->flow.udp6_flow.src_port;
		}
		if (input->flow.udp6_flow.dst_port) {
			udp_mask.dst_port = masks->dst_port_mask;
			udp_val.dst_port = input->flow.udp6_flow.dst_port;
		}
		enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
			       &udp_mask, &udp_val, sizeof(struct udp_hdr));
	} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) {
		struct tcp_hdr tcp_mask, tcp_val;
		memset(&tcp_mask, 0, sizeof(tcp_mask));
		memset(&tcp_val, 0, sizeof(tcp_val));

		if (input->flow.tcp6_flow.src_port) {
			tcp_mask.src_port = masks->src_port_mask;
			tcp_val.src_port = input->flow.tcp6_flow.src_port;
		}
		if (input->flow.tcp6_flow.dst_port) {
			tcp_mask.dst_port = masks->dst_port_mask;
			tcp_val.dst_port = input->flow.tcp6_flow.dst_port;
		}
		enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4,
			       &tcp_mask, &tcp_val, sizeof(struct tcp_hdr));
	} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) {
		struct sctp_hdr sctp_mask, sctp_val;
		memset(&sctp_mask, 0, sizeof(sctp_mask));
		memset(&sctp_val, 0, sizeof(sctp_val));

		if (input->flow.sctp6_flow.src_port) {
			sctp_mask.src_port = masks->src_port_mask;
			sctp_val.src_port = input->flow.sctp6_flow.src_port;
		}
		if (input->flow.sctp6_flow.dst_port) {
			sctp_mask.dst_port = masks->dst_port_mask;
			sctp_val.dst_port = input->flow.sctp6_flow.dst_port;
		}
		if (input->flow.sctp6_flow.verify_tag) {
			sctp_mask.tag = 0xffffffff;
			sctp_val.tag = input->flow.sctp6_flow.verify_tag;
		}

		/* v6 proto should be 132 (SCTP), override ipv6_flow.proto */
		input->flow.ipv6_flow.proto = 132;

		enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
			       &sctp_val, sizeof(struct sctp_hdr));
	}

	if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP ||
	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP ||
	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP ||
	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) {
		struct ipv6_hdr ipv6_mask, ipv6_val;
		memset(&ipv6_mask, 0, sizeof(struct ipv6_hdr));
		memset(&ipv6_val, 0, sizeof(struct ipv6_hdr));

		if (input->flow.ipv6_flow.proto) {
			ipv6_mask.proto = 0xff;
			ipv6_val.proto = input->flow.ipv6_flow.proto;
		}
		for (i = 0; i < 4; i++) {
			*(uint32_t *)&ipv6_mask.src_addr[i * 4] =
					masks->ipv6_mask.src_ip[i];
			*(uint32_t *)&ipv6_val.src_addr[i * 4] =
					input->flow.ipv6_flow.src_ip[i];
		}
		for (i = 0; i < 4; i++) {
			*(uint32_t *)&ipv6_mask.dst_addr[i * 4] =
					masks->ipv6_mask.dst_ip[i];
			*(uint32_t *)&ipv6_val.dst_addr[i * 4] =
					input->flow.ipv6_flow.dst_ip[i];
		}
		if (input->flow.ipv6_flow.tc) {
			ipv6_mask.vtc_flow = 0x00ff0000;
			ipv6_val.vtc_flow = input->flow.ipv6_flow.tc << 16;
		}
		if (input->flow.ipv6_flow.hop_limits) {
			ipv6_mask.hop_limits = 0xff;
			ipv6_val.hop_limits = input->flow.ipv6_flow.hop_limits;
		}

		enic_set_layer(gp, FILTER_GENERIC_1_IPV6, FILTER_GENERIC_1_L3,
			       &ipv6_mask, &ipv6_val, sizeof(struct ipv6_hdr));
	}
}

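/* Remove a flow director filter: drop it from the hash table, delete
 * the classifier entry on the adapter and free the node.
 */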
int enic_fdir_del_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
{
	int32_t pos;
	struct enic_fdir_node *key;
	/* See if the key is in the table */
	pos = rte_hash_del_key(enic->fdir.hash, params);
	switch (pos) {
	case -EINVAL:
	case -ENOENT:
		enic->fdir.stats.f_remove++;
		return -EINVAL;
	default:
		/* The entry is present in the table */
		key = enic->fdir.nodes[pos];

		/* Delete the filter */
		vnic_dev_classifier(enic->vdev, CLSF_DEL,
			&key->fltr_id, NULL);
		rte_free(key);
		enic->fdir.nodes[pos] = NULL;
		enic->fdir.stats.free++;
		enic->fdir.stats.remove++;
		break;
	}
	return 0;
}

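/* Add (or update) a flow director filter. Unsupported matches are
 * rejected up front. When an existing entry moves to a new queue and a
 * free slot is available, the new classifier entry is installed before
 * the old one is deleted so that matching packets do not fall back to
 * the default queue in between.
 */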
int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
{
	struct enic_fdir_node *key;
	struct filter_v2 fltr;
	int32_t pos;
	u8 do_free = 0;
	u16 old_fltr_id = 0;
	u32 flowtype_supported;
	u16 flex_bytes;
	u16 queue;

	memset(&fltr, 0, sizeof(fltr));
	flowtype_supported = enic->fdir.types_mask
			     & (1 << params->input.flow_type);

	flex_bytes = ((params->input.flow_ext.flexbytes[1] << 8 & 0xFF00) |
		(params->input.flow_ext.flexbytes[0] & 0xFF));

	if (!enic->fdir.hash ||
		(params->input.flow_ext.vlan_tci & 0xFFF) ||
		!flowtype_supported || flex_bytes ||
		params->action.behavior /* drop */) {
		enic->fdir.stats.f_add++;
		return -ENOTSUP;
	}

	/* Get the enicpmd RQ from the DPDK Rx queue */
	queue = enic_rte_rq_idx_to_sop_idx(params->action.rx_queue);

	if (!enic->rq[queue].in_use)
		return -EINVAL;

	/* See if the key is already there in the table */
	pos = rte_hash_del_key(enic->fdir.hash, params);
	switch (pos) {
	case -EINVAL:
		enic->fdir.stats.f_add++;
		return -EINVAL;
	case -ENOENT:
		/* Add a new classifier entry */
		if (!enic->fdir.stats.free) {
			enic->fdir.stats.f_add++;
			return -ENOSPC;
		}
		key = rte_zmalloc("enic_fdir_node",
				  sizeof(struct enic_fdir_node), 0);
		if (!key) {
			enic->fdir.stats.f_add++;
			return -ENOMEM;
		}
		break;
	default:
		/* The entry is already present in the table.
		 * Check if there is a change in queue
		 */
		key = enic->fdir.nodes[pos];
		enic->fdir.nodes[pos] = NULL;
		if (unlikely(key->rq_index == queue)) {
			/* Nothing to be done */
			enic->fdir.stats.f_add++;
			pos = rte_hash_add_key(enic->fdir.hash, params);
			if (pos < 0) {
				dev_err(enic, "Add hash key failed\n");
				return pos;
			}
			enic->fdir.nodes[pos] = key;
			dev_warning(enic,
				"FDIR rule is already present\n");
			return 0;
		}

		if (likely(enic->fdir.stats.free)) {
			/* Add the filter and then delete the old one.
			 * This is to avoid packets from going into the
			 * default queue during the window between
			 * delete and add
			 */
			do_free = 1;
			old_fltr_id = key->fltr_id;
		} else {
			/* No free slots in the classifier.
			 * Delete the filter and add the modified one later
			 */
			vnic_dev_classifier(enic->vdev, CLSF_DEL,
				&key->fltr_id, NULL);
			enic->fdir.stats.free++;
		}

		break;
	}

	key->filter = *params;
	key->rq_index = queue;

	enic->fdir.copy_fltr_fn(&fltr, &params->input,
				&enic->rte_dev->data->dev_conf.fdir_conf.mask);

	if (!vnic_dev_classifier(enic->vdev, CLSF_ADD, &queue, &fltr)) {
		key->fltr_id = queue;
	} else {
		dev_err(enic, "Add classifier entry failed\n");
		enic->fdir.stats.f_add++;
		rte_free(key);
		return -1;
	}

	if (do_free)
		vnic_dev_classifier(enic->vdev, CLSF_DEL, &old_fltr_id, NULL);
	else {
		enic->fdir.stats.free--;
		enic->fdir.stats.add++;
	}

	pos = rte_hash_add_key(enic->fdir.hash, params);
	if (pos < 0) {
		enic->fdir.stats.f_add++;
		dev_err(enic, "Add hash key failed\n");
		return pos;
	}

	enic->fdir.nodes[pos] = key;
	return 0;
}

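/* Delete all installed classifier entries and free the filter hash table. */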
void enic_clsf_destroy(struct enic *enic)
{
	u32 index;
	struct enic_fdir_node *key;
	/* delete classifier entries */
	for (index = 0; index < ENICPMD_FDIR_MAX; index++) {
		key = enic->fdir.nodes[index];
		if (key) {
			vnic_dev_classifier(enic->vdev, CLSF_DEL,
				&key->fltr_id, NULL);
			rte_free(key);
			enic->fdir.nodes[index] = NULL;
		}
	}

	if (enic->fdir.hash) {
		rte_hash_free(enic->fdir.hash);
		enic->fdir.hash = NULL;
	}
}

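/* Create the per-port filter hash table and reset the flow director
 * statistics. Returns non-zero if the hash table could not be created.
 */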
int enic_clsf_init(struct enic *enic)
{
	char clsf_name[RTE_HASH_NAMESIZE];
	struct rte_hash_parameters hash_params = {
		.name = clsf_name,
		.entries = ENICPMD_CLSF_HASH_ENTRIES,
		.key_len = sizeof(struct rte_eth_fdir_filter),
		.hash_func = DEFAULT_HASH_FUNC,
		.hash_func_init_val = 0,
		.socket_id = SOCKET_ID_ANY,
	};
	snprintf(clsf_name, RTE_HASH_NAMESIZE, "enic_clsf_%s", enic->bdf_name);
	enic->fdir.hash = rte_hash_create(&hash_params);
	memset(&enic->fdir.stats, 0, sizeof(enic->fdir.stats));
	enic->fdir.stats.free = ENICPMD_FDIR_MAX;
	return NULL == enic->fdir.hash;
}