/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <string.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_cryptodev_pmd.h>

#include "rte_aesni_mb_pmd_private.h"

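/** Capabilities advertised for this PMD, returned to applications through
 * aesni_mb_pmd_info_get() below. Note the digest sizes listed are the
 * truncated lengths commonly used by IPsec (e.g. 12 bytes for HMAC-SHA1-96),
 * not the full hash output sizes.
 */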
static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
	{	/* MD5 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 1,
					.max = 64,
					.increment = 1
				},
				.digest_size = {
					.min = 12,
					.max = 12,
					.increment = 0
				},
				.aad_size = { 0 }
			}, }
		}, }
	},
	{	/* SHA1 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 1,
					.max = 64,
					.increment = 1
				},
				.digest_size = {
					.min = 12,
					.max = 12,
					.increment = 0
				},
				.aad_size = { 0 }
			}, }
		}, }
	},
	{	/* SHA224 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 1,
					.max = 64,
					.increment = 1
				},
				.digest_size = {
					.min = 14,
					.max = 14,
					.increment = 0
				},
				.aad_size = { 0 }
			}, }
		}, }
	},
	{	/* SHA256 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 1,
					.max = 64,
					.increment = 1
				},
				.digest_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.aad_size = { 0 }
			}, }
		}, }
	},
	{	/* SHA384 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
				.block_size = 128,
				.key_size = {
					.min = 1,
					.max = 128,
					.increment = 1
				},
				.digest_size = {
					.min = 24,
					.max = 24,
					.increment = 0
				},
				.aad_size = { 0 }
			}, }
		}, }
	},
	{	/* SHA512 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
				.block_size = 128,
				.key_size = {
					.min = 1,
					.max = 128,
					.increment = 1
				},
				.digest_size = {
					.min = 32,
					.max = 32,
					.increment = 0
				},
				.aad_size = { 0 }
			}, }
		}, }
	},
	{	/* AES XCBC MAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.digest_size = {
					.min = 12,
					.max = 12,
					.increment = 0
				},
				.aad_size = { 0 }
			}, }
		}, }
	},
	{	/* AES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.iv_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				}
			}, }
		}, }
	},
	{	/* AES CTR */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.iv_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				}
			}, }
		}, }
	},
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
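
/*
 * A minimal application-side sketch (not part of this driver) of how the
 * table above is consumed: rte_cryptodev_info_get() exposes it via
 * dev_info.capabilities, terminated by the RTE_CRYPTO_OP_TYPE_UNDEFINED
 * entry that RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() expands to. The
 * "dev_id" below is a hypothetical device id.
 *
 *	struct rte_cryptodev_info info;
 *	const struct rte_cryptodev_capabilities *cap;
 *
 *	rte_cryptodev_info_get(dev_id, &info);
 *	for (cap = info.capabilities;
 *			cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; cap++)
 *		if (cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AUTH &&
 *				cap->sym.auth.algo == RTE_CRYPTO_AUTH_SHA1_HMAC)
 *			break;	(SHA1-HMAC is supported)
 */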


/** Configure device */
static int
aesni_mb_pmd_config(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}

/** Start device */
static int
aesni_mb_pmd_start(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}

/** Stop device */
static void
aesni_mb_pmd_stop(__rte_unused struct rte_cryptodev *dev)
{
}

/** Close device */
static int
aesni_mb_pmd_close(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}


/** Get device statistics */
static void
aesni_mb_pmd_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];

		stats->enqueued_count += qp->stats.enqueued_count;
		stats->dequeued_count += qp->stats.dequeued_count;

		stats->enqueue_err_count += qp->stats.enqueue_err_count;
		stats->dequeue_err_count += qp->stats.dequeue_err_count;
	}
}

/** Reset device statistics */
static void
aesni_mb_pmd_stats_reset(struct rte_cryptodev *dev)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];

		memset(&qp->stats, 0, sizeof(qp->stats));
	}
}
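
/*
 * A minimal usage sketch (application side, not part of this driver): the
 * two handlers above back rte_cryptodev_stats_get() and
 * rte_cryptodev_stats_reset(); "dev_id" is a hypothetical device id.
 *
 *	struct rte_cryptodev_stats stats;
 *
 *	rte_cryptodev_stats_get(dev_id, &stats);
 *	printf("enqueued %"PRIu64", dequeued %"PRIu64"\n",
 *			stats.enqueued_count, stats.dequeued_count);
 *	rte_cryptodev_stats_reset(dev_id);
 */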


/** Get device info */
static void
aesni_mb_pmd_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info)
{
	struct aesni_mb_private *internals = dev->data->dev_private;

	if (dev_info != NULL) {
		dev_info->dev_type = dev->dev_type;
		dev_info->feature_flags = dev->feature_flags;
		dev_info->capabilities = aesni_mb_pmd_capabilities;
		dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
	}
}

/** Release queue pair */
static int
aesni_mb_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
	struct rte_ring *r = NULL;

	if (qp != NULL) {
		r = rte_ring_lookup(qp->name);
		if (r)
			rte_ring_free(r);
		rte_free(qp);
		dev->data->queue_pairs[qp_id] = NULL;
	}
	return 0;
}

/** Set a unique name for the queue pair based on the dev_id and qp_id */
static int
aesni_mb_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
		struct aesni_mb_qp *qp)
{
	unsigned n = snprintf(qp->name, sizeof(qp->name),
			"aesni_mb_pmd_%u_qp_%u",
			dev->data->dev_id, qp->id);

	if (n >= sizeof(qp->name))
		return -1;

	return 0;
}

/** Create a ring to place processed operations on */
static struct rte_ring *
aesni_mb_pmd_qp_create_processed_ops_ring(struct aesni_mb_qp *qp,
		unsigned ring_size, int socket_id)
{
	struct rte_ring *r;

	r = rte_ring_lookup(qp->name);
	if (r) {
		if (r->prod.size >= ring_size) {
			MB_LOG_INFO("Reusing existing ring %s for processed ops",
					qp->name);
			return r;
		}

		MB_LOG_ERR("Unable to reuse existing ring %s for processed ops",
				qp->name);
		return NULL;
	}

	return rte_ring_create(qp->name, ring_size, socket_id,
			RING_F_SP_ENQ | RING_F_SC_DEQ);
}

/** Setup a queue pair */
static int
aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		const struct rte_cryptodev_qp_conf *qp_conf,
		int socket_id)
{
	struct aesni_mb_qp *qp = NULL;
	struct aesni_mb_private *internals = dev->data->dev_private;

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		aesni_mb_pmd_qp_release(dev, qp_id);

	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp),
					RTE_CACHE_LINE_SIZE, socket_id);
	if (qp == NULL)
		return -ENOMEM;

	qp->id = qp_id;
	dev->data->queue_pairs[qp_id] = qp;

	if (aesni_mb_pmd_qp_set_unique_name(dev, qp))
		goto qp_setup_cleanup;

	qp->ops = &job_ops[internals->vector_mode];

	qp->processed_ops = aesni_mb_pmd_qp_create_processed_ops_ring(qp,
			qp_conf->nb_descriptors, socket_id);
	if (qp->processed_ops == NULL)
		goto qp_setup_cleanup;

	qp->sess_mp = dev->data->session_pool;

	memset(&qp->stats, 0, sizeof(qp->stats));

	/* Initialise multi-buffer manager */
	(*qp->ops->job.init_mgr)(&qp->mb_mgr);

	return 0;

qp_setup_cleanup:
	/*
	 * Clear the stale pointer so a later setup/release cannot touch
	 * freed memory; rte_free() is a no-op on NULL.
	 */
	dev->data->queue_pairs[qp_id] = NULL;
	rte_free(qp);

	return -1;
}
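
/*
 * A minimal sketch of the application-side call that reaches the setup
 * handler above (assuming the DPDK 16.x API; "dev_id" is a hypothetical
 * device id). Queue pairs are set up after rte_cryptodev_configure() and
 * before rte_cryptodev_start():
 *
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048
 *	};
 *
 *	if (rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *			rte_socket_id()) < 0)
 *		rte_exit(EXIT_FAILURE, "Failed to setup queue pair\n");
 */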

/** Start queue pair */
static int
aesni_mb_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint16_t queue_pair_id)
{
	return -ENOTSUP;
}

/** Stop queue pair */
static int
aesni_mb_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint16_t queue_pair_id)
{
	return -ENOTSUP;
}

/** Return the number of allocated queue pairs */
static uint32_t
aesni_mb_pmd_qp_count(struct rte_cryptodev *dev)
{
	return dev->data->nb_queue_pairs;
}

/** Return the size of the AESNI multi-buffer session structure */
static unsigned
aesni_mb_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct aesni_mb_session);
}

/** Configure an AESNI multi-buffer session from a crypto xform chain */
static void *
aesni_mb_pmd_session_configure(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform, void *sess)
{
	struct aesni_mb_private *internals = dev->data->dev_private;

	if (unlikely(sess == NULL)) {
		MB_LOG_ERR("invalid session struct");
		return NULL;
	}

	if (aesni_mb_set_session_parameters(&job_ops[internals->vector_mode],
			sess, xform) != 0) {
		MB_LOG_ERR("failed to configure session parameters");
		return NULL;
	}

	return sess;
}
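
/*
 * A minimal sketch of how a session reaches the handler above (assuming the
 * DPDK 16.x session API, in which the framework allocates "sess" from the
 * device's session mempool before calling into the PMD). "dev_id" and "key"
 * are hypothetical application values:
 *
 *	struct rte_crypto_sym_xform cipher_xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = key, .length = 16 }
 *		}
 *	};
 *
 *	struct rte_cryptodev_sym_session *session =
 *		rte_cryptodev_sym_session_create(dev_id, &cipher_xform);
 */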

/** Clear the memory of a session so it doesn't leave key material behind */
static void
aesni_mb_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
{
	/*
	 * Currently we reset the whole data structure; it is worth
	 * investigating whether a more selective reset of just the key
	 * material would be more performant.
	 */
	if (sess)
		memset(sess, 0, sizeof(struct aesni_mb_session));
}

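/** Operation handlers exported by this PMD. The device creation routine in
 * the companion rte_aesni_mb_pmd.c is expected to plug rte_aesni_mb_pmd_ops
 * into dev->dev_ops so the cryptodev framework dispatches to the functions
 * above.
 */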
struct rte_cryptodev_ops aesni_mb_pmd_ops = {
		.dev_configure		= aesni_mb_pmd_config,
		.dev_start		= aesni_mb_pmd_start,
		.dev_stop		= aesni_mb_pmd_stop,
		.dev_close		= aesni_mb_pmd_close,

		.stats_get		= aesni_mb_pmd_stats_get,
		.stats_reset		= aesni_mb_pmd_stats_reset,

		.dev_infos_get		= aesni_mb_pmd_info_get,

		.queue_pair_setup	= aesni_mb_pmd_qp_setup,
		.queue_pair_release	= aesni_mb_pmd_qp_release,
		.queue_pair_start	= aesni_mb_pmd_qp_start,
		.queue_pair_stop	= aesni_mb_pmd_qp_stop,
		.queue_pair_count	= aesni_mb_pmd_qp_count,

		.session_get_size	= aesni_mb_pmd_session_get_size,
		.session_configure	= aesni_mb_pmd_session_configure,
		.session_clear		= aesni_mb_pmd_session_clear
};

struct rte_cryptodev_ops *rte_aesni_mb_pmd_ops = &aesni_mb_pmd_ops;