/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *	 * Redistributions of source code must retain the above copyright
 *	   notice, this list of conditions and the following disclaimer.
 *	 * Redistributions in binary form must reproduce the above copyright
 *	   notice, this list of conditions and the following disclaimer in
 *	   the documentation and/or other materials provided with the
 *	   distribution.
 *	 * Neither the name of Intel Corporation nor the names of its
 *	   contributors may be used to endorse or promote products derived
 *	   from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <string.h>
#include <inttypes.h>
#include <errno.h>
#include <sys/queue.h>
#include <stdarg.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_ether.h>
#include <rte_malloc.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>
#include <rte_hexdump.h>

#include "qat_logs.h"
#include "qat_algs.h"
#include "qat_crypto.h"
#include "adf_transport_access_macros.h"

#define BYTE_LENGTH    8

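/*
 * Table of symmetric crypto capabilities advertised through
 * rte_cryptodev_info_get(): the cipher and authentication algorithms this
 * PMD accelerates, with the key, digest, AAD and IV sizes accepted for each.
 */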
static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
	{	/* SHA1 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 64,
					.max = 64,
					.increment = 0
				},
				.digest_size = {
					.min = 20,
					.max = 20,
					.increment = 0
				},
				.aad_size = { 0 }
			}, }
		}, }
	},
	{	/* SHA224 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 64,
					.max = 64,
					.increment = 0
				},
				.digest_size = {
					.min = 28,
					.max = 28,
					.increment = 0
				},
				.aad_size = { 0 }
			}, }
		}, }
	},
	{	/* SHA256 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 64,
					.max = 64,
					.increment = 0
				},
				.digest_size = {
					.min = 32,
					.max = 32,
					.increment = 0
				},
				.aad_size = { 0 }
			}, }
		}, }
	},
	{	/* SHA384 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 128,
					.max = 128,
					.increment = 0
				},
				.digest_size = {
					.min = 48,
					.max = 48,
					.increment = 0
				},
				.aad_size = { 0 }
			}, }
		}, }
	},
	{	/* SHA512 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
				.block_size = 128,
				.key_size = {
					.min = 128,
					.max = 128,
					.increment = 0
				},
				.digest_size = {
					.min = 64,
					.max = 64,
					.increment = 0
				},
				.aad_size = { 0 }
			}, }
		}, }
	},
	{	/* MD5 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 8,
					.max = 64,
					.increment = 8
				},
				.digest_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.aad_size = { 0 }
			}, }
		}, }
	},
	{	/* AES XCBC MAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.digest_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.aad_size = { 0 }
			}, }
		}, }
	},
	{	/* AES GCM (AUTH) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_AES_GCM,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.digest_size = {
					.min = 8,
					.max = 16,
					.increment = 4
				},
				.aad_size = {
					.min = 8,
					.max = 12,
					.increment = 4
				}
			}, }
		}, }
	},
	{	/* AES GMAC (AUTH) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.digest_size = {
					.min = 8,
					.max = 16,
					.increment = 4
				},
				.aad_size = {
					.min = 1,
					.max = 65535,
					.increment = 1
				}
			}, }
		}, }
	},
	{	/* SNOW 3G (UIA2) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.digest_size = {
					.min = 4,
					.max = 4,
					.increment = 0
				},
				.aad_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				}
			}, }
		}, }
	},
	{	/* AES GCM (CIPHER) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_GCM,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.iv_size = {
					.min = 12,
					.max = 12,
					.increment = 0
				}
			}, }
		}, }
	},
	{	/* AES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.iv_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				}
			}, }
		}, }
	},
	{	/* SNOW 3G (UEA2) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.iv_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				}
			}, }
		}, }
	},
	{	/* AES CTR */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.iv_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				}
			}, }
		}, }
	},
	{	/* NULL (AUTH) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_NULL,
				.block_size = 1,
				.key_size = {
					.min = 0,
					.max = 0,
					.increment = 0
				},
				.digest_size = {
					.min = 0,
					.max = 0,
					.increment = 0
				},
				.aad_size = { 0 }
			}, },
		}, },
	},
	{	/* NULL (CIPHER) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_NULL,
				.block_size = 1,
				.key_size = {
					.min = 0,
					.max = 0,
					.increment = 0
				},
				.iv_size = {
					.min = 0,
					.max = 0,
					.increment = 0
				}
			}, },
		}, }
	},
	{	/* KASUMI (F8) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_KASUMI_F8,
				.block_size = 8,
				.key_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.iv_size = {
					.min = 8,
					.max = 8,
					.increment = 0
				}
			}, }
		}, }
	},
	{	/* KASUMI (F9) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_KASUMI_F9,
				.block_size = 8,
				.key_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.digest_size = {
					.min = 4,
					.max = 4,
					.increment = 0
				},
				.aad_size = {
					.min = 8,
					.max = 8,
					.increment = 0
				}
			}, }
		}, }
	},
	{	/* 3DES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
				.block_size = 8,
				.key_size = {
					.min = 16,
					.max = 24,
					.increment = 8
				},
				.iv_size = {
					.min = 8,
					.max = 8,
					.increment = 0
				}
			}, }
		}, }
	},
	{	/* 3DES CTR */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_3DES_CTR,
				.block_size = 8,
				.key_size = {
					.min = 16,
					.max = 24,
					.increment = 8
				},
				.iv_size = {
					.min = 8,
					.max = 8,
					.increment = 0
				}
			}, }
		}, }
	},
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

static inline uint32_t
adf_modulo(uint32_t data, uint32_t shift);

static inline int
qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg);

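/*
 * Wipe the private session data but preserve the pre-computed physical
 * address of the content descriptor, so the cleared object can be reused
 * when it is returned to the session mempool.
 */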
void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
		void *session)
{
	struct qat_session *sess = session;
	phys_addr_t cd_paddr;

	PMD_INIT_FUNC_TRACE();
	if (session) {
		cd_paddr = sess->cd_paddr;
		memset(sess, 0, qat_crypto_sym_get_session_private_size(dev));
		sess->cd_paddr = cd_paddr;
	} else
		PMD_DRV_LOG(ERR, "NULL session");
}

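/*
 * Map an xform chain onto the matching QAT firmware command: cipher only,
 * auth only, cipher-then-hash or hash-then-cipher. Returns -1 for chains
 * that do not correspond to a single firmware command.
 */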
static int
qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
{
	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
		return ICP_QAT_FW_LA_CMD_CIPHER;

	/* Authentication Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
		return ICP_QAT_FW_LA_CMD_AUTH;

	if (xform->next == NULL)
		return -1;

	/* Cipher then Authenticate */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
		return ICP_QAT_FW_LA_CMD_CIPHER_HASH;

	/* Authenticate then Cipher */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
		return ICP_QAT_FW_LA_CMD_HASH_CIPHER;

	return -1;
}

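/*
 * Helpers that walk an xform chain and return the first authentication or
 * cipher transform found, or NULL if the chain contains none.
 */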
static struct rte_crypto_auth_xform *
qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
{
	do {
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
			return &xform->auth;

		xform = xform->next;
	} while (xform);

	return NULL;
}

static struct rte_crypto_cipher_xform *
qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
{
	do {
		if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
			return &xform->cipher;

		xform = xform->next;
	} while (xform);

	return NULL;
}

void *
qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform, void *session_private)
{
	struct qat_pmd_private *internals = dev->data->dev_private;

	struct qat_session *session = session_private;

	struct rte_crypto_cipher_xform *cipher_xform = NULL;

	/* Get cipher xform from crypto xform chain */
	cipher_xform = qat_get_cipher_xform(xform);

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		if (qat_alg_validate_aes_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_AES_GCM:
		if (qat_alg_validate_aes_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		if (qat_alg_validate_aes_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		if (qat_alg_validate_snow3g_key(cipher_xform->key.length,
					&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid SNOW 3G cipher key size");
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
		break;
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		if (qat_alg_validate_kasumi_key(cipher_xform->key.length,
					&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid KASUMI cipher key size");
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		if (qat_alg_validate_3des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
		if (qat_alg_validate_3des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		break;
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_AES_CCM:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_AES_XTS:
	case RTE_CRYPTO_CIPHER_ARC4:
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
				cipher_xform->algo);
		goto error_out;
	default:
		PMD_DRV_LOG(ERR, "Crypto: Undefined Cipher specified %u",
				cipher_xform->algo);
		goto error_out;
	}

	if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
	else
		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;

	if (qat_alg_aead_session_create_content_desc_cipher(session,
						cipher_xform->key.data,
						cipher_xform->key.length))
		goto error_out;

	return session;

error_out:
	rte_mempool_put(internals->sess_mp, session);
	return NULL;
}

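/*
 * Top-level session configuration: derive the firmware command id from the
 * xform chain, then fill in the cipher and/or auth parts of the session's
 * content descriptor in the order the command requires.
 */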
void *
qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform, void *session_private)
{
	struct qat_pmd_private *internals = dev->data->dev_private;

	struct qat_session *session = session_private;

	int qat_cmd_id;

	PMD_INIT_FUNC_TRACE();

	/* Get requested QAT command id */
	qat_cmd_id = qat_get_cmd_id(xform);
	if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
		PMD_DRV_LOG(ERR, "Unsupported xform chain requested");
		goto error_out;
	}
	session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
	switch (session->qat_cmd) {
	case ICP_QAT_FW_LA_CMD_CIPHER:
		session = qat_crypto_sym_configure_session_cipher(dev, xform,
				session);
		break;
	case ICP_QAT_FW_LA_CMD_AUTH:
		session = qat_crypto_sym_configure_session_auth(dev, xform,
				session);
		break;
	case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
		session = qat_crypto_sym_configure_session_cipher(dev, xform,
				session);
		session = qat_crypto_sym_configure_session_auth(dev, xform,
				session);
		break;
	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
		session = qat_crypto_sym_configure_session_auth(dev, xform,
				session);
		session = qat_crypto_sym_configure_session_cipher(dev, xform,
				session);
		break;
	case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
	case ICP_QAT_FW_LA_CMD_TRNG_TEST:
	case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_MGF1:
	case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
	case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
	case ICP_QAT_FW_LA_CMD_DELIMITER:
		PMD_DRV_LOG(ERR, "Unsupported Service %u",
				session->qat_cmd);
		goto error_out;
	default:
		PMD_DRV_LOG(ERR, "Unsupported Service %u",
				session->qat_cmd);
		goto error_out;
	}
	return session;

error_out:
	rte_mempool_put(internals->sess_mp, session);
	return NULL;
}

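/*
 * Fill in the authentication part of a session. For GCM/GMAC the hash is
 * keyed with the cipher key taken from the cipher xform; all other
 * algorithms take their key from the auth xform.
 */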
struct qat_session *
qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
				struct rte_crypto_sym_xform *xform,
				struct qat_session *session_private)
{
	struct qat_pmd_private *internals = dev->data->dev_private;
	struct qat_session *session = session_private;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;

	auth_xform = qat_get_auth_xform(xform);

	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
		break;
	case RTE_CRYPTO_AUTH_AES_GCM:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
		break;
	case RTE_CRYPTO_AUTH_AES_GMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
		break;
	case RTE_CRYPTO_AUTH_KASUMI_F9:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
		break;
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_CCM:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		PMD_DRV_LOG(ERR, "Crypto: Unsupported hash alg %u",
				auth_xform->algo);
		goto error_out;
	default:
		PMD_DRV_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
				auth_xform->algo);
		goto error_out;
	}
	cipher_xform = qat_get_cipher_xform(xform);

	if ((session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
			(session->qat_hash_alg ==
				ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
		if (qat_alg_aead_session_create_content_desc_auth(session,
				cipher_xform->key.data,
				cipher_xform->key.length,
				auth_xform->add_auth_data_length,
				auth_xform->digest_length,
				auth_xform->op))
			goto error_out;
	} else {
		if (qat_alg_aead_session_create_content_desc_auth(session,
				auth_xform->key.data,
				auth_xform->key.length,
				auth_xform->add_auth_data_length,
				auth_xform->digest_length,
				auth_xform->op))
			goto error_out;
	}
	return session;

error_out:
	if (internals->sess_mp != NULL)
		rte_mempool_put(internals->sess_mp, session);
	return NULL;
}

unsigned qat_crypto_sym_get_session_private_size(
		struct rte_cryptodev *dev __rte_unused)
{
	return RTE_ALIGN_CEIL(sizeof(struct qat_session), 8);
}

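/*
 * Enqueue a burst of crypto ops on the TX ring. Ring space is reserved up
 * front by bumping the inflights16 counter (trimmed back if the burst would
 * overflow the ring), one firmware request is built per op, and the tail
 * CSR is written once at the end of the burst.
 */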
uint16_t
qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	register struct qat_queue *queue;
	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
	register uint32_t nb_ops_sent = 0;
	register struct rte_crypto_op **cur_op = ops;
	register int ret;
	uint16_t nb_ops_possible = nb_ops;
	register uint8_t *base_addr;
	register uint32_t tail;
	int overflow;

	if (unlikely(nb_ops == 0))
		return 0;

	/* read params used a lot in main loop into registers */
	queue = &(tmp_qp->tx_q);
	base_addr = (uint8_t *)queue->base_addr;
	tail = queue->tail;

	/* Find how many can actually fit on the ring */
	overflow = rte_atomic16_add_return(&tmp_qp->inflights16, nb_ops)
				- queue->max_inflights;
	if (overflow > 0) {
		rte_atomic16_sub(&tmp_qp->inflights16, overflow);
		nb_ops_possible = nb_ops - overflow;
		if (nb_ops_possible == 0)
			return 0;
	}

	while (nb_ops_sent != nb_ops_possible) {
		ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail);
		if (ret != 0) {
			tmp_qp->stats.enqueue_err_count++;
			/*
			 * Ops from this point on are never placed on the
			 * ring and will produce no responses, so release the
			 * inflight slots reserved for them above.
			 */
			rte_atomic16_sub(&tmp_qp->inflights16,
					nb_ops_possible - nb_ops_sent);
			if (nb_ops_sent == 0)
				return 0;
			goto kick_tail;
		}

		tail = adf_modulo(tail + queue->msg_size, queue->modulo);
		nb_ops_sent++;
		cur_op++;
	}
kick_tail:
	WRITE_CSR_RING_TAIL(tmp_qp->mmap_bar_addr, queue->hw_bundle_number,
			queue->hw_queue_number, tail);
	queue->tail = tail;
	tmp_qp->stats.enqueued_count += nb_ops_sent;
	return nb_ops_sent;
}

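/*
 * Dequeue completed ops from the RX ring: walk responses until the empty
 * signature or nb_ops is reached, recover each rte_crypto_op from the
 * response's opaque data, set its status, then update the head CSR and the
 * inflight count once for the whole burst.
 */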
uint16_t
qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct qat_queue *queue;
	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
	uint32_t msg_counter = 0;
	struct rte_crypto_op *rx_op;
	struct icp_qat_fw_comn_resp *resp_msg;

	queue = &(tmp_qp->rx_q);
	resp_msg = (struct icp_qat_fw_comn_resp *)
			((uint8_t *)queue->base_addr + queue->head);

	while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
			msg_counter != nb_ops) {
		rx_op = (struct rte_crypto_op *)(uintptr_t)
				(resp_msg->opaque_data);

#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
		rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
				sizeof(struct icp_qat_fw_comn_resp));
#endif
		if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
				ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
					resp_msg->comn_hdr.comn_status)) {
			rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
		} else {
			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}
		*(uint32_t *)resp_msg = ADF_RING_EMPTY_SIG;
		queue->head = adf_modulo(queue->head +
				queue->msg_size,
				ADF_RING_SIZE_MODULO(queue->queue_size));
		resp_msg = (struct icp_qat_fw_comn_resp *)
					((uint8_t *)queue->base_addr +
							queue->head);
		*ops = rx_op;
		ops++;
		msg_counter++;
	}
	if (msg_counter > 0) {
		WRITE_CSR_RING_HEAD(tmp_qp->mmap_bar_addr,
					queue->hw_bundle_number,
					queue->hw_queue_number, queue->head);
		rte_atomic16_sub(&tmp_qp->inflights16, msg_counter);
		tmp_qp->stats.dequeued_count += msg_counter;
	}
	return msg_counter;
}

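/*
 * Build one firmware request in place on the TX ring: start from the
 * request template cached in the session, then patch in the per-op cipher
 * and authentication regions, IV, digest and AAD addresses.
 */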
static inline int
qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
{
	struct qat_session *ctx;
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	register struct icp_qat_fw_la_bulk_req *qat_req;
	uint8_t do_auth = 0, do_cipher = 0;
	uint32_t cipher_len = 0, cipher_ofs = 0;
	uint32_t auth_len = 0, auth_ofs = 0;
	uint32_t min_ofs = 0;
	uint32_t digest_appended = 1;
	uint64_t src_buf_start = 0, dst_buf_start = 0;

#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
	if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
		PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto "
				"operation requests, op (%p) is not a "
				"symmetric operation.", op);
		return -EINVAL;
	}
#endif
	if (unlikely(op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
		PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
				" requests, op (%p) is sessionless.", op);
		return -EINVAL;
	}

	if (unlikely(op->sym->session->dev_type != RTE_CRYPTODEV_QAT_SYM_PMD)) {
		PMD_DRV_LOG(ERR, "Session was not created for this device");
		return -EINVAL;
	}

	ctx = (struct qat_session *)op->sym->session->_private;
	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
	cipher_param = (void *)&qat_req->serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));

	if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
		ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		do_auth = 1;
		do_cipher = 1;
	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
		do_auth = 1;
		do_cipher = 0;
	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
		do_auth = 0;
		do_cipher = 1;
	}

	if (do_cipher) {

		if (ctx->qat_cipher_alg ==
					 ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
			ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {

			if (unlikely(
				(cipher_param->cipher_length % BYTE_LENGTH != 0)
				 || (cipher_param->cipher_offset
							% BYTE_LENGTH != 0))) {
				PMD_DRV_LOG(ERR,
		  "SNOW3G/KASUMI in QAT PMD only supports byte aligned values");
				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
				return -EINVAL;
			}
			cipher_len = op->sym->cipher.data.length >> 3;
			cipher_ofs = op->sym->cipher.data.offset >> 3;

		} else {
			cipher_len = op->sym->cipher.data.length;
			cipher_ofs = op->sym->cipher.data.offset;
		}

		/*
		 * Copy the IV into the request if it fits. If the IV length
		 * is zero do not copy anything but still use the request
		 * descriptor's embedded IV.
		 */
		if (op->sym->cipher.iv.length) {
			if (op->sym->cipher.iv.length <=
					sizeof(cipher_param->u.cipher_IV_array)) {
				rte_memcpy(cipher_param->u.cipher_IV_array,
						op->sym->cipher.iv.data,
						op->sym->cipher.iv.length);
			} else {
				ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
						qat_req->comn_hdr.serv_specif_flags,
						ICP_QAT_FW_CIPH_IV_64BIT_PTR);
				cipher_param->u.s.cipher_IV_ptr =
						op->sym->cipher.iv.phys_addr;
			}
		}
		min_ofs = cipher_ofs;
	}

	if (do_auth) {

		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9) {
			if (unlikely((auth_param->auth_off % BYTE_LENGTH != 0)
				|| (auth_param->auth_len % BYTE_LENGTH != 0))) {
				PMD_DRV_LOG(ERR,
		"For SNOW3G/KASUMI, QAT PMD only supports byte aligned values");
				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
				return -EINVAL;
			}
			auth_ofs = op->sym->auth.data.offset >> 3;
			auth_len = op->sym->auth.data.length >> 3;

			if (ctx->qat_hash_alg ==
					ICP_QAT_HW_AUTH_ALGO_KASUMI_F9) {
				if (do_cipher) {
					auth_len = auth_len + auth_ofs + 1 -
						ICP_QAT_HW_KASUMI_BLK_SZ;
					auth_ofs = ICP_QAT_HW_KASUMI_BLK_SZ;
				} else {
					auth_len = auth_len + auth_ofs + 1;
					auth_ofs = 0;
				}
			}

		} else if (ctx->qat_hash_alg ==
					ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
				ctx->qat_hash_alg ==
					ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
			auth_ofs = op->sym->cipher.data.offset;
			auth_len = op->sym->cipher.data.length;
		} else {
			auth_ofs = op->sym->auth.data.offset;
			auth_len = op->sym->auth.data.length;
		}
		min_ofs = auth_ofs;

		if (op->sym->auth.digest.phys_addr) {
			ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
					qat_req->comn_hdr.serv_specif_flags,
					ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
			auth_param->auth_res_addr =
					op->sym->auth.digest.phys_addr;
			digest_appended = 0;
		}

		auth_param->u1.aad_adr = op->sym->auth.aad.phys_addr;

	}

	/* adjust for chain case */
	if (do_cipher && do_auth)
		min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;

	if (unlikely(op->sym->m_dst != NULL)) {
		/* Out-of-place operation (OOP)
		 * Don't align DMA start. DMA the minimum data-set
		 * so as not to overwrite data in dest buffer
		 */
		src_buf_start =
			rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs);
		dst_buf_start =
			rte_pktmbuf_mtophys_offset(op->sym->m_dst, min_ofs);
	} else {
		/* In-place operation
		 * Start DMA at nearest aligned address below min_ofs
		 */
		src_buf_start =
			rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs)
						& QAT_64_BTYE_ALIGN_MASK;

		if (unlikely((rte_pktmbuf_mtophys(op->sym->m_src) -
					rte_pktmbuf_headroom(op->sym->m_src))
							> src_buf_start)) {
			/* alignment has pushed addr ahead of start of mbuf
			 * so revert and take the performance hit
			 */
			src_buf_start =
				rte_pktmbuf_mtophys_offset(op->sym->m_src,
								min_ofs);
		}
		dst_buf_start = src_buf_start;
	}

	if (do_cipher) {
		cipher_param->cipher_offset =
				(uint32_t)rte_pktmbuf_mtophys_offset(
				op->sym->m_src, cipher_ofs) - src_buf_start;
		cipher_param->cipher_length = cipher_len;
	} else {
		cipher_param->cipher_offset = 0;
		cipher_param->cipher_length = 0;
	}
	if (do_auth) {
		auth_param->auth_off = (uint32_t)rte_pktmbuf_mtophys_offset(
				op->sym->m_src, auth_ofs) - src_buf_start;
		auth_param->auth_len = auth_len;
	} else {
		auth_param->auth_off = 0;
		auth_param->auth_len = 0;
	}
	qat_req->comn_mid.dst_length =
		qat_req->comn_mid.src_length =
		(cipher_param->cipher_offset + cipher_param->cipher_length)
		> (auth_param->auth_off + auth_param->auth_len) ?
		(cipher_param->cipher_offset + cipher_param->cipher_length)
		: (auth_param->auth_off + auth_param->auth_len);

	if (do_auth && digest_appended) {
		if (ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE)
			qat_req->comn_mid.dst_length
					+= op->sym->auth.digest.length;
		else
			qat_req->comn_mid.src_length
				+= op->sym->auth.digest.length;
	}
	qat_req->comn_mid.src_data_addr = src_buf_start;
	qat_req->comn_mid.dest_data_addr = dst_buf_start;

	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
		if (op->sym->cipher.iv.length == 12) {
			/*
			 * For GCM a 12 byte IV is allowed,
			 * but we need to inform the f/w
			 */
			ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
				qat_req->comn_hdr.serv_specif_flags,
				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
		}
		if (op->sym->cipher.data.length == 0) {
			/*
			 * GMAC: no data is ciphered, the data to be
			 * authenticated is supplied in the AAD buffer
			 */
			qat_req->comn_mid.dest_data_addr =
				qat_req->comn_mid.src_data_addr =
						op->sym->auth.aad.phys_addr;
			qat_req->comn_mid.dst_length =
				qat_req->comn_mid.src_length =
					rte_pktmbuf_data_len(op->sym->m_src);
			cipher_param->cipher_length = 0;
			cipher_param->cipher_offset = 0;
			auth_param->u1.aad_adr = 0;
			auth_param->auth_len = op->sym->auth.aad.length;
			auth_param->auth_off = op->sym->auth.data.offset;
			auth_param->u2.aad_sz = 0;
		}
	}

#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
	rte_hexdump(stdout, "qat_req:", qat_req,
			sizeof(struct icp_qat_fw_la_bulk_req));
	rte_hexdump(stdout, "src_data:",
			rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
			rte_pktmbuf_data_len(op->sym->m_src));
	rte_hexdump(stdout, "iv:", op->sym->cipher.iv.data,
			op->sym->cipher.iv.length);
	rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
			op->sym->auth.digest.length);
	rte_hexdump(stdout, "aad:", op->sym->auth.aad.data,
			op->sym->auth.aad.length);
#endif
	return 0;
}

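/* Cheap modulo for power-of-two ring sizes: returns data mod 2^shift. */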
static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
{
	uint32_t div = data >> shift;
	uint32_t mult = div << shift;

	return data - mult;
}

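/*
 * Session initialisation hook: pre-compute the physical address of the
 * content descriptor that lives inside the private qat_session area of the
 * mempool object, presumably so the hardware can fetch it by DMA later.
 */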
void qat_crypto_sym_session_init(struct rte_mempool *mp, void *sym_sess)
{
	struct rte_cryptodev_sym_session *sess = sym_sess;
	struct qat_session *s = (void *)sess->_private;

	PMD_INIT_FUNC_TRACE();
	s->cd_paddr = rte_mempool_virt2phy(mp, sess) +
		offsetof(struct qat_session, cd) +
		offsetof(struct rte_cryptodev_sym_session, _private);
}

int qat_dev_config(__rte_unused struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();
	return -ENOTSUP;
}

int qat_dev_start(__rte_unused struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

void qat_dev_stop(__rte_unused struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();
}

int qat_dev_close(struct rte_cryptodev *dev)
{
	int i, ret;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		ret = qat_crypto_sym_qp_release(dev, i);
		if (ret < 0)
			return ret;
	}

	return 0;
}

void qat_dev_info_get(struct rte_cryptodev *dev,
				struct rte_cryptodev_info *info)
{
	struct qat_pmd_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs =
				ADF_NUM_SYM_QPS_PER_BUNDLE *
				ADF_NUM_BUNDLES_PER_DEV;
		info->feature_flags = dev->feature_flags;
		info->capabilities = qat_pmd_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
	}
}

void qat_crypto_sym_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats)
{
	int i;
	struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);

	PMD_INIT_FUNC_TRACE();
	if (stats == NULL) {
		PMD_DRV_LOG(ERR, "invalid stats ptr NULL");
		return;
	}
	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		if (qp[i] == NULL) {
			PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
			continue;
		}

		stats->enqueued_count += qp[i]->stats.enqueued_count;
		stats->dequeued_count += qp[i]->stats.dequeued_count;
		stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
		stats->dequeue_err_count += qp[i]->stats.dequeue_err_count;
	}
}

void qat_crypto_sym_stats_reset(struct rte_cryptodev *dev)
{
	int i;
	struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);

	PMD_INIT_FUNC_TRACE();
	for (i = 0; i < dev->data->nb_queue_pairs; i++)
		memset(&(qp[i]->stats), 0, sizeof(qp[i]->stats));
	PMD_DRV_LOG(DEBUG, "QAT crypto: stats cleared");
}