/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/rng.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"

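/*
 * Build the hardware cipher configuration word for AES-CBC.  The encrypt
 * direction uses the key as supplied; the decrypt direction asks the
 * hardware to convert it to the decryption key schedule first.
 */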
#define QAT_AES_HW_CONFIG_ENC(alg) \
        ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
                        ICP_QAT_HW_CIPHER_NO_CONVERT, \
                        ICP_QAT_HW_CIPHER_ENCRYPT)

#define QAT_AES_HW_CONFIG_DEC(alg) \
        ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
                        ICP_QAT_HW_CIPHER_KEY_CONVERT, \
                        ICP_QAT_HW_CIPHER_DECRYPT)

static atomic_t active_dev;

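/*
 * Flat buffer and buffer list descriptors in the layout expected by the
 * firmware's scatter-gather engine.
 */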
struct qat_alg_buf {
        uint32_t len;
        uint32_t resrvd;
        uint64_t addr;
} __packed;

struct qat_alg_buf_list {
        uint64_t resrvd;
        uint32_t num_bufs;
        uint32_t num_mapped_bufs;
        struct qat_alg_buf bufers[];
} __packed __aligned(64);

/* Common content descriptor */
struct qat_alg_cd {
        union {
                struct qat_enc { /* Encrypt content desc */
                        struct icp_qat_hw_cipher_algo_blk cipher;
                        struct icp_qat_hw_auth_algo_blk hash;
                } qat_enc_cd;
                struct qat_dec { /* Decrypt content desc */
                        struct icp_qat_hw_auth_algo_blk hash;
                        struct icp_qat_hw_cipher_algo_blk cipher;
                } qat_dec_cd;
        };
} __aligned(64);

#define MAX_AUTH_STATE_SIZE sizeof(struct icp_qat_hw_auth_algo_blk)

struct qat_auth_state {
        uint8_t data[MAX_AUTH_STATE_SIZE];
} __aligned(64);

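/*
 * Per-tfm session state: DMA-coherent content descriptors and HMAC state
 * buffers for each direction, plus pre-built firmware request templates
 * that get copied into every request.
 */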
struct qat_alg_session_ctx {
        struct qat_alg_cd *enc_cd;
        dma_addr_t enc_cd_paddr;
        struct qat_alg_cd *dec_cd;
        dma_addr_t dec_cd_paddr;
        struct qat_auth_state *auth_hw_state_enc;
        dma_addr_t auth_state_enc_paddr;
        struct qat_auth_state *auth_hw_state_dec;
        dma_addr_t auth_state_dec_paddr;
        struct icp_qat_fw_la_bulk_req enc_fw_req_tmpl;
        struct icp_qat_fw_la_bulk_req dec_fw_req_tmpl;
        struct qat_crypto_instance *inst;
        struct crypto_tfm *tfm;
        struct crypto_shash *hash_tfm;
        enum icp_qat_hw_auth_algo qat_hash_alg;
        uint8_t salt[AES_BLOCK_SIZE];
        spinlock_t lock;        /* protects qat_alg_session_ctx struct */
};

static int get_current_node(void)
{
        return cpu_data(current_thread_info()->cpu).phys_proc_id;
}

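/* Size in bytes of the intermediate (state1) hash state for each
 * supported auth algorithm.
 */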
static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
        switch (qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                return ICP_QAT_HW_SHA1_STATE1_SZ;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                return ICP_QAT_HW_SHA256_STATE1_SZ;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                return ICP_QAT_HW_SHA512_STATE1_SZ;
        default:
                return -EFAULT;
        }
}

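/*
 * Precompute the HMAC state for the device: hash the key XORed with the
 * ipad and opad constants and export the resulting inner and outer hash
 * states into the content descriptor, byte-swapped to the big-endian
 * layout the hardware expects.
 */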
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
                                  struct qat_alg_session_ctx *ctx,
                                  const uint8_t *auth_key,
                                  unsigned int auth_keylen, uint8_t *auth_state)
{
        struct {
                struct shash_desc shash;
                char ctx[crypto_shash_descsize(ctx->hash_tfm)];
        } desc;
        struct sha1_state sha1;
        struct sha256_state sha256;
        struct sha512_state sha512;
        int block_size = crypto_shash_blocksize(ctx->hash_tfm);
        int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
        uint8_t *ipad = auth_state;
        uint8_t *opad = ipad + block_size;
        __be32 *hash_state_out;
        __be64 *hash512_state_out;
        int i, offset;

        desc.shash.tfm = ctx->hash_tfm;
        desc.shash.flags = 0x0;

        /* Keys longer than one block are first hashed down to digest size */
        if (auth_keylen > block_size) {
                char buff[SHA512_BLOCK_SIZE];
                int ret = crypto_shash_digest(&desc.shash, auth_key,
                                              auth_keylen, buff);
                if (ret)
                        return ret;

                memcpy(ipad, buff, digest_size);
                memcpy(opad, buff, digest_size);
                memset(ipad + digest_size, 0, block_size - digest_size);
                memset(opad + digest_size, 0, block_size - digest_size);
        } else {
                memcpy(ipad, auth_key, auth_keylen);
                memcpy(opad, auth_key, auth_keylen);
                memset(ipad + auth_keylen, 0, block_size - auth_keylen);
                memset(opad + auth_keylen, 0, block_size - auth_keylen);
        }

        for (i = 0; i < block_size; i++) {
                char *ipad_ptr = ipad + i;
                char *opad_ptr = opad + i;
                *ipad_ptr ^= 0x36;
                *opad_ptr ^= 0x5C;
        }

        /* Inner hash state: H(key ^ ipad) */
        if (crypto_shash_init(&desc.shash))
                return -EFAULT;

        if (crypto_shash_update(&desc.shash, ipad, block_size))
                return -EFAULT;

        hash_state_out = (__be32 *)hash->sha.state1;
        hash512_state_out = (__be64 *)hash_state_out;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                if (crypto_shash_export(&desc.shash, &sha1))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
                        *hash_state_out = cpu_to_be32(*(sha1.state + i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                if (crypto_shash_export(&desc.shash, &sha256))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
                        *hash_state_out = cpu_to_be32(*(sha256.state + i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                if (crypto_shash_export(&desc.shash, &sha512))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
                        *hash512_state_out = cpu_to_be64(*(sha512.state + i));
                break;
        default:
                return -EFAULT;
        }

        /* Outer hash state: H(key ^ opad), stored after the inner state */
        if (crypto_shash_init(&desc.shash))
                return -EFAULT;

        if (crypto_shash_update(&desc.shash, opad, block_size))
                return -EFAULT;

        offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
        hash_state_out = (__be32 *)(hash->sha.state1 + offset);
        hash512_state_out = (__be64 *)hash_state_out;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                if (crypto_shash_export(&desc.shash, &sha1))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
                        *hash_state_out = cpu_to_be32(*(sha1.state + i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                if (crypto_shash_export(&desc.shash, &sha256))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
                        *hash_state_out = cpu_to_be32(*(sha256.state + i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                if (crypto_shash_export(&desc.shash, &sha512))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
                        *hash512_state_out = cpu_to_be64(*(sha512.state + i));
                break;
        default:
                return -EFAULT;
        }
        return 0;
}

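/* Fill in the request header fields common to all LA requests built here */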
static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
{
        header->hdr_flags =
                ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
        header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
        header->comn_req_flags =
                ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
                                            QAT_COMN_PTR_TYPE_SGL);
        ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
                                           ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
        ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
                                  ICP_QAT_FW_LA_PARTIAL_NONE);
        ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
                                           ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
        ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
                                ICP_QAT_FW_LA_NO_PROTO);
        ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
                                       ICP_QAT_FW_LA_NO_UPDATE_STATE);
}

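/*
 * Build the encrypt direction content descriptor and request template:
 * cipher block followed by hash block, with the slices chained
 * CIPHER -> AUTH so the digest is computed over the ciphertext.
 */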
static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx,
                                    int alg, struct crypto_authenc_keys *keys)
{
        struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
        unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
        struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
        struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
        struct icp_qat_hw_auth_algo_blk *hash =
                (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
                sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
        struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req_tmpl;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
        struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
        void *ptr = &req_tmpl->cd_ctrl;
        struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
        struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
        struct icp_qat_fw_la_auth_req_params *auth_param =
                (struct icp_qat_fw_la_auth_req_params *)
                ((char *)&req_tmpl->serv_specif_rqpars +
                 sizeof(struct icp_qat_fw_la_cipher_req_params));

        /* CD setup */
        cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg);
        memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
        hash->sha.inner_setup.auth_config.config =
                ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
                                             ctx->qat_hash_alg, digestsize);
        hash->sha.inner_setup.auth_counter.counter =
                cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

        if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen,
                                   (uint8_t *)ctx->auth_hw_state_enc))
                return -EFAULT;

        /* Request setup */
        qat_alg_init_common_hdr(header);
        header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
        ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
                                   ICP_QAT_FW_LA_RET_AUTH_RES);
        ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
                                   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
        cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
        cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

        /* Cipher CD config setup */
        cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
        cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
        cipher_cd_ctrl->cipher_cfg_offset = 0;
        ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
        ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);

        /* Auth CD config setup */
        hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
        hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
        hash_cd_ctrl->inner_res_sz = digestsize;
        hash_cd_ctrl->final_sz = digestsize;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                hash_cd_ctrl->inner_state1_sz =
                        round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
                hash_cd_ctrl->inner_state2_sz =
                        round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
                hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
                hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
                break;
        default:
                break;
        }
        hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
                        ((sizeof(struct icp_qat_hw_auth_setup) +
                         round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
        auth_param->u1.auth_partial_st_prefix = ctx->auth_state_enc_paddr +
                        sizeof(struct icp_qat_hw_auth_counter) +
                        round_up(hash_cd_ctrl->inner_state1_sz, 8);
        ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
        ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
        return 0;
}

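/*
 * Build the decrypt direction content descriptor and request template:
 * hash block followed by cipher block, with the slices chained
 * AUTH -> CIPHER so the digest is verified by the hardware.
 */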
static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx,
                                    int alg, struct crypto_authenc_keys *keys)
{
        struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
        unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
        struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
        struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
        struct icp_qat_hw_cipher_algo_blk *cipher =
                (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
                sizeof(struct icp_qat_hw_auth_setup) +
                roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
        struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req_tmpl;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
        struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
        void *ptr = &req_tmpl->cd_ctrl;
        struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
        struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
        struct icp_qat_fw_la_auth_req_params *auth_param =
                (struct icp_qat_fw_la_auth_req_params *)
                ((char *)&req_tmpl->serv_specif_rqpars +
                sizeof(struct icp_qat_fw_la_cipher_req_params));

        /* CD setup */
        cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg);
        memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
        hash->sha.inner_setup.auth_config.config =
                ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
                                             ctx->qat_hash_alg,
                                             digestsize);
        hash->sha.inner_setup.auth_counter.counter =
                cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

        if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen,
                                   (uint8_t *)ctx->auth_hw_state_dec))
                return -EFAULT;

        /* Request setup */
        qat_alg_init_common_hdr(header);
        header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
        ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
                                   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
        ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
                                   ICP_QAT_FW_LA_CMP_AUTH_RES);
        cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
        cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

        /* Cipher CD config setup */
        cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
        cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
        cipher_cd_ctrl->cipher_cfg_offset =
                (sizeof(struct icp_qat_hw_auth_setup) +
                 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
        ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
        ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);

        /* Auth CD config setup */
        hash_cd_ctrl->hash_cfg_offset = 0;
        hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
        hash_cd_ctrl->inner_res_sz = digestsize;
        hash_cd_ctrl->final_sz = digestsize;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                hash_cd_ctrl->inner_state1_sz =
                        round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
                hash_cd_ctrl->inner_state2_sz =
                        round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
                hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
                hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
                break;
        default:
                break;
        }

        hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
                        ((sizeof(struct icp_qat_hw_auth_setup) +
                         round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
        /* point at this session's own auth state buffer */
        auth_param->u1.auth_partial_st_prefix = ctx->auth_state_dec_paddr +
                        sizeof(struct icp_qat_hw_auth_counter) +
                        round_up(hash_cd_ctrl->inner_state1_sz, 8);
        auth_param->auth_res_sz = digestsize;
        ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
        ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
        return 0;
}

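/* Split the authenc() key blob, derive the AES variant from the enc key
 * length and initialise both directions of the session.
 */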
static int qat_alg_init_sessions(struct qat_alg_session_ctx *ctx,
                                 const uint8_t *key, unsigned int keylen)
{
        struct crypto_authenc_keys keys;
        int alg;

        if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE))
                return -EFAULT;

        if (crypto_authenc_extractkeys(&keys, key, keylen))
                goto bad_key;

        switch (keys.enckeylen) {
        case AES_KEYSIZE_128:
                alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
                break;
        case AES_KEYSIZE_192:
                alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
                break;
        case AES_KEYSIZE_256:
                alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
                break;
        default:
                goto bad_key;
        }

        if (qat_alg_init_enc_session(ctx, alg, &keys))
                goto error;

        if (qat_alg_init_dec_session(ctx, alg, &keys))
                goto error;

        return 0;
bad_key:
        crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
error:
        return -EFAULT;
}

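/*
 * setkey either rekeys an existing session in place or, on first use,
 * binds the tfm to a crypto instance on the local node and allocates the
 * DMA-coherent buffers for the content descriptors and HMAC state.
 */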
static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
                          unsigned int keylen)
{
        struct qat_alg_session_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev;

        spin_lock(&ctx->lock);
        if (ctx->enc_cd) {
                /* rekeying */
                dev = &GET_DEV(ctx->inst->accel_dev);
                memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
                memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
                memset(ctx->auth_hw_state_enc, 0,
                       sizeof(struct qat_auth_state));
                memset(ctx->auth_hw_state_dec, 0,
                       sizeof(struct qat_auth_state));
                memset(&ctx->enc_fw_req_tmpl, 0,
                       sizeof(struct icp_qat_fw_la_bulk_req));
                memset(&ctx->dec_fw_req_tmpl, 0,
                       sizeof(struct icp_qat_fw_la_bulk_req));
        } else {
                /* new key */
                int node = get_current_node();
                struct qat_crypto_instance *inst =
                                qat_crypto_get_instance_node(node);
                if (!inst) {
                        spin_unlock(&ctx->lock);
                        return -EINVAL;
                }

                dev = &GET_DEV(inst->accel_dev);
                ctx->inst = inst;
                ctx->enc_cd = dma_zalloc_coherent(dev,
                                                  sizeof(struct qat_alg_cd),
                                                  &ctx->enc_cd_paddr,
                                                  GFP_ATOMIC);
                if (!ctx->enc_cd) {
                        spin_unlock(&ctx->lock);
                        return -ENOMEM;
                }
                ctx->dec_cd = dma_zalloc_coherent(dev,
                                                  sizeof(struct qat_alg_cd),
                                                  &ctx->dec_cd_paddr,
                                                  GFP_ATOMIC);
                if (!ctx->dec_cd) {
                        spin_unlock(&ctx->lock);
                        goto out_free_enc;
                }
                ctx->auth_hw_state_enc =
                        dma_zalloc_coherent(dev, sizeof(struct qat_auth_state),
                                            &ctx->auth_state_enc_paddr,
                                            GFP_ATOMIC);
                if (!ctx->auth_hw_state_enc) {
                        spin_unlock(&ctx->lock);
                        goto out_free_dec;
                }
                ctx->auth_hw_state_dec =
                        dma_zalloc_coherent(dev, sizeof(struct qat_auth_state),
                                            &ctx->auth_state_dec_paddr,
                                            GFP_ATOMIC);
                if (!ctx->auth_hw_state_dec) {
                        spin_unlock(&ctx->lock);
                        goto out_free_auth_enc;
                }
        }
        spin_unlock(&ctx->lock);
        if (qat_alg_init_sessions(ctx, key, keylen))
                goto out_free_all;

        return 0;

out_free_all:
        dma_free_coherent(dev, sizeof(struct qat_auth_state),
                          ctx->auth_hw_state_dec, ctx->auth_state_dec_paddr);
        ctx->auth_hw_state_dec = NULL;
out_free_auth_enc:
        dma_free_coherent(dev, sizeof(struct qat_auth_state),
                          ctx->auth_hw_state_enc, ctx->auth_state_enc_paddr);
        ctx->auth_hw_state_enc = NULL;
out_free_dec:
        dma_free_coherent(dev, sizeof(struct qat_alg_cd),
                          ctx->dec_cd, ctx->dec_cd_paddr);
        ctx->dec_cd = NULL;
out_free_enc:
        dma_free_coherent(dev, sizeof(struct qat_alg_cd),
                          ctx->enc_cd, ctx->enc_cd_paddr);
        ctx->enc_cd = NULL;
        return -ENOMEM;
}

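/* Unmap and free the buffer lists built by qat_alg_sgl_to_bufl() */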
static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
                              struct qat_crypto_request *qat_req)
{
        struct device *dev = &GET_DEV(inst->accel_dev);
        struct qat_alg_buf_list *bl = qat_req->buf.bl;
        struct qat_alg_buf_list *blout = qat_req->buf.blout;
        dma_addr_t blp = qat_req->buf.blp;
        dma_addr_t blpout = qat_req->buf.bloutp;
        size_t sz = qat_req->buf.sz;
        int i, bufs = bl->num_bufs;

        for (i = 0; i < bl->num_bufs; i++)
                dma_unmap_single(dev, bl->bufers[i].addr,
                                 bl->bufers[i].len, DMA_BIDIRECTIONAL);

        dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
        kfree(bl);
        if (blp != blpout) {
                /* If out of place operation dma unmap only data */
                int bufless = bufs - blout->num_mapped_bufs;

                for (i = bufless; i < bufs; i++) {
                        dma_unmap_single(dev, blout->bufers[i].addr,
                                         blout->bufers[i].len,
                                         DMA_BIDIRECTIONAL);
                }
                dma_unmap_single(dev, blpout, sz, DMA_TO_DEVICE);
                kfree(blout);
        }
}

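/*
 * DMA-map the assoc data, IV and src/dst scatterlists and build the
 * firmware buffer lists.  Out of place requests get a second list for
 * the destination that reuses the assoc and IV mappings.
 */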
static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
                               struct scatterlist *assoc,
                               struct scatterlist *sgl,
                               struct scatterlist *sglout, uint8_t *iv,
                               uint8_t ivlen,
                               struct qat_crypto_request *qat_req)
{
        struct device *dev = &GET_DEV(inst->accel_dev);
        int i, bufs = 0, n = sg_nents(sgl), assoc_n = sg_nents(assoc);
        struct qat_alg_buf_list *bufl;
        struct qat_alg_buf_list *buflout = NULL;
        dma_addr_t blp;
        dma_addr_t bloutp = 0;
        struct scatterlist *sg;
        size_t sz = sizeof(struct qat_alg_buf_list) +
                        ((1 + n + assoc_n) * sizeof(struct qat_alg_buf));

        if (unlikely(!n))
                return -EINVAL;

        bufl = kmalloc_node(sz, GFP_ATOMIC, inst->accel_dev->numa_node);
        if (unlikely(!bufl))
                return -ENOMEM;

        blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev, blp)))
                goto err;

        /* Map the assoc data first, then the IV, then the payload */
        for_each_sg(assoc, sg, assoc_n, i) {
                bufl->bufers[bufs].addr = dma_map_single(dev,
                                                         sg_virt(sg),
                                                         sg->length,
                                                         DMA_BIDIRECTIONAL);
                bufl->bufers[bufs].len = sg->length;
                if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
                        goto err;
                bufs++;
        }
        bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
                                                 DMA_BIDIRECTIONAL);
        bufl->bufers[bufs].len = ivlen;
        if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
                goto err;
        bufs++;

        for_each_sg(sgl, sg, n, i) {
                int y = i + bufs;

                bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
                                                      sg->length,
                                                      DMA_BIDIRECTIONAL);
                bufl->bufers[y].len = sg->length;
                if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
                        goto err;
        }
        bufl->num_bufs = n + bufs;
        qat_req->buf.bl = bufl;
        qat_req->buf.blp = blp;
        qat_req->buf.sz = sz;
        /* Handle out of place operation */
        if (sgl != sglout) {
                struct qat_alg_buf *bufers;

                buflout = kmalloc_node(sz, GFP_ATOMIC,
                                       inst->accel_dev->numa_node);
                if (unlikely(!buflout))
                        goto err;
                bloutp = dma_map_single(dev, buflout, sz, DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(dev, bloutp)))
                        goto err;
                bufers = buflout->bufers;
                /* For out of place operation dma map only the data and
                 * reuse the assoc and iv mappings from the input list
                 */
                for (i = 0; i < bufs; i++) {
                        bufers[i].len = bufl->bufers[i].len;
                        bufers[i].addr = bufl->bufers[i].addr;
                }
                for_each_sg(sglout, sg, n, i) {
                        int y = i + bufs;

                        bufers[y].addr = dma_map_single(dev, sg_virt(sg),
                                                        sg->length,
                                                        DMA_BIDIRECTIONAL);
                        bufers[y].len = sg->length;
                        if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
                                goto err;
                }
                buflout->num_bufs = n + bufs;
                buflout->num_mapped_bufs = n;
                qat_req->buf.blout = buflout;
                qat_req->buf.bloutp = bloutp;
        } else {
                /* Otherwise set the src and dst to the same address */
                qat_req->buf.bloutp = qat_req->buf.blp;
        }
        return 0;
err:
        dev_err(dev, "Failed to map buf for dma\n");
        for_each_sg(sgl, sg, n + bufs, i) {
                if (!dma_mapping_error(dev, bufl->bufers[i].addr)) {
                        dma_unmap_single(dev, bufl->bufers[i].addr,
                                         bufl->bufers[i].len,
                                         DMA_BIDIRECTIONAL);
                }
        }
        if (!dma_mapping_error(dev, blp))
                dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
        kfree(bufl);
        if (sgl != sglout && buflout) {
                for_each_sg(sglout, sg, n, i) {
                        int y = i + bufs;

                        if (!dma_mapping_error(dev, buflout->bufers[y].addr))
                                dma_unmap_single(dev, buflout->bufers[y].addr,
                                                 buflout->bufers[y].len,
                                                 DMA_BIDIRECTIONAL);
                }
                if (!dma_mapping_error(dev, bloutp))
                        dma_unmap_single(dev, bloutp, sz, DMA_TO_DEVICE);
                kfree(buflout);
        }
        return -ENOMEM;
}

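/* Response ring callback: unmaps the request buffers and completes the
 * aead request, with -EBADMSG if the firmware reports a failure.
 */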
void qat_alg_callback(void *resp)
{
        struct icp_qat_fw_la_resp *qat_resp = resp;
        struct qat_crypto_request *qat_req =
                        (void *)(dma_addr_t)qat_resp->opaque_data;
        struct qat_alg_session_ctx *ctx = qat_req->ctx;
        struct qat_crypto_instance *inst = ctx->inst;
        struct aead_request *areq = qat_req->areq;
        uint8_t stat_field = qat_resp->comn_resp.comn_status;
        int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

        qat_alg_free_bufl(inst, qat_req);
        if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
                res = -EBADMSG;
        areq->base.complete(&areq->base, res);
}

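/* Decrypt (hash then cipher).  The digest at the tail of src is verified
 * by the hardware, so cipher_length excludes it.
 */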
static int qat_alg_dec(struct aead_request *areq)
{
        struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
        struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
        struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qat_crypto_request *qat_req = aead_request_ctx(areq);
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_auth_req_params *auth_param;
        struct icp_qat_fw_la_bulk_req *msg;
        int digest_size = crypto_aead_crt(aead_tfm)->authsize;
        int ret, ctr = 0;

        ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
                                  areq->iv, AES_BLOCK_SIZE, qat_req);
        if (unlikely(ret))
                return ret;

        msg = &qat_req->req;
        *msg = ctx->dec_fw_req_tmpl;
        qat_req->ctx = ctx;
        qat_req->areq = areq;
        qat_req->req.comn_mid.opaque_data = (uint64_t)(dma_addr_t)qat_req;
        qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
        qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
        cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
        cipher_param->cipher_length = areq->cryptlen - digest_size;
        cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
        memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
        auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
        auth_param->auth_off = 0;
        auth_param->auth_len = areq->assoclen +
                                cipher_param->cipher_length + AES_BLOCK_SIZE;
        do {
                ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
        } while (ret == -EAGAIN && ctr++ < 10);

        if (ret == -EAGAIN) {
                qat_alg_free_bufl(ctx->inst, qat_req);
                return -EBUSY;
        }
        return -EINPROGRESS;
}

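/* Common encrypt path.  With enc_iv set the IV block in front of the
 * payload is encrypted together with the plaintext (givencrypt case);
 * otherwise the IV is carried in the request parameters only.
 */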
static int qat_alg_enc_internal(struct aead_request *areq, uint8_t *iv,
                                int enc_iv)
{
        struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
        struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
        struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qat_crypto_request *qat_req = aead_request_ctx(areq);
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_auth_req_params *auth_param;
        struct icp_qat_fw_la_bulk_req *msg;
        int ret, ctr = 0;

        ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
                                  iv, AES_BLOCK_SIZE, qat_req);
        if (unlikely(ret))
                return ret;

        msg = &qat_req->req;
        *msg = ctx->enc_fw_req_tmpl;
        qat_req->ctx = ctx;
        qat_req->areq = areq;
        qat_req->req.comn_mid.opaque_data = (uint64_t)(dma_addr_t)qat_req;
        qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
        qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
        cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
        auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));

        if (enc_iv) {
                cipher_param->cipher_length = areq->cryptlen + AES_BLOCK_SIZE;
                cipher_param->cipher_offset = areq->assoclen;
        } else {
                memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
                cipher_param->cipher_length = areq->cryptlen;
                cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
        }
        auth_param->auth_off = 0;
        auth_param->auth_len = areq->assoclen + areq->cryptlen + AES_BLOCK_SIZE;

        do {
                ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
        } while (ret == -EAGAIN && ctr++ < 10);

        if (ret == -EAGAIN) {
                qat_alg_free_bufl(ctx->inst, qat_req);
                return -EBUSY;
        }
        return -EINPROGRESS;
}

static int qat_alg_enc(struct aead_request *areq)
{
        return qat_alg_enc_internal(areq, areq->iv, 0);
}

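/* Generate the IV from the per-session salt and the request sequence
 * number, then encrypt with the IV included in the ciphertext.
 */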
static int qat_alg_genivenc(struct aead_givcrypt_request *req)
{
        struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq);
        struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
        struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
        __be64 seq;

        memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE);
        seq = cpu_to_be64(req->seq);
        memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t),
               &seq, sizeof(uint64_t));
        return qat_alg_enc_internal(&req->areq, req->giv, 1);
}

static int qat_alg_init(struct crypto_tfm *tfm,
                        enum icp_qat_hw_auth_algo hash, const char *hash_name)
{
        struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);

        memset(ctx, 0, sizeof(*ctx));
        ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
        if (IS_ERR(ctx->hash_tfm))
                return -EFAULT;
        spin_lock_init(&ctx->lock);
        ctx->qat_hash_alg = hash;
        tfm->crt_aead.reqsize = sizeof(struct aead_request) +
                                sizeof(struct qat_crypto_request);
        ctx->tfm = tfm;
        return 0;
}

static int qat_alg_sha1_init(struct crypto_tfm *tfm)
{
        return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
}

static int qat_alg_sha256_init(struct crypto_tfm *tfm)
{
        return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
}

static int qat_alg_sha512_init(struct crypto_tfm *tfm)
{
        return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
}

static void qat_alg_exit(struct crypto_tfm *tfm)
{
        struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qat_crypto_instance *inst = ctx->inst;
        struct device *dev;

        if (!IS_ERR(ctx->hash_tfm))
                crypto_free_shash(ctx->hash_tfm);

        if (!inst)
                return;

        dev = &GET_DEV(inst->accel_dev);
        if (ctx->enc_cd)
                dma_free_coherent(dev, sizeof(struct qat_alg_cd),
                                  ctx->enc_cd, ctx->enc_cd_paddr);
        if (ctx->dec_cd)
                dma_free_coherent(dev, sizeof(struct qat_alg_cd),
                                  ctx->dec_cd, ctx->dec_cd_paddr);
        if (ctx->auth_hw_state_enc)
                dma_free_coherent(dev, sizeof(struct qat_auth_state),
                                  ctx->auth_hw_state_enc,
                                  ctx->auth_state_enc_paddr);
        if (ctx->auth_hw_state_dec)
                dma_free_coherent(dev, sizeof(struct qat_auth_state),
                                  ctx->auth_hw_state_dec,
                                  ctx->auth_state_dec_paddr);

        qat_crypto_put_instance(inst);
}

static struct crypto_alg qat_algs[] = { {
        .cra_name = "authenc(hmac(sha1),cbc(aes))",
        .cra_driver_name = "qat_aes_cbc_hmac_sha1",
        .cra_priority = 4001,
        .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct qat_alg_session_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_aead_type,
        .cra_module = THIS_MODULE,
        .cra_init = qat_alg_sha1_init,
        .cra_exit = qat_alg_exit,
        .cra_u = {
                .aead = {
                        .setkey = qat_alg_setkey,
                        .decrypt = qat_alg_dec,
                        .encrypt = qat_alg_enc,
                        .givencrypt = qat_alg_genivenc,
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA1_DIGEST_SIZE,
                },
        },
}, {
        .cra_name = "authenc(hmac(sha256),cbc(aes))",
        .cra_driver_name = "qat_aes_cbc_hmac_sha256",
        .cra_priority = 4001,
        .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct qat_alg_session_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_aead_type,
        .cra_module = THIS_MODULE,
        .cra_init = qat_alg_sha256_init,
        .cra_exit = qat_alg_exit,
        .cra_u = {
                .aead = {
                        .setkey = qat_alg_setkey,
                        .decrypt = qat_alg_dec,
                        .encrypt = qat_alg_enc,
                        .givencrypt = qat_alg_genivenc,
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
                },
        },
}, {
        .cra_name = "authenc(hmac(sha512),cbc(aes))",
        .cra_driver_name = "qat_aes_cbc_hmac_sha512",
        .cra_priority = 4001,
        .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct qat_alg_session_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_aead_type,
        .cra_module = THIS_MODULE,
        .cra_init = qat_alg_sha512_init,
        .cra_exit = qat_alg_exit,
        .cra_u = {
                .aead = {
                        .setkey = qat_alg_setkey,
                        .decrypt = qat_alg_dec,
                        .encrypt = qat_alg_enc,
                        .givencrypt = qat_alg_genivenc,
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA512_DIGEST_SIZE,
                },
        },
} };

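/* Algorithms are registered once, when the first accelerator comes up;
 * additional devices only bump the refcount.
 */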
int qat_algs_register(void)
{
        if (atomic_add_return(1, &active_dev) == 1) {
                int i;

                /* Restore cra_flags: unregistering the algs on the last
                 * device marks them CRYPTO_ALG_DEAD.
                 */
                for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
                        qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_AEAD |
                                                CRYPTO_ALG_ASYNC;
                return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
        }
        return 0;
}

int qat_algs_unregister(void)
{
        if (atomic_sub_return(1, &active_dev) == 0)
                return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
        return 0;
}

int qat_algs_init(void)
{
        atomic_set(&active_dev, 0);
        /* Propagate a failure here rather than risk using a NULL
         * crypto_default_rng in qat_alg_init_sessions() later.
         */
        return crypto_get_default_rng();
}

void qat_algs_exit(void)
{
        crypto_put_default_rng();
}