2 This file is provided under a dual BSD/GPLv2 license. When using or
3 redistributing this file, you may do so under either license.
6 Copyright(c) 2014 Intel Corporation.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of version 2 of the GNU General Public License as
9 published by the Free Software Foundation.
11 This program is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
20 Copyright(c) 2014 Intel Corporation.
21 Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
25 * Redistributions of source code must retain the above copyright
26 notice, this list of conditions and the following disclaimer.
27 * Redistributions in binary form must reproduce the above copyright
28 notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
31 * Neither the name of Intel Corporation nor the names of its
32 contributors may be used to endorse or promote products derived
33 from this software without specific prior written permission.
35 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
47 #include <linux/module.h>
48 #include <linux/slab.h>
49 #include <linux/crypto.h>
50 #include <crypto/aead.h>
51 #include <crypto/aes.h>
52 #include <crypto/sha.h>
53 #include <crypto/hash.h>
54 #include <crypto/algapi.h>
55 #include <crypto/authenc.h>
56 #include <crypto/rng.h>
57 #include <linux/dma-mapping.h>
58 #include "adf_accel_devices.h"
59 #include "adf_transport.h"
60 #include "adf_common_drv.h"
61 #include "qat_crypto.h"
62 #include "icp_qat_hw.h"
63 #include "icp_qat_fw.h"
64 #include "icp_qat_fw_la.h"
66 #define QAT_AES_HW_CONFIG_ENC(alg) \
67 ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
68 ICP_QAT_HW_CIPHER_NO_CONVERT, \
69 ICP_QAT_HW_CIPHER_ENCRYPT)
71 #define QAT_AES_HW_CONFIG_DEC(alg) \
72 ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
73 ICP_QAT_HW_CIPHER_KEY_CONVERT, \
74 ICP_QAT_HW_CIPHER_DECRYPT)
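/*
 * Both helpers build the 32-bit cipher config word for AES in CBC mode.
 * The encrypt descriptor uses the key as supplied (NO_CONVERT); the decrypt
 * descriptor additionally requests a key conversion (KEY_CONVERT),
 * presumably so the hardware derives the AES decryption key schedule,
 * before running the cipher in the decrypt direction.
 */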
76 static atomic_t active_dev;
struct qat_alg_buf_list {
	uint32_t num_bufs;
	uint32_t num_mapped_bufs;
	struct qat_alg_buf bufers[];
} __packed __aligned(64);
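/*
 * Flattened scatter-gather list handed to the firmware: bufers[] holds one
 * DMA address/length pair per mapped segment.  The list itself is DMA
 * mapped too, hence the packing and 64-byte alignment.
 */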
/* Common content descriptor */
struct qat_alg_cd {
	union {
		struct qat_enc { /* Encrypt content desc */
			struct icp_qat_hw_cipher_algo_blk cipher;
			struct icp_qat_hw_auth_algo_blk hash;
		} qat_enc_cd;
		struct qat_dec { /* Decrypt content desc */
			struct icp_qat_hw_auth_algo_blk hash;
			struct icp_qat_hw_cipher_algo_blk cipher;
		} qat_dec_cd;
	};
} __aligned(64);
105 #define MAX_AUTH_STATE_SIZE sizeof(struct icp_qat_hw_auth_algo_blk)
struct qat_auth_state {
	uint8_t data[MAX_AUTH_STATE_SIZE];
} __aligned(64);
111 struct qat_alg_session_ctx {
112 struct qat_alg_cd *enc_cd;
113 dma_addr_t enc_cd_paddr;
114 struct qat_alg_cd *dec_cd;
115 dma_addr_t dec_cd_paddr;
116 struct qat_auth_state *auth_hw_state_enc;
117 dma_addr_t auth_state_enc_paddr;
118 struct qat_auth_state *auth_hw_state_dec;
119 dma_addr_t auth_state_dec_paddr;
120 struct icp_qat_fw_la_bulk_req enc_fw_req_tmpl;
121 struct icp_qat_fw_la_bulk_req dec_fw_req_tmpl;
122 struct qat_crypto_instance *inst;
123 struct crypto_tfm *tfm;
124 struct crypto_shash *hash_tfm;
125 enum icp_qat_hw_auth_algo qat_hash_alg;
126 uint8_t salt[AES_BLOCK_SIZE];
	spinlock_t lock; /* protects qat_alg_session_ctx struct */
};
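/*
 * Per-tfm session state: DMA-coherent encrypt and decrypt content
 * descriptors, the precomputed HMAC state for each direction, and prebuilt
 * firmware request templates that every request copies and then patches
 * with its own buffer addresses and lengths.
 */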
static int get_current_node(void)
{
	return cpu_data(current_thread_info()->cpu).phys_proc_id;
}
135 static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
137 switch (qat_hash_alg) {
138 case ICP_QAT_HW_AUTH_ALGO_SHA1:
139 return ICP_QAT_HW_SHA1_STATE1_SZ;
140 case ICP_QAT_HW_AUTH_ALGO_SHA256:
141 return ICP_QAT_HW_SHA256_STATE1_SZ;
142 case ICP_QAT_HW_AUTH_ALGO_SHA512:
143 return ICP_QAT_HW_SHA512_STATE1_SZ;
150 static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
151 struct qat_alg_session_ctx *ctx,
152 const uint8_t *auth_key,
153 unsigned int auth_keylen, uint8_t *auth_state)
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(ctx->hash_tfm)];
	} desc;
159 struct sha1_state sha1;
160 struct sha256_state sha256;
161 struct sha512_state sha512;
162 int block_size = crypto_shash_blocksize(ctx->hash_tfm);
163 int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
164 uint8_t *ipad = auth_state;
165 uint8_t *opad = ipad + block_size;
166 __be32 *hash_state_out;
167 __be64 *hash512_state_out;
170 desc.shash.tfm = ctx->hash_tfm;
171 desc.shash.flags = 0x0;
173 if (auth_keylen > block_size) {
174 char buff[SHA512_BLOCK_SIZE];
175 int ret = crypto_shash_digest(&desc.shash, auth_key,
180 memcpy(ipad, buff, digest_size);
181 memcpy(opad, buff, digest_size);
182 memset(ipad + digest_size, 0, block_size - digest_size);
183 memset(opad + digest_size, 0, block_size - digest_size);
185 memcpy(ipad, auth_key, auth_keylen);
186 memcpy(opad, auth_key, auth_keylen);
187 memset(ipad + auth_keylen, 0, block_size - auth_keylen);
188 memset(opad + auth_keylen, 0, block_size - auth_keylen);
	for (i = 0; i < block_size; i++) {
		char *ipad_ptr = ipad + i;
		char *opad_ptr = opad + i;
		*ipad_ptr ^= 0x36;
		*opad_ptr ^= 0x5c;
	}
198 if (crypto_shash_init(&desc.shash))
201 if (crypto_shash_update(&desc.shash, ipad, block_size))
204 hash_state_out = (__be32 *)hash->sha.state1;
205 hash512_state_out = (__be64 *)hash_state_out;
207 switch (ctx->qat_hash_alg) {
208 case ICP_QAT_HW_AUTH_ALGO_SHA1:
209 if (crypto_shash_export(&desc.shash, &sha1))
211 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
212 *hash_state_out = cpu_to_be32(*(sha1.state + i));
214 case ICP_QAT_HW_AUTH_ALGO_SHA256:
215 if (crypto_shash_export(&desc.shash, &sha256))
217 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
218 *hash_state_out = cpu_to_be32(*(sha256.state + i));
220 case ICP_QAT_HW_AUTH_ALGO_SHA512:
221 if (crypto_shash_export(&desc.shash, &sha512))
223 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
224 *hash512_state_out = cpu_to_be64(*(sha512.state + i));
230 if (crypto_shash_init(&desc.shash))
233 if (crypto_shash_update(&desc.shash, opad, block_size))
236 offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
237 hash_state_out = (__be32 *)(hash->sha.state1 + offset);
238 hash512_state_out = (__be64 *)hash_state_out;
240 switch (ctx->qat_hash_alg) {
241 case ICP_QAT_HW_AUTH_ALGO_SHA1:
242 if (crypto_shash_export(&desc.shash, &sha1))
244 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
245 *hash_state_out = cpu_to_be32(*(sha1.state + i));
247 case ICP_QAT_HW_AUTH_ALGO_SHA256:
248 if (crypto_shash_export(&desc.shash, &sha256))
250 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
251 *hash_state_out = cpu_to_be32(*(sha256.state + i));
253 case ICP_QAT_HW_AUTH_ALGO_SHA512:
254 if (crypto_shash_export(&desc.shash, &sha512))
256 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
257 *hash512_state_out = cpu_to_be64(*(sha512.state + i));
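/*
 * The precompute above is the standard HMAC split:
 *
 *	HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))
 *
 * state1 receives the partial digest of (K ^ ipad) and, at an offset
 * rounded up from the state size, the partial digest of (K ^ opad), both
 * stored big-endian so the accelerator can finish the HMAC without ever
 * seeing the key.
 */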
static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
{
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
269 header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
270 header->comn_req_flags =
271 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
272 QAT_COMN_PTR_TYPE_SGL);
273 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
274 ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
275 ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
276 ICP_QAT_FW_LA_PARTIAL_NONE);
277 ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
278 ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
279 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
280 ICP_QAT_FW_LA_NO_PROTO);
281 ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
282 ICP_QAT_FW_LA_NO_UPDATE_STATE);
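/*
 * Header fields shared by every lookaside (LA) request: SGL source and
 * destination pointers, a 64-bit content descriptor address, the digest
 * carried in-line in the buffer, a 16-byte IV field, and no partial
 * processing, protocol offload or state update between requests.
 */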
285 static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx,
286 int alg, struct crypto_authenc_keys *keys)
288 struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
289 unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
290 struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
291 struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
292 struct icp_qat_hw_auth_algo_blk *hash =
293 (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
294 sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
295 struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req_tmpl;
296 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
297 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
298 void *ptr = &req_tmpl->cd_ctrl;
299 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
300 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
301 struct icp_qat_fw_la_auth_req_params *auth_param =
302 (struct icp_qat_fw_la_auth_req_params *)
303 ((char *)&req_tmpl->serv_specif_rqpars +
304 sizeof(struct icp_qat_fw_la_cipher_req_params));
307 cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg);
308 memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
309 hash->sha.inner_setup.auth_config.config =
310 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
311 ctx->qat_hash_alg, digestsize);
312 hash->sha.inner_setup.auth_counter.counter =
313 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
315 if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen,
316 (uint8_t *)ctx->auth_hw_state_enc))
320 qat_alg_init_common_hdr(header);
321 header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
322 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
323 ICP_QAT_FW_LA_RET_AUTH_RES);
324 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
325 ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
326 cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
327 cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
329 /* Cipher CD config setup */
330 cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
331 cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
332 cipher_cd_ctrl->cipher_cfg_offset = 0;
333 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
334 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
335 /* Auth CD config setup */
336 hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
337 hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
338 hash_cd_ctrl->inner_res_sz = digestsize;
339 hash_cd_ctrl->final_sz = digestsize;
341 switch (ctx->qat_hash_alg) {
342 case ICP_QAT_HW_AUTH_ALGO_SHA1:
343 hash_cd_ctrl->inner_state1_sz =
344 round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
345 hash_cd_ctrl->inner_state2_sz =
346 round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
348 case ICP_QAT_HW_AUTH_ALGO_SHA256:
349 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
350 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
352 case ICP_QAT_HW_AUTH_ALGO_SHA512:
353 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
354 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
359 hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
360 ((sizeof(struct icp_qat_hw_auth_setup) +
361 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
362 auth_param->u1.auth_partial_st_prefix = ctx->auth_state_enc_paddr +
363 sizeof(struct icp_qat_hw_auth_counter) +
364 round_up(hash_cd_ctrl->inner_state1_sz, 8);
365 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
366 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
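/*
 * Encrypt content descriptor layout: the AES config word and key sit at
 * offset 0 with the auth setup block right behind them, and the slice
 * chain runs CIPHER -> AUTH -> DRAM_WR (ICP_QAT_FW_LA_CMD_CIPHER_HASH),
 * i.e. encrypt first, then compute and return the digest (RET_AUTH_RES).
 */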
370 static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx,
371 int alg, struct crypto_authenc_keys *keys)
373 struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
374 unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
375 struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
376 struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
377 struct icp_qat_hw_cipher_algo_blk *cipher =
378 (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
379 sizeof(struct icp_qat_hw_auth_setup) +
380 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
381 struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req_tmpl;
382 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
383 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
384 void *ptr = &req_tmpl->cd_ctrl;
385 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
386 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
387 struct icp_qat_fw_la_auth_req_params *auth_param =
388 (struct icp_qat_fw_la_auth_req_params *)
389 ((char *)&req_tmpl->serv_specif_rqpars +
390 sizeof(struct icp_qat_fw_la_cipher_req_params));
393 cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg);
394 memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
395 hash->sha.inner_setup.auth_config.config =
396 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
399 hash->sha.inner_setup.auth_counter.counter =
400 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
402 if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen,
403 (uint8_t *)ctx->auth_hw_state_dec))
407 qat_alg_init_common_hdr(header);
408 header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
409 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
410 ICP_QAT_FW_LA_NO_RET_AUTH_RES);
411 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
412 ICP_QAT_FW_LA_CMP_AUTH_RES);
413 cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
414 cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
416 /* Cipher CD config setup */
417 cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
418 cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
419 cipher_cd_ctrl->cipher_cfg_offset =
420 (sizeof(struct icp_qat_hw_auth_setup) +
421 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
422 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
423 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
425 /* Auth CD config setup */
426 hash_cd_ctrl->hash_cfg_offset = 0;
427 hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
428 hash_cd_ctrl->inner_res_sz = digestsize;
429 hash_cd_ctrl->final_sz = digestsize;
431 switch (ctx->qat_hash_alg) {
432 case ICP_QAT_HW_AUTH_ALGO_SHA1:
433 hash_cd_ctrl->inner_state1_sz =
434 round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
435 hash_cd_ctrl->inner_state2_sz =
436 round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
438 case ICP_QAT_HW_AUTH_ALGO_SHA256:
439 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
440 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
442 case ICP_QAT_HW_AUTH_ALGO_SHA512:
443 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
444 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
450 hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
451 ((sizeof(struct icp_qat_hw_auth_setup) +
452 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
453 auth_param->u1.auth_partial_st_prefix = ctx->auth_state_enc_paddr +
454 sizeof(struct icp_qat_hw_auth_counter) +
455 round_up(hash_cd_ctrl->inner_state1_sz, 8);
456 auth_param->auth_res_sz = digestsize;
457 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
458 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
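/*
 * The decrypt descriptor mirrors the encrypt one with the blocks swapped:
 * auth setup at offset 0 and the cipher block behind the rounded-up digest
 * state.  The slice chain runs AUTH -> CIPHER -> DRAM_WR
 * (ICP_QAT_FW_LA_CMD_HASH_CIPHER), so the digest is verified by the
 * hardware (CMP_AUTH_RES) as part of the decrypt operation.
 */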
462 static int qat_alg_init_sessions(struct qat_alg_session_ctx *ctx,
463 const uint8_t *key, unsigned int keylen)
465 struct crypto_authenc_keys keys;
468 if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE))
471 if (crypto_authenc_extractkeys(&keys, key, keylen))
474 switch (keys.enckeylen) {
475 case AES_KEYSIZE_128:
476 alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
478 case AES_KEYSIZE_192:
479 alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
481 case AES_KEYSIZE_256:
482 alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
489 if (qat_alg_init_enc_session(ctx, alg, &keys))
492 if (qat_alg_init_dec_session(ctx, alg, &keys))
497 crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
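/*
 * Session setup: draw a fresh salt for IV generation, split the authenc()
 * key blob with crypto_authenc_extractkeys() (an rtattr carrying the AES
 * key length, followed by the HMAC key and then the AES key), map the AES
 * key length onto the matching hardware algorithm id, and build both the
 * encrypt and decrypt descriptors.  Anything other than a 128, 192 or 256
 * bit AES key is rejected with CRYPTO_TFM_RES_BAD_KEY_LEN.
 */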
503 static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
506 struct qat_alg_session_ctx *ctx = crypto_aead_ctx(tfm);
509 spin_lock(&ctx->lock);
512 dev = &GET_DEV(ctx->inst->accel_dev);
513 memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
514 memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
515 memset(ctx->auth_hw_state_enc, 0,
516 sizeof(struct qat_auth_state));
517 memset(ctx->auth_hw_state_dec, 0,
518 sizeof(struct qat_auth_state));
519 memset(&ctx->enc_fw_req_tmpl, 0,
520 sizeof(struct icp_qat_fw_la_bulk_req));
521 memset(&ctx->dec_fw_req_tmpl, 0,
522 sizeof(struct icp_qat_fw_la_bulk_req));
525 int node = get_current_node();
526 struct qat_crypto_instance *inst =
527 qat_crypto_get_instance_node(node);
529 spin_unlock(&ctx->lock);
533 dev = &GET_DEV(inst->accel_dev);
535 ctx->enc_cd = dma_zalloc_coherent(dev,
536 sizeof(struct qat_alg_cd),
540 spin_unlock(&ctx->lock);
543 ctx->dec_cd = dma_zalloc_coherent(dev,
544 sizeof(struct qat_alg_cd),
548 spin_unlock(&ctx->lock);
551 ctx->auth_hw_state_enc =
552 dma_zalloc_coherent(dev, sizeof(struct qat_auth_state),
553 &ctx->auth_state_enc_paddr,
555 if (!ctx->auth_hw_state_enc) {
556 spin_unlock(&ctx->lock);
559 ctx->auth_hw_state_dec =
560 dma_zalloc_coherent(dev, sizeof(struct qat_auth_state),
561 &ctx->auth_state_dec_paddr,
563 if (!ctx->auth_hw_state_dec) {
564 spin_unlock(&ctx->lock);
565 goto out_free_auth_enc;
568 spin_unlock(&ctx->lock);
569 if (qat_alg_init_sessions(ctx, key, keylen))
575 dma_free_coherent(dev, sizeof(struct qat_auth_state),
576 ctx->auth_hw_state_dec, ctx->auth_state_dec_paddr);
577 ctx->auth_hw_state_dec = NULL;
579 dma_free_coherent(dev, sizeof(struct qat_auth_state),
580 ctx->auth_hw_state_enc, ctx->auth_state_enc_paddr);
581 ctx->auth_hw_state_enc = NULL;
583 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
584 ctx->dec_cd, ctx->dec_cd_paddr);
587 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
588 ctx->enc_cd, ctx->enc_cd_paddr);
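/*
 * setkey serves both the initial key and rekeying.  On first use it binds
 * the tfm to a crypto instance on the caller's node and allocates the
 * DMA-coherent content descriptors and auth state; on rekey it only clears
 * the existing buffers and templates before rebuilding the session.  The
 * labels above unwind the allocations in reverse order on failure.
 */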
593 static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
594 struct qat_crypto_request *qat_req)
596 struct device *dev = &GET_DEV(inst->accel_dev);
597 struct qat_alg_buf_list *bl = qat_req->buf.bl;
598 struct qat_alg_buf_list *blout = qat_req->buf.blout;
599 dma_addr_t blp = qat_req->buf.blp;
600 dma_addr_t blpout = qat_req->buf.bloutp;
601 size_t sz = qat_req->buf.sz;
602 int i, bufs = bl->num_bufs;
604 for (i = 0; i < bl->num_bufs; i++)
605 dma_unmap_single(dev, bl->bufers[i].addr,
606 bl->bufers[i].len, DMA_BIDIRECTIONAL);
608 dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
		/* If an out-of-place operation, DMA unmap only the data */
612 int bufless = bufs - blout->num_mapped_bufs;
613 for (i = bufless; i < bufs; i++) {
614 dma_unmap_single(dev, blout->bufers[i].addr,
615 blout->bufers[i].len,
618 dma_unmap_single(dev, blpout, sz, DMA_TO_DEVICE);
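/*
 * Undo qat_alg_sgl_to_bufl(): unmap every source segment and the source
 * list itself; for out-of-place requests also unmap the destination data
 * segments (the assoc and IV entries of the output list were borrowed from
 * the source mapping) and the destination list.
 */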
623 static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
624 struct scatterlist *assoc,
625 struct scatterlist *sgl,
626 struct scatterlist *sglout, uint8_t *iv,
628 struct qat_crypto_request *qat_req)
630 struct device *dev = &GET_DEV(inst->accel_dev);
631 int i, bufs = 0, n = sg_nents(sgl), assoc_n = sg_nents(assoc);
632 struct qat_alg_buf_list *bufl;
633 struct qat_alg_buf_list *buflout = NULL;
635 dma_addr_t bloutp = 0;
636 struct scatterlist *sg;
637 size_t sz = sizeof(struct qat_alg_buf_list) +
638 ((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
643 bufl = kmalloc_node(sz, GFP_ATOMIC, inst->accel_dev->numa_node);
647 blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
648 if (unlikely(dma_mapping_error(dev, blp)))
651 for_each_sg(assoc, sg, assoc_n, i) {
652 bufl->bufers[bufs].addr = dma_map_single(dev,
656 bufl->bufers[bufs].len = sg->length;
657 if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
661 bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
663 bufl->bufers[bufs].len = ivlen;
664 if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
668 for_each_sg(sgl, sg, n, i) {
670 bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
673 bufl->bufers[y].len = sg->length;
674 if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
677 bufl->num_bufs = n + bufs;
678 qat_req->buf.bl = bufl;
679 qat_req->buf.blp = blp;
680 qat_req->buf.sz = sz;
681 /* Handle out of place operation */
683 struct qat_alg_buf *bufers;
685 buflout = kmalloc_node(sz, GFP_ATOMIC,
686 inst->accel_dev->numa_node);
687 if (unlikely(!buflout))
689 bloutp = dma_map_single(dev, buflout, sz, DMA_TO_DEVICE);
690 if (unlikely(dma_mapping_error(dev, bloutp)))
692 bufers = buflout->bufers;
		/* For an out-of-place operation, DMA map only the data and
		 * reuse the existing assoc and iv mappings */
695 for (i = 0; i < bufs; i++) {
696 bufers[i].len = bufl->bufers[i].len;
697 bufers[i].addr = bufl->bufers[i].addr;
699 for_each_sg(sglout, sg, n, i) {
701 bufers[y].addr = dma_map_single(dev, sg_virt(sg),
704 buflout->bufers[y].len = sg->length;
705 if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
708 buflout->num_bufs = n + bufs;
709 buflout->num_mapped_bufs = n;
710 qat_req->buf.blout = buflout;
711 qat_req->buf.bloutp = bloutp;
713 /* Otherwise set the src and dst to the same address */
714 qat_req->buf.bloutp = qat_req->buf.blp;
718 dev_err(dev, "Failed to map buf for dma\n");
719 for_each_sg(sgl, sg, n + bufs, i) {
720 if (!dma_mapping_error(dev, bufl->bufers[i].addr)) {
721 dma_unmap_single(dev, bufl->bufers[i].addr,
726 if (!dma_mapping_error(dev, blp))
727 dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
729 if (sgl != sglout && buflout) {
730 for_each_sg(sglout, sg, n, i) {
732 if (!dma_mapping_error(dev, buflout->bufers[y].addr))
733 dma_unmap_single(dev, buflout->bufers[y].addr,
734 buflout->bufers[y].len,
737 if (!dma_mapping_error(dev, bloutp))
738 dma_unmap_single(dev, bloutp, sz, DMA_TO_DEVICE);
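/*
 * Order of the bufers[] entries built above, per request:
 *
 *	[0 .. assoc_n-1]	associated data segments
 *	[assoc_n]		the IV
 *	[assoc_n+1 .. ]		plaintext/ciphertext segments
 *
 * An out-of-place destination list reuses the assoc and IV mappings and
 * maps only its own data segments; num_mapped_bufs counts just those so
 * the free path knows what to unmap.
 */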
744 void qat_alg_callback(void *resp)
746 struct icp_qat_fw_la_resp *qat_resp = resp;
747 struct qat_crypto_request *qat_req =
748 (void *)(dma_addr_t)qat_resp->opaque_data;
749 struct qat_alg_session_ctx *ctx = qat_req->ctx;
750 struct qat_crypto_instance *inst = ctx->inst;
751 struct aead_request *areq = qat_req->areq;
	uint8_t stat_field = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
755 qat_alg_free_bufl(inst, qat_req);
756 if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
758 areq->base.complete(&(areq->base), res);
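/*
 * Firmware completion handler: opaque_data carries the original request
 * pointer back, the buffer lists are unmapped, and the aead completion is
 * called with an error if the accelerator did not report the operation as
 * OK.
 */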
761 static int qat_alg_dec(struct aead_request *areq)
763 struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
764 struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
765 struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
766 struct qat_crypto_request *qat_req = aead_request_ctx(areq);
767 struct icp_qat_fw_la_cipher_req_params *cipher_param;
768 struct icp_qat_fw_la_auth_req_params *auth_param;
769 struct icp_qat_fw_la_bulk_req *msg;
770 int digst_size = crypto_aead_crt(aead_tfm)->authsize;
773 ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
774 areq->iv, AES_BLOCK_SIZE, qat_req);
	msg = &qat_req->req;
	*msg = ctx->dec_fw_req_tmpl;
781 qat_req->areq = areq;
782 qat_req->req.comn_mid.opaque_data = (uint64_t)(dma_addr_t)qat_req;
783 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
784 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
785 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
786 cipher_param->cipher_length = areq->cryptlen - digst_size;
787 cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
788 memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
789 auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
790 auth_param->auth_off = 0;
791 auth_param->auth_len = areq->assoclen +
792 cipher_param->cipher_length + AES_BLOCK_SIZE;
794 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
795 } while (ret == -EAGAIN && ctr++ < 10);
797 if (ret == -EAGAIN) {
798 qat_alg_free_bufl(ctx->inst, qat_req);
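/*
 * The decrypt path above and the encrypt paths below follow the same
 * pattern: copy the prebuilt firmware template into the request, point it
 * at the mapped source/destination lists, fill in the per-request cipher
 * and auth offsets and lengths, then post it to the symmetric tx ring,
 * retrying up to ten times if the ring is temporarily full (-EAGAIN).
 */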
804 static int qat_alg_enc_internal(struct aead_request *areq, uint8_t *iv,
807 struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
808 struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
809 struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
810 struct qat_crypto_request *qat_req = aead_request_ctx(areq);
811 struct icp_qat_fw_la_cipher_req_params *cipher_param;
812 struct icp_qat_fw_la_auth_req_params *auth_param;
813 struct icp_qat_fw_la_bulk_req *msg;
816 ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
817 iv, AES_BLOCK_SIZE, qat_req);
	msg = &qat_req->req;
	*msg = ctx->enc_fw_req_tmpl;
824 qat_req->areq = areq;
825 qat_req->req.comn_mid.opaque_data = (uint64_t)(dma_addr_t)qat_req;
826 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
827 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
828 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
829 auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
832 cipher_param->cipher_length = areq->cryptlen + AES_BLOCK_SIZE;
833 cipher_param->cipher_offset = areq->assoclen;
835 memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
836 cipher_param->cipher_length = areq->cryptlen;
837 cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
839 auth_param->auth_off = 0;
840 auth_param->auth_len = areq->assoclen + areq->cryptlen + AES_BLOCK_SIZE;
843 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
844 } while (ret == -EAGAIN && ctr++ < 10);
846 if (ret == -EAGAIN) {
847 qat_alg_free_bufl(ctx->inst, qat_req);
853 static int qat_alg_enc(struct aead_request *areq)
855 return qat_alg_enc_internal(areq, areq->iv, 0);
858 static int qat_alg_genivenc(struct aead_givcrypt_request *req)
860 struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq);
861 struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
862 struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
865 memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE);
866 seq = cpu_to_be64(req->seq);
867 memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t),
868 &seq, sizeof(uint64_t));
869 return qat_alg_enc_internal(&req->areq, req->giv, 1);
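/*
 * IV generation for givencrypt: start from the per-session random salt and
 * overwrite its last eight bytes with the big-endian request sequence
 * number, giving a per-request IV without another RNG call.
 */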
872 static int qat_alg_init(struct crypto_tfm *tfm,
873 enum icp_qat_hw_auth_algo hash, const char *hash_name)
875 struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
877 memset(ctx, '\0', sizeof(*ctx));
878 ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
879 if (IS_ERR(ctx->hash_tfm))
881 spin_lock_init(&ctx->lock);
882 ctx->qat_hash_alg = hash;
883 tfm->crt_aead.reqsize = sizeof(struct aead_request) +
884 sizeof(struct qat_crypto_request);
889 static int qat_alg_sha1_init(struct crypto_tfm *tfm)
891 return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
894 static int qat_alg_sha256_init(struct crypto_tfm *tfm)
896 return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
899 static int qat_alg_sha512_init(struct crypto_tfm *tfm)
901 return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
904 static void qat_alg_exit(struct crypto_tfm *tfm)
906 struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
907 struct qat_crypto_instance *inst = ctx->inst;
910 if (!IS_ERR(ctx->hash_tfm))
911 crypto_free_shash(ctx->hash_tfm);
916 dev = &GET_DEV(inst->accel_dev);
918 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
919 ctx->enc_cd, ctx->enc_cd_paddr);
921 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
922 ctx->dec_cd, ctx->dec_cd_paddr);
923 if (ctx->auth_hw_state_enc)
924 dma_free_coherent(dev, sizeof(struct qat_auth_state),
925 ctx->auth_hw_state_enc,
926 ctx->auth_state_enc_paddr);
928 if (ctx->auth_hw_state_dec)
929 dma_free_coherent(dev, sizeof(struct qat_auth_state),
930 ctx->auth_hw_state_dec,
931 ctx->auth_state_dec_paddr);
933 qat_crypto_put_instance(inst);
936 static struct crypto_alg qat_algs[] = { {
937 .cra_name = "authenc(hmac(sha1),cbc(aes))",
938 .cra_driver_name = "qat_aes_cbc_hmac_sha1",
939 .cra_priority = 4001,
940 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
941 .cra_blocksize = AES_BLOCK_SIZE,
942 .cra_ctxsize = sizeof(struct qat_alg_session_ctx),
944 .cra_type = &crypto_aead_type,
945 .cra_module = THIS_MODULE,
946 .cra_init = qat_alg_sha1_init,
947 .cra_exit = qat_alg_exit,
950 .setkey = qat_alg_setkey,
951 .decrypt = qat_alg_dec,
952 .encrypt = qat_alg_enc,
953 .givencrypt = qat_alg_genivenc,
954 .ivsize = AES_BLOCK_SIZE,
955 .maxauthsize = SHA1_DIGEST_SIZE,
959 .cra_name = "authenc(hmac(sha256),cbc(aes))",
960 .cra_driver_name = "qat_aes_cbc_hmac_sha256",
961 .cra_priority = 4001,
962 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
963 .cra_blocksize = AES_BLOCK_SIZE,
964 .cra_ctxsize = sizeof(struct qat_alg_session_ctx),
966 .cra_type = &crypto_aead_type,
967 .cra_module = THIS_MODULE,
968 .cra_init = qat_alg_sha256_init,
969 .cra_exit = qat_alg_exit,
972 .setkey = qat_alg_setkey,
973 .decrypt = qat_alg_dec,
974 .encrypt = qat_alg_enc,
975 .givencrypt = qat_alg_genivenc,
976 .ivsize = AES_BLOCK_SIZE,
977 .maxauthsize = SHA256_DIGEST_SIZE,
981 .cra_name = "authenc(hmac(sha512),cbc(aes))",
982 .cra_driver_name = "qat_aes_cbc_hmac_sha512",
983 .cra_priority = 4001,
984 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
985 .cra_blocksize = AES_BLOCK_SIZE,
986 .cra_ctxsize = sizeof(struct qat_alg_session_ctx),
988 .cra_type = &crypto_aead_type,
989 .cra_module = THIS_MODULE,
990 .cra_init = qat_alg_sha512_init,
991 .cra_exit = qat_alg_exit,
994 .setkey = qat_alg_setkey,
995 .decrypt = qat_alg_dec,
996 .encrypt = qat_alg_enc,
997 .givencrypt = qat_alg_genivenc,
998 .ivsize = AES_BLOCK_SIZE,
999 .maxauthsize = SHA512_DIGEST_SIZE,
1004 int qat_algs_register(void)
1006 if (atomic_add_return(1, &active_dev) == 1) {
1009 for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
1010 qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_AEAD |
1012 return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
1017 int qat_algs_unregister(void)
1019 if (atomic_sub_return(1, &active_dev) == 0)
1020 return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
1024 int qat_algs_init(void)
1026 atomic_set(&active_dev, 0);
1027 crypto_get_default_rng();
1031 void qat_algs_exit(void)
1033 crypto_put_default_rng();