return buf_dma;
}
-/* Map req->src and put it in link table */
-static inline void src_map_to_sec4_sg(struct device *jrdev,
- struct scatterlist *src, int src_nents,
- struct sec4_sg_entry *sec4_sg)
-{
- dma_map_sg(jrdev, src, src_nents, DMA_TO_DEVICE);
- sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
-}
-
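src_map_to_sec4_sg() is removed because it ignored the return value of dma_map_sg(): the call returns 0 on failure, and behind an IOMMU it may return fewer DMA segments than the nents it was given, so every caller needs the mapped count both for error handling and for sizing the link table. The patch open-codes the checked call at each site; a minimal sketch of how the repeated pattern could instead be factored into a single helper (hypothetical name ahash_map_src, not part of this patch):

/*
 * Hypothetical helper, illustration only: map src for DMA and return
 * the number of mapped segments, 0 for an empty list, or -ENOMEM if
 * the mapping failed.
 */
static int ahash_map_src(struct device *jrdev, struct scatterlist *src,
			 int src_nents)
{
	int mapped_nents;

	if (!src_nents)
		return 0;

	mapped_nents = dma_map_sg(jrdev, src, src_nents, DMA_TO_DEVICE);
	if (!mapped_nents) {
		dev_err(jrdev, "unable to DMA map source\n");
		return -ENOMEM;
	}

	return mapped_nents;
}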
/*
* Only put buffer in link table if it contains data, which is possible,
* since a buffer has previously been used, and needs to be unmapped,
int in_len = *buflen + req->nbytes, to_hash;
u32 *sh_desc = ctx->sh_desc_update, *desc;
dma_addr_t ptr = ctx->sh_desc_update_dma;
- int src_nents, sec4_sg_bytes, sec4_sg_src_index;
+ int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
struct ahash_edesc *edesc;
int ret = 0;
int sh_len;
dev_err(jrdev, "Invalid number of src SG.\n");
return src_nents;
}
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(jrdev, "unable to DMA map source\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
- sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
+ sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
sizeof(struct sec4_sg_entry);
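Sizing the table by mapped_nents rather than src_nents matters once an IOMMU is in the picture: dma_map_sg() may merge physically contiguous segments, so mapped_nents can be smaller than src_nents, and sg_to_sec4_sg_last() further down writes exactly mapped_nents entries. A sketch of the resulting link-table layout for this update path, assuming the index computed just above (one entry for the running context, plus one for the buffered partial block when *buflen is non-zero):

/*
 * sec4_sg layout in ahash_update_ctx():
 *
 *   [0]                        running hash context
 *   [1]                        buffered bytes, present iff *buflen != 0
 *   [sec4_sg_src_index ..
 *    sec4_sg_src_index + mapped_nents - 1]
 *                              req->src segments; the last entry gets
 *                              the final-entry marker from
 *                              sg_to_sec4_sg_last()
 */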
/*
if (!edesc) {
dev_err(jrdev,
"could not allocate extended descriptor\n");
+ dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
return -ENOMEM;
}
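One detail of the new error path deserves a note: the DMA API requires dma_unmap_sg() to be passed the same nents value that was originally given to dma_map_sg(), not the (possibly smaller) count dma_map_sg() returned, which is why the hunk above unmaps with src_nents rather than mapped_nents. A minimal sketch of the rule (dev, sgl and nents are placeholders):

int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

if (!mapped)		/* 0 means the mapping failed */
	return -ENOMEM;
/* ... use sg_dma_address()/sg_dma_len() over the mapped entries ... */
dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);	/* original nents, not mapped */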
buf, state->buf_dma,
*buflen, last_buflen);
- if (src_nents) {
- src_map_to_sec4_sg(jrdev, req->src, src_nents,
- edesc->sec4_sg + sec4_sg_src_index);
+ if (mapped_nents) {
+ sg_to_sec4_sg_last(req->src, mapped_nents,
+ edesc->sec4_sg + sec4_sg_src_index,
+ 0);
if (*next_buflen)
scatterwalk_map_and_copy(next_buf, req->src,
to_hash - *buflen,
u32 *sh_desc = ctx->sh_desc_finup, *desc;
dma_addr_t ptr = ctx->sh_desc_finup_dma;
int sec4_sg_bytes, sec4_sg_src_index;
- int src_nents;
+ int src_nents, mapped_nents;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
int ret = 0;
dev_err(jrdev, "Invalid number of src SG.\n");
return src_nents;
}
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(jrdev, "unable to DMA map source\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
sec4_sg_src_index = 1 + (buflen ? 1 : 0);
- sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
+ sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
+ dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
return -ENOMEM;
}
buf, state->buf_dma, buflen,
last_buflen);
- src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
- sec4_sg_src_index);
+ sg_to_sec4_sg_last(req->src, mapped_nents,
+ edesc->sec4_sg + sec4_sg_src_index, 0);
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
u32 *sh_desc = ctx->sh_desc_digest, *desc;
dma_addr_t ptr = ctx->sh_desc_digest_dma;
int digestsize = crypto_ahash_digestsize(ahash);
- int src_nents, sec4_sg_bytes;
+ int src_nents, mapped_nents, sec4_sg_bytes;
dma_addr_t src_dma;
struct ahash_edesc *edesc;
int ret = 0;
dev_err(jrdev, "Invalid number of src SG.\n");
return src_nents;
}
- dma_map_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
- if (src_nents > 1)
- sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+			dev_err(jrdev, "unable to DMA map source\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
+ if (mapped_nents > 1)
+ sec4_sg_bytes = mapped_nents * sizeof(struct sec4_sg_entry);
else
sec4_sg_bytes = 0;
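The digest path only needs a link table when the source maps to more than one DMA segment; a single segment can be handed to the CAAM by address, with no table at all. A sketch of the two cases, keyed on mapped_nents, the count that actually matters once segments can be merged (src_dma, options and LDST_SGF are the names the surrounding function uses; note that the existing branch below still tests src_nents > 1):

if (mapped_nents > 1) {
	/* gather: the job descriptor points at the link table */
	src_dma = edesc->sec4_sg_dma;
	options = LDST_SGF;
} else {
	/* single segment: the job descriptor points at the data itself */
	src_dma = sg_dma_address(req->src);
	options = 0;
}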
edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
+ dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
return -ENOMEM;
}
init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
if (src_nents > 1) {
- sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
+ sg_to_sec4_sg_last(req->src, mapped_nents, edesc->sec4_sg, 0);
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
int *next_buflen = state->current_buf ? &state->buflen_0 :
&state->buflen_1;
int in_len = *buflen + req->nbytes, to_hash;
- int sec4_sg_bytes, src_nents;
+ int sec4_sg_bytes, src_nents, mapped_nents;
struct ahash_edesc *edesc;
u32 *desc, *sh_desc = ctx->sh_desc_update_first;
dma_addr_t ptr = ctx->sh_desc_update_first_dma;
dev_err(jrdev, "Invalid number of src SG.\n");
return src_nents;
}
- sec4_sg_bytes = (1 + src_nents) *
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(jrdev, "unable to DMA map source\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
+ sec4_sg_bytes = (1 + mapped_nents) *
sizeof(struct sec4_sg_entry);
/*
if (!edesc) {
dev_err(jrdev,
"could not allocate extended descriptor\n");
+ dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
return -ENOMEM;
}
state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
buf, *buflen);
- src_map_to_sec4_sg(jrdev, req->src, src_nents,
- edesc->sec4_sg + 1);
+ sg_to_sec4_sg_last(req->src, mapped_nents,
+ edesc->sec4_sg + 1, 0);
+
if (*next_buflen) {
scatterwalk_map_and_copy(next_buf, req->src,
to_hash - *buflen,
state->buflen_1;
u32 *sh_desc = ctx->sh_desc_digest, *desc;
dma_addr_t ptr = ctx->sh_desc_digest_dma;
- int sec4_sg_bytes, sec4_sg_src_index, src_nents;
+ int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
int sh_len;
dev_err(jrdev, "Invalid number of src SG.\n");
return src_nents;
}
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(jrdev, "unable to DMA map source\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
sec4_sg_src_index = 2;
- sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
+ sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
+ dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
return -ENOMEM;
}
state->buf_dma, buflen,
last_buflen);
- src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1);
+ sg_to_sec4_sg_last(req->src, mapped_nents, edesc->sec4_sg + 1, 0);
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
int to_hash;
u32 *sh_desc = ctx->sh_desc_update_first, *desc;
dma_addr_t ptr = ctx->sh_desc_update_first_dma;
- int sec4_sg_bytes, src_nents;
+ int sec4_sg_bytes, src_nents, mapped_nents;
dma_addr_t src_dma;
u32 options;
struct ahash_edesc *edesc;
dev_err(jrdev, "Invalid number of src SG.\n");
return src_nents;
}
- dma_map_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
- if (src_nents > 1)
- sec4_sg_bytes = src_nents *
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+			dev_err(jrdev, "unable to DMA map source\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+	}
+
+	if (mapped_nents > 1)
+ sec4_sg_bytes = mapped_nents *
sizeof(struct sec4_sg_entry);
else
sec4_sg_bytes = 0;
if (!edesc) {
dev_err(jrdev,
"could not allocate extended descriptor\n");
+ dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
return -ENOMEM;
}
edesc->dst_dma = 0;
if (src_nents > 1) {
- sg_to_sec4_sg_last(req->src, src_nents,
+ sg_to_sec4_sg_last(req->src, mapped_nents,
edesc->sec4_sg, 0);
edesc->sec4_sg_dma = dma_map_single(jrdev,
edesc->sec4_sg,