/*
 * Provide TDMA helper functions used by cipher and hash algorithm
 * implementations.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include "cesa.h"
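/*
 * Typical usage (illustrative sketch only: dreq, iter, sgiter and op_templ
 * are placeholders for caller-owned objects, and error handling is elided).
 * A cipher or hash implementation builds a descriptor chain with the
 * mv_cesa_dma_add_*() helpers below, then hands it to an engine:
 *
 *	op = mv_cesa_dma_add_op(&dreq->chain, op_templ, false, gfp_flags);
 *	mv_cesa_dma_add_op_transfers(&dreq->chain, &iter, &sgiter, gfp_flags);
 *	mv_cesa_dma_add_dummy_end(&dreq->chain, gfp_flags);
 *	...
 *	mv_cesa_dma_prepare(dreq, engine);	(rebase SRAM-relative addresses)
 *	mv_cesa_dma_step(dreq);			(launch the TDMA chain)
 */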
bool mv_cesa_req_dma_iter_next_transfer(struct mv_cesa_dma_iter *iter,
					struct mv_cesa_sg_dma_iter *sgiter,
					unsigned int len)
{
	if (!sgiter->sg)
		return false;

	sgiter->op_offset += len;
	sgiter->offset += len;
	if (sgiter->offset == sg_dma_len(sgiter->sg)) {
		if (sg_is_last(sgiter->sg))
			return false;
		sgiter->offset = 0;
		sgiter->sg = sg_next(sgiter->sg);
	}

	if (sgiter->op_offset == iter->op_len)
		return false;

	return true;
}
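/*
 * Program the engine configuration registers and launch the TDMA chain
 * attached to @dreq. The accelerator must be idle when this is called
 * (enforced by the BUG_ON() below).
 */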
void mv_cesa_dma_step(struct mv_cesa_req *dreq)
{
	struct mv_cesa_engine *engine = dreq->engine;

	writel_relaxed(0, engine->regs + CESA_SA_CFG);

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACC0_IDMA_DONE);
	writel_relaxed(CESA_TDMA_DST_BURST_128B | CESA_TDMA_SRC_BURST_128B |
		       CESA_TDMA_NO_BYTE_SWAP | CESA_TDMA_EN,
		       engine->regs + CESA_TDMA_CONTROL);

	writel_relaxed(CESA_SA_CFG_ACT_CH0_IDMA | CESA_SA_CFG_MULTI_PKT |
		       CESA_SA_CFG_CH0_W_IDMA | CESA_SA_CFG_PARA_DIS,
		       engine->regs + CESA_SA_CFG);
	writel_relaxed(dreq->chain.first->cur_dma,
		       engine->regs + CESA_TDMA_NEXT_ADDR);
	BUG_ON(readl(engine->regs + CESA_SA_CMD) &
	       CESA_SA_CMD_EN_CESA_SA_ACCL0);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
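/*
 * Walk the request's descriptor chain and return every descriptor, plus
 * any attached operation context, to its dma_pool.
 */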
void mv_cesa_dma_cleanup(struct mv_cesa_req *dreq)
{
	struct mv_cesa_tdma_desc *tdma;

	for (tdma = dreq->chain.first; tdma;) {
		struct mv_cesa_tdma_desc *old_tdma = tdma;
		u32 type = tdma->flags & CESA_TDMA_TYPE_MSK;

		if (type == CESA_TDMA_OP)
			dma_pool_free(cesa_dev->dma->op_pool, tdma->op,
				      le32_to_cpu(tdma->src));

		tdma = tdma->next;
		dma_pool_free(cesa_dev->dma->tdma_desc_pool, old_tdma,
			      old_tdma->cur_dma);
	}

	dreq->chain.first = NULL;
	dreq->chain.last = NULL;
}
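/*
 * Now that the request is bound to an engine, rebase all SRAM-relative
 * addresses onto the engine's SRAM DMA address and fix up the operation
 * contexts accordingly.
 */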
void mv_cesa_dma_prepare(struct mv_cesa_req *dreq,
			 struct mv_cesa_engine *engine)
{
	struct mv_cesa_tdma_desc *tdma;

	for (tdma = dreq->chain.first; tdma; tdma = tdma->next) {
		if (tdma->flags & CESA_TDMA_DST_IN_SRAM)
			tdma->dst = cpu_to_le32(tdma->dst + engine->sram_dma);

		if (tdma->flags & CESA_TDMA_SRC_IN_SRAM)
			tdma->src = cpu_to_le32(tdma->src + engine->sram_dma);

		if ((tdma->flags & CESA_TDMA_TYPE_MSK) == CESA_TDMA_OP)
			mv_cesa_adjust_op(engine, tdma->op);
	}
}
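/*
 * Append the request's descriptor chain to the engine-wide chain. The two
 * chains are also linked at the hardware level (through next_dma) unless
 * the previous chain explicitly asked for a break with
 * CESA_TDMA_BREAK_CHAIN.
 */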
void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
			struct mv_cesa_req *dreq)
{
	if (engine->chain.first == NULL && engine->chain.last == NULL) {
		engine->chain.first = dreq->chain.first;
		engine->chain.last = dreq->chain.last;
	} else {
		struct mv_cesa_tdma_desc *last;

		last = engine->chain.last;
		last->next = dreq->chain.first;
		engine->chain.last = dreq->chain.last;

		if (!(last->flags & CESA_TDMA_BREAK_CHAIN))
			last->next_dma = dreq->chain.first->cur_dma;
	}
}
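/*
 * Called from the interrupt path: walk the engine chain and complete every
 * request whose CESA_TDMA_END_OF_REQ descriptor has been processed,
 * stopping at the descriptor the TDMA engine is currently working on
 * (CESA_TDMA_CUR) or as soon as a request fails.
 */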
int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
{
	struct crypto_async_request *req = NULL;
	struct mv_cesa_tdma_desc *tdma = NULL, *next = NULL;
	dma_addr_t tdma_cur;
	int res = 0;

	tdma_cur = readl(engine->regs + CESA_TDMA_CUR);

	for (tdma = engine->chain.first; tdma; tdma = next) {
		spin_lock_bh(&engine->lock);
		next = tdma->next;
		spin_unlock_bh(&engine->lock);

		if (tdma->flags & CESA_TDMA_END_OF_REQ) {
			struct crypto_async_request *backlog = NULL;
			struct mv_cesa_ctx *ctx;
			u32 current_status;

			spin_lock_bh(&engine->lock);
			/*
			 * if req is NULL, this means we're processing the
			 * request in engine->req.
			 */
			if (!req)
				req = engine->req;
			else
				req = mv_cesa_dequeue_req_locked(engine,
								 &backlog);

			/* Re-chaining to the next request */
			engine->chain.first = tdma->next;
			tdma->next = NULL;

			/* If this is the last request, clear the chain */
			if (engine->chain.first == NULL)
				engine->chain.last = NULL;
			spin_unlock_bh(&engine->lock);

			ctx = crypto_tfm_ctx(req->tfm);
			current_status = (tdma->cur_dma == tdma_cur) ?
					 status : CESA_SA_INT_ACC0_IDMA_DONE;
			res = ctx->ops->process(req, current_status);
			ctx->ops->complete(req);

			if (res == 0)
				mv_cesa_engine_enqueue_complete_request(engine,
									req);

			if (backlog)
				backlog->complete(backlog, -EINPROGRESS);
		}

		if (res || tdma->cur_dma == tdma_cur)
			break;
	}

	/*
	 * Save the last request in error to engine->req, so that the core
	 * knows which request was faulty.
	 */
	if (res) {
		spin_lock_bh(&engine->lock);
		engine->req = req;
		spin_unlock_bh(&engine->lock);
	}

	return res;
}
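/*
 * Allocate a zeroed TDMA descriptor from the dma_pool, record its own DMA
 * address in cur_dma, and append it to both the software and hardware
 * chains.
 */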
static struct mv_cesa_tdma_desc *
mv_cesa_dma_add_desc(struct mv_cesa_tdma_chain *chain, gfp_t flags)
{
	struct mv_cesa_tdma_desc *new_tdma = NULL;
	dma_addr_t dma_handle;

	new_tdma = dma_pool_zalloc(cesa_dev->dma->tdma_desc_pool, flags,
				   &dma_handle);
	if (!new_tdma)
		return ERR_PTR(-ENOMEM);

	new_tdma->cur_dma = dma_handle;
	if (chain->last) {
		chain->last->next_dma = cpu_to_le32(dma_handle);
		chain->last->next = new_tdma;
	} else {
		chain->first = new_tdma;
	}

	chain->last = new_tdma;

	return new_tdma;
}
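/*
 * Add a CESA_TDMA_RESULT descriptor copying @size bytes of result data
 * from the SRAM offset @src back into the operation context of the first
 * CESA_TDMA_OP descriptor found in the chain.
 */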
int mv_cesa_dma_add_result_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
			      u32 size, u32 flags, gfp_t gfp_flags)
{
	struct mv_cesa_tdma_desc *tdma, *op_desc;

	tdma = mv_cesa_dma_add_desc(chain, gfp_flags);
	if (IS_ERR(tdma))
		return PTR_ERR(tdma);

	/* We re-use an existing op_desc object to retrieve the context
	 * and result instead of allocating a new one.
	 * There is at least one object of this type in a CESA crypto
	 * req, just pick the first one in the chain.
	 */
	for (op_desc = chain->first; op_desc; op_desc = op_desc->next) {
		u32 type = op_desc->flags & CESA_TDMA_TYPE_MSK;

		if (type == CESA_TDMA_OP)
			break;
	}

	if (!op_desc)
		return -EIO;

	tdma->byte_cnt = cpu_to_le32(size | BIT(31));
	tdma->src = src;
	tdma->dst = op_desc->src;
	tdma->op = op_desc->op;

	flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM);
	tdma->flags = flags | CESA_TDMA_RESULT;
	return 0;
}
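/*
 * Allocate an operation context from the op pool, initialize it from
 * @op_templ, and add a CESA_TDMA_OP descriptor transferring it (without
 * the context part when @skip_ctx is true) to the engine SRAM.
 */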
struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain,
					const struct mv_cesa_op_ctx *op_templ,
					bool skip_ctx, gfp_t flags)
{
	struct mv_cesa_tdma_desc *tdma;
	struct mv_cesa_op_ctx *op;
	dma_addr_t dma_handle;
	unsigned int size;

	tdma = mv_cesa_dma_add_desc(chain, flags);
	if (IS_ERR(tdma))
		return ERR_CAST(tdma);

	op = dma_pool_alloc(cesa_dev->dma->op_pool, flags, &dma_handle);
	if (!op)
		return ERR_PTR(-ENOMEM);

	*op = *op_templ;

	size = skip_ctx ? sizeof(op->desc) : sizeof(*op);

	tdma->op = op;
	tdma->byte_cnt = cpu_to_le32(size | BIT(31));
	tdma->src = cpu_to_le32(dma_handle);
	tdma->dst = CESA_SA_CFG_SRAM_OFFSET;
	tdma->flags = CESA_TDMA_DST_IN_SRAM | CESA_TDMA_OP;

	return op;
}
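/*
 * Add a plain CESA_TDMA_DATA descriptor copying @size bytes from @src to
 * @dst; only the *_IN_SRAM bits of @flags are retained.
 */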
int mv_cesa_dma_add_data_transfer(struct mv_cesa_tdma_chain *chain,
				  dma_addr_t dst, dma_addr_t src, u32 size,
				  u32 flags, gfp_t gfp_flags)
{
	struct mv_cesa_tdma_desc *tdma;

	tdma = mv_cesa_dma_add_desc(chain, gfp_flags);
	if (IS_ERR(tdma))
		return PTR_ERR(tdma);

	tdma->byte_cnt = cpu_to_le32(size | BIT(31));
	tdma->src = src;
	tdma->dst = dst;

	flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM);
	tdma->flags = flags | CESA_TDMA_DATA;

	return 0;
}
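/*
 * Add an empty (zero-initialized) placeholder descriptor that gives the
 * TDMA engine a valid descriptor to fetch when the chain is launched.
 */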
int mv_cesa_dma_add_dummy_launch(struct mv_cesa_tdma_chain *chain, gfp_t flags)
{
	struct mv_cesa_tdma_desc *tdma;

	tdma = mv_cesa_dma_add_desc(chain, flags);
	if (IS_ERR(tdma))
		return PTR_ERR(tdma);

	return 0;
}
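/*
 * Terminate the chain with a zero-data descriptor: its byte count carries
 * only BIT(31), the flag set on every descriptor's byte count in this
 * driver.
 */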
int mv_cesa_dma_add_dummy_end(struct mv_cesa_tdma_chain *chain, gfp_t flags)
{
	struct mv_cesa_tdma_desc *tdma;

	tdma = mv_cesa_dma_add_desc(chain, flags);
	if (IS_ERR(tdma))
		return PTR_ERR(tdma);

	tdma->byte_cnt = cpu_to_le32(BIT(31));

	return 0;
}
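/*
 * Split a scatterlist-based transfer into one CESA_TDMA_DATA descriptor
 * per step, copying between the engine data SRAM and the scatterlist
 * buffers in the direction given by sgiter->dir.
 */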
int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain,
				 struct mv_cesa_dma_iter *dma_iter,
				 struct mv_cesa_sg_dma_iter *sgiter,
				 gfp_t gfp_flags)
{
	u32 flags = sgiter->dir == DMA_TO_DEVICE ?
		    CESA_TDMA_DST_IN_SRAM : CESA_TDMA_SRC_IN_SRAM;
	unsigned int len;

	do {
		dma_addr_t dst, src;
		int ret;

		len = mv_cesa_req_dma_iter_transfer_len(dma_iter, sgiter);
		if (sgiter->dir == DMA_TO_DEVICE) {
			dst = CESA_SA_DATA_SRAM_OFFSET + sgiter->op_offset;
			src = sg_dma_address(sgiter->sg) + sgiter->offset;
		} else {
			dst = sg_dma_address(sgiter->sg) + sgiter->offset;
			src = CESA_SA_DATA_SRAM_OFFSET + sgiter->op_offset;
		}

		ret = mv_cesa_dma_add_data_transfer(chain, dst, src, len,
						    flags, gfp_flags);
		if (ret)
			return ret;

	} while (mv_cesa_req_dma_iter_next_transfer(dma_iter, sgiter, len));

	return 0;
}