// SPDX-License-Identifier: GPL-2.0-only
/*
 * Provide TDMA helper functions used by cipher and hash algorithm
 * implementations.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 */

#include "cesa.h"
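/*
 * Advance the scatterlist DMA iterator by @len bytes, moving to the next
 * scatterlist entry when the current one is exhausted. Returns false once
 * the scatterlist or the operation length (iter->op_len) is fully consumed.
 */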
bool mv_cesa_req_dma_iter_next_transfer(struct mv_cesa_dma_iter *iter,
					struct mv_cesa_sg_dma_iter *sgiter,
					unsigned int len)
{
	if (!sgiter->sg)
		return false;

	sgiter->op_offset += len;
	sgiter->offset += len;
	if (sgiter->offset == sg_dma_len(sgiter->sg)) {
		if (sg_is_last(sgiter->sg))
			return false;
		sgiter->offset = 0;
		sgiter->sg = sg_next(sgiter->sg);
	}

	if (sgiter->op_offset == iter->op_len)
		return false;

	return true;
}
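/*
 * Kick off execution of a TDMA descriptor chain: program the TDMA burst
 * parameters, point the engine at the first descriptor of the request and
 * enable the accelerator. The engine must be idle when this is called.
 */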
void mv_cesa_dma_step(struct mv_cesa_req *dreq)
{
	struct mv_cesa_engine *engine = dreq->engine;

	writel_relaxed(0, engine->regs + CESA_SA_CFG);

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACC0_IDMA_DONE);
	writel_relaxed(CESA_TDMA_DST_BURST_128B | CESA_TDMA_SRC_BURST_128B |
		       CESA_TDMA_NO_BYTE_SWAP | CESA_TDMA_EN,
		       engine->regs + CESA_TDMA_CONTROL);

	writel_relaxed(CESA_SA_CFG_ACT_CH0_IDMA | CESA_SA_CFG_MULTI_PKT |
		       CESA_SA_CFG_CH0_W_IDMA | CESA_SA_CFG_PARA_DIS,
		       engine->regs + CESA_SA_CFG);
	writel_relaxed(dreq->chain.first->cur_dma,
		       engine->regs + CESA_TDMA_NEXT_ADDR);
	BUG_ON(readl(engine->regs + CESA_SA_CMD) &
	       CESA_SA_CMD_EN_CESA_SA_ACCL0);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
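/*
 * Release all resources owned by a request's descriptor chain: the
 * operation context attached to CESA_TDMA_OP descriptors and the
 * descriptors themselves, all of which come from DMA pools.
 */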
void mv_cesa_dma_cleanup(struct mv_cesa_req *dreq)
{
	struct mv_cesa_tdma_desc *tdma;

	for (tdma = dreq->chain.first; tdma;) {
		struct mv_cesa_tdma_desc *old_tdma = tdma;
		u32 type = tdma->flags & CESA_TDMA_TYPE_MSK;

		if (type == CESA_TDMA_OP)
			dma_pool_free(cesa_dev->dma->op_pool, tdma->op,
				      le32_to_cpu(tdma->src));

		tdma = tdma->next;
		dma_pool_free(cesa_dev->dma->tdma_desc_pool, old_tdma,
			      old_tdma->cur_dma);
	}

	dreq->chain.first = NULL;
	dreq->chain.last = NULL;
}
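/*
 * Turn the SRAM-relative src/dst offsets stored in the descriptors into
 * absolute bus addresses, now that the request has been assigned to a
 * specific engine (and thus a specific SRAM base address).
 */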
void mv_cesa_dma_prepare(struct mv_cesa_req *dreq,
			 struct mv_cesa_engine *engine)
{
	struct mv_cesa_tdma_desc *tdma;

	for (tdma = dreq->chain.first; tdma; tdma = tdma->next) {
		if (tdma->flags & CESA_TDMA_DST_IN_SRAM)
			tdma->dst = cpu_to_le32(tdma->dst + engine->sram_dma);

		if (tdma->flags & CESA_TDMA_SRC_IN_SRAM)
			tdma->src = cpu_to_le32(tdma->src + engine->sram_dma);

		if ((tdma->flags & CESA_TDMA_TYPE_MSK) == CESA_TDMA_OP)
			mv_cesa_adjust_op(engine, tdma->op);
	}
}
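/*
 * Append a request's descriptor chain to the engine's chain so the engine
 * can process several queued requests back to back.
 */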
void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
			struct mv_cesa_req *dreq)
{
	if (engine->chain.first == NULL && engine->chain.last == NULL) {
		engine->chain.first = dreq->chain.first;
		engine->chain.last = dreq->chain.last;
	} else {
		struct mv_cesa_tdma_desc *last;

		last = engine->chain.last;
		last->next = dreq->chain.first;
		engine->chain.last = dreq->chain.last;

		/*
		 * Break the DMA chain if the CESA_TDMA_BREAK_CHAIN flag is
		 * set on the last element of the current chain, or if the
		 * request being queued needs the IV regs to be set before
		 * launching the request.
		 */
		if (!(last->flags & CESA_TDMA_BREAK_CHAIN) &&
		    !(dreq->chain.first->flags & CESA_TDMA_SET_STATE))
			last->next_dma = dreq->chain.first->cur_dma;
	}
}
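/*
 * Interrupt handler bottom half: walk the engine's descriptor chain,
 * complete every request whose descriptors have already been executed and
 * stop at the descriptor the engine is currently processing. On error,
 * record the faulty request in engine->req for the core.
 */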
int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
{
	struct crypto_async_request *req = NULL;
	struct mv_cesa_tdma_desc *tdma = NULL, *next = NULL;
	dma_addr_t tdma_cur;
	int res = 0;

	tdma_cur = readl(engine->regs + CESA_TDMA_CUR);

	for (tdma = engine->chain.first; tdma; tdma = next) {
		spin_lock_bh(&engine->lock);
		next = tdma->next;
		spin_unlock_bh(&engine->lock);

		if (tdma->flags & CESA_TDMA_END_OF_REQ) {
			struct crypto_async_request *backlog = NULL;
			struct mv_cesa_ctx *ctx;
			u32 current_status;

			spin_lock_bh(&engine->lock);
			/*
			 * if req is NULL, this means we're processing the
			 * request in engine->req.
			 */
			if (!req)
				req = engine->req;
			else
				req = mv_cesa_dequeue_req_locked(engine,
								 &backlog);

			/* Re-chaining to the next request */
			engine->chain.first = tdma->next;
			tdma->next = NULL;

			/* If this is the last request, clear the chain */
			if (engine->chain.first == NULL)
				engine->chain.last = NULL;
			spin_unlock_bh(&engine->lock);

			ctx = crypto_tfm_ctx(req->tfm);
			current_status = (tdma->cur_dma == tdma_cur) ?
					 status : CESA_SA_INT_ACC0_IDMA_DONE;
			res = ctx->ops->process(req, current_status);
			ctx->ops->complete(req);

			if (res == 0)
				mv_cesa_engine_enqueue_complete_request(engine,
									req);

			if (backlog)
				backlog->complete(backlog, -EINPROGRESS);
		}

		if (res || tdma->cur_dma == tdma_cur)
			break;
	}

	/*
	 * Save the last request in error to engine->req, so that the core
	 * knows which request was faulty.
	 */
	if (res) {
		spin_lock_bh(&engine->lock);
		engine->req = req;
		spin_unlock_bh(&engine->lock);
	}

	return res;
}
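/*
 * Allocate a zeroed TDMA descriptor from the descriptor pool and append it
 * to the chain, linking it to the previous descriptor both in software
 * (->next) and in hardware (->next_dma).
 */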
static struct mv_cesa_tdma_desc *
mv_cesa_dma_add_desc(struct mv_cesa_tdma_chain *chain, gfp_t flags)
{
	struct mv_cesa_tdma_desc *new_tdma = NULL;
	dma_addr_t dma_handle;

	new_tdma = dma_pool_zalloc(cesa_dev->dma->tdma_desc_pool, flags,
				   &dma_handle);
	if (!new_tdma)
		return ERR_PTR(-ENOMEM);

	new_tdma->cur_dma = dma_handle;
	if (chain->last) {
		chain->last->next_dma = cpu_to_le32(dma_handle);
		chain->last->next = new_tdma;
	} else {
		chain->first = new_tdma;
	}

	chain->last = new_tdma;

	return new_tdma;
}
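/*
 * Add a descriptor that copies @size bytes of result data from @src back
 * into the operation context, re-using the op descriptor already present
 * in the chain (see the comment in the function body).
 */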
int mv_cesa_dma_add_result_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
			      u32 size, u32 flags, gfp_t gfp_flags)
{
	struct mv_cesa_tdma_desc *tdma, *op_desc;

	tdma = mv_cesa_dma_add_desc(chain, gfp_flags);
	if (IS_ERR(tdma))
		return PTR_ERR(tdma);

	/* We re-use an existing op_desc object to retrieve the context
	 * and result instead of allocating a new one.
	 * There is at least one object of this type in a CESA crypto
	 * req, just pick the first one in the chain.
	 */
	for (op_desc = chain->first; op_desc; op_desc = op_desc->next) {
		u32 type = op_desc->flags & CESA_TDMA_TYPE_MSK;

		if (type == CESA_TDMA_OP)
			break;
	}

	if (!op_desc)
		return -EIO;

	tdma->byte_cnt = cpu_to_le32(size | BIT(31));
	tdma->src = src;
	tdma->dst = op_desc->src;
	tdma->op = op_desc->op;

	flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM);
	tdma->flags = flags | CESA_TDMA_RESULT;
	return 0;
}
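/*
 * Allocate an operation context from the op pool, initialize it from
 * @op_templ and add a descriptor that copies it to the engine SRAM. When
 * @skip_ctx is true, only the operation descriptor (without the embedded
 * context) is transferred.
 */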
struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain,
					  const struct mv_cesa_op_ctx *op_templ,
					  bool skip_ctx, gfp_t flags)
{
	struct mv_cesa_tdma_desc *tdma;
	struct mv_cesa_op_ctx *op;
	dma_addr_t dma_handle;
	unsigned int size;

	tdma = mv_cesa_dma_add_desc(chain, flags);
	if (IS_ERR(tdma))
		return ERR_CAST(tdma);

	op = dma_pool_alloc(cesa_dev->dma->op_pool, flags, &dma_handle);
	if (!op)
		return ERR_PTR(-ENOMEM);

	*op = *op_templ;

	size = skip_ctx ? sizeof(op->desc) : sizeof(*op);

	tdma->op = op;
	tdma->byte_cnt = cpu_to_le32(size | BIT(31));
	tdma->src = cpu_to_le32(dma_handle);
	tdma->dst = CESA_SA_CFG_SRAM_OFFSET;
	tdma->flags = CESA_TDMA_DST_IN_SRAM | CESA_TDMA_OP;

	return op;
}
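/*
 * Add a plain data transfer descriptor copying @size bytes from @src
 * to @dst.
 */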
int mv_cesa_dma_add_data_transfer(struct mv_cesa_tdma_chain *chain,
				  dma_addr_t dst, dma_addr_t src, u32 size,
				  u32 flags, gfp_t gfp_flags)
{
	struct mv_cesa_tdma_desc *tdma;

	tdma = mv_cesa_dma_add_desc(chain, gfp_flags);
	if (IS_ERR(tdma))
		return PTR_ERR(tdma);

	tdma->byte_cnt = cpu_to_le32(size | BIT(31));
	tdma->src = src;
	tdma->dst = dst;

	flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM);
	tdma->flags = flags | CESA_TDMA_DATA;

	return 0;
}
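/*
 * Add an all-zero dummy descriptor; callers use it as a launch placeholder
 * when building chained requests (hence the name).
 */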
int mv_cesa_dma_add_dummy_launch(struct mv_cesa_tdma_chain *chain, gfp_t flags)
{
	struct mv_cesa_tdma_desc *tdma;

	tdma = mv_cesa_dma_add_desc(chain, flags);
	return PTR_ERR_OR_ZERO(tdma);
}
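/*
 * Terminate the chain with a descriptor that transfers no data and only
 * carries BIT(31) in its byte count.
 */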
int mv_cesa_dma_add_dummy_end(struct mv_cesa_tdma_chain *chain, gfp_t flags)
{
	struct mv_cesa_tdma_desc *tdma;

	tdma = mv_cesa_dma_add_desc(chain, flags);
	if (IS_ERR(tdma))
		return PTR_ERR(tdma);

	tdma->byte_cnt = cpu_to_le32(BIT(31));

	return 0;
}
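/*
 * Add one data transfer descriptor per scatterlist segment, moving data
 * between the DMA-mapped scatterlist and the engine data SRAM in the
 * direction given by sgiter->dir.
 */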
int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain,
				 struct mv_cesa_dma_iter *dma_iter,
				 struct mv_cesa_sg_dma_iter *sgiter,
				 gfp_t gfp_flags)
{
	u32 flags = sgiter->dir == DMA_TO_DEVICE ?
		    CESA_TDMA_DST_IN_SRAM : CESA_TDMA_SRC_IN_SRAM;
	unsigned int len;

	do {
		dma_addr_t dst, src;
		int ret;

		len = mv_cesa_req_dma_iter_transfer_len(dma_iter, sgiter);
		if (sgiter->dir == DMA_TO_DEVICE) {
			dst = CESA_SA_DATA_SRAM_OFFSET + sgiter->op_offset;
			src = sg_dma_address(sgiter->sg) + sgiter->offset;
		} else {
			dst = sg_dma_address(sgiter->sg) + sgiter->offset;
			src = CESA_SA_DATA_SRAM_OFFSET + sgiter->op_offset;
		}

		ret = mv_cesa_dma_add_data_transfer(chain, dst, src, len,
						    CESA_TDMA_DATA | flags,
						    gfp_flags);
		if (ret)
			return ret;

	} while (mv_cesa_req_dma_iter_next_transfer(dma_iter, sgiter, len));

	return 0;
}