// SPDX-License-Identifier: GPL-2.0
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <crypto/internal/skcipher.h>

#include "nitrox_dev.h"
#include "nitrox_req.h"
#include "nitrox_csr.h"

/* SLC_STORE_INFO */
#define MIN_UDD_LEN 16
/* PKT_IN_HDR + SLC_STORE_INFO */
#define FDATA_SIZE 32
/* Base destination port for the solicited requests */
#define SOLICIT_BASE_DPORT 256

#define REQ_NOT_POSTED 1
#define REQ_BACKLOG    2
#define REQ_POSTED     3

/*
 * Response codes from SE microcode
 * 0x00 - Success
 *	Completion with no error
 * 0x43 - ERR_GC_DATA_LEN_INVALID
 *	Invalid Data length if Encryption Data length is
 *	less than 16 bytes for AES-XTS and AES-CTS.
 * 0x45 - ERR_GC_CTX_LEN_INVALID
 *	Invalid context length: CTXL != 23 words.
 * 0x4F - ERR_GC_DOCSIS_CIPHER_INVALID
 *	DOCSIS support is enabled with other than
 *	AES/DES-CBC mode encryption.
 * 0x50 - ERR_GC_DOCSIS_OFFSET_INVALID
 *	Authentication offset is other than 0 with
 *	Encryption IV source = 0.
 *	Authentication offset is other than 8 (DES)/16 (AES)
 *	with Encryption IV source = 1.
 * 0x51 - ERR_GC_CRC32_INVALID_SELECTION
 *	CRC32 is enabled for other than DOCSIS encryption.
 * 0x52 - ERR_GC_AES_CCM_FLAG_INVALID
 *	Invalid flag options in AES-CCM IV.
 */

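/*
 * Illustrative sketch, not part of this driver: one plausible way to
 * fold the ORH error byte (checked in process_response_list() below)
 * into an errno for the microcode response codes listed above. The
 * helper name is hypothetical.
 */
static inline int se_response_to_errno(u8 orh_err)
{
	switch (orh_err) {
	case 0x00:	/* Success */
		return 0;
	case 0x43:	/* ERR_GC_DATA_LEN_INVALID */
	case 0x45:	/* ERR_GC_CTX_LEN_INVALID */
	case 0x4f:	/* ERR_GC_DOCSIS_CIPHER_INVALID */
	case 0x50:	/* ERR_GC_DOCSIS_OFFSET_INVALID */
	case 0x51:	/* ERR_GC_CRC32_INVALID_SELECTION */
	case 0x52:	/* ERR_GC_AES_CCM_FLAG_INVALID */
		return -EINVAL;
	default:
		return -EIO;
	}
}
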
static inline int incr_index(int index, int count, int max)
{
	if ((index + count) >= max)
		index = index + count - max;
	else
		index += count;

	return index;
}

static void softreq_unmap_sgbufs(struct nitrox_softreq *sr)
{
	struct nitrox_device *ndev = sr->ndev;
	struct device *dev = DEV(ndev);

	dma_unmap_sg(dev, sr->in.sg, sr->in.sgmap_cnt, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, sr->in.sgcomp_dma, sr->in.sgcomp_len,
			 DMA_TO_DEVICE);
	kfree(sr->in.sgcomp);
	sr->in.sg = NULL;
	sr->in.sgmap_cnt = 0;

	dma_unmap_sg(dev, sr->out.sg, sr->out.sgmap_cnt,
		     DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, sr->out.sgcomp_dma, sr->out.sgcomp_len,
			 DMA_TO_DEVICE);
	kfree(sr->out.sgcomp);
	sr->out.sg = NULL;
	sr->out.sgmap_cnt = 0;
}

static void softreq_destroy(struct nitrox_softreq *sr)
{
	softreq_unmap_sgbufs(sr);
	kfree(sr);
}

/**
 * create_sg_component - create SG components for N5 device.
 * @sr: Request structure
 * @sgtbl: SG table
 * @map_nents: number of dma mapped entries
 *
 * Component structure
 *
 *   63     48 47     32 31    16 15      0
 *   --------------------------------------
 *   |   LEN0  |  LEN1  |  LEN2  |  LEN3  |
 *   |-------------------------------------
 *   |               PTR0                 |
 *   --------------------------------------
 *   |               PTR1                 |
 *   --------------------------------------
 *   |               PTR2                 |
 *   --------------------------------------
 *   |               PTR3                 |
 *   --------------------------------------
 *
 *   Returns 0 if success or a negative errno code on error.
 */
static int create_sg_component(struct nitrox_softreq *sr,
			       struct nitrox_sgtable *sgtbl, int map_nents)
{
	struct nitrox_device *ndev = sr->ndev;
	struct nitrox_sgcomp *sgcomp;
	struct scatterlist *sg;
	dma_addr_t dma;
	size_t sz_comp;
	int i, j, nr_sgcomp;

	nr_sgcomp = roundup(map_nents, 4) / 4;

	/* each component holds 4 dma pointers */
	sz_comp = nr_sgcomp * sizeof(*sgcomp);
	sgcomp = kzalloc(sz_comp, sr->gfp);
	if (!sgcomp)
		return -ENOMEM;

	sgtbl->sgcomp = sgcomp;

	sg = sgtbl->sg;
	/* populate device sg component */
	for (i = 0; i < nr_sgcomp; i++) {
		for (j = 0; j < 4 && sg; j++) {
			sgcomp[i].len[j] = cpu_to_be16(sg_dma_len(sg));
			sgcomp[i].dma[j] = cpu_to_be64(sg_dma_address(sg));
			sg = sg_next(sg);
		}
	}
	/* map the device sg component */
	dma = dma_map_single(DEV(ndev), sgtbl->sgcomp, sz_comp, DMA_TO_DEVICE);
	if (dma_mapping_error(DEV(ndev), dma)) {
		kfree(sgtbl->sgcomp);
		sgtbl->sgcomp = NULL;
		return -ENOMEM;
	}

	sgtbl->sgcomp_dma = dma;
	sgtbl->sgcomp_len = sz_comp;

	return 0;
}

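/*
 * Worked example for create_sg_component(): with map_nents == 5,
 * nr_sgcomp = roundup(5, 4) / 4 == 2, so two components are allocated.
 * The first carries entries 0-3, the second carries entry 4; the three
 * unused slots stay zero-filled courtesy of kzalloc().
 */
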
/**
 * dma_map_inbufs - DMA map input sglist and creates sglist component
 *                  for N5 device.
 * @sr: Request structure
 * @req: Crypto request structure
 *
 * Returns 0 if successful or a negative errno code on error.
 */
static int dma_map_inbufs(struct nitrox_softreq *sr,
			  struct se_crypto_request *req)
{
	struct device *dev = DEV(sr->ndev);
	struct scatterlist *sg = req->src;
	int i, nents, ret = 0;

	nents = dma_map_sg(dev, req->src, sg_nents(req->src),
			   DMA_BIDIRECTIONAL);
	if (!nents)
		return -EINVAL;

	for_each_sg(req->src, sg, nents, i)
		sr->in.total_bytes += sg_dma_len(sg);

	sr->in.sg = req->src;
	sr->in.sgmap_cnt = nents;
	ret = create_sg_component(sr, &sr->in, sr->in.sgmap_cnt);
	if (ret)
		goto incomp_err;

	return 0;

incomp_err:
	dma_unmap_sg(dev, req->src, nents, DMA_BIDIRECTIONAL);
	sr->in.sgmap_cnt = 0;
	return ret;
}

static int dma_map_outbufs(struct nitrox_softreq *sr,
			   struct se_crypto_request *req)
{
	struct device *dev = DEV(sr->ndev);
	int nents, ret = 0;

	nents = dma_map_sg(dev, req->dst, sg_nents(req->dst),
			   DMA_BIDIRECTIONAL);
	if (!nents)
		return -EINVAL;

	sr->out.sg = req->dst;
	sr->out.sgmap_cnt = nents;
	ret = create_sg_component(sr, &sr->out, sr->out.sgmap_cnt);
	if (ret)
		goto outcomp_map_err;

	return 0;

outcomp_map_err:
	dma_unmap_sg(dev, req->dst, nents, DMA_BIDIRECTIONAL);
	sr->out.sgmap_cnt = 0;
	sr->out.sg = NULL;
	return ret;
}

static inline int softreq_map_iobuf(struct nitrox_softreq *sr,
				    struct se_crypto_request *creq)
{
	int ret;

	ret = dma_map_inbufs(sr, creq);
	if (ret)
		return ret;

	ret = dma_map_outbufs(sr, creq);
	if (ret)
		softreq_unmap_sgbufs(sr);

	return ret;
}

static inline void backlog_list_add(struct nitrox_softreq *sr,
				    struct nitrox_cmdq *cmdq)
{
	INIT_LIST_HEAD(&sr->backlog);

	spin_lock_bh(&cmdq->backlog_qlock);
	list_add_tail(&sr->backlog, &cmdq->backlog_head);
	atomic_inc(&cmdq->backlog_count);
	atomic_set(&sr->status, REQ_BACKLOG);
	spin_unlock_bh(&cmdq->backlog_qlock);
}

static inline void response_list_add(struct nitrox_softreq *sr,
				     struct nitrox_cmdq *cmdq)
{
	INIT_LIST_HEAD(&sr->response);

	spin_lock_bh(&cmdq->resp_qlock);
	list_add_tail(&sr->response, &cmdq->response_head);
	spin_unlock_bh(&cmdq->resp_qlock);
}

static inline void response_list_del(struct nitrox_softreq *sr,
				     struct nitrox_cmdq *cmdq)
{
	spin_lock_bh(&cmdq->resp_qlock);
	list_del(&sr->response);
	spin_unlock_bh(&cmdq->resp_qlock);
}

static struct nitrox_softreq *
get_first_response_entry(struct nitrox_cmdq *cmdq)
{
	return list_first_entry_or_null(&cmdq->response_head,
					struct nitrox_softreq, response);
}

static inline bool cmdq_full(struct nitrox_cmdq *cmdq, int qlen)
{
	if (atomic_inc_return(&cmdq->pending_count) > qlen) {
		atomic_dec(&cmdq->pending_count);
		/* sync with other cpus */
		smp_mb__after_atomic();
		return true;
	}
	/* sync with other cpus */
	smp_mb__after_atomic();
	return false;
}

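/*
 * Note: cmdq_full() increments pending_count optimistically and rolls
 * it back only when the queue is over qlen, so a "not full" result also
 * reserves the caller's slot in the ring accounting.
 */
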
/**
 * post_se_instr - Post SE instruction to Packet Input ring
 * @sr: Request structure
 * @cmdq: Command queue structure
 */
static void post_se_instr(struct nitrox_softreq *sr,
			  struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = sr->ndev;
	int idx;
	u8 *ent;

	spin_lock_bh(&cmdq->cmd_qlock);

	idx = cmdq->write_idx;
	/* copy the instruction */
	ent = cmdq->base + (idx * cmdq->instr_size);
	memcpy(ent, &sr->instr, cmdq->instr_size);

	atomic_set(&sr->status, REQ_POSTED);
	response_list_add(sr, cmdq);
	sr->tstamp = jiffies;
	/* flush the command queue updates */
	dma_wmb();

	/* Ring doorbell with count 1 */
	writeq(1, cmdq->dbell_csr_addr);

	cmdq->write_idx = incr_index(idx, 1, ndev->qlen);

	spin_unlock_bh(&cmdq->cmd_qlock);

	/* increment the posted command count */
	atomic64_inc(&ndev->stats.posted);
}

static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = cmdq->ndev;
	struct nitrox_softreq *sr, *tmp;
	int ret = 0;

	if (!atomic_read(&cmdq->backlog_count))
		return 0;

	spin_lock_bh(&cmdq->backlog_qlock);

	list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
		/* submit until space available */
		if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
			ret = -ENOSPC;
			break;
		}
		/* delete from backlog list */
		list_del(&sr->backlog);
		atomic_dec(&cmdq->backlog_count);
		/* sync with other cpus */
		smp_mb__after_atomic();

		/* post the command */
		post_se_instr(sr, cmdq);
	}
	spin_unlock_bh(&cmdq->backlog_qlock);

	return ret;
}

static int nitrox_enqueue_request(struct nitrox_softreq *sr)
{
	struct nitrox_cmdq *cmdq = sr->cmdq;
	struct nitrox_device *ndev = sr->ndev;

	/* try to post backlog requests */
	post_backlog_cmds(cmdq);

	if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
		if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			/* increment drop count */
			atomic64_inc(&ndev->stats.dropped);
			return -ENOSPC;
		}
		/* add to backlog list */
		backlog_list_add(sr, cmdq);
		return -EINPROGRESS;
	}
	post_se_instr(sr, cmdq);

	return -EINPROGRESS;
}

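/*
 * Note: the return convention above mirrors the crypto API backlog
 * semantics: -EINPROGRESS means the request was accepted (posted or
 * backlogged), -ENOSPC means it was dropped because the caller did not
 * set CRYPTO_TFM_REQ_MAY_BACKLOG.
 */
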
/**
 * nitrox_process_se_request - Send request to SE core
 * @ndev: NITROX device
 * @req: Crypto request
 * @callback: Completion callback
 * @cb_arg: Completion callback argument
 *
 * Returns -EINPROGRESS if the request is queued (or backlogged),
 * or a negative errno code on failure.
 */
int nitrox_process_se_request(struct nitrox_device *ndev,
			      struct se_crypto_request *req,
			      completion_t callback,
			      void *cb_arg)
{
	struct nitrox_softreq *sr;
	dma_addr_t ctx_handle = 0;
	int qno, ret = 0;

	if (!nitrox_ready(ndev))
		return -ENODEV;

	sr = kzalloc(sizeof(*sr), req->gfp);
	if (!sr)
		return -ENOMEM;

	sr->ndev = ndev;
	sr->flags = req->flags;
	sr->gfp = req->gfp;
	sr->callback = callback;
	sr->cb_arg = cb_arg;

	atomic_set(&sr->status, REQ_NOT_POSTED);

	sr->resp.orh = req->orh;
	sr->resp.completion = req->comp;

	ret = softreq_map_iobuf(sr, req);
	if (ret) {
		kfree(sr);
		return ret;
	}

	/* get the context handle */
	if (req->ctx_handle) {
		struct ctx_hdr *hdr;
		u8 *ctx_ptr;

		ctx_ptr = (u8 *)(uintptr_t)req->ctx_handle;
		hdr = (struct ctx_hdr *)(ctx_ptr - sizeof(struct ctx_hdr));
		ctx_handle = hdr->ctx_dma;
	}

	/* select the queue */
	qno = smp_processor_id() % ndev->nr_queues;

	sr->cmdq = &ndev->pkt_inq[qno];

	/*
	 * 64-Byte Instruction Format
	 *
	 *  ----------------------
	 *  |      DPTR0         | 8 bytes
	 *  ----------------------
	 *  |  PKT_IN_INSTR_HDR  | 8 bytes
	 *  ----------------------
	 *  |    PKT_IN_HDR      | 16 bytes
	 *  ----------------------
	 *  |     SLC_INFO       | 16 bytes
	 *  ----------------------
	 *  |   Front data       | 16 bytes
	 *  ----------------------
	 */

	/* fill the packet instruction */
	/* word 0 */
	sr->instr.dptr0 = cpu_to_be64(sr->in.sgcomp_dma);

	/* word 1 */
	sr->instr.ih.value = 0;
	sr->instr.ih.s.g = 1;
	sr->instr.ih.s.gsz = sr->in.sgmap_cnt;
	sr->instr.ih.s.ssz = sr->out.sgmap_cnt;
	sr->instr.ih.s.fsz = FDATA_SIZE + sizeof(struct gphdr);
	sr->instr.ih.s.tlen = sr->instr.ih.s.fsz + sr->in.total_bytes;
	sr->instr.ih.value = cpu_to_be64(sr->instr.ih.value);

	/* word 2 */
	sr->instr.irh.value[0] = 0;
	sr->instr.irh.s.uddl = MIN_UDD_LEN;
	/* context length in 64-bit words */
	sr->instr.irh.s.ctxl = (req->ctrl.s.ctxl / 8);
	/* offset from solicit base port 256 */
	sr->instr.irh.s.destport = SOLICIT_BASE_DPORT + qno;
	sr->instr.irh.s.ctxc = req->ctrl.s.ctxc;
	sr->instr.irh.s.arg = req->ctrl.s.arg;
	sr->instr.irh.s.opcode = req->opcode;
	sr->instr.irh.value[0] = cpu_to_be64(sr->instr.irh.value[0]);

	/* word 3 */
	sr->instr.irh.s.ctxp = cpu_to_be64(ctx_handle);

	/* word 4 */
	sr->instr.slc.value[0] = 0;
	sr->instr.slc.s.ssz = sr->out.sgmap_cnt;
	sr->instr.slc.value[0] = cpu_to_be64(sr->instr.slc.value[0]);

	/* word 5 */
	sr->instr.slc.s.rptr = cpu_to_be64(sr->out.sgcomp_dma);

	/*
	 * No conversion for front data,
	 * it goes into the payload.
	 * Put the GP header in front data.
	 */
	sr->instr.fdata[0] = *((u64 *)&req->gph);
	sr->instr.fdata[1] = 0;

	ret = nitrox_enqueue_request(sr);
	if (ret == -ENOSPC)
		goto send_fail;

	return ret;

send_fail:
	softreq_destroy(sr);
	return ret;
}

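/*
 * Illustrative caller sketch, not part of this driver: a hypothetical
 * skcipher front end pairing nitrox_process_se_request() with a
 * completion callback. example_done()/example_submit() are made-up
 * names; err is the ORH error byte delivered by process_response_list()
 * below.
 */
static void __maybe_unused example_done(void *cb_arg, int err)
{
	struct skcipher_request *skreq = cb_arg;

	skcipher_request_complete(skreq, err ? -EINVAL : 0);
}

static int __maybe_unused example_submit(struct nitrox_device *ndev,
					 struct se_crypto_request *creq,
					 struct skcipher_request *skreq)
{
	/* -EINPROGRESS indicates the request was queued or backlogged */
	return nitrox_process_se_request(ndev, creq, example_done, skreq);
}
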
static inline int cmd_timeout(unsigned long tstamp, unsigned long timeout)
{
	return time_after_eq(jiffies, (tstamp + timeout));
}

void backlog_qflush_work(struct work_struct *work)
{
	struct nitrox_cmdq *cmdq;

	cmdq = container_of(work, struct nitrox_cmdq, backlog_qflush);
	post_backlog_cmds(cmdq);
}

static bool sr_completed(struct nitrox_softreq *sr)
{
	u64 orh = READ_ONCE(*sr->resp.orh);
	unsigned long timeout = jiffies + msecs_to_jiffies(1);

	if ((orh != PENDING_SIG) && (orh & 0xff))
		return true;

	while (READ_ONCE(*sr->resp.completion) == PENDING_SIG) {
		if (time_after(jiffies, timeout)) {
			pr_err("comp not done\n");
			return false;
		}
	}

	return true;
}

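/*
 * Note: the submitter pre-fills both the ORH and the completion word
 * with PENDING_SIG, so sr_completed() treats a non-pending ORH carrying
 * an error byte as done and otherwise polls the completion word for up
 * to ~1ms before reporting the request as incomplete.
 */
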
/**
 * process_response_list - process completed requests
 * @cmdq: Command queue structure
 */
static void process_response_list(struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = cmdq->ndev;
	struct nitrox_softreq *sr;
	int req_completed = 0, err = 0, budget;
	completion_t callback;
	void *cb_arg;

	/* check all pending requests */
	budget = atomic_read(&cmdq->pending_count);

	while (req_completed < budget) {
		sr = get_first_response_entry(cmdq);
		if (!sr)
			break;

		if (atomic_read(&sr->status) != REQ_POSTED)
			break;

		/* check orh and completion bytes updates */
		if (!sr_completed(sr)) {
			/* request not completed, check for timeout */
			if (!cmd_timeout(sr->tstamp, ndev->timeout))
				break;
			dev_err_ratelimited(DEV(ndev),
					    "Request timeout, orh 0x%016llx\n",
					    READ_ONCE(*sr->resp.orh));
		}
		atomic_dec(&cmdq->pending_count);
		atomic64_inc(&ndev->stats.completed);
		/* sync with other cpus */
		smp_mb__after_atomic();
		/* remove from response list */
		response_list_del(sr, cmdq);
		/* ORH error code */
		err = READ_ONCE(*sr->resp.orh) & 0xff;
		callback = sr->callback;
		cb_arg = sr->cb_arg;
		softreq_destroy(sr);
		if (callback)
			callback(cb_arg, err);

		req_completed++;
	}
}

/**
 * pkt_slc_resp_tasklet - post processing of SE responses
 * @data: tasklet argument, carries the queue vector pointer
 */
void pkt_slc_resp_tasklet(unsigned long data)
{
	struct nitrox_q_vector *qvec = (void *)(uintptr_t)(data);
	struct nitrox_cmdq *cmdq = qvec->cmdq;
	union nps_pkt_slc_cnts slc_cnts;

	/* read completion count */
	slc_cnts.value = readq(cmdq->compl_cnt_csr_addr);
	/* resend the interrupt if more work to do */
	slc_cnts.s.resend = 1;

	process_response_list(cmdq);

	/*
	 * clear the interrupt with resend bit enabled,
	 * MSI-X interrupt generates if Completion count > Threshold
	 */
	writeq(slc_cnts.value, cmdq->compl_cnt_csr_addr);

	if (atomic_read(&cmdq->backlog_count))
		schedule_work(&cmdq->backlog_qflush);
}