// SPDX-License-Identifier: GPL-2.0+
/*
 * CAAM control-plane driver backend
 * Controller-level driver, kernel property detection, initialization
 *
 * Copyright 2008-2012 Freescale Semiconductor, Inc.
 * Copyright 2018-2019 NXP
 */
#include <linux/device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sys_soc.h>

#include "desc_constr.h"

EXPORT_SYMBOL(caam_dpaa2);
/*
 * Descriptor to instantiate RNG State Handle 0 in normal mode and
 * load the JDKEK, TDKEK and TDSK registers
 */
static void build_instantiation_desc(u32 *desc, int handle, int do_sk)
	u32 *jump_cmd, op_flags;

	init_job_desc(desc, 0);

	op_flags = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
			(handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT;

	/* INIT RNG in non-test mode */
	append_operation(desc, op_flags);

	if (!handle && do_sk) {
		/*
		 * For SH0, Secure Keys must be generated as well
		 */
		jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1);
		set_jump_tgt_here(desc, jump_cmd);

		/*
		 * load 1 to clear written reg:
		 * resets the done interrupt and returns the RNG to idle.
		 */
		append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);

		/* Initialize State Handle */
		append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |

	append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
/* Descriptor for deinstantiation of State Handle 0 of the RNG block. */
static void build_deinstantiation_desc(u32 *desc, int handle)
	init_job_desc(desc, 0);

	/* Uninstantiate State Handle 0 */
	append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
			 (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INITFINAL);

	append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
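/*
 * Illustrative sketch (not part of the driver): how the two builders above
 * pair with run_descriptor_deco0() below. The buffer size follows the
 * CAAM_CMD_SZ * 7 convention used by instantiate_rng(); error handling is
 * elided for brevity.
 *
 *	u32 status = 0;
 *	u32 *desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL);
 *
 *	build_instantiation_desc(desc, 0, 1);      instantiate SH0, gen. keys
 *	ret = run_descriptor_deco0(ctrldev, desc, &status);
 *	...
 *	build_deinstantiation_desc(desc, 0);       tear SH0 back down
 *	ret = run_descriptor_deco0(ctrldev, desc, &status);
 *	kfree(desc);
 */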
/*
 * run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of
 *			  the software (no JR/QI used).
 * @ctrldev - pointer to device
 * @status - descriptor status, after being run
 *
 * Return: - 0 if no error occurred
 *	   - -ENODEV if the DECO couldn't be acquired
 *	   - -EAGAIN if an error occurred while executing the descriptor
 */
static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
	struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
	struct caam_deco __iomem *deco = ctrlpriv->deco;
	unsigned int timeout = 100000;
	u32 deco_dbg_reg, deco_state, flags;

	if (ctrlpriv->virt_en == 1 ||
	    /*
	     * Apparently on i.MX8MQ it doesn't matter if virt_en == 1
	     * and the following steps should be performed regardless
	     */
	    of_machine_is_compatible("fsl,imx8mq")) {
		clrsetbits_32(&ctrl->deco_rsr, 0, DECORSR_JR0);

		while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) &&

	clrsetbits_32(&ctrl->deco_rq, 0, DECORR_RQD0ENABLE);

	while (!(rd_reg32(&ctrl->deco_rq) & DECORR_DEN0) &&

		dev_err(ctrldev, "failed to acquire DECO 0\n");
		clrsetbits_32(&ctrl->deco_rq, DECORR_RQD0ENABLE, 0);

	for (i = 0; i < desc_len(desc); i++)
		wr_reg32(&deco->descbuf[i], caam32_to_cpu(*(desc + i)));

	flags = DECO_JQCR_WHL;
	/*
	 * If the descriptor length is longer than 4 words, then the
	 * FOUR bit in JRCTRL register must be set.
	 */
	if (desc_len(desc) >= 4)
		flags |= DECO_JQCR_FOUR;

	/* Instruct the DECO to execute it */
	clrsetbits_32(&deco->jr_ctl_hi, 0, flags);

		deco_dbg_reg = rd_reg32(&deco->desc_dbg);

		if (ctrlpriv->era < 10)
			deco_state = (deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) >>
				     DESC_DBG_DECO_STAT_SHIFT;
			deco_state = (rd_reg32(&deco->dbg_exec) &
				      DESC_DER_DECO_STAT_MASK) >>
				     DESC_DER_DECO_STAT_SHIFT;

		/*
		 * If an error occurred in the descriptor, then
		 * the DECO status field will be set to 0x0D
		 */
		if (deco_state == DECO_STAT_HOST_ERR)

	} while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);

	*status = rd_reg32(&deco->op_status_hi) &
		  DECO_OP_STATUS_HI_ERR_MASK;

	if (ctrlpriv->virt_en == 1)
		clrsetbits_32(&ctrl->deco_rsr, DECORSR_JR0, 0);

	/* Mark the DECO as free */
	clrsetbits_32(&ctrl->deco_rq, DECORR_RQD0ENABLE, 0);
/*
 * instantiate_rng - builds and executes a descriptor on DECO0,
 *		     which initializes the RNG block.
 * @ctrldev - pointer to device
 * @state_handle_mask - bitmask containing the instantiation status
 *			for the RNG4 state handles which exist in
 *			the RNG4 block: 1 if it's been instantiated
 *			by an external entity, 0 otherwise.
 * @gen_sk - generate data to be loaded into the JDKEK, TDKEK and TDSK;
 *	     Caution: this can be done only once; if the keys need to be
 *	     regenerated, a POR is required
 *
 * Return: - 0 if no error occurred
 *	   - -ENOMEM if there isn't enough memory to allocate the descriptor
 *	   - -ENODEV if DECO0 couldn't be acquired
 *	   - -EAGAIN if an error occurred when executing the descriptor,
 *	     e.g. an RNG hardware error due to not "good enough"
 *	     entropy being acquired.
 */
static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
	struct caam_ctrl __iomem *ctrl;
	u32 *desc, status = 0, rdsta_val;

	ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
	desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL);

	for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
		/*
		 * If the corresponding bit is set, this state handle
		 * was initialized by somebody else, so it's left alone.
		 */
		if ((1 << sh_idx) & state_handle_mask)

		/* Create the descriptor for instantiating RNG State Handle */
		build_instantiation_desc(desc, sh_idx, gen_sk);

		/* Try to run it through DECO0 */
		ret = run_descriptor_deco0(ctrldev, desc, &status);

		/*
		 * If ret is not 0, or descriptor status is not 0, then
		 * something went wrong. No need to try the next state
		 * handle (if available), bail out here.
		 * Also, if for some reason, the State Handle didn't get
		 * instantiated although the descriptor has finished
		 * without any error (HW optimizations for later
		 * CAAM eras), then try again.
		 */
		rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
		if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) ||
		    !(rdsta_val & (1 << sh_idx))) {

		dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
		/* Clear the contents before recreating the descriptor */
		memset(desc, 0x00, CAAM_CMD_SZ * 7);
/*
 * deinstantiate_rng - builds and executes a descriptor on DECO0,
 *		       which deinitializes the RNG block.
 * @ctrldev - pointer to device
 * @state_handle_mask - bitmask containing the instantiation status
 *			for the RNG4 state handles which exist in
 *			the RNG4 block: 1 if it's been instantiated
 *			by an external entity, 0 otherwise.
 *
 * Return: - 0 if no error occurred
 *	   - -ENOMEM if there isn't enough memory to allocate the descriptor
 *	   - -ENODEV if DECO0 couldn't be acquired
 *	   - -EAGAIN if an error occurred when executing the descriptor
 */
static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
	desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL);

	for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
		/*
		 * If the corresponding bit is set, then it means the state
		 * handle was initialized by us, and thus it needs to be
		 * deinitialized as well
		 */
		if ((1 << sh_idx) & state_handle_mask) {
			/*
			 * Create the descriptor for deinstantiating this
			 * state handle
			 */
			build_deinstantiation_desc(desc, sh_idx);

			/* Try to run it through DECO0 */
			ret = run_descriptor_deco0(ctrldev, desc, &status);

			    (status && status != JRSTA_SSRC_JUMP_HALT_CC)) {
					"Failed to deinstantiate RNG4 SH%d\n",

			dev_info(ctrldev, "Deinstantiated RNG4 SH%d\n", sh_idx);
static int caam_remove(struct platform_device *pdev)
	struct device *ctrldev;
	struct caam_drv_private *ctrlpriv;
	struct caam_ctrl __iomem *ctrl;

	ctrldev = &pdev->dev;
	ctrlpriv = dev_get_drvdata(ctrldev);
	ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;

	/* Remove platform devices under the crypto node */
	of_platform_depopulate(ctrldev);

#ifdef CONFIG_CAAM_QI
	if (ctrlpriv->qi_init)
		caam_qi_shutdown(ctrldev);

	/*
	 * De-initialize RNG state handles initialized by this driver.
	 * In case of SoCs with Management Complex, RNG is managed by MC f/w.
	 */
	if (!ctrlpriv->mc_en && ctrlpriv->rng4_sh_init)
		deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);

	/* Shut down debug views */
#ifdef CONFIG_DEBUG_FS
	debugfs_remove_recursive(ctrlpriv->dfs_root);

	/* Unmap controller region */
/*
 * kick_trng - sets the various parameters for enabling the initialization
 *	       of the RNG4 block in CAAM
 * @pdev - pointer to the platform device
 * @ent_delay - Defines the length (in system clocks) of each entropy sample.
 */
static void kick_trng(struct platform_device *pdev, int ent_delay)
	struct device *ctrldev = &pdev->dev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
	struct caam_ctrl __iomem *ctrl;
	struct rng4tst __iomem *r4tst;

	ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
	r4tst = &ctrl->r4tst[0];

	/* put RNG4 into program mode */
	clrsetbits_32(&r4tst->rtmctl, 0, RTMCTL_PRGM);

	/*
	 * Performance-wise, it does not make sense to
	 * set the delay to a value that is lower
	 * than the last one that worked (i.e. the state handles
	 * were instantiated properly). Thus, instead of wasting
	 * time trying to set the values controlling the sample
	 * frequency, the function simply returns.
	 */
	val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
	      >> RTSDCTL_ENT_DLY_SHIFT;
	if (ent_delay <= val)

	val = rd_reg32(&r4tst->rtsdctl);
	val = (val & ~RTSDCTL_ENT_DLY_MASK) |
	      (ent_delay << RTSDCTL_ENT_DLY_SHIFT);
	wr_reg32(&r4tst->rtsdctl, val);
	/* min. freq. count, equal to 1/4 of the entropy sample length */
	wr_reg32(&r4tst->rtfrqmin, ent_delay >> 2);
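	/*
	 * Worked example (assuming RTSDCTL_ENT_DLY_MIN is 3200 system
	 * clocks): on the first pass ent_delay = 3200, so the entropy
	 * delay field is written with 3200 and rtfrqmin with 3200 >> 2
	 * = 800. If instantiation later fails with -EAGAIN, the caller
	 * retries with a larger ent_delay and this function scales both
	 * values up accordingly.
	 */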
	/* disable maximum frequency count */
	wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
	/* read the control register */
	val = rd_reg32(&r4tst->rtmctl);
	/*
	 * select raw sampling in both entropy shifter
	 * and statistical checker; put RNG4 into run mode
	 */
	clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, RTMCTL_SAMP_MODE_RAW_ES_SC);
static int caam_get_era_from_hw(struct caam_ctrl __iomem *ctrl)
	static const struct {

	ccbvid = rd_reg32(&ctrl->perfmon.ccb_id);
	era = (ccbvid & CCBVID_ERA_MASK) >> CCBVID_ERA_SHIFT;
	if (era)	/* This is '0' prior to CAAM ERA-6 */

	id_ms = rd_reg32(&ctrl->perfmon.caam_id_ms);
	ip_id = (id_ms & SECVID_MS_IPID_MASK) >> SECVID_MS_IPID_SHIFT;
	maj_rev = (id_ms & SECVID_MS_MAJ_REV_MASK) >> SECVID_MS_MAJ_REV_SHIFT;

	for (i = 0; i < ARRAY_SIZE(id); i++)
		if (id[i].ip_id == ip_id && id[i].maj_rev == maj_rev)
/*
 * caam_get_era() - Return the ERA of the SEC on SoC, based
 * on "sec-era" optional property in the DTS. This property is updated
 * by u-boot.
 * In case this property is not passed, an attempt to retrieve the CAAM
 * era via register reads will be made.
 */
static int caam_get_era(struct caam_ctrl __iomem *ctrl)
	struct device_node *caam_node;

	caam_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	ret = of_property_read_u32(caam_node, "fsl,sec-era", &prop);
	of_node_put(caam_node);

		return caam_get_era_from_hw(ctrl);
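/*
 * Illustrative device tree fragment (assumption, not taken from this file):
 * the optional property read above would typically appear on the crypto
 * node, e.g.:
 *
 *	crypto: crypto@1700000 {
 *		compatible = "fsl,sec-v4.0";
 *		fsl,sec-era = <9>;
 *		...
 *	};
 *
 * When the property is absent, of_property_read_u32() returns an error and
 * the era is derived from the CCBVID/SECVID registers instead.
 */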
/*
 * ERRATA: imx6 devices (imx6D, imx6Q, imx6DL, imx6S, imx6DP and imx6QP)
 * have an issue wherein AXI bus transactions may not occur in the correct
 * order. This isn't a problem running single descriptors, but can be if
 * running multiple concurrent descriptors. Reworking the driver to throttle
 * to single requests is impractical, thus the workaround is to limit the AXI
 * pipeline to a depth of 1 (from its default of 4) to preclude this situation
 * from occurring.
 */
static void handle_imx6_err005766(u32 *mcr)
	if (of_machine_is_compatible("fsl,imx6q") ||
	    of_machine_is_compatible("fsl,imx6dl") ||
	    of_machine_is_compatible("fsl,imx6qp"))
		clrsetbits_32(mcr, MCFGR_AXIPIPE_MASK,
			      1 << MCFGR_AXIPIPE_SHIFT);
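/*
 * Illustrative sketch (not part of the driver): clrsetbits_32() is a
 * read-modify-write helper, so the workaround above is roughly equivalent
 * to the open-coded sequence below (field width per MCFGR_AXIPIPE_MASK):
 *
 *	u32 mcfgr = rd_reg32(mcr);
 *
 *	mcfgr &= ~MCFGR_AXIPIPE_MASK;	    clear the AXI pipeline depth field
 *	mcfgr |= 1 << MCFGR_AXIPIPE_SHIFT;  and force a depth of 1
 *	wr_reg32(mcr, mcfgr);
 */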
static const struct of_device_id caam_match[] = {
		.compatible = "fsl,sec-v4.0",
		.compatible = "fsl,sec4.0",
MODULE_DEVICE_TABLE(of, caam_match);
struct caam_imx_data {
	const struct clk_bulk_data *clks;

static const struct clk_bulk_data caam_imx6_clks[] = {
	{ .id = "emi_slow" },

static const struct caam_imx_data caam_imx6_data = {
	.clks = caam_imx6_clks,
	.num_clks = ARRAY_SIZE(caam_imx6_clks),

static const struct clk_bulk_data caam_imx7_clks[] = {

static const struct caam_imx_data caam_imx7_data = {
	.clks = caam_imx7_clks,
	.num_clks = ARRAY_SIZE(caam_imx7_clks),

static const struct clk_bulk_data caam_imx6ul_clks[] = {

static const struct caam_imx_data caam_imx6ul_data = {
	.clks = caam_imx6ul_clks,
	.num_clks = ARRAY_SIZE(caam_imx6ul_clks),

static const struct soc_device_attribute caam_imx_soc_table[] = {
	{ .soc_id = "i.MX6UL", .data = &caam_imx6ul_data },
	{ .soc_id = "i.MX6*",  .data = &caam_imx6_data },
	{ .soc_id = "i.MX7*",  .data = &caam_imx7_data },
	{ .soc_id = "i.MX8MQ", .data = &caam_imx7_data },
	{ .family = "Freescale i.MX" },
static void disable_clocks(void *data)
	struct caam_drv_private *ctrlpriv = data;

	clk_bulk_disable_unprepare(ctrlpriv->num_clks, ctrlpriv->clks);

static int init_clocks(struct device *dev, const struct caam_imx_data *data)
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);

	ctrlpriv->num_clks = data->num_clks;
	ctrlpriv->clks = devm_kmemdup(dev, data->clks,
				      data->num_clks * sizeof(data->clks[0]),

	ret = devm_clk_bulk_get(dev, ctrlpriv->num_clks, ctrlpriv->clks);
			"Failed to request all necessary clocks\n");

	ret = clk_bulk_prepare_enable(ctrlpriv->num_clks, ctrlpriv->clks);
			"Failed to prepare/enable all necessary clocks\n");

	return devm_add_action_or_reset(dev, disable_clocks, ctrlpriv);
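/*
 * Note: devm_add_action_or_reset() either registers disable_clocks() to run
 * automatically when the device is unbound, or, if registration fails, runs
 * it immediately and returns the error, so init_clocks() never leaves the
 * bulk clocks enabled on a failure path.
 */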
/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
	int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
	const struct soc_device_attribute *imx_soc_match;
	struct device_node *nprop, *np;
	struct caam_ctrl __iomem *ctrl;
	struct caam_drv_private *ctrlpriv;
#ifdef CONFIG_DEBUG_FS
	struct caam_perfmon *perfmon;
	u32 scfgr, comp_params;
	int BLOCK_OFFSET = 0;

	ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(*ctrlpriv), GFP_KERNEL);

	dev_set_drvdata(dev, ctrlpriv);
	nprop = pdev->dev.of_node;

	imx_soc_match = soc_device_match(caam_imx_soc_table);
	caam_imx = (bool)imx_soc_match;

		if (!imx_soc_match->data) {
			dev_err(dev, "No clock data provided for i.MX SoC");

		ret = init_clocks(dev, imx_soc_match->data);
	/* Get configuration properties from device tree */
	/* First, get register page */
	ctrl = of_iomap(nprop, 0);
		dev_err(dev, "caam: of_iomap() failed\n");

	caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) &
				  (CSTA_PLEND | CSTA_ALT_PLEND));
	comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
	if (comp_params & CTPR_MS_PS && rd_reg32(&ctrl->mcr) & MCFGR_LONG_PTR)
		caam_ptr_sz = sizeof(u64);
		caam_ptr_sz = sizeof(u32);
	caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
	ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);

#ifdef CONFIG_CAAM_QI
	/* If (DPAA 1.x) QI present, check whether dependencies are available */
	if (ctrlpriv->qi_present && !caam_dpaa2) {
		ret = qman_is_probed();
		} else if (ret < 0) {
			dev_err(dev, "failing probe due to qman probe error\n");

		ret = qman_portals_probed();
		} else if (ret < 0) {
			dev_err(dev, "failing probe due to qman portals probe error\n");

	/* Allocating the BLOCK_OFFSET based on the supported page size on
	 * the platform
	 */
	pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;
		BLOCK_OFFSET = PG_SIZE_4K;
		BLOCK_OFFSET = PG_SIZE_64K;

	ctrlpriv->ctrl = (struct caam_ctrl __iomem __force *)ctrl;
	ctrlpriv->assure = (struct caam_assurance __iomem __force *)
			   ((__force uint8_t *)ctrl +
			    BLOCK_OFFSET * ASSURE_BLOCK_NUMBER
	ctrlpriv->deco = (struct caam_deco __iomem __force *)
			 ((__force uint8_t *)ctrl +
			  BLOCK_OFFSET * DECO_BLOCK_NUMBER
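	/*
	 * Illustrative layout note (assumption based on the arithmetic
	 * above): the controller page is followed by one page per block,
	 * so each block's registers sit at a fixed page multiple from the
	 * controller base, e.g.:
	 *
	 *	deco_base = (u8 *)ctrl + DECO_BLOCK_NUMBER * BLOCK_OFFSET;
	 *	jr0_base  = (u8 *)ctrl + (0 + JR_BLOCK_NUMBER) * BLOCK_OFFSET;
	 */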
	/* Get the IRQ of the controller (for security violations only) */
	ctrlpriv->secvio_irq = irq_of_parse_and_map(nprop, 0);

	/*
	 * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
	 * long pointers in master configuration register.
	 * In case of SoCs with Management Complex, MC f/w performs
	 * the configuration.
	 */
	np = of_find_compatible_node(NULL, NULL, "fsl,qoriq-mc");
	ctrlpriv->mc_en = !!np;

	if (!ctrlpriv->mc_en)
		clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
			      MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
			      MCFGR_WDENABLE | MCFGR_LARGE_BURST |
			      (sizeof(dma_addr_t) == sizeof(u64) ?
			       MCFGR_LONG_PTR : 0));

	handle_imx6_err005766(&ctrl->mcr);
	/*
	 * Read the Compile Time parameters and SCFGR to determine
	 * if Virtualization is enabled for this platform
	 */
	scfgr = rd_reg32(&ctrl->scfgr);

	ctrlpriv->virt_en = 0;
	if (comp_params & CTPR_MS_VIRT_EN_INCL) {
		/* VIRT_EN_INCL = 1 & VIRT_EN_POR = 1 or
		 * VIRT_EN_INCL = 1 & VIRT_EN_POR = 0 & SCFGR_VIRT_EN = 1
		 */
		if ((comp_params & CTPR_MS_VIRT_EN_POR) ||
		    (!(comp_params & CTPR_MS_VIRT_EN_POR) &&
		     (scfgr & SCFGR_VIRT_EN)))
			ctrlpriv->virt_en = 1;
		/* VIRT_EN_INCL = 0 && VIRT_EN_POR_VALUE = 1 */
		if (comp_params & CTPR_MS_VIRT_EN_POR)
			ctrlpriv->virt_en = 1;
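	/*
	 * Summary of the decision above (derived from the two branches):
	 *
	 *	VIRT_EN_INCL	VIRT_EN_POR	SCFGR_VIRT_EN	virt_en
	 *	     1		     1		      x		   1
	 *	     1		     0		      1		   1
	 *	     0		     1		      x		   1
	 *	  otherwise					   0
	 */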
	if (ctrlpriv->virt_en == 1)
		clrsetbits_32(&ctrl->jrstart, 0, JRSTART_JR0_START |
			      JRSTART_JR1_START | JRSTART_JR2_START |

	ret = dma_set_mask_and_coherent(dev, caam_get_dma_mask(dev));
		dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);

	ctrlpriv->era = caam_get_era(ctrl);
	ctrlpriv->domain = iommu_get_domain_for_dev(dev);
#ifdef CONFIG_DEBUG_FS
	/*
	 * FIXME: needs better naming distinction, as some amalgamation of
	 * "caam" and nprop->full_name. The OF name isn't distinctive,
	 * but does separate instances
	 */
	perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;

	ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
	ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
	/* Check to see if (DPAA 1.x) QI present. If so, enable */
	if (ctrlpriv->qi_present && !caam_dpaa2) {
		ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
			       ((__force uint8_t *)ctrl +
				BLOCK_OFFSET * QI_BLOCK_NUMBER
		/* This is all that's required to physically enable QI */
		wr_reg32(&ctrlpriv->qi->qi_control_lo, QICTL_DQEN);

		/* If QMAN driver is present, init CAAM-QI backend */
#ifdef CONFIG_CAAM_QI
		ret = caam_qi_init(pdev);
			dev_err(dev, "caam qi i/f init failed: %d\n", ret);

	ret = of_platform_populate(nprop, caam_match, NULL, dev);
		dev_err(dev, "JR platform devices creation error\n");
	for_each_available_child_of_node(nprop, np)
		if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
		    of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
			ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
					     ((__force uint8_t *)ctrl +
					      (ring + JR_BLOCK_NUMBER) *
			ctrlpriv->total_jobrs++;

	/* If no QI and no rings specified, quit and go home */
	if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
		dev_err(dev, "no queues configured, terminating\n");
	if (ctrlpriv->era < 10)
		rng_vid = (rd_reg32(&ctrl->perfmon.cha_id_ls) &
			   CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
		rng_vid = (rd_reg32(&ctrl->vreg.rng) & CHA_VER_VID_MASK) >>
	/*
	 * If SEC has RNG version >= 4 and RNG state handle has not been
	 * already instantiated, do RNG instantiation
	 * In case of SoCs with Management Complex, RNG is managed by MC f/w.
	 */
	if (!ctrlpriv->mc_en && rng_vid >= 4) {
		ctrlpriv->rng4_sh_init =
			rd_reg32(&ctrl->r4tst[0].rdsta);
		/*
		 * If the secure keys (TDKEK, JDKEK, TDSK), were already
		 * generated, signal this to the function that is instantiating
		 * the state handles. An error would occur if RNG4 attempts
		 * to regenerate these keys before the next POR.
		 */
		gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1;
		ctrlpriv->rng4_sh_init &= RDSTA_IFMASK;

				rd_reg32(&ctrl->r4tst[0].rdsta) &

			/*
			 * If either SH was instantiated by somebody else
			 * (e.g. u-boot) then it is assumed that the entropy
			 * parameters are properly set and thus the function
			 * setting these (kick_trng(...)) is skipped.
			 * Also, if a handle was instantiated, do not change
			 * the TRNG parameters.
			 */
			if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
					 "Entropy delay = %u\n",
				kick_trng(pdev, ent_delay);
			/*
			 * if instantiate_rng(...) fails, the loop will rerun
			 * and the kick_trng(...) function will modify the
			 * upper and lower limits of the entropy sampling
			 * interval, leading to a successful initialization of
			 * the RNG.
			 */
			ret = instantiate_rng(dev, inst_handles,
				/*
				 * if here, the loop will rerun,
				 * so don't hog the CPU
				 */
		} while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
			dev_err(dev, "failed to instantiate RNG");

		/*
		 * Set handles init'ed by this module as the complement of the
		 * already initialized ones
		 */
		ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_IFMASK;
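		/*
		 * Worked example: if an external entity (e.g. u-boot) had
		 * already instantiated SH0, rdsta reports IF0 = 1, so the
		 * loop above only instantiates SH1 and the line above
		 * records rng4_sh_init = ~0b01 & RDSTA_IFMASK = 0b10,
		 * i.e. only SH1 will be torn down in caam_remove().
		 */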
		/* Enable RDB bit so that RNG works faster */
		clrsetbits_32(&ctrl->scfgr, 0, SCFGR_RDBENABLE);

	/* NOTE: RTIC detection ought to go here, around Si time */

	caam_id = (u64)rd_reg32(&ctrl->perfmon.caam_id_ms) << 32 |
		  (u64)rd_reg32(&ctrl->perfmon.caam_id_ls);

	/* Report "alive" for developer to see */
	dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
	dev_info(dev, "job rings = %d, qi = %d\n",
		 ctrlpriv->total_jobrs, ctrlpriv->qi_present);
#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("rq_dequeued", S_IRUSR | S_IRGRP | S_IROTH,
			    ctrlpriv->ctl, &perfmon->req_dequeued,
	debugfs_create_file("ob_rq_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
			    ctrlpriv->ctl, &perfmon->ob_enc_req,
	debugfs_create_file("ib_rq_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
			    ctrlpriv->ctl, &perfmon->ib_dec_req,
	debugfs_create_file("ob_bytes_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
			    ctrlpriv->ctl, &perfmon->ob_enc_bytes,
	debugfs_create_file("ob_bytes_protected", S_IRUSR | S_IRGRP | S_IROTH,
			    ctrlpriv->ctl, &perfmon->ob_prot_bytes,
	debugfs_create_file("ib_bytes_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
			    ctrlpriv->ctl, &perfmon->ib_dec_bytes,
	debugfs_create_file("ib_bytes_validated", S_IRUSR | S_IRGRP | S_IROTH,
			    ctrlpriv->ctl, &perfmon->ib_valid_bytes,

	/* Controller level - global status values */
	debugfs_create_file("fault_addr", S_IRUSR | S_IRGRP | S_IROTH,
			    ctrlpriv->ctl, &perfmon->faultaddr,
	debugfs_create_file("fault_detail", S_IRUSR | S_IRGRP | S_IROTH,
			    ctrlpriv->ctl, &perfmon->faultdetail,
	debugfs_create_file("fault_status", S_IRUSR | S_IRGRP | S_IROTH,
			    ctrlpriv->ctl, &perfmon->status,

	/* Internal covering keys (useful in non-secure mode only) */
	ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
	ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
	debugfs_create_blob("kek", S_IRUSR | S_IRGRP | S_IROTH, ctrlpriv->ctl,
			    &ctrlpriv->ctl_kek_wrap);

	ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0];
	ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
	debugfs_create_blob("tkek", S_IRUSR | S_IRGRP | S_IROTH, ctrlpriv->ctl,
			    &ctrlpriv->ctl_tkek_wrap);

	ctrlpriv->ctl_tdsk_wrap.data = (__force void *)&ctrlpriv->ctrl->tdsk[0];
	ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
	debugfs_create_blob("tdsk", S_IRUSR | S_IRGRP | S_IROTH, ctrlpriv->ctl,
			    &ctrlpriv->ctl_tdsk_wrap);
#ifdef CONFIG_CAAM_QI
	if (ctrlpriv->qi_init)
		caam_qi_shutdown(dev);

static struct platform_driver caam_driver = {
		.of_match_table = caam_match,
	.remove = caam_remove,

module_platform_driver(caam_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM request backend");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");