2 * Copyright (C) 2012-2017 ARM Limited or its affiliates.
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, see <http://www.gnu.org/licenses/>.
17 #include <linux/kernel.h>
18 #include <linux/module.h>
20 #include <linux/crypto.h>
21 #include <crypto/algapi.h>
22 #include <crypto/aes.h>
23 #include <crypto/sha.h>
24 #include <crypto/aead.h>
25 #include <crypto/authenc.h>
26 #include <crypto/scatterwalk.h>
27 #include <crypto/internal/skcipher.h>
29 #include <linux/init.h>
30 #include <linux/moduleparam.h>
31 #include <linux/types.h>
32 #include <linux/random.h>
33 #include <linux/ioport.h>
34 #include <linux/interrupt.h>
35 #include <linux/fcntl.h>
36 #include <linux/poll.h>
37 #include <linux/proc_fs.h>
38 #include <linux/mutex.h>
39 #include <linux/sysctl.h>
41 #include <linux/cdev.h>
42 #include <linux/platform_device.h>
44 #include <linux/delay.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/dmapool.h>
47 #include <linux/list.h>
48 #include <linux/slab.h>
49 #include <linux/spinlock.h>
52 /* cache.h required for L1_CACHE_ALIGN() and cache_line_size() */
53 #include <linux/cache.h>
55 #include <linux/uaccess.h>
56 #include <linux/pagemap.h>
57 #include <linux/sched.h>
58 #include <linux/random.h>
61 #include "ssi_config.h"
62 #include "ssi_driver.h"
63 #include "ssi_request_mgr.h"
64 #include "ssi_buffer_mgr.h"
65 #include "ssi_sysfs.h"
66 #include "ssi_cipher.h"
69 #include "ssi_ivgen.h"
70 #include "ssi_sram_mgr.h"
72 #include "ssi_fips_local.h"
/*
 * dump_byte_array() - log a byte array to the debug log as hex octets.
 *
 * @name:      label printed before the bytes
 * @the_array: bytes to dump; a NULL pointer is reported via SSI_LOG_ERR
 * @size:      number of bytes to dump
 *
 * Output is built with snprintf into a line buffer and flushed with
 * SSI_LOG_DEBUG whenever the line grows past 75 characters, then once
 * more at the end for any remainder.
 *
 * NOTE(review): this extraction is missing several original lines (the
 * line_buf declaration, the early return after the NULL check, snprintf
 * error checks' surrounding braces, and closing braces) - comments below
 * describe only what the visible code demonstrably does.
 */
76 void dump_byte_array(const char *name, const uint8_t *the_array, unsigned long size)
78 int i , line_offset = 0, ret = 0;
79 const uint8_t *cur_byte;
82 if (the_array == NULL) {
83 SSI_LOG_ERR("cannot dump_byte_array - NULL pointer\n");
/* Emit the "<name>[<size>]: " prefix into the line buffer. */
87 ret = snprintf(line_buf, sizeof(line_buf), "%s[%lu]: ",
90 SSI_LOG_ERR("snprintf returned %d . aborting buffer array dump\n",ret);
/* Append each byte as "0xNN " until either the array is exhausted or
 * the line buffer is full. */
94 for (i = 0 , cur_byte = the_array;
95 (i < size) && (line_offset < sizeof(line_buf)); i++, cur_byte++) {
96 ret = snprintf(line_buf + line_offset,
97 sizeof(line_buf) - line_offset,
98 "0x%02X ", *cur_byte);
100 SSI_LOG_ERR("snprintf returned %d . aborting buffer array dump\n",ret);
104 if (line_offset > 75) { /* Cut before line end */
105 SSI_LOG_DEBUG("%s\n", line_buf);
110 if (line_offset > 0) /* Dump remaining line */
111 SSI_LOG_DEBUG("%s\n", line_buf);
/*
 * cc_isr() - CryptoCell interrupt handler (registered with IRQF_SHARED
 * in init_cc_resources()).
 *
 * Reads the interrupt cause register (IRR); an empty IRR most likely
 * means the shared line fired for another device.  Pending causes are
 * acknowledged via ICR *before* processing, then dispatched in order of
 * likelihood: request completion, TEE FIPS GPR0 (only when
 * CC_SUPPORT_FIPS), and AXI bus errors.  For completion and GPR0 the
 * cause is masked in IMR here and - per the in-line comments - unmasked
 * later by the deferred service handler.  Any leftover IRR bits are
 * logged as unknown causes.
 *
 * NOTE(review): the local irr/imr/axi_err declarations, several closing
 * braces, #endif lines and the return statement(s) are not visible in
 * this truncated extraction.
 */
115 static irqreturn_t cc_isr(int irq, void *dev_id)
117 struct ssi_drvdata *drvdata = (struct ssi_drvdata *)dev_id;
118 void __iomem *cc_base = drvdata->cc_base;
121 DECL_CYCLE_COUNT_RESOURCES;
123 /* STAT_OP_TYPE_GENERIC STAT_PHASE_0: Interrupt */
126 /* read the interrupt status */
127 irr = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRR));
128 SSI_LOG_DEBUG("Got IRR=0x%08X\n", irr);
129 if (unlikely(irr == 0)) { /* Probably shared interrupt line */
130 SSI_LOG_ERR("Got interrupt with empty IRR\n");
133 imr = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR));
135 /* clear interrupt - must be before processing events */
136 CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_ICR), irr);
139 /* Completion interrupt - most probable */
140 if (likely((irr & SSI_COMP_IRQ_MASK) != 0)) {
141 /* Mask AXI completion interrupt - will be unmasked in Deferred service handler */
142 CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR), imr | SSI_COMP_IRQ_MASK);
143 irr &= ~SSI_COMP_IRQ_MASK;
144 complete_request(drvdata);
146 #ifdef CC_SUPPORT_FIPS
147 /* TEE FIPS interrupt */
148 if (likely((irr & SSI_GPR0_IRQ_MASK) != 0)) {
149 /* Mask interrupt - will be unmasked in Deferred service handler */
150 CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR), imr | SSI_GPR0_IRQ_MASK);
151 irr &= ~SSI_GPR0_IRQ_MASK;
152 fips_handler(drvdata);
155 /* AXI error interrupt */
156 if (unlikely((irr & SSI_AXI_ERR_IRQ_MASK) != 0)) {
159 /* Read the AXI error ID */
160 axi_err = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_MON_ERR));
161 SSI_LOG_DEBUG("AXI completion error: axim_mon_err=0x%08X\n", axi_err);
163 irr &= ~SSI_AXI_ERR_IRQ_MASK;
/* Anything still set in irr was not handled above. */
166 if (unlikely(irr != 0)) {
167 SSI_LOG_DEBUG("IRR includes unknown cause bits (0x%08X)\n", irr);
171 END_CYCLE_COUNT(STAT_OP_TYPE_GENERIC, STAT_PHASE_0);
172 START_CYCLE_COUNT_AT(drvdata->isr_exit_cycles);
/*
 * init_cc_regs() - program the CryptoCell interrupt and AXI registers.
 *
 * @drvdata:  driver context holding the mapped register base
 * @is_probe: true on first-time probe; gates the informational cache-
 *            params log messages below
 *
 * Sequence visible here: unmask all AXI interrupt sources in AXIM_CFG,
 * clear any pending causes (read IRR, write it back to ICR), unmask the
 * completion / AXI-error / GPR0 causes in IMR, optionally set the IRQ
 * delay timer (only when the register offset is defined at build time),
 * and finally write SSI_CACHE_PARAMS to AXIM_CACHE_PARAMS, reading it
 * back for comparison against the expected value.
 *
 * NOTE(review): the local rc/val declarations, the timer init value
 * argument, the #endif, the return statement and closing braces are not
 * visible in this truncated extraction.
 */
177 int init_cc_regs(struct ssi_drvdata *drvdata, bool is_probe)
180 void __iomem *cc_base = drvdata->cc_base;
182 /* Unmask all AXI interrupt sources AXI_CFG1 register */
183 val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CFG));
184 CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CFG), val & ~SSI_AXI_IRQ_MASK);
185 SSI_LOG_DEBUG("AXIM_CFG=0x%08X\n", CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CFG)));
187 /* Clear all pending interrupts */
188 val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRR));
189 SSI_LOG_DEBUG("IRR=0x%08X\n", val);
190 CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_ICR), val);
192 /* Unmask relevant interrupt cause */
193 val = (~(SSI_COMP_IRQ_MASK | SSI_AXI_ERR_IRQ_MASK | SSI_GPR0_IRQ_MASK));
194 CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR), val);
196 #ifdef DX_HOST_IRQ_TIMER_INIT_VAL_REG_OFFSET
198 /* Set CC IRQ delay */
199 CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRQ_TIMER_INIT_VAL),
202 if (CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRQ_TIMER_INIT_VAL)) > 0) {
203 SSI_LOG_DEBUG("irq_delay=%d CC cycles\n",
204 CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRQ_TIMER_INIT_VAL)));
/* Program AXI cache parameters, logging old/new values at probe time. */
208 val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CACHE_PARAMS));
209 if (is_probe == true) {
210 SSI_LOG_INFO("Cache params previous: 0x%08X\n", val);
212 CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CACHE_PARAMS), SSI_CACHE_PARAMS);
213 val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CACHE_PARAMS));
214 if (is_probe == true) {
215 SSI_LOG_INFO("Cache params current: 0x%08X (expected: 0x%08X)\n", val, SSI_CACHE_PARAMS);
/*
 * init_cc_resources() - probe-time acquisition of all device resources.
 *
 * Allocates the driver context, maps the CC register space, requests the
 * IRQ, sets up DMA masks, verifies the hardware signature, and then
 * initializes every driver subsystem in dependency order: regs, sysfs
 * (when ENABLE_CC_SYSFS), SRAM manager, MLLI SRAM buffer, request
 * manager, buffer manager, power manager, FIPS, IV generator, and
 * finally the crypto algorithm registrations (ablkcipher, hash, aead -
 * hash deliberately before aead, see in-line comment).
 *
 * On any failure control reaches the shared error path at the bottom,
 * which tears everything down in reverse order and releases the IRQ /
 * mapped region.  The subsystem *_free/*_fini calls are presumably safe
 * to invoke on not-yet-initialized state - TODO confirm against their
 * implementations.
 *
 * NOTE(review): the local rc declaration, several error-code
 * assignments, goto labels, the success return, #endif lines and many
 * closing braces are not visible in this truncated extraction.
 */
221 static int init_cc_resources(struct platform_device *plat_dev)
223 struct resource *req_mem_cc_regs = NULL;
224 void __iomem *cc_base = NULL;
225 bool irq_registered = false;
226 struct ssi_drvdata *new_drvdata = kzalloc(sizeof(struct ssi_drvdata), GFP_KERNEL);
227 uint32_t signature_val;
230 if (unlikely(new_drvdata == NULL)) {
231 SSI_LOG_ERR("Failed to allocate drvdata");
233 goto init_cc_res_err;
236 /*Initialize inflight counter used in dx_ablkcipher_secure_complete used for count of BYSPASS blocks operations*/
237 new_drvdata->inflight_counter = 0;
239 dev_set_drvdata(&plat_dev->dev, new_drvdata);
240 /* Get device resources */
241 /* First CC registers space */
242 new_drvdata->res_mem = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
243 if (unlikely(new_drvdata->res_mem == NULL)) {
244 SSI_LOG_ERR("Failed getting IO memory resource\n");
246 goto init_cc_res_err;
248 SSI_LOG_DEBUG("Got MEM resource (%s): start=0x%llX end=0x%llX\n",
249 new_drvdata->res_mem->name,
250 (unsigned long long)new_drvdata->res_mem->start,
251 (unsigned long long)new_drvdata->res_mem->end);
252 /* Map registers space */
253 req_mem_cc_regs = request_mem_region(new_drvdata->res_mem->start, resource_size(new_drvdata->res_mem), "arm_cc7x_regs");
254 if (unlikely(req_mem_cc_regs == NULL)) {
255 SSI_LOG_ERR("Couldn't allocate registers memory region at "
256 "0x%08X\n", (unsigned int)new_drvdata->res_mem->start);
258 goto init_cc_res_err;
260 cc_base = ioremap(new_drvdata->res_mem->start, resource_size(new_drvdata->res_mem));
261 if (unlikely(cc_base == NULL)) {
262 SSI_LOG_ERR("ioremap[CC](0x%08X,0x%08X) failed\n",
263 (unsigned int)new_drvdata->res_mem->start, (unsigned int)resource_size(new_drvdata->res_mem));
265 goto init_cc_res_err;
267 SSI_LOG_DEBUG("CC registers mapped from %pa to 0x%p\n", &new_drvdata->res_mem->start, cc_base);
268 new_drvdata->cc_base = cc_base;
/* Then the interrupt line; the handler is shared-capable (cc_isr). */
272 new_drvdata->res_irq = platform_get_resource(plat_dev, IORESOURCE_IRQ, 0);
273 if (unlikely(new_drvdata->res_irq == NULL)) {
274 SSI_LOG_ERR("Failed getting IRQ resource\n");
276 goto init_cc_res_err;
278 rc = request_irq(new_drvdata->res_irq->start, cc_isr,
279 IRQF_SHARED, "arm_cc7x", new_drvdata);
280 if (unlikely(rc != 0)) {
281 SSI_LOG_ERR("Could not register to interrupt %llu\n",
282 (unsigned long long)new_drvdata->res_irq->start);
283 goto init_cc_res_err;
285 init_completion(&new_drvdata->icache_setup_completion);
287 irq_registered = true;
288 SSI_LOG_DEBUG("Registered to IRQ (%s) %llu\n",
289 new_drvdata->res_irq->name,
290 (unsigned long long)new_drvdata->res_irq->start);
292 new_drvdata->plat_dev = plat_dev;
/* Fall back to the coherent mask when the platform gave us no DMA mask. */
294 if(new_drvdata->plat_dev->dev.dma_mask == NULL)
296 new_drvdata->plat_dev->dev.dma_mask = & new_drvdata->plat_dev->dev.coherent_dma_mask;
298 if (!new_drvdata->plat_dev->dev.coherent_dma_mask)
300 new_drvdata->plat_dev->dev.coherent_dma_mask = DMA_BIT_MASK(DMA_BIT_MASK_LEN);
303 /* Verify correct mapping */
304 signature_val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_SIGNATURE));
305 if (signature_val != DX_DEV_SIGNATURE) {
306 SSI_LOG_ERR("Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
307 signature_val, (uint32_t)DX_DEV_SIGNATURE);
309 goto init_cc_res_err;
311 SSI_LOG_DEBUG("CC SIGNATURE=0x%08X\n", signature_val);
313 /* Display HW versions */
314 SSI_LOG(KERN_INFO, "ARM CryptoCell %s Driver: HW version 0x%08X, Driver version %s\n", SSI_DEV_NAME_STR,
315 CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_VERSION)), DRV_MODULE_VERSION);
317 rc = init_cc_regs(new_drvdata, true);
318 if (unlikely(rc != 0)) {
319 SSI_LOG_ERR("init_cc_regs failed\n");
320 goto init_cc_res_err;
323 #ifdef ENABLE_CC_SYSFS
324 rc = ssi_sysfs_init(&(plat_dev->dev.kobj), new_drvdata);
325 if (unlikely(rc != 0)) {
326 SSI_LOG_ERR("init_stat_db failed\n");
327 goto init_cc_res_err;
331 rc = ssi_sram_mgr_init(new_drvdata);
332 if (unlikely(rc != 0)) {
333 SSI_LOG_ERR("ssi_sram_mgr_init failed\n");
334 goto init_cc_res_err;
337 new_drvdata->mlli_sram_addr =
338 ssi_sram_mgr_alloc(new_drvdata, MAX_MLLI_BUFF_SIZE);
339 if (unlikely(new_drvdata->mlli_sram_addr == NULL_SRAM_ADDR)) {
340 SSI_LOG_ERR("Failed to alloc MLLI Sram buffer\n");
342 goto init_cc_res_err;
345 rc = request_mgr_init(new_drvdata);
346 if (unlikely(rc != 0)) {
347 SSI_LOG_ERR("request_mgr_init failed\n");
348 goto init_cc_res_err;
351 rc = ssi_buffer_mgr_init(new_drvdata);
352 if (unlikely(rc != 0)) {
353 SSI_LOG_ERR("buffer_mgr_init failed\n");
354 goto init_cc_res_err;
357 rc = ssi_power_mgr_init(new_drvdata);
358 if (unlikely(rc != 0)) {
359 SSI_LOG_ERR("ssi_power_mgr_init failed\n");
360 goto init_cc_res_err;
363 rc = ssi_fips_init(new_drvdata);
364 if (unlikely(rc != 0)) {
365 SSI_LOG_ERR("SSI_FIPS_INIT failed 0x%x\n", rc);
366 goto init_cc_res_err;
369 rc = ssi_ivgen_init(new_drvdata);
370 if (unlikely(rc != 0)) {
371 SSI_LOG_ERR("ssi_ivgen_init failed\n");
372 goto init_cc_res_err;
375 /* Allocate crypto algs */
376 rc = ssi_ablkcipher_alloc(new_drvdata);
377 if (unlikely(rc != 0)) {
378 SSI_LOG_ERR("ssi_ablkcipher_alloc failed\n");
379 goto init_cc_res_err;
382 /* hash must be allocated before aead since hash exports APIs */
383 rc = ssi_hash_alloc(new_drvdata);
384 if (unlikely(rc != 0)) {
385 SSI_LOG_ERR("ssi_hash_alloc failed\n");
386 goto init_cc_res_err;
389 rc = ssi_aead_alloc(new_drvdata);
390 if (unlikely(rc != 0)) {
391 SSI_LOG_ERR("ssi_aead_alloc failed\n");
392 goto init_cc_res_err;
/* Shared error path: tear down subsystems in reverse init order, then
 * release IRQ, unmap/release the register region, and clear drvdata. */
398 SSI_LOG_ERR("Freeing CC HW resources!\n");
400 if (new_drvdata != NULL) {
401 ssi_aead_free(new_drvdata);
402 ssi_hash_free(new_drvdata);
403 ssi_ablkcipher_free(new_drvdata);
404 ssi_ivgen_fini(new_drvdata);
405 ssi_power_mgr_fini(new_drvdata);
406 ssi_buffer_mgr_fini(new_drvdata);
407 request_mgr_fini(new_drvdata);
408 ssi_sram_mgr_fini(new_drvdata);
409 ssi_fips_fini(new_drvdata);
410 #ifdef ENABLE_CC_SYSFS
414 if (req_mem_cc_regs != NULL) {
415 if (irq_registered) {
416 free_irq(new_drvdata->res_irq->start, new_drvdata);
417 new_drvdata->res_irq = NULL;
419 new_drvdata->cc_base = NULL;
421 release_mem_region(new_drvdata->res_mem->start,
422 resource_size(new_drvdata->res_mem));
423 new_drvdata->res_mem = NULL;
426 dev_set_drvdata(&plat_dev->dev, NULL);
/*
 * fini_cc_regs() - quiesce the device by masking every interrupt cause
 * (all bits set in HOST_IMR).  Counterpart of init_cc_regs(); called
 * from cleanup_cc_resources().
 */
432 void fini_cc_regs(struct ssi_drvdata *drvdata)
434 /* Mask all interrupts */
435 WRITE_REGISTER(drvdata->cc_base +
436 CC_REG_OFFSET(HOST_RGF, HOST_IMR), 0xFFFFFFFF);
/*
 * cleanup_cc_resources() - remove-time teardown, mirroring
 * init_cc_resources() in reverse order: crypto algs, ivgen, power,
 * buffer, request and SRAM managers, FIPS, (sysfs when ENABLE_CC_SYSFS),
 * then interrupts are masked, the IRQ freed, registers unmapped, the
 * memory region released, and drvdata cleared.
 *
 * NOTE(review): the sysfs fini call under the #ifdef, the IMR mask value
 * argument, the #endif and closing braces are not visible in this
 * truncated extraction.
 */
440 static void cleanup_cc_resources(struct platform_device *plat_dev)
442 struct ssi_drvdata *drvdata =
443 (struct ssi_drvdata *)dev_get_drvdata(&plat_dev->dev);
445 ssi_aead_free(drvdata);
446 ssi_hash_free(drvdata);
447 ssi_ablkcipher_free(drvdata);
448 ssi_ivgen_fini(drvdata);
449 ssi_power_mgr_fini(drvdata);
450 ssi_buffer_mgr_fini(drvdata);
451 request_mgr_fini(drvdata);
452 ssi_sram_mgr_fini(drvdata);
453 ssi_fips_fini(drvdata);
454 #ifdef ENABLE_CC_SYSFS
458 /* Mask all interrupts */
459 WRITE_REGISTER(drvdata->cc_base + CC_REG_OFFSET(HOST_RGF, HOST_IMR),
461 free_irq(drvdata->res_irq->start, drvdata);
462 drvdata->res_irq = NULL;
464 fini_cc_regs(drvdata);
466 if (drvdata->cc_base != NULL) {
467 iounmap(drvdata->cc_base);
468 release_mem_region(drvdata->res_mem->start,
469 resource_size(drvdata->res_mem));
470 drvdata->cc_base = NULL;
471 drvdata->res_mem = NULL;
475 dev_set_drvdata(&plat_dev->dev, NULL);
/*
 * cc7x_probe() - platform driver probe entry point.
 *
 * On ARM debug builds (CONFIG_ARM && CC_DEBUG) it first reads CP15
 * registers to log the hardware L1 cacheline size against the Kconfig
 * L1_CACHE_BYTES, and dumps the Main ID register (MIDR) fields.  The
 * real work is delegated to init_cc_resources().
 *
 * NOTE(review): the local rc declaration, the #endif, the error check
 * after init_cc_resources() and the return statement are not visible in
 * this truncated extraction.
 */
478 static int cc7x_probe(struct platform_device *plat_dev)
481 #if defined(CONFIG_ARM) && defined(CC_DEBUG)
482 uint32_t ctr, cacheline_size;
/* CP15 Cache Type Register: bits [19:16] encode the D-cache line size. */
484 asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
485 cacheline_size = 4 << ((ctr >> 16) & 0xf);
486 SSI_LOG_DEBUG("CP15(L1_CACHE_BYTES) = %u , Kconfig(L1_CACHE_BYTES) = %u\n",
487 cacheline_size, L1_CACHE_BYTES);
489 asm volatile("mrc p15, 0, %0, c0, c0, 0" : "=r" (ctr));
490 SSI_LOG_DEBUG("Main ID register (MIDR): Implementer 0x%02X, Arch 0x%01X,"
491 " Part 0x%03X, Rev r%dp%d\n",
492 (ctr>>24), (ctr>>16)&0xF, (ctr>>4)&0xFFF, (ctr>>20)&0xF, ctr&0xF);
495 /* Map registers space */
496 rc = init_cc_resources(plat_dev);
500 SSI_LOG(KERN_INFO, "ARM cc7x_ree device initialized\n");
/*
 * cc7x_remove() - platform driver remove entry point; releases all
 * device resources via cleanup_cc_resources() and, when cycle counting
 * is compiled in (ENABLE_CYCLE_COUNT), dumps the statistics database.
 *
 * NOTE(review): the #endif and return statement are not visible in this
 * truncated extraction.
 */
505 static int cc7x_remove(struct platform_device *plat_dev)
507 SSI_LOG_DEBUG("Releasing cc7x resources...\n");
509 cleanup_cc_resources(plat_dev);
511 SSI_LOG(KERN_INFO, "ARM cc7x_ree device terminated\n");
512 #ifdef ENABLE_CYCLE_COUNT
513 display_all_stat_db();
/*
 * Runtime power-management hooks: when CONFIG_PM_RUNTIME or
 * CONFIG_PM_SLEEP is enabled, wire ssi_power_mgr_runtime_suspend/resume
 * into a dev_pm_ops and expose it to the driver struct below via
 * DX_DRIVER_RUNTIME_PM; otherwise the driver registers no PM ops (NULL).
 * NOTE(review): the closing "};" and #endif lines are not visible in
 * this truncated extraction.
 */
518 #if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP)
519 static struct dev_pm_ops arm_cc7x_driver_pm = {
520 SET_RUNTIME_PM_OPS(ssi_power_mgr_runtime_suspend, ssi_power_mgr_runtime_resume, NULL)
524 #if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP)
525 #define DX_DRIVER_RUNTIME_PM (&arm_cc7x_driver_pm)
527 #define DX_DRIVER_RUNTIME_PM NULL
/*
 * Device-tree match table (binds on "arm,cryptocell-712-ree") and the
 * platform_driver registration tying together the probe/remove handlers
 * and the PM ops selected above.  module_platform_driver() generates
 * the module init/exit boilerplate.
 * NOTE(review): the table terminator, .driver sub-struct lines and
 * closing braces are not visible in this truncated extraction.
 */
532 static const struct of_device_id arm_cc7x_dev_of_match[] = {
533 {.compatible = "arm,cryptocell-712-ree"},
536 MODULE_DEVICE_TABLE(of, arm_cc7x_dev_of_match);
539 static struct platform_driver cc7x_driver = {
543 .of_match_table = arm_cc7x_dev_of_match,
545 .pm = DX_DRIVER_RUNTIME_PM,
548 .remove = cc7x_remove,
550 module_platform_driver(cc7x_driver);
552 /* Module description */
/* Standard kernel module metadata; DRV_MODULE_VERSION comes from the
 * driver headers included above. */
553 MODULE_DESCRIPTION("ARM TrustZone CryptoCell REE Driver");
554 MODULE_VERSION(DRV_MODULE_VERSION);
555 MODULE_AUTHOR("ARM");
556 MODULE_LICENSE("GPL v2");