// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>

#include "core.h"
#include "cipher.h"
#include "sha.h"

#define QCE_MAJOR_VERSION5	0x05
#define QCE_QUEUE_LENGTH	1

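/*
 * Algorithm families served by the engine; each entry provides
 * register/unregister hooks and an async request handler.
 */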
static const struct qce_algo_ops *qce_ops[] = {
	&skcipher_ops,	/* cipher ops; exact symbol varies by kernel version */
	&ahash_ops,
};

static void qce_unregister_algs(struct qce_device *qce)
{
	const struct qce_algo_ops *ops;
	int i;

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		ops->unregister_algs(qce);
	}
}

static int qce_register_algs(struct qce_device *qce)
{
	const struct qce_algo_ops *ops;
	int i, ret = -ENODEV;

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		ret = ops->register_algs(qce);
		if (ret)
			break;
	}

	return ret;
}

static int qce_handle_request(struct crypto_async_request *async_req)
{
	int ret = -EINVAL, i;
	const struct qce_algo_ops *ops;
	u32 type = crypto_tfm_alg_type(async_req->tfm);

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		if (type != ops->type)
			continue;
		ret = ops->async_req_handle(async_req);
		break;
	}

	return ret;
}

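/*
 * Enqueue a new request (if any) and, when the engine is idle, dequeue the
 * next request and dispatch it to the matching algorithm handler.
 */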
static int qce_handle_queue(struct qce_device *qce,
			    struct crypto_async_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	int ret = 0, err;

	spin_lock_irqsave(&qce->lock, flags);

	if (req)
		ret = crypto_enqueue_request(&qce->queue, req);

	/* busy, do not dequeue request */
	if (qce->req) {
		spin_unlock_irqrestore(&qce->lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&qce->queue);
	async_req = crypto_dequeue_request(&qce->queue);
	if (async_req)
		qce->req = async_req;

	spin_unlock_irqrestore(&qce->lock, flags);

	if (!async_req)
		return ret;

	if (backlog) {
		spin_lock_bh(&qce->lock);
		backlog->complete(backlog, -EINPROGRESS);
		spin_unlock_bh(&qce->lock);
	}

	err = qce_handle_request(async_req);
	if (err) {
		qce->result = err;
		tasklet_schedule(&qce->done_tasklet);
	}

	return ret;
}

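/* Completion tasklet: report the finished request and kick the queue again. */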
static void qce_tasklet_req_done(unsigned long data)
{
	struct qce_device *qce = (struct qce_device *)data;
	struct crypto_async_request *req;
	unsigned long flags;

	spin_lock_irqsave(&qce->lock, flags);
	req = qce->req;
	qce->req = NULL;
	spin_unlock_irqrestore(&qce->lock, flags);

	if (req)
		req->complete(req, qce->result);

	qce_handle_queue(qce, NULL);
}

static int qce_async_request_enqueue(struct qce_device *qce,
				     struct crypto_async_request *req)
{
	return qce_handle_queue(qce, req);
}

static void qce_async_request_done(struct qce_device *qce, int ret)
{
	qce->result = ret;
	tasklet_schedule(&qce->done_tasklet);
}

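/* Read the hardware version and accept only supported v5 revisions. */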
static int qce_check_version(struct qce_device *qce)
{
	u32 major, minor, step;

	qce_get_version(qce, &major, &minor, &step);

	/*
	 * the driver does not support v5 with minor 0 because it has special
	 * alignment requirements.
	 */
	if (major != QCE_MAJOR_VERSION5 || minor == 0)
		return -ENODEV;

	qce->burst_size = QCE_BAM_BURST_SIZE;
	qce->pipe_pair_id = 1;

	dev_dbg(qce->dev, "Crypto device found, version %d.%d.%d\n",
		major, minor, step);

	return 0;
}

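/*
 * Probe: map registers, enable clocks, set up DMA, verify the hardware
 * version, then register the crypto algorithms.
 */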
static int qce_crypto_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct qce_device *qce;
	int ret;

	qce = devm_kzalloc(dev, sizeof(*qce), GFP_KERNEL);
	if (!qce)
		return -ENOMEM;

	qce->dev = dev;
	platform_set_drvdata(pdev, qce);

	qce->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(qce->base))
		return PTR_ERR(qce->base);

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret < 0)
		return ret;

	qce->core = devm_clk_get(qce->dev, "core");
	if (IS_ERR(qce->core))
		return PTR_ERR(qce->core);

	qce->iface = devm_clk_get(qce->dev, "iface");
	if (IS_ERR(qce->iface))
		return PTR_ERR(qce->iface);

	qce->bus = devm_clk_get(qce->dev, "bus");
	if (IS_ERR(qce->bus))
		return PTR_ERR(qce->bus);

	ret = clk_prepare_enable(qce->core);
	if (ret)
		return ret;

	ret = clk_prepare_enable(qce->iface);
	if (ret)
		goto err_clks_core;

	ret = clk_prepare_enable(qce->bus);
	if (ret)
		goto err_clks_iface;

	ret = qce_dma_request(qce->dev, &qce->dma);
	if (ret)
		goto err_clks;

	ret = qce_check_version(qce);
	if (ret)
		goto err_clks;

	spin_lock_init(&qce->lock);
	tasklet_init(&qce->done_tasklet, qce_tasklet_req_done,
		     (unsigned long)qce);
	crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH);

	qce->async_req_enqueue = qce_async_request_enqueue;
	qce->async_req_done = qce_async_request_done;

	ret = qce_register_algs(qce);
	if (ret)
		goto err_dma;

	return 0;

err_dma:
	qce_dma_release(&qce->dma);
err_clks:
	clk_disable_unprepare(qce->bus);
err_clks_iface:
	clk_disable_unprepare(qce->iface);
err_clks_core:
	clk_disable_unprepare(qce->core);
	return ret;
}

static int qce_crypto_remove(struct platform_device *pdev)
{
	struct qce_device *qce = platform_get_drvdata(pdev);

	tasklet_kill(&qce->done_tasklet);
	qce_unregister_algs(qce);
	qce_dma_release(&qce->dma);
	clk_disable_unprepare(qce->bus);
	clk_disable_unprepare(qce->iface);
	clk_disable_unprepare(qce->core);
	return 0;
}

static const struct of_device_id qce_crypto_of_match[] = {
	{ .compatible = "qcom,crypto-v5.1", },
	{}
};
MODULE_DEVICE_TABLE(of, qce_crypto_of_match);

static struct platform_driver qce_crypto_driver = {
	.probe = qce_crypto_probe,
	.remove = qce_crypto_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = qce_crypto_of_match,
	},
};
module_platform_driver(qce_crypto_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm crypto engine driver");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_AUTHOR("The Linux Foundation");