// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
// Copyright (c) 2018, Linaro Limited

#include <linux/completion.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of.h>
#include <linux/sort.h>
#include <linux/of_platform.h>
#include <linux/rpmsg.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <uapi/misc/fastrpc.h>

#define ADSP_DOMAIN_ID (0)
#define MDSP_DOMAIN_ID (1)
#define SDSP_DOMAIN_ID (2)
#define CDSP_DOMAIN_ID (3)
#define FASTRPC_DEV_MAX		4 /* adsp, mdsp, sdsp, cdsp */
#define FASTRPC_MAX_SESSIONS	9 /* 8 compute, 1 cpz */
#define FASTRPC_ALIGN		128
#define FASTRPC_MAX_FDLIST	16
#define FASTRPC_MAX_CRCLIST	64
#define FASTRPC_PHYS(p)		((p) & 0xffffffff)
#define FASTRPC_CTX_MAX		(256)
#define FASTRPC_INIT_HANDLE	1
#define FASTRPC_CTXID_MASK	(0xFF0)
#define INIT_FILELEN_MAX	(64 * 1024 * 1024)
#define FASTRPC_DEVICE_NAME	"fastrpc"

/* Retrieves the number of input buffers from the scalars parameter */
#define REMOTE_SCALARS_INBUFS(sc)	(((sc) >> 16) & 0x0ff)

/* Retrieves the number of output buffers from the scalars parameter */
#define REMOTE_SCALARS_OUTBUFS(sc)	(((sc) >> 8) & 0x0ff)

/* Retrieves the number of input handles from the scalars parameter */
#define REMOTE_SCALARS_INHANDLES(sc)	(((sc) >> 4) & 0x0f)

/* Retrieves the number of output handles from the scalars parameter */
#define REMOTE_SCALARS_OUTHANDLES(sc)	((sc) & 0x0f)

#define REMOTE_SCALARS_LENGTH(sc)	(REMOTE_SCALARS_INBUFS(sc) +   \
					 REMOTE_SCALARS_OUTBUFS(sc) +  \
					 REMOTE_SCALARS_INHANDLES(sc) + \
					 REMOTE_SCALARS_OUTHANDLES(sc))
#define FASTRPC_BUILD_SCALARS(attr, method, in, out, oin, oout)  \
				(((attr & 0x07) << 29) |	\
				((method & 0x1f) << 24) |	\
				((in & 0xff) << 16) |		\
				((out & 0xff) << 8) |		\
				((oin & 0x0f) << 4) |		\
				(oout & 0x0f))

#define FASTRPC_SCALARS(method, in, out) \
		FASTRPC_BUILD_SCALARS(0, method, in, out, 0, 0)

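/*
 * Worked example (illustrative, not part of the original sources):
 * FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0) expands to
 * FASTRPC_BUILD_SCALARS(0, 6, 4, 0, 0, 0), i.e.
 * (0 << 29) | (6 << 24) | (4 << 16) | (0 << 8) | (0 << 4) | 0 = 0x06040000.
 * REMOTE_SCALARS_INBUFS() then recovers 4, and REMOTE_SCALARS_LENGTH()
 * yields 4 + 0 + 0 + 0 = 4 total arguments.
 */
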
#define FASTRPC_CREATE_PROCESS_NARGS	6
/* Remote Method id table */
#define FASTRPC_RMID_INIT_ATTACH	0
#define FASTRPC_RMID_INIT_RELEASE	1
#define FASTRPC_RMID_INIT_CREATE	6
#define FASTRPC_RMID_INIT_CREATE_ATTR	7
#define FASTRPC_RMID_INIT_CREATE_STATIC	8

#define miscdev_to_cctx(d) container_of(d, struct fastrpc_channel_ctx, miscdev)

static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp",
						"sdsp", "cdsp" };
struct fastrpc_phy_page {
	u64 addr;		/* physical address */
	u64 size;		/* size of contiguous region */
};

struct fastrpc_invoke_buf {
	u32 num;		/* number of contiguous regions */
	u32 pgidx;		/* index to start of contiguous region */
};

struct fastrpc_remote_arg {
	u64 pv;			/* buffer pointer */
	u64 len;		/* length of buffer */
};

struct fastrpc_msg {
	int pid;		/* process group id */
	int tid;		/* thread id */
	u64 ctx;		/* invoke caller context */
	u32 handle;		/* handle to invoke */
	u32 sc;			/* scalars structure describing the data */
	u64 addr;		/* physical address */
	u64 size;		/* size of contiguous region */
};

struct fastrpc_invoke_rsp {
	u64 ctx;		/* invoke caller context */
	int retval;		/* invoke return value */
};

struct fastrpc_buf_overlap {
	u64 start;
	u64 end;
	int raix;
	u64 mstart;
	u64 mend;
	u64 offset;
};

struct fastrpc_buf {
	struct fastrpc_user *fl;
	struct dma_buf *dmabuf;
	struct device *dev;
	void *virt;
	u64 phys;
	u64 size;
	/* Lock for dma buf attachments */
	struct mutex lock;
	struct list_head attachments;
};

struct fastrpc_dma_buf_attachment {
	struct device *dev;
	struct sg_table sgt;
	struct list_head node;
};

struct fastrpc_map {
	struct list_head node;
	struct fastrpc_user *fl;
	int fd;
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	u64 phys;
	u64 size;
	void *va;
	u64 len;
	struct kref refcount;
};

struct fastrpc_invoke_ctx {
	int nscalars;
	int nbufs;
	int retval;
	int pid;
	int tgid;
	u32 sc;
	u32 *crc;
	u64 ctxid;
	u64 msg_sz;
	struct kref refcount;
	struct list_head node; /* list of ctxs */
	struct completion work;
	struct work_struct put_work;
	struct fastrpc_msg msg;
	struct fastrpc_user *fl;
	struct fastrpc_remote_arg *rpra;
	struct fastrpc_map **maps;
	struct fastrpc_buf *buf;
	struct fastrpc_invoke_args *args;
	struct fastrpc_buf_overlap *olaps;
	struct fastrpc_channel_ctx *cctx;
};

struct fastrpc_session_ctx {
	struct device *dev;
	int sid;
	bool used;
	bool valid;
};

struct fastrpc_channel_ctx {
	int domain_id;
	int sesscount;
	struct rpmsg_device *rpdev;
	struct fastrpc_session_ctx session[FASTRPC_MAX_SESSIONS];
	spinlock_t lock;
	struct idr ctx_idr;
	struct list_head users;
	struct miscdevice miscdev;
	struct kref refcount;
};

struct fastrpc_user {
	struct list_head user;
	struct list_head maps;
	struct list_head pending;

	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_session_ctx *sctx;
	struct fastrpc_buf *init_mem;

	int tgid;
	int pd;
	/* Lock for lists */
	spinlock_t lock;
	/* lock for allocations */
	struct mutex mutex;
};

static void fastrpc_free_map(struct kref *ref)
{
	struct fastrpc_map *map;

	map = container_of(ref, struct fastrpc_map, refcount);

	if (map->table) {
		dma_buf_unmap_attachment(map->attach, map->table,
					 DMA_BIDIRECTIONAL);
		dma_buf_detach(map->buf, map->attach);
		dma_buf_put(map->buf);
	}

	kfree(map);
}

static void fastrpc_map_put(struct fastrpc_map *map)
{
	if (map)
		kref_put(&map->refcount, fastrpc_free_map);
}

static void fastrpc_map_get(struct fastrpc_map *map)
{
	if (map)
		kref_get(&map->refcount);
}

static int fastrpc_map_find(struct fastrpc_user *fl, int fd,
			    struct fastrpc_map **ppmap)
{
	struct fastrpc_map *map = NULL;

	mutex_lock(&fl->mutex);
	list_for_each_entry(map, &fl->maps, node) {
		if (map->fd == fd) {
			fastrpc_map_get(map);
			*ppmap = map;
			mutex_unlock(&fl->mutex);
			return 0;
		}
	}
	mutex_unlock(&fl->mutex);

	return -ENOENT;
}

static void fastrpc_buf_free(struct fastrpc_buf *buf)
{
	dma_free_coherent(buf->dev, buf->size, buf->virt,
			  FASTRPC_PHYS(buf->phys));
	kfree(buf);
}

static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
			     u64 size, struct fastrpc_buf **obuf)
{
	struct fastrpc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	INIT_LIST_HEAD(&buf->attachments);
	mutex_init(&buf->lock);

	buf->fl = fl;
	buf->virt = NULL;
	buf->phys = 0;
	buf->size = size;
	buf->dev = dev;

	buf->virt = dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys,
				       GFP_KERNEL);
	if (!buf->virt) {
		mutex_destroy(&buf->lock);
		kfree(buf);
		return -ENOMEM;
	}

	/* Carry the session ID in the upper bits of the device address */
	if (fl->sctx && fl->sctx->sid)
		buf->phys += ((u64)fl->sctx->sid << 32);

	*obuf = buf;

	return 0;
}

static void fastrpc_channel_ctx_free(struct kref *ref)
{
	struct fastrpc_channel_ctx *cctx;

	cctx = container_of(ref, struct fastrpc_channel_ctx, refcount);

	kfree(cctx);
}

static void fastrpc_channel_ctx_get(struct fastrpc_channel_ctx *cctx)
{
	kref_get(&cctx->refcount);
}

static void fastrpc_channel_ctx_put(struct fastrpc_channel_ctx *cctx)
{
	kref_put(&cctx->refcount, fastrpc_channel_ctx_free);
}

static void fastrpc_context_free(struct kref *ref)
{
	struct fastrpc_invoke_ctx *ctx;
	struct fastrpc_channel_ctx *cctx;
	unsigned long flags;
	int i;

	ctx = container_of(ref, struct fastrpc_invoke_ctx, refcount);
	cctx = ctx->cctx;

	for (i = 0; i < ctx->nscalars; i++)
		fastrpc_map_put(ctx->maps[i]);

	if (ctx->buf)
		fastrpc_buf_free(ctx->buf);

	spin_lock_irqsave(&cctx->lock, flags);
	idr_remove(&cctx->ctx_idr, ctx->ctxid >> 4);
	spin_unlock_irqrestore(&cctx->lock, flags);

	kfree(ctx->maps);
	kfree(ctx->olaps);
	kfree(ctx);

	fastrpc_channel_ctx_put(cctx);
}

static void fastrpc_context_get(struct fastrpc_invoke_ctx *ctx)
{
	kref_get(&ctx->refcount);
}

static void fastrpc_context_put(struct fastrpc_invoke_ctx *ctx)
{
	kref_put(&ctx->refcount, fastrpc_context_free);
}

static void fastrpc_context_put_wq(struct work_struct *work)
{
	struct fastrpc_invoke_ctx *ctx =
			container_of(work, struct fastrpc_invoke_ctx, put_work);

	fastrpc_context_put(ctx);
}

#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
static int olaps_cmp(const void *a, const void *b)
{
	struct fastrpc_buf_overlap *pa = (struct fastrpc_buf_overlap *)a;
	struct fastrpc_buf_overlap *pb = (struct fastrpc_buf_overlap *)b;
	/* sort with lowest starting buffer first */
	int st = CMP(pa->start, pb->start);
	/* sort with highest ending buffer first */
	int ed = CMP(pb->end, pa->end);

	return st == 0 ? ed : st;
}

static void fastrpc_get_buff_overlaps(struct fastrpc_invoke_ctx *ctx)
{
	u64 max_end = 0;
	int i;

	for (i = 0; i < ctx->nbufs; ++i) {
		ctx->olaps[i].start = ctx->args[i].ptr;
		ctx->olaps[i].end = ctx->olaps[i].start + ctx->args[i].length;
		ctx->olaps[i].raix = i;
	}

	sort(ctx->olaps, ctx->nbufs, sizeof(*ctx->olaps), olaps_cmp, NULL);

	for (i = 0; i < ctx->nbufs; ++i) {
		/* Falling inside previous range */
		if (ctx->olaps[i].start < max_end) {
			ctx->olaps[i].mstart = max_end;
			ctx->olaps[i].mend = ctx->olaps[i].end;
			ctx->olaps[i].offset = max_end - ctx->olaps[i].start;

			if (ctx->olaps[i].end > max_end) {
				max_end = ctx->olaps[i].end;
			} else {
				ctx->olaps[i].mend = 0;
				ctx->olaps[i].mstart = 0;
			}

		} else {
			ctx->olaps[i].mend = ctx->olaps[i].end;
			ctx->olaps[i].mstart = ctx->olaps[i].start;
			ctx->olaps[i].offset = 0;
			max_end = ctx->olaps[i].end;
		}
	}
}

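/*
 * Illustrative example (addresses are hypothetical): for two input
 * buffers A = [0x1000, 0x1800) and B = [0x1400, 0x2000), the sort above
 * orders A first. B then starts below max_end (0x1800), so it is marked
 * with mstart = 0x1800, mend = 0x2000 and offset = 0x400: only the 0x800
 * bytes unique to B are counted in the payload, while its first 0x400
 * bytes share the space already reserved for A.
 */
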
static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
			struct fastrpc_user *user, u32 kernel, u32 sc,
			struct fastrpc_invoke_args *args)
{
	struct fastrpc_channel_ctx *cctx = user->cctx;
	struct fastrpc_invoke_ctx *ctx = NULL;
	unsigned long flags;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ctx->node);
	ctx->fl = user;
	ctx->nscalars = REMOTE_SCALARS_LENGTH(sc);
	ctx->nbufs = REMOTE_SCALARS_INBUFS(sc) +
		     REMOTE_SCALARS_OUTBUFS(sc);

	if (ctx->nscalars) {
		ctx->maps = kcalloc(ctx->nscalars,
				    sizeof(*ctx->maps), GFP_KERNEL);
		if (!ctx->maps) {
			kfree(ctx);
			return ERR_PTR(-ENOMEM);
		}
		ctx->olaps = kcalloc(ctx->nscalars,
				     sizeof(*ctx->olaps), GFP_KERNEL);
		if (!ctx->olaps) {
			kfree(ctx->maps);
			kfree(ctx);
			return ERR_PTR(-ENOMEM);
		}
		ctx->args = args;
		fastrpc_get_buff_overlaps(ctx);
	}

	/* Released in fastrpc_context_put() */
	fastrpc_channel_ctx_get(cctx);

	ctx->sc = sc;
	ctx->retval = -1;
	ctx->pid = current->pid;
	ctx->tgid = user->tgid;
	ctx->cctx = cctx;
	init_completion(&ctx->work);
	INIT_WORK(&ctx->put_work, fastrpc_context_put_wq);

	spin_lock(&user->lock);
	list_add_tail(&ctx->node, &user->pending);
	spin_unlock(&user->lock);

	spin_lock_irqsave(&cctx->lock, flags);
	ret = idr_alloc_cyclic(&cctx->ctx_idr, ctx, 1,
			       FASTRPC_CTX_MAX, GFP_ATOMIC);
	if (ret < 0) {
		spin_unlock_irqrestore(&cctx->lock, flags);
		goto err_idr;
	}
	ctx->ctxid = ret << 4;
	spin_unlock_irqrestore(&cctx->lock, flags);

	kref_init(&ctx->refcount);

	return ctx;
err_idr:
	spin_lock(&user->lock);
	list_del(&ctx->node);
	spin_unlock(&user->lock);
	fastrpc_channel_ctx_put(cctx);
	kfree(ctx->maps);
	kfree(ctx->olaps);
	kfree(ctx);

	return ERR_PTR(ret);
}

static struct sg_table *
fastrpc_map_dma_buf(struct dma_buf_attachment *attachment,
		    enum dma_data_direction dir)
{
	struct fastrpc_dma_buf_attachment *a = attachment->priv;
	struct sg_table *table;

	table = &a->sgt;

	if (!dma_map_sg(attachment->dev, table->sgl, table->nents, dir))
		return ERR_PTR(-ENOMEM);

	return table;
}

static void fastrpc_unmap_dma_buf(struct dma_buf_attachment *attach,
				  struct sg_table *table,
				  enum dma_data_direction dir)
{
	dma_unmap_sg(attach->dev, table->sgl, table->nents, dir);
}

static void fastrpc_release(struct dma_buf *dmabuf)
{
	struct fastrpc_buf *buffer = dmabuf->priv;

	fastrpc_buf_free(buffer);
}

static int fastrpc_dma_buf_attach(struct dma_buf *dmabuf,
				  struct dma_buf_attachment *attachment)
{
	struct fastrpc_dma_buf_attachment *a;
	struct fastrpc_buf *buffer = dmabuf->priv;
	int ret;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	ret = dma_get_sgtable(buffer->dev, &a->sgt, buffer->virt,
			      FASTRPC_PHYS(buffer->phys), buffer->size);
	if (ret < 0) {
		dev_err(buffer->dev, "failed to get scatterlist from DMA API\n");
		kfree(a);
		return -EINVAL;
	}

	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->node);
	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->node, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void fastrpc_dma_buf_detatch(struct dma_buf *dmabuf,
				    struct dma_buf_attachment *attachment)
{
	struct fastrpc_dma_buf_attachment *a = attachment->priv;
	struct fastrpc_buf *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->node);
	mutex_unlock(&buffer->lock);
	sg_free_table(&a->sgt);
	kfree(a);
}

static void *fastrpc_kmap(struct dma_buf *dmabuf, unsigned long pgnum)
{
	struct fastrpc_buf *buf = dmabuf->priv;

	return buf->virt ? buf->virt + pgnum * PAGE_SIZE : NULL;
}

static void *fastrpc_vmap(struct dma_buf *dmabuf)
{
	struct fastrpc_buf *buf = dmabuf->priv;

	return buf->virt;
}

static int fastrpc_mmap(struct dma_buf *dmabuf,
			struct vm_area_struct *vma)
{
	struct fastrpc_buf *buf = dmabuf->priv;
	size_t size = vma->vm_end - vma->vm_start;

	return dma_mmap_coherent(buf->dev, vma, buf->virt,
				 FASTRPC_PHYS(buf->phys), size);
}

static const struct dma_buf_ops fastrpc_dma_buf_ops = {
	.attach = fastrpc_dma_buf_attach,
	.detach = fastrpc_dma_buf_detatch,
	.map_dma_buf = fastrpc_map_dma_buf,
	.unmap_dma_buf = fastrpc_unmap_dma_buf,
	.mmap = fastrpc_mmap,
	.map = fastrpc_kmap,
	.vmap = fastrpc_vmap,
	.release = fastrpc_release,
};

static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
			      u64 len, struct fastrpc_map **ppmap)
{
	struct fastrpc_session_ctx *sess = fl->sctx;
	struct fastrpc_map *map = NULL;
	int err = 0;

	if (!fastrpc_map_find(fl, fd, ppmap))
		return 0;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	INIT_LIST_HEAD(&map->node);
	map->fl = fl;
	map->fd = fd;
	map->buf = dma_buf_get(fd);
	if (IS_ERR(map->buf)) {
		err = PTR_ERR(map->buf);
		goto get_err;
	}

	map->attach = dma_buf_attach(map->buf, sess->dev);
	if (IS_ERR(map->attach)) {
		dev_err(sess->dev, "Failed to attach dmabuf\n");
		err = PTR_ERR(map->attach);
		goto attach_err;
	}

	map->table = dma_buf_map_attachment(map->attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(map->table)) {
		err = PTR_ERR(map->table);
		goto map_err;
	}
	map->phys = sg_dma_address(map->table->sgl);
	map->phys += ((u64)fl->sctx->sid << 32);
	map->size = len;
	map->va = sg_virt(map->table->sgl);
	map->len = len;
	kref_init(&map->refcount);

	spin_lock(&fl->lock);
	list_add_tail(&map->node, &fl->maps);
	spin_unlock(&fl->lock);
	*ppmap = map;

	return 0;

map_err:
	dma_buf_detach(map->buf, map->attach);
attach_err:
	dma_buf_put(map->buf);
get_err:
	kfree(map);

	return err;
}

/*
 * Fastrpc payload buffer with metadata looks like:
 *
 * >>>>>>  START of METADATA <<<<<<<<<
 * +---------------------------------+
 * |           Arguments             |
 * | type:(struct fastrpc_remote_arg)|
 * |             (0 - N)             |
 * +---------------------------------+
 * |         Invoke Buffer list      |
 * | type:(struct fastrpc_invoke_buf)|
 * |             (0 - N)             |
 * +---------------------------------+
 * |         Page info list          |
 * | type:(struct fastrpc_phy_page)  |
 * |             (0 - N)             |
 * +---------------------------------+
 * |         Optional info           |
 * |(can be specific to SoC/Firmware)|
 * +---------------------------------+
 * >>>>>>>>  END of METADATA <<<<<<<<<
 * +---------------------------------+
 * |         Inline ARGS             |
 * |            (0-N)                |
 * +---------------------------------+
 */

static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx)
{
	int size = 0;

	size = (sizeof(struct fastrpc_remote_arg) +
		sizeof(struct fastrpc_invoke_buf) +
		sizeof(struct fastrpc_phy_page)) * ctx->nscalars +
		sizeof(u64) * FASTRPC_MAX_FDLIST +
		sizeof(u32) * FASTRPC_MAX_CRCLIST;

	return size;
}

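/*
 * Example (illustrative, assuming the naturally aligned layouts above):
 * with nscalars = 4 the metadata takes 4 * (16 + 8 + 16) = 160 bytes of
 * per-argument records, plus 16 * 8 = 128 bytes of FD list and
 * 64 * 4 = 256 bytes of CRC list, i.e. 544 bytes in total, which
 * fastrpc_get_payload_size() below aligns up to FASTRPC_ALIGN (576).
 */
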
static u64 fastrpc_get_payload_size(struct fastrpc_invoke_ctx *ctx, int metalen)
{
	u64 size = 0;
	int i;

	size = ALIGN(metalen, FASTRPC_ALIGN);
	for (i = 0; i < ctx->nscalars; i++) {
		if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) {

			if (ctx->olaps[i].offset == 0)
				size = ALIGN(size, FASTRPC_ALIGN);

			size += (ctx->olaps[i].mend - ctx->olaps[i].mstart);
		}
	}

	return size;
}

static int fastrpc_create_maps(struct fastrpc_invoke_ctx *ctx)
{
	struct device *dev = ctx->fl->sctx->dev;
	int i, err;

	for (i = 0; i < ctx->nscalars; ++i) {
		/* Make sure reserved field is set to 0 */
		if (ctx->args[i].reserved)
			return -EINVAL;

		if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1 ||
		    ctx->args[i].length == 0)
			continue;

		err = fastrpc_map_create(ctx->fl, ctx->args[i].fd,
					 ctx->args[i].length, &ctx->maps[i]);
		if (err) {
			dev_err(dev, "Error Creating map %d\n", err);
			return -EINVAL;
		}

	}
	return 0;
}

static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
{
	struct device *dev = ctx->fl->sctx->dev;
	struct fastrpc_remote_arg *rpra;
	struct fastrpc_invoke_buf *list;
	struct fastrpc_phy_page *pages;
	int inbufs, i, oix, err = 0;
	u64 len, rlen, pkt_size;
	u64 pg_start, pg_end;
	uintptr_t args;
	int metalen;

	inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	metalen = fastrpc_get_meta_size(ctx);
	pkt_size = fastrpc_get_payload_size(ctx, metalen);

	err = fastrpc_create_maps(ctx);
	if (err)
		return err;

	ctx->msg_sz = pkt_size;

	err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf);
	if (err)
		return err;

	rpra = ctx->buf->virt;
	list = ctx->buf->virt + ctx->nscalars * sizeof(*rpra);
	pages = ctx->buf->virt + ctx->nscalars * (sizeof(*list) +
						  sizeof(*rpra));
	args = (uintptr_t)ctx->buf->virt + metalen;
	rlen = pkt_size - metalen;
	ctx->rpra = rpra;

	for (oix = 0; oix < ctx->nbufs; ++oix) {
		int mlen;

		i = ctx->olaps[oix].raix;
		len = ctx->args[i].length;

		rpra[i].pv = 0;
		rpra[i].len = len;
		list[i].num = len ? 1 : 0;
		list[i].pgidx = i;

		if (!len)
			continue;

		if (ctx->maps[i]) {
			struct vm_area_struct *vma = NULL;

			rpra[i].pv = (u64) ctx->args[i].ptr;
			pages[i].addr = ctx->maps[i]->phys;

			vma = find_vma(current->mm, ctx->args[i].ptr);
			if (vma)
				pages[i].addr += ctx->args[i].ptr -
						 vma->vm_start;

			pg_start = (ctx->args[i].ptr & PAGE_MASK) >> PAGE_SHIFT;
			pg_end = ((ctx->args[i].ptr + len - 1) & PAGE_MASK) >>
				  PAGE_SHIFT;
			pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;

		} else {

			if (ctx->olaps[oix].offset == 0) {
				rlen -= ALIGN(args, FASTRPC_ALIGN) - args;
				args = ALIGN(args, FASTRPC_ALIGN);
			}

			mlen = ctx->olaps[oix].mend - ctx->olaps[oix].mstart;

			if (rlen < mlen)
				goto bail;

			rpra[i].pv = args - ctx->olaps[oix].offset;
			pages[i].addr = ctx->buf->phys -
					ctx->olaps[oix].offset +
					(pkt_size - rlen);
			pages[i].addr = pages[i].addr & PAGE_MASK;

			pg_start = (args & PAGE_MASK) >> PAGE_SHIFT;
			pg_end = ((args + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
			pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
			args = args + mlen;
			rlen -= mlen;
		}

		if (i < inbufs && !ctx->maps[i]) {
			void *dst = (void *)(uintptr_t)rpra[i].pv;
			void *src = (void *)(uintptr_t)ctx->args[i].ptr;

			if (!kernel) {
				if (copy_from_user(dst, (void __user *)src,
						   len)) {
					err = -EFAULT;
					goto bail;
				}
			} else {
				memcpy(dst, src, len);
			}
		}
	}

	for (i = ctx->nbufs; i < ctx->nscalars; ++i) {
		rpra[i].pv = (u64) ctx->args[i].ptr;
		rpra[i].len = ctx->args[i].length;
		list[i].num = ctx->args[i].length ? 1 : 0;
		list[i].pgidx = i;
		pages[i].addr = ctx->maps[i]->phys;
		pages[i].size = ctx->maps[i]->size;
	}

bail:
	if (err)
		dev_err(dev, "Error: get invoke args failed:%d\n", err);

	return err;
}

static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
			    u32 kernel)
{
	struct fastrpc_remote_arg *rpra = ctx->rpra;
	int i, inbufs;

	inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);

	for (i = inbufs; i < ctx->nbufs; ++i) {
		void *src = (void *)(uintptr_t)rpra[i].pv;
		void *dst = (void *)(uintptr_t)ctx->args[i].ptr;
		u64 len = rpra[i].len;

		if (!kernel) {
			if (copy_to_user((void __user *)dst, src, len))
				return -EFAULT;
		} else {
			memcpy(dst, src, len);
		}
	}

	return 0;
}

static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,
			       struct fastrpc_invoke_ctx *ctx,
			       u32 kernel, uint32_t handle)
{
	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_user *fl = ctx->fl;
	struct fastrpc_msg *msg = &ctx->msg;

	cctx = fl->cctx;
	msg->pid = fl->tgid;
	msg->tid = current->pid;

	if (kernel)
		msg->pid = 0;

	msg->ctx = ctx->ctxid | fl->pd;
	msg->handle = handle;
	msg->sc = ctx->sc;
	msg->addr = ctx->buf ? ctx->buf->phys : 0;
	msg->size = roundup(ctx->msg_sz, PAGE_SIZE);
	fastrpc_context_get(ctx);

	return rpmsg_send(cctx->rpdev->ept, (void *)msg, sizeof(*msg));
}

static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
				   u32 handle, u32 sc,
				   struct fastrpc_invoke_args *args)
{
	struct fastrpc_invoke_ctx *ctx = NULL;
	int err = 0;

	if (!fl->sctx)
		return -EINVAL;

	if (!fl->cctx->rpdev)
		return -EPIPE;

	ctx = fastrpc_context_alloc(fl, kernel, sc, args);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (ctx->nscalars) {
		err = fastrpc_get_args(kernel, ctx);
		if (err)
			goto bail;
	}

	/* make sure that all CPU memory writes are seen by DSP */
	dma_wmb();
	/* Send invoke buffer to remote dsp */
	err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle);
	if (err)
		goto bail;

	/* Wait for remote dsp to respond or time out */
	err = wait_for_completion_interruptible(&ctx->work);
	if (err)
		goto bail;

	/* Check the response from remote dsp */
	err = ctx->retval;
	if (err)
		goto bail;

	if (ctx->nscalars) {
		/* make sure that all memory writes by DSP are seen by CPU */
		dma_rmb();
		/* populate all the output buffers with results */
		err = fastrpc_put_args(ctx, kernel);
		if (err)
			goto bail;
	}

bail:
	/* We are done with this compute context, remove it from pending list */
	spin_lock(&fl->lock);
	list_del(&ctx->node);
	spin_unlock(&fl->lock);
	fastrpc_context_put(ctx);

	if (err)
		dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err);

	return err;
}

static int fastrpc_init_create_process(struct fastrpc_user *fl,
					char __user *argp)
{
	struct fastrpc_init_create init;
	struct fastrpc_invoke_args *args;
	struct fastrpc_phy_page pages[1];
	struct fastrpc_map *map = NULL;
	struct fastrpc_buf *imem = NULL;
	int memlen;
	int err;
	struct {
		int pgid;
		u32 namelen;
		u32 filelen;
		u32 pageslen;
		u32 attrs;
		u32 siglen;
	} inbuf;
	u32 sc;

	args = kcalloc(FASTRPC_CREATE_PROCESS_NARGS, sizeof(*args), GFP_KERNEL);
	if (!args)
		return -ENOMEM;

	if (copy_from_user(&init, argp, sizeof(init))) {
		err = -EFAULT;
		goto err;
	}

	if (init.filelen > INIT_FILELEN_MAX) {
		err = -EINVAL;
		goto err;
	}

	inbuf.pgid = fl->tgid;
	inbuf.namelen = strlen(current->comm) + 1;
	inbuf.filelen = init.filelen;
	inbuf.pageslen = 1;
	inbuf.attrs = init.attrs;
	inbuf.siglen = init.siglen;
	fl->pd = 1;

	if (init.filelen && init.filefd) {
		err = fastrpc_map_create(fl, init.filefd, init.filelen, &map);
		if (err)
			goto err;
	}

	memlen = ALIGN(max(INIT_FILELEN_MAX, (int)init.filelen * 4),
		       1024 * 1024);
	err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen,
				&imem);
	if (err)
		goto err_alloc;

	fl->init_mem = imem;
	args[0].ptr = (u64)(uintptr_t)&inbuf;
	args[0].length = sizeof(inbuf);
	args[0].fd = -1;

	args[1].ptr = (u64)(uintptr_t)current->comm;
	args[1].length = inbuf.namelen;
	args[1].fd = -1;

	args[2].ptr = (u64) init.file;
	args[2].length = inbuf.filelen;
	args[2].fd = init.filefd;

	pages[0].addr = imem->phys;
	pages[0].size = imem->size;

	args[3].ptr = (u64)(uintptr_t) pages;
	args[3].length = 1 * sizeof(*pages);
	args[3].fd = -1;

	args[4].ptr = (u64)(uintptr_t)&inbuf.attrs;
	args[4].length = sizeof(inbuf.attrs);
	args[4].fd = -1;

	args[5].ptr = (u64)(uintptr_t) &inbuf.siglen;
	args[5].length = sizeof(inbuf.siglen);
	args[5].fd = -1;

	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0);
	if (init.attrs)
		sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 6, 0);

	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				      sc, args);
	if (err)
		goto err_invoke;

	kfree(args);

	return 0;

err_invoke:
	fl->init_mem = NULL;
	fastrpc_buf_free(imem);
err_alloc:
	if (map) {
		spin_lock(&fl->lock);
		list_del(&map->node);
		spin_unlock(&fl->lock);
		fastrpc_map_put(map);
	}
err:
	kfree(args);

	return err;
}

static struct fastrpc_session_ctx *fastrpc_session_alloc(
					struct fastrpc_channel_ctx *cctx)
{
	struct fastrpc_session_ctx *session = NULL;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&cctx->lock, flags);
	for (i = 0; i < cctx->sesscount; i++) {
		if (!cctx->session[i].used && cctx->session[i].valid) {
			cctx->session[i].used = true;
			session = &cctx->session[i];
			break;
		}
	}
	spin_unlock_irqrestore(&cctx->lock, flags);

	return session;
}

static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx,
				 struct fastrpc_session_ctx *session)
{
	unsigned long flags;

	spin_lock_irqsave(&cctx->lock, flags);
	session->used = false;
	spin_unlock_irqrestore(&cctx->lock, flags);
}

static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
{
	struct fastrpc_invoke_args args[1];
	int tgid = 0;
	u32 sc;

	tgid = fl->tgid;
	args[0].ptr = (u64)(uintptr_t) &tgid;
	args[0].length = sizeof(tgid);
	args[0].fd = -1;
	args[0].reserved = 0;
	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_RELEASE, 1, 0);

	return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				       sc, &args[0]);
}

static int fastrpc_device_release(struct inode *inode, struct file *file)
{
	struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
	struct fastrpc_channel_ctx *cctx = fl->cctx;
	struct fastrpc_invoke_ctx *ctx, *n;
	struct fastrpc_map *map, *m;
	unsigned long flags;

	fastrpc_release_current_dsp_process(fl);

	spin_lock_irqsave(&cctx->lock, flags);
	list_del(&fl->user);
	spin_unlock_irqrestore(&cctx->lock, flags);

	if (fl->init_mem)
		fastrpc_buf_free(fl->init_mem);

	list_for_each_entry_safe(ctx, n, &fl->pending, node) {
		list_del(&ctx->node);
		fastrpc_context_put(ctx);
	}

	list_for_each_entry_safe(map, m, &fl->maps, node) {
		list_del(&map->node);
		fastrpc_map_put(map);
	}

	fastrpc_session_free(cctx, fl->sctx);
	fastrpc_channel_ctx_put(cctx);

	mutex_destroy(&fl->mutex);
	kfree(fl);
	file->private_data = NULL;

	return 0;
}

static int fastrpc_device_open(struct inode *inode, struct file *filp)
{
	struct fastrpc_channel_ctx *cctx = miscdev_to_cctx(filp->private_data);
	struct fastrpc_user *fl = NULL;
	unsigned long flags;

	fl = kzalloc(sizeof(*fl), GFP_KERNEL);
	if (!fl)
		return -ENOMEM;

	/* Released in fastrpc_device_release() */
	fastrpc_channel_ctx_get(cctx);

	filp->private_data = fl;
	spin_lock_init(&fl->lock);
	mutex_init(&fl->mutex);
	INIT_LIST_HEAD(&fl->pending);
	INIT_LIST_HEAD(&fl->maps);
	INIT_LIST_HEAD(&fl->user);
	fl->tgid = current->tgid;
	fl->cctx = cctx;

	fl->sctx = fastrpc_session_alloc(cctx);
	if (!fl->sctx) {
		dev_err(&cctx->rpdev->dev, "No session available\n");
		mutex_destroy(&fl->mutex);
		kfree(fl);

		return -EBUSY;
	}

	spin_lock_irqsave(&cctx->lock, flags);
	list_add_tail(&fl->user, &cctx->users);
	spin_unlock_irqrestore(&cctx->lock, flags);

	return 0;
}

static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_alloc_dma_buf bp;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct fastrpc_buf *buf = NULL;
	int err;

	if (copy_from_user(&bp, argp, sizeof(bp)))
		return -EFAULT;

	err = fastrpc_buf_alloc(fl, fl->sctx->dev, bp.size, &buf);
	if (err)
		return err;
	exp_info.ops = &fastrpc_dma_buf_ops;
	exp_info.size = bp.size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buf;
	buf->dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(buf->dmabuf)) {
		err = PTR_ERR(buf->dmabuf);
		fastrpc_buf_free(buf);
		return err;
	}

	bp.fd = dma_buf_fd(buf->dmabuf, O_ACCMODE);
	if (bp.fd < 0) {
		dma_buf_put(buf->dmabuf);
		return -EINVAL;
	}

	if (copy_to_user(argp, &bp, sizeof(bp))) {
		dma_buf_put(buf->dmabuf);
		return -EFAULT;
	}

	return 0;
}

static int fastrpc_init_attach(struct fastrpc_user *fl)
{
	struct fastrpc_invoke_args args[1];
	int tgid = fl->tgid;
	u32 sc;

	args[0].ptr = (u64)(uintptr_t) &tgid;
	args[0].length = sizeof(tgid);
	args[0].fd = -1;
	args[0].reserved = 0;
	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_ATTACH, 1, 0);
	fl->pd = 0;

	return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				       sc, &args[0]);
}

static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_invoke_args *args = NULL;
	struct fastrpc_invoke inv;
	u32 nscalars;
	int err;

	if (copy_from_user(&inv, argp, sizeof(inv)))
		return -EFAULT;

	/* nscalars is truncated here to max supported value */
	nscalars = REMOTE_SCALARS_LENGTH(inv.sc);
	if (nscalars) {
		args = kcalloc(nscalars, sizeof(*args), GFP_KERNEL);
		if (!args)
			return -ENOMEM;

		if (copy_from_user(args, (void __user *)(uintptr_t)inv.args,
				   nscalars * sizeof(*args))) {
			kfree(args);
			return -EFAULT;
		}
	}

	err = fastrpc_internal_invoke(fl, false, inv.handle, inv.sc, args);
	kfree(args);

	return err;
}

static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
	char __user *argp = (char __user *)arg;
	int err;

	switch (cmd) {
	case FASTRPC_IOCTL_INVOKE:
		err = fastrpc_invoke(fl, argp);
		break;
	case FASTRPC_IOCTL_INIT_ATTACH:
		err = fastrpc_init_attach(fl);
		break;
	case FASTRPC_IOCTL_INIT_CREATE:
		err = fastrpc_init_create_process(fl, argp);
		break;
	case FASTRPC_IOCTL_ALLOC_DMA_BUFF:
		err = fastrpc_dmabuf_alloc(fl, argp);
		break;
	default:
		err = -ENOTTY;
		break;
	}

	return err;
}

static const struct file_operations fastrpc_fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
	.compat_ioctl = fastrpc_device_ioctl,
};

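/*
 * Minimal user-space usage sketch (illustrative only; the exact device
 * node depends on the probed domain, and the struct layouts come from
 * <uapi/misc/fastrpc.h>):
 *
 *	int fd = open("/dev/fastrpc-adsp", O_RDWR);
 *	struct fastrpc_invoke inv = {
 *		.handle = ...,	// remote handle, known out of band
 *		.sc = ...,	// packed with the scalars convention above
 *		.args = (__u64)(uintptr_t)args_array,
 *	};
 *	ioctl(fd, FASTRPC_IOCTL_INIT_ATTACH, NULL);	// or INIT_CREATE
 *	ioctl(fd, FASTRPC_IOCTL_INVOKE, &inv);
 *	close(fd);
 */
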
static int fastrpc_cb_probe(struct platform_device *pdev)
{
	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_session_ctx *sess;
	struct device *dev = &pdev->dev;
	int i, sessions = 0;
	unsigned long flags;
	int rc;

	cctx = dev_get_drvdata(dev->parent);
	if (!cctx)
		return -EINVAL;

	of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions);

	spin_lock_irqsave(&cctx->lock, flags);
	sess = &cctx->session[cctx->sesscount];
	sess->used = false;
	sess->valid = true;
	sess->dev = dev;
	dev_set_drvdata(dev, sess);

	if (of_property_read_u32(dev->of_node, "reg", &sess->sid))
		dev_info(dev, "FastRPC Session ID not specified in DT\n");

	if (sessions > 0) {
		struct fastrpc_session_ctx *dup_sess;

		for (i = 1; i < sessions; i++) {
			if (cctx->sesscount++ >= FASTRPC_MAX_SESSIONS)
				break;
			dup_sess = &cctx->session[cctx->sesscount];
			memcpy(dup_sess, sess, sizeof(*dup_sess));
		}
	}
	cctx->sesscount++;
	spin_unlock_irqrestore(&cctx->lock, flags);
	rc = dma_set_mask(dev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(dev, "32-bit DMA enable failed\n");
		return rc;
	}

	return 0;
}

static int fastrpc_cb_remove(struct platform_device *pdev)
{
	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(pdev->dev.parent);
	struct fastrpc_session_ctx *sess = dev_get_drvdata(&pdev->dev);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&cctx->lock, flags);
	for (i = 1; i < FASTRPC_MAX_SESSIONS; i++) {
		if (cctx->session[i].sid == sess->sid) {
			cctx->session[i].valid = false;
			cctx->sesscount--;
		}
	}
	spin_unlock_irqrestore(&cctx->lock, flags);

	return 0;
}

static const struct of_device_id fastrpc_match_table[] = {
	{ .compatible = "qcom,fastrpc-compute-cb", },
	{}
};

static struct platform_driver fastrpc_cb_driver = {
	.probe = fastrpc_cb_probe,
	.remove = fastrpc_cb_remove,
	.driver = {
		.name = "qcom,fastrpc-cb",
		.of_match_table = fastrpc_match_table,
		.suppress_bind_attrs = true,
	},
};

static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
{
	struct device *rdev = &rpdev->dev;
	struct fastrpc_channel_ctx *data;
	int i, err, domain_id = -1;
	const char *domain;

	err = of_property_read_string(rdev->of_node, "label", &domain);
	if (err) {
		dev_info(rdev, "FastRPC Domain not specified in DT\n");
		return err;
	}

	for (i = 0; i <= CDSP_DOMAIN_ID; i++) {
		if (!strcmp(domains[i], domain)) {
			domain_id = i;
			break;
		}
	}

	if (domain_id < 0) {
		dev_info(rdev, "FastRPC Invalid Domain ID %d\n", domain_id);
		return -EINVAL;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->miscdev.minor = MISC_DYNAMIC_MINOR;
	data->miscdev.name = kasprintf(GFP_KERNEL, "fastrpc-%s",
				       domains[domain_id]);
	data->miscdev.fops = &fastrpc_fops;
	err = misc_register(&data->miscdev);
	if (err)
		return err;

	kref_init(&data->refcount);

	dev_set_drvdata(&rpdev->dev, data);
	dma_set_mask_and_coherent(rdev, DMA_BIT_MASK(32));
	INIT_LIST_HEAD(&data->users);
	spin_lock_init(&data->lock);
	idr_init(&data->ctx_idr);
	data->domain_id = domain_id;
	data->rpdev = rpdev;

	return of_platform_populate(rdev->of_node, NULL, NULL, rdev);
}

static void fastrpc_notify_users(struct fastrpc_user *user)
{
	struct fastrpc_invoke_ctx *ctx;

	spin_lock(&user->lock);
	list_for_each_entry(ctx, &user->pending, node)
		complete(&ctx->work);
	spin_unlock(&user->lock);
}

static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
{
	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
	struct fastrpc_user *user;
	unsigned long flags;

	spin_lock_irqsave(&cctx->lock, flags);
	list_for_each_entry(user, &cctx->users, user)
		fastrpc_notify_users(user);
	spin_unlock_irqrestore(&cctx->lock, flags);

	misc_deregister(&cctx->miscdev);
	of_platform_depopulate(&rpdev->dev);

	cctx->rpdev = NULL;
	fastrpc_channel_ctx_put(cctx);
}

static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
				  int len, void *priv, u32 addr)
{
	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
	struct fastrpc_invoke_rsp *rsp = data;
	struct fastrpc_invoke_ctx *ctx;
	unsigned long flags;
	unsigned long ctxid;

	if (len < sizeof(*rsp))
		return -EINVAL;

	ctxid = ((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);

	spin_lock_irqsave(&cctx->lock, flags);
	ctx = idr_find(&cctx->ctx_idr, ctxid);
	spin_unlock_irqrestore(&cctx->lock, flags);

	if (!ctx) {
		dev_err(&rpdev->dev, "No context ID matches response\n");
		return -ENOENT;
	}

	ctx->retval = rsp->retval;
	complete(&ctx->work);

	/*
	 * The DMA buffer associated with the context cannot be freed in
	 * interrupt context so schedule it through a worker thread to
	 * avoid a kernel BUG.
	 */
	schedule_work(&ctx->put_work);

	return 0;
}

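/*
 * Illustrative round trip of a context ID (values hypothetical): if
 * idr_alloc_cyclic() returned 0x2a, fastrpc_context_alloc() stored
 * ctxid = 0x2a << 4 = 0x2a0 and fastrpc_invoke_send() sent ctxid | fl->pd.
 * In the callback, (rsp->ctx & FASTRPC_CTXID_MASK) >> 4 recovers 0x2a,
 * which is looked up in the IDR to find the waiting context.
 */
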
static const struct of_device_id fastrpc_rpmsg_of_match[] = {
	{ .compatible = "qcom,fastrpc" },
	{ },
};
MODULE_DEVICE_TABLE(of, fastrpc_rpmsg_of_match);

static struct rpmsg_driver fastrpc_driver = {
	.probe = fastrpc_rpmsg_probe,
	.remove = fastrpc_rpmsg_remove,
	.callback = fastrpc_rpmsg_callback,
	.drv = {
		.name = "qcom,fastrpc",
		.of_match_table = fastrpc_rpmsg_of_match,
	},
};

static int fastrpc_init(void)
{
	int ret;

	ret = platform_driver_register(&fastrpc_cb_driver);
	if (ret < 0) {
		pr_err("fastrpc: failed to register cb driver\n");
		return ret;
	}

	ret = register_rpmsg_driver(&fastrpc_driver);
	if (ret < 0) {
		pr_err("fastrpc: failed to register rpmsg driver\n");
		platform_driver_unregister(&fastrpc_cb_driver);
		return ret;
	}

	return 0;
}
module_init(fastrpc_init);

static void fastrpc_exit(void)
{
	platform_driver_unregister(&fastrpc_cb_driver);
	unregister_rpmsg_driver(&fastrpc_driver);
}
module_exit(fastrpc_exit);

MODULE_LICENSE("GPL v2");