// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x Job
 *
 * Copyright (c) 2010-2015, NVIDIA Corporation.
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/host1x.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <trace/events/host1x.h>

#include "channel.h"
#include "dev.h"
#include "job.h"
#include "syncpt.h"

#define HOST1X_WAIT_SYNCPT_OFFSET 0x8

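/*
 * A job and all of its variable-length arrays (relocs, unpin data, gathers
 * and the physical address table) live in a single kzalloc()ed block; the
 * size is computed in 64-bit arithmetic so that large num_cmdbufs and
 * num_relocs values cannot overflow the allocation size.
 */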
struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
				    u32 num_cmdbufs, u32 num_relocs)
{
	struct host1x_job *job = NULL;
	unsigned int num_unpins = num_cmdbufs + num_relocs;
	u64 total;
	void *mem;

	/* Check that we're not going to overflow */
	total = sizeof(struct host1x_job) +
		(u64)num_relocs * sizeof(struct host1x_reloc) +
		(u64)num_unpins * sizeof(struct host1x_job_unpin_data) +
		(u64)num_cmdbufs * sizeof(struct host1x_job_gather) +
		(u64)num_unpins * sizeof(dma_addr_t) +
		(u64)num_unpins * sizeof(u32 *);
	if (total > ULONG_MAX)
		return NULL;

	mem = job = kzalloc(total, GFP_KERNEL);
	if (!job)
		return NULL;

	kref_init(&job->ref);
	job->channel = ch;

	/* Redistribute memory to the structs */
	mem += sizeof(struct host1x_job);
	job->relocs = num_relocs ? mem : NULL;
	mem += num_relocs * sizeof(struct host1x_reloc);
	job->unpins = num_unpins ? mem : NULL;
	mem += num_unpins * sizeof(struct host1x_job_unpin_data);
	job->gathers = num_cmdbufs ? mem : NULL;
	mem += num_cmdbufs * sizeof(struct host1x_job_gather);
	job->addr_phys = num_unpins ? mem : NULL;

	job->reloc_addr_phys = job->addr_phys;
	job->gather_addr_phys = &job->addr_phys[num_relocs];

	return job;
}
EXPORT_SYMBOL(host1x_job_alloc);

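/*
 * Rough sketch of the expected lifecycle (the real submission path lives in
 * the channel code and in users such as the Tegra DRM driver; the steps
 * surrounding pin/unpin are illustrative only):
 *
 *	job = host1x_job_alloc(channel, num_cmdbufs, num_relocs);
 *	host1x_job_add_gather(job, bo, words, offset);
 *	err = host1x_job_pin(job, client->dev);
 *	... submit to the channel and wait for completion ...
 *	host1x_job_unpin(job);
 *	host1x_job_put(job);
 */
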
struct host1x_job *host1x_job_get(struct host1x_job *job)
{
	kref_get(&job->ref);
	return job;
}
EXPORT_SYMBOL(host1x_job_get);

static void job_free(struct kref *ref)
{
	struct host1x_job *job = container_of(ref, struct host1x_job, ref);

	kfree(job);
}

void host1x_job_put(struct host1x_job *job)
{
	kref_put(&job->ref, job_free);
}
EXPORT_SYMBOL(host1x_job_put);

void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
			   unsigned int words, unsigned int offset)
{
	struct host1x_job_gather *gather = &job->gathers[job->num_gathers];

	gather->words = words;
	gather->bo = bo;
	gather->offset = offset;

	job->num_gathers++;
}
EXPORT_SYMBOL(host1x_job_add_gather);

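/*
 * Pin every buffer object the job references: first the relocation targets,
 * then the gather command buffers. Each pinned BO is recorded in job->unpins
 * and its device address in job->addr_phys, so that host1x_job_unpin() can
 * undo everything on failure or completion. When an IOMMU domain is
 * available (and the firewall is disabled), gathers are additionally mapped
 * into the host1x IOVA space.
 */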
static int pin_job(struct host1x *host, struct host1x_job *job)
{
	struct device *dev = job->client->dev;
	unsigned int i;
	int err;

	job->num_unpins = 0;

	for (i = 0; i < job->num_relocs; i++) {
		struct host1x_reloc *reloc = &job->relocs[i];
		struct sg_table *sgt;
		dma_addr_t phys_addr;

		reloc->target.bo = host1x_bo_get(reloc->target.bo);
		if (!reloc->target.bo) {
			err = -EINVAL;
			goto unpin;
		}

		sgt = host1x_bo_pin(dev, reloc->target.bo, &phys_addr);
		if (IS_ERR(sgt)) {
			err = PTR_ERR(sgt);
			goto unpin;
		}

		job->addr_phys[job->num_unpins] = phys_addr;
		job->unpins[job->num_unpins].bo = reloc->target.bo;
		job->unpins[job->num_unpins].sgt = sgt;
		job->num_unpins++;
	}

	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];
		size_t gather_size = 0;
		struct scatterlist *sg;
		struct sg_table *sgt;
		dma_addr_t phys_addr;
		unsigned long shift;
		struct iova *alloc;
		unsigned int j;

		g->bo = host1x_bo_get(g->bo);
		if (!g->bo) {
			err = -EINVAL;
			goto unpin;
		}

		sgt = host1x_bo_pin(host->dev, g->bo, &phys_addr);
		if (IS_ERR(sgt)) {
			err = PTR_ERR(sgt);
			goto unpin;
		}

		if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && host->domain) {
			for_each_sg(sgt->sgl, sg, sgt->nents, j)
				gather_size += sg->length;
			gather_size = iova_align(&host->iova, gather_size);

			shift = iova_shift(&host->iova);
			alloc = alloc_iova(&host->iova, gather_size >> shift,
					   host->iova_end >> shift, true);
			if (!alloc) {
				err = -ENOMEM;
				goto unpin;
			}

			err = iommu_map_sg(host->domain,
					   iova_dma_addr(&host->iova, alloc),
					   sgt->sgl, sgt->nents, IOMMU_READ);
			if (err == 0) {
				__free_iova(&host->iova, alloc);
				err = -EINVAL;
				goto unpin;
			}

			job->addr_phys[job->num_unpins] =
				iova_dma_addr(&host->iova, alloc);
			job->unpins[job->num_unpins].size = gather_size;
		} else {
			job->addr_phys[job->num_unpins] = phys_addr;
		}

		job->gather_addr_phys[i] = job->addr_phys[job->num_unpins];

		job->unpins[job->num_unpins].bo = g->bo;
		job->unpins[job->num_unpins].sgt = sgt;
		job->num_unpins++;
	}

	return 0;

unpin:
	host1x_job_unpin(job);
	return err;
}

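/*
 * Patch the command buffer for one gather: every relocation that refers to
 * this gather's BO has its placeholder word overwritten with the device
 * address of the relocation target. With the firewall enabled, the patching
 * happens in the gather copy rather than in the original BO.
 */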
static int do_relocs(struct host1x_job *job, struct host1x_job_gather *g)
{
	u32 last_page = ~0;
	void *cmdbuf_page_addr = NULL;
	struct host1x_bo *cmdbuf = g->bo;
	unsigned int i;

	/* pin & patch the relocs for one gather */
	for (i = 0; i < job->num_relocs; i++) {
		struct host1x_reloc *reloc = &job->relocs[i];
		u32 reloc_addr = (job->reloc_addr_phys[i] +
				  reloc->target.offset) >> reloc->shift;
		u32 *target;

		/* skip all other gathers */
		if (cmdbuf != reloc->cmdbuf.bo)
			continue;

		if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
			target = (u32 *)job->gather_copy_mapped +
					reloc->cmdbuf.offset / sizeof(u32) +
						g->offset / sizeof(u32);
			goto patch_reloc;
		}

		if (last_page != reloc->cmdbuf.offset >> PAGE_SHIFT) {
			if (cmdbuf_page_addr)
				host1x_bo_kunmap(cmdbuf, last_page,
						 cmdbuf_page_addr);

			cmdbuf_page_addr = host1x_bo_kmap(cmdbuf,
					reloc->cmdbuf.offset >> PAGE_SHIFT);
			last_page = reloc->cmdbuf.offset >> PAGE_SHIFT;

			if (unlikely(!cmdbuf_page_addr)) {
				pr_err("Could not map cmdbuf for relocation\n");
				return -ENOMEM;
			}
		}

		target = cmdbuf_page_addr + (reloc->cmdbuf.offset & ~PAGE_MASK);
patch_reloc:
		*target = reloc_addr;
	}

	if (cmdbuf_page_addr)
		host1x_bo_kunmap(cmdbuf, last_page, cmdbuf_page_addr);

	return 0;
}

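/*
 * Everything below implements the optional command stream firewall
 * (CONFIG_TEGRA_HOST1X_FIREWALL): gathers are copied out of the
 * userspace-visible buffers and each opcode is validated before the copy
 * is handed to the hardware.
 */
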
static bool check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
			unsigned int offset)
{
	offset *= sizeof(u32);

	if (reloc->cmdbuf.bo != cmdbuf || reloc->cmdbuf.offset != offset)
		return false;

	/* relocation shift value validation isn't implemented yet */
	if (reloc->shift)
		return false;

	return true;
}

struct host1x_firewall {
	struct host1x_job *job;
	struct device *dev;

	unsigned int num_relocs;
	struct host1x_reloc *reloc;

	struct host1x_bo *cmdbuf;
	unsigned int offset;

	u32 words;
	u32 class;
	u32 reg;
	u32 mask;
	u32 count;
};

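/*
 * A write to an address register is only allowed if the next pending
 * relocation patches exactly this word of the command buffer; writes to
 * non-address registers pass through unchecked.
 */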
static int check_register(struct host1x_firewall *fw, unsigned long offset)
{
	if (!fw->job->is_addr_reg)
		return 0;

	if (fw->job->is_addr_reg(fw->dev, fw->class, offset)) {
		if (!fw->num_relocs)
			return -EINVAL;

		if (!check_reloc(fw->reloc, fw->cmdbuf, fw->offset))
			return -EINVAL;

		fw->num_relocs--;
		fw->reloc++;
	}

	return 0;
}

static int check_class(struct host1x_firewall *fw, u32 class)
{
	if (!fw->job->is_valid_class) {
		if (fw->class != class)
			return -EINVAL;
	} else {
		if (!fw->job->is_valid_class(fw->class))
			return -EINVAL;
	}

	return 0;
}

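/*
 * check_mask(), check_incr() and check_nonincr() consume the payload words
 * of the three register-write opcodes, advancing fw->offset and running
 * check_register() for every register that is actually written.
 */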
static int check_mask(struct host1x_firewall *fw)
{
	u32 mask = fw->mask;
	u32 reg = fw->reg;
	int ret;

	while (mask) {
		if (fw->words == 0)
			return -EINVAL;

		if (mask & 1) {
			ret = check_register(fw, reg);
			if (ret < 0)
				return ret;

			fw->words--;
			fw->offset++;
		}
		mask >>= 1;
		reg++;
	}

	return 0;
}

static int check_incr(struct host1x_firewall *fw)
{
	u32 count = fw->count;
	u32 reg = fw->reg;
	int ret;

	while (count) {
		if (fw->words == 0)
			return -EINVAL;

		ret = check_register(fw, reg);
		if (ret < 0)
			return ret;

		reg++;
		fw->words--;
		fw->offset++;
		count--;
	}

	return 0;
}

static int check_nonincr(struct host1x_firewall *fw)
{
	u32 count = fw->count;
	int ret;

	while (count) {
		if (fw->words == 0)
			return -EINVAL;

		ret = check_register(fw, fw->reg);
		if (ret < 0)
			return ret;

		fw->words--;
		fw->offset++;
		count--;
	}

	return 0;
}

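/*
 * Walk one gather word by word. The top nibble of each word selects the
 * opcode: SETCLASS (0) carries a class, register offset and mask, INCR (1)
 * and NONINCR (2) write a count of data words starting at a register
 * offset, and MASK (3) writes the registers selected by a 16-bit mask.
 * Opcodes 4 and 14 carry no memory addresses and are passed through;
 * everything else is rejected.
 */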
static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g)
{
	u32 *cmdbuf_base = (u32 *)fw->job->gather_copy_mapped +
		(g->offset / sizeof(u32));
	u32 job_class = fw->class;
	int err = 0;

	fw->words = g->words;
	fw->cmdbuf = g->bo;
	fw->offset = 0;

	while (fw->words && !err) {
		u32 word = cmdbuf_base[fw->offset];
		u32 opcode = (word & 0xf0000000) >> 28;

		fw->mask = 0;
		fw->reg = 0;
		fw->count = 0;
		fw->words--;
		fw->offset++;

		switch (opcode) {
		case 0:
			fw->class = word >> 6 & 0x3ff;
			fw->mask = word & 0x3f;
			fw->reg = word >> 16 & 0xfff;
			err = check_class(fw, job_class);
			if (!err)
				err = check_mask(fw);
			if (err)
				goto out;
			break;
		case 1:
			fw->reg = word >> 16 & 0xfff;
			fw->count = word & 0xffff;
			err = check_incr(fw);
			if (err)
				goto out;
			break;
		case 2:
			fw->reg = word >> 16 & 0xfff;
			fw->count = word & 0xffff;
			err = check_nonincr(fw);
			if (err)
				goto out;
			break;
		case 3:
			fw->mask = word & 0xffff;
			fw->reg = word >> 16 & 0xfff;
			err = check_mask(fw);
			if (err)
				goto out;
			break;
		case 4:
		case 14:
			break;
		default:
			err = -EINVAL;
			break;
		}
	}

out:
	return err;
}

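/*
 * With the firewall enabled, gathers are never executed in place: all of
 * them are copied into one contiguous write-combined DMA allocation and
 * validated there, so userspace cannot rewrite the commands after they have
 * been checked.
 */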
static inline int copy_gathers(struct device *host, struct host1x_job *job,
			       struct device *dev)
{
	struct host1x_firewall fw;
	size_t size = 0;
	size_t offset = 0;
	unsigned int i;

	fw.job = job;
	fw.dev = dev;
	fw.reloc = job->relocs;
	fw.num_relocs = job->num_relocs;
	fw.class = job->class;

	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];

		size += g->words * sizeof(u32);
	}

	/*
	 * Try a non-blocking allocation from higher-priority pools first,
	 * as waiting for the allocation here is a major performance hit.
	 */
	job->gather_copy_mapped = dma_alloc_wc(host, size, &job->gather_copy,
					       GFP_NOWAIT);

	/* the higher-priority allocation failed, try the generic-blocking one */
	if (!job->gather_copy_mapped)
		job->gather_copy_mapped = dma_alloc_wc(host, size,
						       &job->gather_copy,
						       GFP_KERNEL);
	if (!job->gather_copy_mapped)
		return -ENOMEM;

	job->gather_copy_size = size;

	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];
		void *gather;

		/* Copy the gather */
		gather = host1x_bo_mmap(g->bo);
		memcpy(job->gather_copy_mapped + offset, gather + g->offset,
		       g->words * sizeof(u32));
		host1x_bo_munmap(g->bo, gather);

		/* Store the location in the buffer */
		g->base = job->gather_copy;
		g->offset = offset;

		/* Validate the job */
		if (validate(&fw, g))
			return -EINVAL;

		offset += g->words * sizeof(u32);
	}

	/* No relocs should remain at this point */
	if (fw.num_relocs)
		return -EINVAL;

	return 0;
}

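/*
 * Prepare a job for submission: pin all referenced buffers, optionally copy
 * and validate the gathers, point gathers that share a BO at the same base
 * address, and patch in the relocations.
 */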
int host1x_job_pin(struct host1x_job *job, struct device *dev)
{
	int err;
	unsigned int i, j;
	struct host1x *host = dev_get_drvdata(dev->parent);

	/* pin memory */
	err = pin_job(host, job);
	if (err)
		goto out;

	if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
		err = copy_gathers(host->dev, job, dev);
		if (err)
			goto out;
	}

	/* patch gathers */
	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];

		/* process each gather BO only once */
		if (g->handled)
			continue;

		/* copy_gathers() sets the gather's base if the firewall is enabled */
		if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
			g->base = job->gather_addr_phys[i];

		for (j = i + 1; j < job->num_gathers; j++) {
			if (job->gathers[j].bo == g->bo) {
				job->gathers[j].handled = true;
				job->gathers[j].base = g->base;
			}
		}

		err = do_relocs(job, g);
		if (err)
			break;
	}

out:
	if (err)
		host1x_job_unpin(job);
	wmb();

	return err;
}
EXPORT_SYMBOL(host1x_job_pin);

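/*
 * Undo pin_job(): unmap and free any IOVA ranges allocated for gathers,
 * unpin and release every buffer object, and free the firewall's gather
 * copy if one was allocated.
 */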
void host1x_job_unpin(struct host1x_job *job)
{
	struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
	unsigned int i;

	for (i = 0; i < job->num_unpins; i++) {
		struct host1x_job_unpin_data *unpin = &job->unpins[i];

		if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) &&
		    unpin->size && host->domain) {
			iommu_unmap(host->domain, job->addr_phys[i],
				    unpin->size);
			free_iova(&host->iova,
				  iova_pfn(&host->iova, job->addr_phys[i]));
		}

		host1x_bo_unpin(host->dev, unpin->bo, unpin->sgt);
		host1x_bo_put(unpin->bo);
	}

	job->num_unpins = 0;

	if (job->gather_copy_size)
		dma_free_wc(host->dev, job->gather_copy_size,
			    job->gather_copy_mapped, job->gather_copy);
}
EXPORT_SYMBOL(host1x_job_unpin);

/*
 * Debug routine used to dump job entries
 */
void host1x_job_dump(struct device *dev, struct host1x_job *job)
{
	dev_dbg(dev, "    SYNCPT_ID   %d\n", job->syncpt_id);
	dev_dbg(dev, "    SYNCPT_VAL  %d\n", job->syncpt_end);
	dev_dbg(dev, "    FIRST_GET   0x%x\n", job->first_get);
	dev_dbg(dev, "    TIMEOUT     %d\n", job->timeout);
	dev_dbg(dev, "    NUM_SLOTS   %d\n", job->num_slots);
	dev_dbg(dev, "    NUM_HANDLES %d\n", job->num_unpins);
}