iommu/tegra: gart: Fix spinlock recursion
drivers/iommu/tegra-gart.c
/*
 * IOMMU API for GART in Tegra20
 *
 * Copyright (c) 2010-2012, NVIDIA CORPORATION.  All rights reserved.
 *
 * Author: Hiroshi DOYU <hdoyu@nvidia.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include <soc/tegra/mc.h>

/* bitmap of the page sizes currently supported */
#define GART_IOMMU_PGSIZES	(SZ_4K)

#define GART_REG_BASE		0x24
#define GART_CONFIG		(0x24 - GART_REG_BASE)
#define GART_ENTRY_ADDR		(0x28 - GART_REG_BASE)
#define GART_ENTRY_DATA		(0x2c - GART_REG_BASE)
#define GART_ENTRY_PHYS_ADDR_VALID	(1 << 31)

#define GART_PAGE_SHIFT		12
#define GART_PAGE_SIZE		(1 << GART_PAGE_SHIFT)
#define GART_PAGE_MASK						\
	(~(GART_PAGE_SIZE - 1) & ~GART_ENTRY_PHYS_ADDR_VALID)

struct gart_client {
	struct device		*dev;
	struct list_head	list;
};

struct gart_device {
	void __iomem		*regs;
	u32			*savedata;
	u32			page_count;	/* total remappable size */
	dma_addr_t		iovmm_base;	/* offset to vmm_area */
	spinlock_t		pte_lock;	/* for pagetable */
	struct list_head	client;
	spinlock_t		client_lock;	/* for client list */
	struct device		*dev;

	struct iommu_device	iommu;		/* IOMMU Core handle */
};

struct gart_domain {
	struct iommu_domain domain;		/* generic domain handle */
	struct gart_device *gart;		/* link to gart device   */
};

static struct gart_device *gart_handle; /* unique for a system */

static bool gart_debug;

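/*
 * A valid GART PTE is the page frame's physical address with
 * GART_ENTRY_PHYS_ADDR_VALID (bit 31) set; masking a PTE with
 * GART_PAGE_MASK strips the valid bit again to recover the physical
 * page address.
 */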
#define GART_PTE(_pfn)						\
	(GART_ENTRY_PHYS_ADDR_VALID | ((_pfn) << PAGE_SHIFT))

static struct gart_domain *to_gart_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct gart_domain, domain);
}

/*
 * Any interaction between any block on PPSB and a block on APB or AHB
 * must be followed by a read back to ensure the APB/AHB bus transaction
 * is complete before initiating activity on the PPSB block.
 */
#define FLUSH_GART_REGS(gart)	((void)readl((gart)->regs + GART_CONFIG))

#define for_each_gart_pte(gart, iova)					\
	for (iova = gart->iovmm_base;					\
	     iova < gart->iovmm_base + GART_PAGE_SIZE * gart->page_count; \
	     iova += GART_PAGE_SIZE)

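/*
 * The page table is not memory-mapped directly: a PTE is accessed
 * indirectly by writing its IOVA to GART_ENTRY_ADDR and transferring
 * the value through GART_ENTRY_DATA. Concurrent callers must hold
 * gart->pte_lock so the address/data register pair stays consistent.
 */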
static inline void gart_set_pte(struct gart_device *gart,
				unsigned long offs, u32 pte)
{
	writel(offs, gart->regs + GART_ENTRY_ADDR);
	writel(pte, gart->regs + GART_ENTRY_DATA);

	dev_dbg(gart->dev, "%s %08lx:%08x\n",
		pte ? "map" : "unmap", offs, pte & GART_PAGE_MASK);
}

static inline unsigned long gart_read_pte(struct gart_device *gart,
					  unsigned long offs)
{
	unsigned long pte;

	writel(offs, gart->regs + GART_ENTRY_ADDR);
	pte = readl(gart->regs + GART_ENTRY_DATA);

	return pte;
}

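/*
 * Program every PTE in the aperture (restoring saved entries or
 * clearing them), then enable the GART and read GART_CONFIG back to
 * flush the posted writes.
 */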
static void do_gart_setup(struct gart_device *gart, const u32 *data)
{
	unsigned long iova;

	for_each_gart_pte(gart, iova)
		gart_set_pte(gart, iova, data ? *(data++) : 0);

	writel(1, gart->regs + GART_CONFIG);
	FLUSH_GART_REGS(gart);
}

#ifdef DEBUG
static void gart_dump_table(struct gart_device *gart)
{
	unsigned long iova;
	unsigned long flags;

	spin_lock_irqsave(&gart->pte_lock, flags);
	for_each_gart_pte(gart, iova) {
		unsigned long pte;

		pte = gart_read_pte(gart, iova);

		dev_dbg(gart->dev, "%s %08lx:%08lx\n",
			(GART_ENTRY_PHYS_ADDR_VALID & pte) ? "v" : " ",
			iova, pte & GART_PAGE_MASK);
	}
	spin_unlock_irqrestore(&gart->pte_lock, flags);
}
#else
static inline void gart_dump_table(struct gart_device *gart)
{
}
#endif

static inline bool gart_iova_range_valid(struct gart_device *gart,
					 unsigned long iova, size_t bytes)
{
	unsigned long iova_start, iova_end, gart_start, gart_end;

	iova_start = iova;
	iova_end = iova_start + bytes - 1;
	gart_start = gart->iovmm_base;
	gart_end = gart_start + gart->page_count * GART_PAGE_SIZE - 1;

	if (iova_start < gart_start)
		return false;
	if (iova_end > gart_end)
		return false;
	return true;
}

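/*
 * The client structure is allocated before client_lock is taken:
 * devm_kzalloc() with GFP_KERNEL may sleep, which is not allowed while
 * holding a spinlock.
 */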
static int gart_iommu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;
	struct gart_client *client, *c;
	int err = 0;

	client = devm_kzalloc(gart->dev, sizeof(*client), GFP_KERNEL);
	if (!client)
		return -ENOMEM;
	client->dev = dev;

	spin_lock(&gart->client_lock);
	list_for_each_entry(c, &gart->client, list) {
		if (c->dev == dev) {
			dev_err(gart->dev,
				"%s is already attached\n", dev_name(dev));
			err = -EINVAL;
			goto fail;
		}
	}
	list_add(&client->list, &gart->client);
	spin_unlock(&gart->client_lock);
	dev_dbg(gart->dev, "Attached %s\n", dev_name(dev));
	return 0;

fail:
	devm_kfree(gart->dev, client);
	spin_unlock(&gart->client_lock);
	return err;
}

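/* Caller must hold gart->client_lock. */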
static void __gart_iommu_detach_dev(struct iommu_domain *domain,
				    struct device *dev)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;
	struct gart_client *c;

	list_for_each_entry(c, &gart->client, list) {
		if (c->dev == dev) {
			list_del(&c->list);
			devm_kfree(gart->dev, c);
			dev_dbg(gart->dev, "Detached %s\n", dev_name(dev));
			return;
		}
	}

	dev_err(gart->dev, "Couldn't find %s to detach\n", dev_name(dev));
}

static void gart_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;

	spin_lock(&gart->client_lock);
	__gart_iommu_detach_dev(domain, dev);
	spin_unlock(&gart->client_lock);
}

static struct iommu_domain *gart_iommu_domain_alloc(unsigned type)
{
	struct gart_domain *gart_domain;
	struct gart_device *gart;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	gart = gart_handle;
	if (!gart)
		return NULL;

	gart_domain = kzalloc(sizeof(*gart_domain), GFP_KERNEL);
	if (!gart_domain)
		return NULL;

	gart_domain->gart = gart;
	gart_domain->domain.geometry.aperture_start = gart->iovmm_base;
	gart_domain->domain.geometry.aperture_end = gart->iovmm_base +
					gart->page_count * GART_PAGE_SIZE - 1;
	gart_domain->domain.geometry.force_aperture = true;

	return &gart_domain->domain;
}

static void gart_iommu_domain_free(struct iommu_domain *domain)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;

	if (gart) {
		spin_lock(&gart->client_lock);
		if (!list_empty(&gart->client)) {
			struct gart_client *c, *tmp;

			/*
			 * __gart_iommu_detach_dev() deletes and frees the
			 * list entry, so the _safe iterator is required.
			 */
			list_for_each_entry_safe(c, tmp, &gart->client, list)
				__gart_iommu_detach_dev(domain, c->dev);
		}
		spin_unlock(&gart->client_lock);
	}

	kfree(gart_domain);
}

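/*
 * In the gart_debug path below, the current PTE is read directly with
 * gart_read_pte() instead of going through gart_iommu_iova_to_phys(),
 * which takes pte_lock itself and would recurse on the lock already
 * held here (the spinlock recursion this revision fixes).
 */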
static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t pa, size_t bytes, int prot)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;
	unsigned long flags;
	unsigned long pfn;
	unsigned long pte;

	if (!gart_iova_range_valid(gart, iova, bytes))
		return -EINVAL;

	spin_lock_irqsave(&gart->pte_lock, flags);
	pfn = __phys_to_pfn(pa);
	if (!pfn_valid(pfn)) {
		dev_err(gart->dev, "Invalid page: %pa\n", &pa);
		spin_unlock_irqrestore(&gart->pte_lock, flags);
		return -EINVAL;
	}
	if (gart_debug) {
		pte = gart_read_pte(gart, iova);
		if (pte & GART_ENTRY_PHYS_ADDR_VALID) {
			spin_unlock_irqrestore(&gart->pte_lock, flags);
			dev_err(gart->dev, "Page entry is in-use\n");
			return -EBUSY;
		}
	}
	gart_set_pte(gart, iova, GART_PTE(pfn));
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return 0;
}

static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t bytes)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;
	unsigned long flags;

	if (!gart_iova_range_valid(gart, iova, bytes))
		return 0;

	spin_lock_irqsave(&gart->pte_lock, flags);
	gart_set_pte(gart, iova, 0);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return bytes;
}

static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;
	unsigned long pte;
	phys_addr_t pa;
	unsigned long flags;

	if (!gart_iova_range_valid(gart, iova, 0))
		return -EINVAL;

	spin_lock_irqsave(&gart->pte_lock, flags);
	pte = gart_read_pte(gart, iova);
	spin_unlock_irqrestore(&gart->pte_lock, flags);

	pa = (pte & GART_PAGE_MASK);
	if (!pfn_valid(__phys_to_pfn(pa))) {
		dev_err(gart->dev, "No entry for %08llx:%pa\n",
			(unsigned long long)iova, &pa);
		gart_dump_table(gart);
		return -EINVAL;
	}
	return pa;
}

static bool gart_iommu_capable(enum iommu_cap cap)
{
	return false;
}

static int gart_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;

	if (!dev->iommu_fwspec)
		return -ENODEV;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);

	iommu_device_link(&gart_handle->iommu, dev);

	return 0;
}

static void gart_iommu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
	iommu_device_unlink(&gart_handle->iommu, dev);
}

static int gart_iommu_of_xlate(struct device *dev,
			       struct of_phandle_args *args)
{
	return 0;
}

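/*
 * PTE updates are posted writes; reading GART_CONFIG back in the
 * iotlb_sync hooks (see FLUSH_GART_REGS above) ensures they have
 * reached the hardware before DMA proceeds.
 */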
static void gart_iommu_sync(struct iommu_domain *domain)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;

	FLUSH_GART_REGS(gart);
}

static const struct iommu_ops gart_iommu_ops = {
	.capable	= gart_iommu_capable,
	.domain_alloc	= gart_iommu_domain_alloc,
	.domain_free	= gart_iommu_domain_free,
	.attach_dev	= gart_iommu_attach_dev,
	.detach_dev	= gart_iommu_detach_dev,
	.add_device	= gart_iommu_add_device,
	.remove_device	= gart_iommu_remove_device,
	.device_group	= generic_device_group,
	.map		= gart_iommu_map,
	.unmap		= gart_iommu_unmap,
	.iova_to_phys	= gart_iommu_iova_to_phys,
	.pgsize_bitmap	= GART_IOMMU_PGSIZES,
	.of_xlate	= gart_iommu_of_xlate,
	.iotlb_sync_map	= gart_iommu_sync,
	.iotlb_sync	= gart_iommu_sync,
};

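/*
 * The GART contents are lost across suspend: every PTE is saved into
 * gart->savedata on suspend and the whole aperture is reprogrammed
 * from it on resume.
 */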
int tegra_gart_suspend(struct gart_device *gart)
{
	unsigned long iova;
	u32 *data = gart->savedata;
	unsigned long flags;

	spin_lock_irqsave(&gart->pte_lock, flags);
	for_each_gart_pte(gart, iova)
		*(data++) = gart_read_pte(gart, iova);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return 0;
}

int tegra_gart_resume(struct gart_device *gart)
{
	unsigned long flags;

	spin_lock_irqsave(&gart->pte_lock, flags);
	do_gart_setup(gart, gart->savedata);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return 0;
}

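/*
 * Probed from the Tegra memory-controller driver rather than as a
 * standalone platform device: the GART registers live at GART_REG_BASE
 * inside the MC's register space, so only the remappable aperture
 * (IORESOURCE_MEM index 1) is taken from the platform device here.
 */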
struct gart_device *tegra_gart_probe(struct device *dev, struct tegra_mc *mc)
{
	struct gart_device *gart;
	struct resource *res_remap;
	void __iomem *gart_regs;
	int ret;

	BUILD_BUG_ON(PAGE_SHIFT != GART_PAGE_SHIFT);

	/* the GART memory aperture is required */
	res_remap = platform_get_resource(to_platform_device(dev),
					  IORESOURCE_MEM, 1);
	if (!res_remap) {
		dev_err(dev, "GART memory aperture expected\n");
		return ERR_PTR(-ENXIO);
	}

	gart = devm_kzalloc(dev, sizeof(*gart), GFP_KERNEL);
	if (!gart) {
		dev_err(dev, "failed to allocate gart_device\n");
		return ERR_PTR(-ENOMEM);
	}

	ret = iommu_device_sysfs_add(&gart->iommu, dev, NULL, "gart");
	if (ret) {
		dev_err(dev, "Failed to register IOMMU in sysfs\n");
		return ERR_PTR(ret);
	}

	iommu_device_set_ops(&gart->iommu, &gart_iommu_ops);
	iommu_device_set_fwnode(&gart->iommu, dev->fwnode);

	ret = iommu_device_register(&gart->iommu);
	if (ret) {
		dev_err(dev, "Failed to register IOMMU\n");
		goto remove_sysfs;
	}

	gart->dev = dev;
	gart_regs = mc->regs + GART_REG_BASE;
	spin_lock_init(&gart->pte_lock);
	spin_lock_init(&gart->client_lock);
	INIT_LIST_HEAD(&gart->client);
	gart->regs = gart_regs;
	gart->iovmm_base = (dma_addr_t)res_remap->start;
	gart->page_count = (resource_size(res_remap) >> GART_PAGE_SHIFT);

	gart->savedata = vmalloc(array_size(sizeof(u32), gart->page_count));
	if (!gart->savedata) {
		dev_err(dev, "failed to allocate context save area\n");
		ret = -ENOMEM;
		goto unregister_iommu;
	}

	do_gart_setup(gart, NULL);

	gart_handle = gart;

	return gart;

unregister_iommu:
	iommu_device_unregister(&gart->iommu);
remove_sysfs:
	iommu_device_sysfs_remove(&gart->iommu);

	return ERR_PTR(ret);
}

module_param(gart_debug, bool, 0644);
MODULE_PARM_DESC(gart_debug, "Enable GART debugging");
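/*
 * With 0644 permissions the parameter can also be flipped at runtime
 * through sysfs; for example (the exact path depends on how this code
 * is built in):
 *
 *   echo 1 > /sys/module/tegra_gart/parameters/gart_debug
 */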