/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/highmem.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>

#include <drm/drm.h> /* for drm_legacy.h! */
#include <drm/drm_cache.h>
#include <drm/drm_legacy.h> /* for drm_pci.h! */
#include <drm/drm_pci.h>

#include "i915_drv.h"
#include "i915_gem_object.h"

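/*
 * Back the object with a single contiguous allocation from drm_pci_alloc():
 * the current shmem contents are copied in, CPU caches are flushed, and the
 * allocation is published as a one-entry sg_table whose DMA address is the
 * bus address of the buffer.
 */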
static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
        struct address_space *mapping = obj->base.filp->f_mapping;
        struct drm_dma_handle *phys;
        struct sg_table *st;
        struct scatterlist *sg;
        char *vaddr;
        int i;
        int err;

        if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
                return -EINVAL;

        /* Always aligning to the object size allows a single allocation
         * to handle all possible callers, and given typical object sizes,
         * the alignment of the buddy allocation will naturally match.
         */
        phys = drm_pci_alloc(obj->base.dev,
                             roundup_pow_of_two(obj->base.size),
                             roundup_pow_of_two(obj->base.size));
        if (!phys)
                return -ENOMEM;

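        /*
         * Copy the existing shmem contents into the new contiguous buffer,
         * flushing each page so the copy is visible to non-coherent readers.
         */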
        vaddr = phys->vaddr;
        for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
                struct page *page;
                char *src;

                page = shmem_read_mapping_page(mapping, i);
                if (IS_ERR(page)) {
                        err = PTR_ERR(page);
                        goto err_phys;
                }

                src = kmap_atomic(page);
                memcpy(vaddr, src, PAGE_SIZE);
                drm_clflush_virt_range(vaddr, PAGE_SIZE);
                kunmap_atomic(src);

                put_page(page);
                vaddr += PAGE_SIZE;
        }

        i915_gem_chipset_flush(to_i915(obj->base.dev));

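        /*
         * Describe the whole allocation with a single scatterlist entry so
         * the rest of the GEM code can treat it like any other page set.
         */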
        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (!st) {
                err = -ENOMEM;
                goto err_phys;
        }

        if (sg_alloc_table(st, 1, GFP_KERNEL)) {
                kfree(st);
                err = -ENOMEM;
                goto err_phys;
        }

        sg = st->sgl;
        sg->offset = 0;
        sg->length = obj->base.size;

        sg_dma_address(sg) = phys->busaddr;
        sg_dma_len(sg) = obj->base.size;

        obj->phys_handle = phys;

        __i915_gem_object_set_pages(obj, st, sg->length);

        return 0;

err_phys:
        drm_pci_free(obj->base.dev, phys);

        return err;
}

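/*
 * Tear down the contiguous backing store. If the object is dirty, copy the
 * contents back out to the shmem pages first so nothing is lost, then free
 * the sg_table and the DMA buffer.
 */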
static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
                               struct sg_table *pages)
{
        __i915_gem_object_release_shmem(obj, pages, false);

        if (obj->mm.dirty) {
                struct address_space *mapping = obj->base.filp->f_mapping;
                char *vaddr = obj->phys_handle->vaddr;
                int i;

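                /*
                 * Copy each page of the contiguous buffer back into its
                 * shmem page and mark it dirty so the contents persist.
                 */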
                for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
                        struct page *page;
                        char *dst;

                        page = shmem_read_mapping_page(mapping, i);
                        if (IS_ERR(page))
                                continue;

                        dst = kmap_atomic(page);
                        drm_clflush_virt_range(vaddr, PAGE_SIZE);
                        memcpy(dst, vaddr, PAGE_SIZE);
                        kunmap_atomic(dst);

                        set_page_dirty(page);
                        if (obj->mm.madv == I915_MADV_WILLNEED)
                                mark_page_accessed(page);
                        put_page(page);
                        vaddr += PAGE_SIZE;
                }
                obj->mm.dirty = false;
        }

        sg_free_table(pages);
        kfree(pages);

        drm_pci_free(obj->base.dev, obj->phys_handle);
}

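/* Drop the perma-pin taken by i915_gem_object_attach_phys(). */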
static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
        i915_gem_object_unpin_pages(obj);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
        .get_pages = i915_gem_object_get_pages_phys,
        .put_pages = i915_gem_object_put_pages_phys,
        .release = i915_gem_object_release_phys,
};

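/*
 * Switch a shmem-backed object over to the contiguous phys backing store.
 * The object is unbound, its ops are swapped to i915_gem_phys_ops, and the
 * existing contents are copied into the new allocation by get_pages above;
 * the pages are then perma-pinned until the object is released. Fails with
 * -EBUSY if the object is currently mapped, or -EFAULT if its backing store
 * has been marked as unneeded.
 */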
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
{
        struct sg_table *pages;
        int err;

        if (align > obj->base.size)
                return -EINVAL;

        if (obj->ops == &i915_gem_phys_ops)
                return 0;

        if (obj->ops != &i915_gem_shmem_ops)
                return -EINVAL;

        err = i915_gem_object_unbind(obj);
        if (err)
                return err;

        mutex_lock(&obj->mm.lock);

        if (obj->mm.madv != I915_MADV_WILLNEED) {
                err = -EFAULT;
                goto err_unlock;
        }

        if (obj->mm.quirked) {
                err = -EFAULT;
                goto err_unlock;
        }

        if (obj->mm.mapping) {
                err = -EBUSY;
                goto err_unlock;
        }

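        /*
         * Detach the shmem pages and install the phys ops; the subsequent
         * get_pages call copies the old contents into the contiguous buffer
         * before the original shmem pages are finally released below.
         */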
        pages = __i915_gem_object_unset_pages(obj);

        obj->ops = &i915_gem_phys_ops;

        err = ____i915_gem_object_get_pages(obj);
        if (err)
                goto err_xfer;

        /* Perma-pin (until release) the physical set of pages */
        __i915_gem_object_pin_pages(obj);

        if (!IS_ERR_OR_NULL(pages))
                i915_gem_shmem_ops.put_pages(obj, pages);
        mutex_unlock(&obj->mm.lock);
        return 0;

err_xfer:
        obj->ops = &i915_gem_shmem_ops;
        if (!IS_ERR_OR_NULL(pages)) {
                unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);

                __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
        }
err_unlock:
        mutex_unlock(&obj->mm.lock);
        return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_phys.c"
#endif