/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/highmem.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>

#include <drm/drm.h> /* for drm_legacy.h! */
#include <drm/drm_cache.h>
#include <drm/drm_legacy.h> /* for drm_pci.h! */
#include <drm/drm_pci.h>

16 #include "gt/intel_gt.h"
18 #include "i915_gem_object.h"
19 #include "i915_scatterlist.h"
static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
        struct address_space *mapping = obj->base.filp->f_mapping;
        struct drm_dma_handle *phys;
        struct sg_table *st;
        struct scatterlist *sg;
        char *vaddr;
        int i, err;

        if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
                return -EINVAL;

        /* Always aligning to the object size allows a single allocation
         * to handle all possible callers, and given typical object sizes,
         * the alignment of the buddy allocation will naturally match.
         */
        phys = drm_pci_alloc(obj->base.dev,
                             roundup_pow_of_two(obj->base.size),
                             roundup_pow_of_two(obj->base.size));
        if (!phys)
                return -ENOMEM;

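        /*
         * Copy the object's current contents from its shmem pages into
         * the contiguous allocation, flushing each page out of the CPU
         * caches as it is copied.
         */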
        vaddr = phys->vaddr;
        for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
                struct page *page;
                char *src;

                page = shmem_read_mapping_page(mapping, i);
                if (IS_ERR(page)) {
                        err = PTR_ERR(page);
                        goto err_phys;
                }

                src = kmap_atomic(page);
                memcpy(vaddr, src, PAGE_SIZE);
                drm_clflush_virt_range(vaddr, PAGE_SIZE);
                kunmap_atomic(src);

                put_page(page);
                vaddr += PAGE_SIZE;
        }

        intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);

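        /*
         * Wrap the contiguous allocation in a single-entry sg_table so
         * the rest of the GEM code can treat it like any other set of
         * backing pages.
         */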
        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (!st) {
                err = -ENOMEM;
                goto err_phys;
        }

        if (sg_alloc_table(st, 1, GFP_KERNEL)) {
                kfree(st);
                err = -ENOMEM;
                goto err_phys;
        }

        sg = st->sgl;
        sg->offset = 0;
        sg->length = obj->base.size;

        sg_dma_address(sg) = phys->busaddr;
        sg_dma_len(sg) = obj->base.size;

        obj->phys_handle = phys;

        __i915_gem_object_set_pages(obj, st, sg->length);

        return 0;

err_phys:
        drm_pci_free(obj->base.dev, phys);
        return err;
}

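/*
 * When the pages are released, write any CPU-dirty contents back into
 * the original shmem pages before freeing the contiguous allocation.
 */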
static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
                               struct sg_table *pages)
{
        __i915_gem_object_release_shmem(obj, pages, false);

        if (obj->mm.dirty) {
                struct address_space *mapping = obj->base.filp->f_mapping;
                char *vaddr = obj->phys_handle->vaddr;
                int i;

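                /*
                 * Flush the CPU caches for each page of the contiguous
                 * allocation before copying from it, so the writeback
                 * picks up any writes made directly to memory.
                 */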
                for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
                        struct page *page;
                        char *dst;

                        page = shmem_read_mapping_page(mapping, i);
                        if (IS_ERR(page))
                                continue;

                        dst = kmap_atomic(page);
                        drm_clflush_virt_range(vaddr, PAGE_SIZE);
                        memcpy(dst, vaddr, PAGE_SIZE);
                        kunmap_atomic(dst);

                        set_page_dirty(page);
                        if (obj->mm.madv == I915_MADV_WILLNEED)
                                mark_page_accessed(page);
                        put_page(page);
                        vaddr += PAGE_SIZE;
                }
                obj->mm.dirty = false;
        }

        sg_free_table(pages);
        kfree(pages);

        drm_pci_free(obj->base.dev, obj->phys_handle);
}

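/* On final release, drop the reference on the backing shmem file. */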
static void phys_release(struct drm_i915_gem_object *obj)
{
        fput(obj->base.filp);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
        .get_pages = i915_gem_object_get_pages_phys,
        .put_pages = i915_gem_object_put_pages_phys,

        .release = phys_release,
};

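/*
 * Convert a shmem-backed object into a phys object: unbind it from the
 * GPU, copy its contents into a contiguous allocation and switch the
 * backend ops over, keeping the new page set pinned until release.
 */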
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
{
        struct sg_table *pages;
        int err;

        if (align > obj->base.size)
                return -EINVAL;

        if (obj->ops == &i915_gem_phys_ops)
                return 0;

        if (obj->ops != &i915_gem_shmem_ops)
                return -EINVAL;

        err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
        if (err)
                return err;

        mutex_lock(&obj->mm.lock);

        if (obj->mm.madv != I915_MADV_WILLNEED) {
                err = -EFAULT;
                goto err_unlock;
        }

        if (obj->mm.quirked) {
                err = -EFAULT;
                goto err_unlock;
        }

        if (obj->mm.mapping) {
                err = -EBUSY;
                goto err_unlock;
        }

        pages = __i915_gem_object_unset_pages(obj);

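        /* Swap the backend over and build the contiguous copy. */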
        obj->ops = &i915_gem_phys_ops;

        err = ____i915_gem_object_get_pages(obj);
        if (err)
                goto err_xfer;

        /* Perma-pin (until release) the physical set of pages */
        __i915_gem_object_pin_pages(obj);

        if (!IS_ERR_OR_NULL(pages))
                i915_gem_shmem_ops.put_pages(obj, pages);
        mutex_unlock(&obj->mm.lock);
        return 0;

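        /*
         * On failure, restore the shmem backend and rehook the original
         * pages so the object is left exactly as we found it.
         */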
err_xfer:
        obj->ops = &i915_gem_shmem_ops;
        if (!IS_ERR_OR_NULL(pages)) {
                unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);

                __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
        }
err_unlock:
        mutex_unlock(&obj->mm.lock);
        return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_phys.c"
#endif