2 * SPDX-License-Identifier: MIT
4 * Copyright © 2014-2016 Intel Corporation
7 #include <linux/highmem.h>
8 #include <linux/shmem_fs.h>
9 #include <linux/swap.h>
11 #include <drm/drm.h> /* for drm_legacy.h! */
12 #include <drm/drm_cache.h>
13 #include <drm/drm_legacy.h> /* for drm_pci.h! */
14 #include <drm/drm_pci.h>
17 #include "i915_gem_object.h"
/*
 * Back the object with a single physically contiguous DMA allocation.
 *
 * Copies the current contents of the object's shmem pages into one
 * contiguous drm_pci allocation, then publishes a single-entry sg_table
 * whose one scatterlist segment covers the whole allocation.
 *
 * NOTE(review): several lines of this function (declarations, error
 * checks and unwind labels) are not visible in this view — comments on
 * control flow between the visible statements are best-effort.
 */
19 static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
21 struct address_space *mapping = obj->base.filp->f_mapping;
22 struct drm_dma_handle *phys;
24 struct scatterlist *sg;
/* Bit-17 swizzled objects cannot be represented by a flat copy. */
29 if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
32 /* Always aligning to the object size, allows a single allocation
33 * to handle all possible callers, and given typical object sizes,
34 * the alignment of the buddy allocation will naturally match.
/* One contiguous DMA-able chunk, size and alignment rounded up to a
 * power of two so the buddy allocator's natural alignment suffices. */
36 phys = drm_pci_alloc(obj->base.dev,
37 roundup_pow_of_two(obj->base.size),
38 roundup_pow_of_two(obj->base.size));
/* Copy each shmem-backed page into the contiguous buffer. */
43 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
47 page = shmem_read_mapping_page(mapping, i);
53 src = kmap_atomic(page);
54 memcpy(vaddr, src, PAGE_SIZE);
/* Flush CPU caches per page so the GPU sees the copied data. */
55 drm_clflush_virt_range(vaddr, PAGE_SIZE);
/* Make the whole copy globally visible to the device. */
62 i915_gem_chipset_flush(to_i915(obj->base.dev));
/* Build a one-segment sg_table describing the contiguous allocation. */
64 st = kmalloc(sizeof(*st), GFP_KERNEL);
70 if (sg_alloc_table(st, 1, GFP_KERNEL)) {
78 sg->length = obj->base.size;
80 sg_dma_address(sg) = phys->busaddr;
81 sg_dma_len(sg) = obj->base.size;
/* Stash the handle so put_pages/release can free the allocation. */
83 obj->phys_handle = phys;
85 __i915_gem_object_set_pages(obj, st, sg->length);
/* NOTE(review): presumably the error-unwind path — frees the DMA
 * allocation when a later step failed; confirm against full source. */
90 drm_pci_free(obj->base.dev, phys);
/*
 * Tear down the contiguous phys backing, writing dirty contents back
 * into the object's shmem pages before freeing the DMA allocation.
 *
 * NOTE(review): the "if (obj->mm.dirty)" guard around the write-back
 * loop is not visible in this view — confirm against the full source.
 */
96 i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
97 struct sg_table *pages)
99 __i915_gem_object_release_shmem(obj, pages, false);
102 struct address_space *mapping = obj->base.filp->f_mapping;
103 char *vaddr = obj->phys_handle->vaddr;
/* Copy each page of the contiguous buffer back to its shmem page. */
106 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
110 page = shmem_read_mapping_page(mapping, i);
114 dst = kmap_atomic(page);
/* Flush before reading so we copy what the device last wrote. */
115 drm_clflush_virt_range(vaddr, PAGE_SIZE);
116 memcpy(dst, vaddr, PAGE_SIZE);
/* Ensure the restored page is written out by the shmem layer. */
119 set_page_dirty(page);
/* Keep actively-used objects warm in the page cache. */
120 if (obj->mm.madv == I915_MADV_WILLNEED)
121 mark_page_accessed(page);
125 obj->mm.dirty = false;
/* Release the sg_table and the contiguous DMA allocation itself. */
128 sg_free_table(pages);
131 drm_pci_free(obj->base.dev, obj->phys_handle);
/*
 * Object destructor hook: drop the perma-pin taken by
 * i915_gem_object_attach_phys() so the pages can be released.
 */
135 i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
137 i915_gem_object_unpin_pages(obj);
/* Object-ops vtable for phys-backed objects: contiguous-DMA get/put
 * plus an unpin on final release. */
140 static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
141 .get_pages = i915_gem_object_get_pages_phys,
142 .put_pages = i915_gem_object_put_pages_phys,
143 .release = i915_gem_object_release_phys,
/*
 * Convert a shmem-backed object to a physically contiguous backing
 * (used for objects the hardware must address as one flat chunk).
 *
 * @align: required alignment; must not exceed the object size since the
 *         allocation is already rounded up to a power of two of the size.
 *
 * Returns 0 on success or a negative errno. On failure the object is
 * restored to its original shmem backing.
 *
 * NOTE(review): error-return statements and goto labels between the
 * visible lines are elided in this view — flow comments are best-effort.
 */
146 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
148 struct sg_table *pages;
/* Alignment beyond the object size cannot be honoured. */
151 if (align > obj->base.size)
/* Already phys-backed: nothing to do. */
154 if (obj->ops == &i915_gem_phys_ops)
/* Only plain shmem objects can be converted. */
157 if (obj->ops != &i915_gem_shmem_ops)
/* Drop all GPU bindings before swapping the backing store. */
160 err = i915_gem_object_unbind(obj);
164 mutex_lock(&obj->mm.lock);
/* Refuse objects marked for discard... */
166 if (obj->mm.madv != I915_MADV_WILLNEED) {
/* ...or with quirked page references... */
171 if (obj->mm.quirked) {
/* ...or with a live CPU mapping of the old pages. */
176 if (obj->mm.mapping) {
/* Detach the shmem pages, then repopulate via the phys ops. */
181 pages = __i915_gem_object_unset_pages(obj);
183 obj->ops = &i915_gem_phys_ops;
185 err = ____i915_gem_object_get_pages(obj);
189 /* Perma-pin (until release) the physical set of pages */
190 __i915_gem_object_pin_pages(obj);
/* Success: free the superseded shmem pages. */
192 if (!IS_ERR_OR_NULL(pages))
193 i915_gem_shmem_ops.put_pages(obj, pages);
194 mutex_unlock(&obj->mm.lock);
/* NOTE(review): failure path — restore the shmem ops and re-attach the
 * original pages so the object is left usable. */
198 obj->ops = &i915_gem_shmem_ops;
199 if (!IS_ERR_OR_NULL(pages)) {
200 unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);
202 __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
205 mutex_unlock(&obj->mm.lock);
209 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
210 #include "selftests/i915_gem_phys.c"