/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/pagevec.h>
#include <linux/swap.h>

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
/*
 * Move pages to the appropriate LRU and release the pagevec, decrementing the
 * refcount of those pages.
 */
static void check_release_pagevec(struct pagevec *pvec)
{
	check_move_unevictable_pages(pvec);
	__pagevec_release(pvec);
	cond_resched();
}
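
/*
 * Callers below batch pages into a pagevec and flush it through this helper;
 * a minimal sketch of that idiom (mirroring the release paths later in this
 * file):
 *
 *	pagevec_init(&pvec);
 *	for_each_sgt_page(page, sgt_iter, st) {
 *		if (!pagevec_add(&pvec, page))
 *			check_release_pagevec(&pvec);
 *	}
 *	if (pagevec_count(&pvec))
 *		check_release_pagevec(&pvec);
 */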
static int shmem_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	const unsigned long page_count = obj->base.size / PAGE_SIZE;
	unsigned long i;
	struct address_space *mapping;
	struct sg_table *st;
	struct scatterlist *sg;
	struct sgt_iter sgt_iter;
	struct page *page;
	unsigned long last_pfn = 0;	/* suppress gcc warning */
	unsigned int max_segment = i915_sg_segment_size();
	unsigned int sg_page_sizes;
	struct pagevec pvec;
	gfp_t noreclaim;
	int ret;
	/*
	 * Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache.
	 */
	GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
	GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);

	/*
	 * If there's no chance of allocating enough pages for the whole
	 * object, bail early.
	 */
	if (page_count > totalram_pages())
		return -ENOMEM;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

rebuild_st:
	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}
	/*
	 * Get the list of pages out of our struct file. They'll be pinned
	 * at this point until we release them.
	 *
	 * Fail silently without starting the shrinker.
	 */
	mapping = obj->base.filp->f_mapping;
	mapping_set_unevictable(mapping);
	noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
	noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;
	for (i = 0; i < page_count; i++) {
		const unsigned int shrink[] = {
			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND,
			0,
		}, *s = shrink;
		gfp_t gfp = noreclaim;

		do {
			cond_resched();
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
			if (!IS_ERR(page))
				break;

			if (!*s) {
				ret = PTR_ERR(page);
				goto err_sg;
			}

			i915_gem_shrink(i915, 2 * page_count, NULL, *s++);
			/*
			 * We've tried hard to allocate the memory by reaping
			 * our own buffer, now let the real VM do its job and
			 * go down in flames if truly OOM.
			 *
			 * However, since graphics tend to be disposable,
			 * defer the OOM here by reporting the ENOMEM back
			 * to userspace.
			 */
			if (!*s) {
				/* reclaim and warn, but no oom */
				gfp = mapping_gfp_mask(mapping);

				/*
				 * Our bo are always dirty and so we require
				 * kswapd to reclaim our pages (direct reclaim
				 * does not effectively begin pageout of our
				 * buffers on its own). However, direct reclaim
				 * only waits for kswapd when under allocation
				 * congestion. So as a result __GFP_RECLAIM is
				 * unreliable and fails to actually reclaim our
				 * dirty pages -- unless you try over and over
				 * again with !__GFP_NORETRY. However, we still
				 * want to fail this allocation rather than
				 * trigger the out-of-memory killer and for
				 * this we want __GFP_RETRY_MAYFAIL.
				 */
				gfp |= __GFP_RETRY_MAYFAIL;
			}
		} while (1);
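
		/*
		 * Illustrative summary of the allocation ladder above (the
		 * exact base mask comes from the mapping, so treat this as a
		 * sketch rather than literal values):
		 *
		 *	pass 0: mapping gfp & ~__GFP_RECLAIM,
		 *		plus __GFP_NORETRY | __GFP_NOWARN
		 *	pass 1: after i915_gem_shrink(BOUND | UNBOUND), retry
		 *		with the full mapping gfp | __GFP_RETRY_MAYFAIL
		 *		(reclaim allowed, but no OOM kill)
		 *	then  : give up and report the error to the caller
		 */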
		if (!i ||
		    sg->length >= max_segment ||
		    page_to_pfn(page) != last_pfn + 1) {
			if (i) {
				sg_page_sizes |= sg->length;
				sg = sg_next(sg);
			}
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);

		/* Check that the i965g/gm workaround works. */
		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
	}
	if (sg) { /* loop terminated early; short sg table */
		sg_page_sizes |= sg->length;
		sg_mark_end(sg);
	}

	/* Trim unused sg entries to avoid wasting memory. */
	i915_sg_trim(st);
	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		/*
		 * DMA remapping failed? One possible cause is that
		 * it could not reserve enough large entries, asking
		 * for PAGE_SIZE chunks instead may be helpful.
		 */
		if (max_segment > PAGE_SIZE) {
			for_each_sgt_page(page, sgt_iter, st)
				put_page(page);
			sg_free_table(st);

			max_segment = PAGE_SIZE;
			goto rebuild_st;
		} else {
			dev_warn(&i915->drm.pdev->dev,
				 "Failed to DMA remap %lu pages\n",
				 page_count);
			goto err_pages;
		}
	}
	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj, st);

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;
err_sg:
	sg_mark_end(sg);
err_pages:
	mapping_clear_unevictable(mapping);
	pagevec_init(&pvec);
	for_each_sgt_page(page, sgt_iter, st) {
		if (!pagevec_add(&pvec, page))
			check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		check_release_pagevec(&pvec);
	sg_free_table(st);
	kfree(st);
	/*
	 * shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	return ret;
}
static void
shmem_truncate(struct drm_i915_gem_object *obj)
{
	/*
	 * Our goal here is to return as much of the memory as possible back
	 * to the system as we are called from OOM. To do this we must
	 * instruct the shmemfs to drop all of its backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
	obj->mm.madv = __I915_MADV_PURGED;
	obj->mm.pages = ERR_PTR(-EFAULT);
}
static void
shmem_writeback(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = SWAP_CLUSTER_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1,
	};
	unsigned long i;

	/*
	 * Leave mmappings intact (GTT will have been revoked on unbinding,
	 * leaving only CPU mmappings around) and add those pages to the LRU
	 * instead of invoking writeback so they are aged and paged out
	 * as normal.
	 */
	mapping = obj->base.filp->f_mapping;

	/* Begin writeback on each dirty page */
	for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) {
		struct page *page;

		page = find_lock_entry(mapping, i);
		if (!page || xa_is_value(page))
			continue;

		if (!page_mapped(page) && clear_page_dirty_for_io(page)) {
			int ret;

			SetPageReclaim(page);
			ret = mapping->a_ops->writepage(page, &wbc);
			if (!PageWriteback(page))
				ClearPageReclaim(page);
			if (!ret)
				goto put;
		}
		unlock_page(page);
put:
		put_page(page);
	}
}
void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				struct sg_table *pages,
				bool needs_clflush)
{
	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);

	if (obj->mm.madv == I915_MADV_DONTNEED)
		obj->mm.dirty = false;

	if (needs_clflush &&
	    (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
	    !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
		drm_clflush_sg(pages);

	__start_cpu_write(obj);
}
static void
shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct pagevec pvec;
	struct page *page;

	__i915_gem_object_release_shmem(obj, pages, true);

	i915_gem_gtt_finish_pages(obj, pages);

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj, pages);

	mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping);

	pagevec_init(&pvec);
	for_each_sgt_page(page, sgt_iter, pages) {
		if (obj->mm.dirty)
			set_page_dirty(page);

		if (obj->mm.madv == I915_MADV_WILLNEED)
			mark_page_accessed(page);

		if (!pagevec_add(&pvec, page))
			check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		check_release_pagevec(&pvec);
	obj->mm.dirty = false;

	sg_free_table(pages);
	kfree(pages);
}
static int
shmem_pwrite(struct drm_i915_gem_object *obj,
	     const struct drm_i915_gem_pwrite *arg)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	char __user *user_data = u64_to_user_ptr(arg->data_ptr);
	u64 remain, offset;
	unsigned int pg;

	/* Caller already validated user args */
	GEM_BUG_ON(!access_ok(user_data, arg->size));

	/*
	 * Before we instantiate/pin the backing store for our use, we
	 * can prepopulate the shmemfs filp efficiently using a write into
	 * the pagecache. We avoid the penalty of instantiating all the
	 * pages, important if the user is just writing to a few and never
	 * uses the object on the GPU, and using a direct write into shmemfs
	 * allows it to avoid the cost of retrieving a page (either swapin
	 * or clearing-before-use) before it is overwritten.
	 */
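
	/*
	 * A note on the mechanism (an assumption based on the generic
	 * pagecache helpers, not something this file defines):
	 * pagecache_write_begin() and pagecache_write_end() dispatch to the
	 * mapping's ->write_begin and ->write_end address_space operations,
	 * so for a shmemfs file they look up or allocate the page at the
	 * given offset and mark it uptodate and dirty once the copy below
	 * has landed.
	 */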
	if (i915_gem_object_has_pages(obj))
		return -ENODEV;

	if (obj->mm.madv != I915_MADV_WILLNEED)
		return -EFAULT;

	/*
	 * Before the pages are instantiated the object is treated as being
	 * in the CPU domain. The pages will be clflushed as required before
	 * use, and we can freely write into the pages directly. If userspace
	 * races pwrite with any other operation, corruption will ensue -
	 * that is userspace's prerogative!
	 */

	remain = arg->size;
	offset = arg->offset;
	pg = offset_in_page(offset);
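
	/*
	 * Worked example (hypothetical numbers) of how the loop below walks
	 * the pagecache: for offset = 0x1800 and size = 0x2000 with 4KiB
	 * pages, pg starts at 0x800, so the copies are 0x800 bytes (to the
	 * end of the first page), then 0x1000 bytes, then the final 0x800
	 * bytes, with pg reset to 0 after the first iteration.
	 */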
	do {
		unsigned int len, unwritten;
		struct page *page;
		void *data, *vaddr;
		int err;
		char c;

		len = PAGE_SIZE - pg;
		if (len > remain)
			len = remain;

		/* Prefault the user page to reduce potential recursion */
		err = __get_user(c, user_data);
		if (err)
			return err;

		err = __get_user(c, user_data + len - 1);
		if (err)
			return err;

		err = pagecache_write_begin(obj->base.filp, mapping,
					    offset, len, 0,
					    &page, &data);
		if (err < 0)
			return err;

		vaddr = kmap_atomic(page);
		unwritten = __copy_from_user_inatomic(vaddr + pg,
						      user_data,
						      len);
		kunmap_atomic(vaddr);

		err = pagecache_write_end(obj->base.filp, mapping,
					  offset, len, len - unwritten,
					  page, data);
		if (err < 0)
			return err;

		/* We don't handle -EFAULT, leave it to the caller to check */
		if (unwritten)
			return -ENODEV;

		remain -= len;
		user_data += len;
		offset += len;
		pg = 0;
	} while (remain);

	return 0;
}
const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE,

	.get_pages = shmem_get_pages,
	.put_pages = shmem_put_pages,
	.truncate = shmem_truncate,
	.writeback = shmem_writeback,

	.pwrite = shmem_pwrite,
};
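
/*
 * How this table is consumed (a sketch based on the shared GEM object layer,
 * not anything defined in this file): i915_gem_object_init() below stores
 * the ops in obj->ops, and the common object code dispatches through it,
 * e.g. pinning pages ends up in .get_pages (shmem_get_pages above) while the
 * shrinker uses .writeback and .truncate to swap out or purge idle objects.
 */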
static int create_shmem(struct drm_i915_private *i915,
			struct drm_gem_object *obj,
			size_t size)
{
	unsigned long flags = VM_NORESERVE;
	struct file *filp;

	drm_gem_private_object_init(&i915->drm, obj, size);

	if (i915->mm.gemfs)
		filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
						 flags);
	else
		filp = shmem_file_setup("i915", size, flags);

	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915, u64 size)
{
	struct drm_i915_gem_object *obj;
	struct address_space *mapping;
	unsigned int cache_level;
	gfp_t mask;
	int ret;
	/* There is a prevalence of the assumption that we fit the object's
	 * page count inside a 32bit _signed_ variable. Let's document this and
	 * catch if we ever need to fix it. In the meantime, if you do spot
	 * such a local variable, please consider fixing!
	 */
	if (size >> PAGE_SHIFT > INT_MAX)
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);
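
	/*
	 * Worked example of the first check (hypothetical sizes, 4KiB pages):
	 * a 16GiB object has 4M pages, which fits comfortably in a signed
	 * 32bit count, whereas an 8TiB request gives 2^31 pages, one past
	 * INT_MAX, and is rejected with -E2BIG.
	 */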
	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	ret = create_shmem(i915, &obj->base, size);
	if (ret)
		goto fail;
	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
	if (IS_I965GM(i915) || IS_I965G(i915)) {
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

	mapping = obj->base.filp->f_mapping;
	mapping_set_gfp_mask(mapping, mask);
	GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
	i915_gem_object_init(obj, &i915_gem_shmem_ops);

	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	if (HAS_LLC(i915))
		/* On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached. Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache. This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		cache_level = I915_CACHE_LLC;
	else
		cache_level = I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	trace_i915_gem_object_create(obj);

	return obj;

fail:
	i915_gem_object_free(obj);
	return ERR_PTR(ret);
}
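
/*
 * Hypothetical caller sketch (not part of this file) showing the usual life
 * cycle of a shmem-backed object; i915_gem_object_pin_pages() is the
 * common-layer entry point that ends up in shmem_get_pages() above:
 *
 *	obj = i915_gem_object_create_shmem(i915, SZ_2M);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *
 *	err = i915_gem_object_pin_pages(obj);
 *	...
 *	i915_gem_object_unpin_pages(obj);
 *	i915_gem_object_put(obj);
 */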
/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
				       const void *data, size_t size)
{
	struct drm_i915_gem_object *obj;
	struct file *file;
	size_t offset;
	int err;

	obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE));
	if (IS_ERR(obj))
		return obj;

	GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);

	file = obj->base.filp;
	offset = 0;
	do {
		unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
		struct page *page;
		void *pgdata, *vaddr;

		err = pagecache_write_begin(file, file->f_mapping,
					    offset, len, 0,
					    &page, &pgdata);
		if (err < 0)
			goto fail;

		vaddr = kmap(page);
		memcpy(vaddr, data, len);
		kunmap(page);

		err = pagecache_write_end(file, file->f_mapping,
					  offset, len, len,
					  page, pgdata);
		if (err < 0)
			goto fail;

		size -= len;
		data += len;
		offset += len;
	} while (size);

	return obj;

fail:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}