/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
#include <drm/drm_vma_manager.h>
#include <drm/drm_pci.h>
#include <drm/i915_drm.h>
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/mman.h>

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gemfs.h"
#include "i915_globals.h"
#include "i915_reset.h"
#include "i915_trace.h"
#include "i915_vgpu.h"

#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_mocs.h"
#include "intel_workarounds.h"
static void i915_gem_flush_free_objects(struct drm_i915_private *i915);

static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (obj->cache_dirty)
		return false;
	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
		return true;

	return obj->pin_global; /* currently in use by HW, keep flushed */
}

static int
insert_mappable_node(struct i915_ggtt *ggtt,
		     struct drm_mm_node *node, u32 size)
{
	memset(node, 0, sizeof(*node));
	return drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
					   size, 0, I915_COLOR_UNEVICTABLE,
					   0, ggtt->mappable_end,
					   DRM_MM_INSERT_LOW);
}

static void
remove_mappable_node(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  u64 size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     u64 size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}
104 static void __i915_gem_park(struct drm_i915_private *i915)
106 intel_wakeref_t wakeref;
110 lockdep_assert_held(&i915->drm.struct_mutex);
111 GEM_BUG_ON(i915->gt.active_requests);
112 GEM_BUG_ON(!list_empty(&i915->gt.active_rings));
	/*
	 * Be paranoid and flush a concurrent interrupt to make sure
	 * we don't reactivate any irq tasklets after parking.
	 *
	 * FIXME: Note that even though we have waited for execlists to be idle,
	 * there may still be an in-flight interrupt even though the CSB
	 * is now empty. synchronize_irq() makes sure that a residual interrupt
	 * is completed before we continue, but it doesn't prevent the HW from
	 * raising a spurious interrupt later. To complete the shield we should
	 * coordinate disabling the CS irq with flushing the interrupts.
	 */
	synchronize_irq(i915->drm.irq);
130 intel_engines_park(i915);
131 i915_timelines_park(i915);
133 i915_pmu_gt_parked(i915);
134 i915_vma_parked(i915);
136 wakeref = fetch_and_zero(&i915->gt.awake);
137 GEM_BUG_ON(!wakeref);
139 if (INTEL_GEN(i915) >= 6)
142 intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);
147 void i915_gem_park(struct drm_i915_private *i915)
151 lockdep_assert_held(&i915->drm.struct_mutex);
152 GEM_BUG_ON(i915->gt.active_requests);
157 /* Defer the actual call to __i915_gem_park() to prevent ping-pongs */
158 mod_delayed_work(i915->wq, &i915->gt.idle_work, msecs_to_jiffies(100));
161 void i915_gem_unpark(struct drm_i915_private *i915)
165 lockdep_assert_held(&i915->drm.struct_mutex);
166 GEM_BUG_ON(!i915->gt.active_requests);
167 assert_rpm_wakelock_held(i915);
	/*
	 * It seems that the DMC likes to transition between the DC states a lot
	 * when there are no connected displays (no active power domains) during
	 * command submission.
	 *
	 * This activity has negative impact on the performance of the chip with
	 * huge latencies observed in the interrupt handler and elsewhere.
	 *
	 * Work around it by grabbing a GT IRQ power domain whilst there is any
	 * GT activity, preventing any DC state transitions.
	 */
	i915->gt.awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
184 GEM_BUG_ON(!i915->gt.awake);
186 i915_globals_unpark();
188 intel_enable_gt_powersave(i915);
189 i915_update_gfx_val(i915);
190 if (INTEL_GEN(i915) >= 6)
192 i915_pmu_gt_unparked(i915);
194 intel_engines_unpark(i915);
196 i915_queue_hangcheck(i915);
198 queue_delayed_work(i915->wq,
199 &i915->gt.retire_work,
200 round_jiffies_up_relative(HZ));
204 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
205 struct drm_file *file)
207 struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
208 struct drm_i915_gem_get_aperture *args = data;
209 struct i915_vma *vma;
212 mutex_lock(&ggtt->vm.mutex);
214 pinned = ggtt->vm.reserved;
215 list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
216 if (i915_vma_is_pinned(vma))
217 pinned += vma->node.size;
219 mutex_unlock(&ggtt->vm.mutex);
221 args->aper_size = ggtt->vm.total;
222 args->aper_available_size = args->aper_size - pinned;
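	/*
	 * Illustrative only (not part of this file's build): a minimal
	 * userspace sketch of how this query is consumed, assuming an
	 * already-open DRM fd; the struct and ioctl number come from
	 * include/uapi/drm/i915_drm.h.
	 *
	 *	struct drm_i915_gem_get_aperture aperture = {};
	 *
	 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture) == 0)
	 *		printf("GGTT total %llu, available %llu\n",
	 *		       (unsigned long long)aperture.aper_size,
	 *		       (unsigned long long)aperture.aper_available_size);
	 */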
227 static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
229 struct address_space *mapping = obj->base.filp->f_mapping;
230 drm_dma_handle_t *phys;
232 struct scatterlist *sg;
237 if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
	/* Always aligning to the object size, allows a single allocation
	 * to handle all possible callers, and given typical object sizes,
	 * the alignment of the buddy allocation will naturally match.
	 */
244 phys = drm_pci_alloc(obj->base.dev,
245 roundup_pow_of_two(obj->base.size),
246 roundup_pow_of_two(obj->base.size));
251 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
255 page = shmem_read_mapping_page(mapping, i);
261 src = kmap_atomic(page);
262 memcpy(vaddr, src, PAGE_SIZE);
263 drm_clflush_virt_range(vaddr, PAGE_SIZE);
270 i915_gem_chipset_flush(to_i915(obj->base.dev));
272 st = kmalloc(sizeof(*st), GFP_KERNEL);
278 if (sg_alloc_table(st, 1, GFP_KERNEL)) {
286 sg->length = obj->base.size;
288 sg_dma_address(sg) = phys->busaddr;
289 sg_dma_len(sg) = obj->base.size;
291 obj->phys_handle = phys;
293 __i915_gem_object_set_pages(obj, st, sg->length);
298 drm_pci_free(obj->base.dev, phys);
303 static void __start_cpu_write(struct drm_i915_gem_object *obj)
305 obj->read_domains = I915_GEM_DOMAIN_CPU;
306 obj->write_domain = I915_GEM_DOMAIN_CPU;
307 if (cpu_write_needs_clflush(obj))
308 obj->cache_dirty = true;
312 __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
313 struct sg_table *pages,
316 GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
318 if (obj->mm.madv == I915_MADV_DONTNEED)
319 obj->mm.dirty = false;
322 (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
323 !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
324 drm_clflush_sg(pages);
326 __start_cpu_write(obj);
330 i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
331 struct sg_table *pages)
333 __i915_gem_object_release_shmem(obj, pages, false);
336 struct address_space *mapping = obj->base.filp->f_mapping;
337 char *vaddr = obj->phys_handle->vaddr;
340 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
344 page = shmem_read_mapping_page(mapping, i);
348 dst = kmap_atomic(page);
349 drm_clflush_virt_range(vaddr, PAGE_SIZE);
350 memcpy(dst, vaddr, PAGE_SIZE);
353 set_page_dirty(page);
354 if (obj->mm.madv == I915_MADV_WILLNEED)
355 mark_page_accessed(page);
359 obj->mm.dirty = false;
362 sg_free_table(pages);
365 drm_pci_free(obj->base.dev, obj->phys_handle);
369 i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
371 i915_gem_object_unpin_pages(obj);
374 static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
375 .get_pages = i915_gem_object_get_pages_phys,
376 .put_pages = i915_gem_object_put_pages_phys,
377 .release = i915_gem_object_release_phys,
380 static const struct drm_i915_gem_object_ops i915_gem_object_ops;
382 int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
384 struct i915_vma *vma;
385 LIST_HEAD(still_in_list);
388 lockdep_assert_held(&obj->base.dev->struct_mutex);
	/* Closed vma are removed from the obj->vma_list - but they may
	 * still have an active binding on the object. To remove those we
	 * must wait for all rendering to complete to the object (as unbinding
	 * must anyway), and retire the requests.
	 */
395 ret = i915_gem_object_set_to_cpu_domain(obj, false);
399 spin_lock(&obj->vma.lock);
400 while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
403 list_move_tail(&vma->obj_link, &still_in_list);
404 spin_unlock(&obj->vma.lock);
406 ret = i915_vma_unbind(vma);
408 spin_lock(&obj->vma.lock);
410 list_splice(&still_in_list, &obj->vma.list);
411 spin_unlock(&obj->vma.lock);
417 i915_gem_object_wait_fence(struct dma_fence *fence,
421 struct i915_request *rq;
423 BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);
425 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
428 if (!dma_fence_is_i915(fence))
429 return dma_fence_wait_timeout(fence,
430 flags & I915_WAIT_INTERRUPTIBLE,
433 rq = to_request(fence);
434 if (i915_request_completed(rq))
437 timeout = i915_request_wait(rq, flags, timeout);
440 if (flags & I915_WAIT_LOCKED && i915_request_completed(rq))
441 i915_request_retire_upto(rq);
447 i915_gem_object_wait_reservation(struct reservation_object *resv,
451 unsigned int seq = __read_seqcount_begin(&resv->seq);
452 struct dma_fence *excl;
453 bool prune_fences = false;
455 if (flags & I915_WAIT_ALL) {
456 struct dma_fence **shared;
457 unsigned int count, i;
460 ret = reservation_object_get_fences_rcu(resv,
461 &excl, &count, &shared);
465 for (i = 0; i < count; i++) {
466 timeout = i915_gem_object_wait_fence(shared[i],
471 dma_fence_put(shared[i]);
474 for (; i < count; i++)
475 dma_fence_put(shared[i]);
		/*
		 * If both shared fences and an exclusive fence exist,
		 * then by construction the shared fences must be later
		 * than the exclusive fence. If we successfully wait for
		 * all the shared fences, we know that the exclusive fence
		 * must also be signaled. If all the shared fences are
		 * signaled, we can prune the array and recover the
		 * floating references on the fences/requests.
		 */
		prune_fences = count && timeout >= 0;
489 excl = reservation_object_get_excl_rcu(resv);
492 if (excl && timeout >= 0)
493 timeout = i915_gem_object_wait_fence(excl, flags, timeout);
	/*
	 * Opportunistically prune the fences iff we know they have *all* been
	 * signaled and that the reservation object has not been changed (i.e.
	 * no new fences have been added).
	 */
502 if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) {
503 if (reservation_object_trylock(resv)) {
504 if (!__read_seqcount_retry(&resv->seq, seq))
505 reservation_object_add_excl_fence(resv, NULL);
506 reservation_object_unlock(resv);
513 static void __fence_set_priority(struct dma_fence *fence,
514 const struct i915_sched_attr *attr)
516 struct i915_request *rq;
517 struct intel_engine_cs *engine;
519 if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence))
522 rq = to_request(fence);
526 rcu_read_lock(); /* RCU serialisation for set-wedged protection */
527 if (engine->schedule)
528 engine->schedule(rq, attr);
530 local_bh_enable(); /* kick the tasklets if queues were reprioritised */
533 static void fence_set_priority(struct dma_fence *fence,
534 const struct i915_sched_attr *attr)
536 /* Recurse once into a fence-array */
537 if (dma_fence_is_array(fence)) {
538 struct dma_fence_array *array = to_dma_fence_array(fence);
541 for (i = 0; i < array->num_fences; i++)
542 __fence_set_priority(array->fences[i], attr);
544 __fence_set_priority(fence, attr);
549 i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
551 const struct i915_sched_attr *attr)
553 struct dma_fence *excl;
555 if (flags & I915_WAIT_ALL) {
556 struct dma_fence **shared;
557 unsigned int count, i;
560 ret = reservation_object_get_fences_rcu(obj->resv,
561 &excl, &count, &shared);
565 for (i = 0; i < count; i++) {
566 fence_set_priority(shared[i], attr);
567 dma_fence_put(shared[i]);
572 excl = reservation_object_get_excl_rcu(obj->resv);
576 fence_set_priority(excl, attr);
/**
 * Waits for rendering to the object to be completed
 * @obj: i915 gem object
 * @flags: how to wait (under a lock, for all rendering or just for writes etc)
 * @timeout: how long to wait
 */
589 i915_gem_object_wait(struct drm_i915_gem_object *obj,
594 GEM_BUG_ON(timeout < 0);
596 timeout = i915_gem_object_wait_reservation(obj->resv, flags, timeout);
597 return timeout < 0 ? timeout : 0;
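/*
 * A typical caller-side sketch (this pattern is used verbatim elsewhere in
 * this file): wait interruptibly for all outstanding rendering, with no
 * timeout bound.
 *
 *	ret = i915_gem_object_wait(obj,
 *				   I915_WAIT_INTERRUPTIBLE |
 *				   I915_WAIT_ALL,
 *				   MAX_SCHEDULE_TIMEOUT);
 *	if (ret)
 *		return ret;
 */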
601 i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
602 struct drm_i915_gem_pwrite *args,
603 struct drm_file *file)
605 void *vaddr = obj->phys_handle->vaddr + args->offset;
606 char __user *user_data = u64_to_user_ptr(args->data_ptr);
	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
611 intel_fb_obj_invalidate(obj, ORIGIN_CPU);
612 if (copy_from_user(vaddr, user_data, args->size))
615 drm_clflush_virt_range(vaddr, args->size);
616 i915_gem_chipset_flush(to_i915(obj->base.dev));
618 intel_fb_obj_flush(obj, ORIGIN_CPU);
623 i915_gem_create(struct drm_file *file,
624 struct drm_i915_private *dev_priv,
628 struct drm_i915_gem_object *obj;
633 size = round_up(*size_p, PAGE_SIZE);
637 /* Allocate the new object */
638 obj = i915_gem_object_create(dev_priv, size);
642 ret = drm_gem_handle_create(file, &obj->base, &handle);
643 /* drop reference from allocate - handle holds it now */
644 i915_gem_object_put(obj);
649 *size_p = obj->base.size;
654 i915_gem_dumb_create(struct drm_file *file,
655 struct drm_device *dev,
656 struct drm_mode_create_dumb *args)
658 /* have to work out size/pitch and return them */
659 args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
660 args->size = args->pitch * args->height;
661 return i915_gem_create(file, to_i915(dev),
662 &args->size, &args->handle);
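/*
 * Worked example of the pitch/size computation above (illustrative only):
 * for a 1920x1080 buffer at 32bpp, the pitch is ALIGN(1920 * 4, 64) = 7680
 * bytes and the total size is 7680 * 1080 = 8294400 bytes, which
 * i915_gem_create() then rounds up to a whole number of pages.
 */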
665 static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
667 return !(obj->cache_level == I915_CACHE_NONE ||
668 obj->cache_level == I915_CACHE_WT);
/**
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
678 i915_gem_create_ioctl(struct drm_device *dev, void *data,
679 struct drm_file *file)
681 struct drm_i915_private *dev_priv = to_i915(dev);
682 struct drm_i915_gem_create *args = data;
684 i915_gem_flush_free_objects(dev_priv);
686 return i915_gem_create(file, dev_priv,
687 &args->size, &args->handle);
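/*
 * Illustrative only (not part of this file's build): minimal userspace usage
 * of this ioctl, assuming an already-open DRM fd; definitions are from
 * include/uapi/drm/i915_drm.h.
 *
 *	struct drm_i915_gem_create create = {
 *		.size = 4096,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0)
 *		printf("new GEM handle %u, size %llu\n",
 *		       create.handle, (unsigned long long)create.size);
 */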
690 static inline enum fb_op_origin
691 fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
693 return (domain == I915_GEM_DOMAIN_GTT ?
694 obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
697 void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
699 intel_wakeref_t wakeref;
	/*
	 * No actual flushing is required for the GTT write domain for reads
	 * from the GTT domain. Writes to it "immediately" go to main memory
	 * as far as we know, so there's no chipset flush. It also doesn't
	 * land in the GPU render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GTT itself.
	 *
	 * We also have to wait a bit for the writes to land from the GTT.
	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
	 * timing. This issue has only been observed when switching quickly
	 * between GTT writes and CPU reads from inside the kernel on recent hw,
	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
	 * system agents we cannot reproduce this behaviour, until Cannonlake
	 * becomes coherent).
	 */
722 if (INTEL_INFO(dev_priv)->has_coherent_ggtt)
725 i915_gem_chipset_flush(dev_priv);
727 with_intel_runtime_pm(dev_priv, wakeref) {
728 spin_lock_irq(&dev_priv->uncore.lock);
730 POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE));
732 spin_unlock_irq(&dev_priv->uncore.lock);
737 flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
739 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
740 struct i915_vma *vma;
742 if (!(obj->write_domain & flush_domains))
745 switch (obj->write_domain) {
746 case I915_GEM_DOMAIN_GTT:
747 i915_gem_flush_ggtt_writes(dev_priv);
749 intel_fb_obj_flush(obj,
750 fb_write_origin(obj, I915_GEM_DOMAIN_GTT));
752 for_each_ggtt_vma(vma, obj) {
756 i915_vma_unset_ggtt_write(vma);
760 case I915_GEM_DOMAIN_WC:
764 case I915_GEM_DOMAIN_CPU:
765 i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
768 case I915_GEM_DOMAIN_RENDER:
769 if (gpu_write_needs_clflush(obj))
770 obj->cache_dirty = true;
774 obj->write_domain = 0;
/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
782 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
783 unsigned int *needs_clflush)
787 lockdep_assert_held(&obj->base.dev->struct_mutex);
790 if (!i915_gem_object_has_struct_page(obj))
793 ret = i915_gem_object_wait(obj,
794 I915_WAIT_INTERRUPTIBLE |
796 MAX_SCHEDULE_TIMEOUT);
800 ret = i915_gem_object_pin_pages(obj);
804 if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ ||
805 !static_cpu_has(X86_FEATURE_CLFLUSH)) {
806 ret = i915_gem_object_set_to_cpu_domain(obj, false);
813 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
	/* If we're not in the cpu read domain, set ourself into the gtt
	 * read domain and manually flush cachelines (if required). This
	 * optimizes for the case when the gpu will dirty the data
	 * anyway again before the next pread happens.
	 */
820 if (!obj->cache_dirty &&
821 !(obj->read_domains & I915_GEM_DOMAIN_CPU))
822 *needs_clflush = CLFLUSH_BEFORE;
825 /* return with the pages pinned */
829 i915_gem_object_unpin_pages(obj);
833 int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
834 unsigned int *needs_clflush)
838 lockdep_assert_held(&obj->base.dev->struct_mutex);
841 if (!i915_gem_object_has_struct_page(obj))
844 ret = i915_gem_object_wait(obj,
845 I915_WAIT_INTERRUPTIBLE |
848 MAX_SCHEDULE_TIMEOUT);
852 ret = i915_gem_object_pin_pages(obj);
856 if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE ||
857 !static_cpu_has(X86_FEATURE_CLFLUSH)) {
858 ret = i915_gem_object_set_to_cpu_domain(obj, true);
865 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
	/* If we're not in the cpu write domain, set ourself into the
	 * gtt write domain and manually flush cachelines (as required).
	 * This optimizes for the case when the gpu will use the data
	 * right away and we therefore have to clflush anyway.
	 */
872 if (!obj->cache_dirty) {
873 *needs_clflush |= CLFLUSH_AFTER;
		/*
		 * Same trick applies to invalidate partially written
		 * cachelines read before writing.
		 */
879 if (!(obj->read_domains & I915_GEM_DOMAIN_CPU))
880 *needs_clflush |= CLFLUSH_BEFORE;
884 intel_fb_obj_invalidate(obj, ORIGIN_CPU);
885 obj->mm.dirty = true;
886 /* return with the pages pinned */
890 i915_gem_object_unpin_pages(obj);
895 shmem_pread(struct page *page, int offset, int len, char __user *user_data,
904 drm_clflush_virt_range(vaddr + offset, len);
906 ret = __copy_to_user(user_data, vaddr + offset, len);
910 return ret ? -EFAULT : 0;
914 i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
915 struct drm_i915_gem_pread *args)
917 char __user *user_data;
919 unsigned int needs_clflush;
920 unsigned int idx, offset;
923 ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
927 ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
928 mutex_unlock(&obj->base.dev->struct_mutex);
933 user_data = u64_to_user_ptr(args->data_ptr);
934 offset = offset_in_page(args->offset);
935 for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
936 struct page *page = i915_gem_object_get_page(obj, idx);
937 unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
939 ret = shmem_pread(page, offset, length, user_data,
949 i915_gem_obj_finish_shmem_access(obj);
954 gtt_user_read(struct io_mapping *mapping,
955 loff_t base, int offset,
956 char __user *user_data, int length)
959 unsigned long unwritten;
961 /* We can use the cpu mem copy function because this is X86. */
962 vaddr = io_mapping_map_atomic_wc(mapping, base);
963 unwritten = __copy_to_user_inatomic(user_data,
964 (void __force *)vaddr + offset,
966 io_mapping_unmap_atomic(vaddr);
968 vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
969 unwritten = copy_to_user(user_data,
970 (void __force *)vaddr + offset,
972 io_mapping_unmap(vaddr);
978 i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
979 const struct drm_i915_gem_pread *args)
981 struct drm_i915_private *i915 = to_i915(obj->base.dev);
982 struct i915_ggtt *ggtt = &i915->ggtt;
983 intel_wakeref_t wakeref;
984 struct drm_mm_node node;
985 struct i915_vma *vma;
986 void __user *user_data;
990 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
994 wakeref = intel_runtime_pm_get(i915);
995 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
1000 node.start = i915_ggtt_offset(vma);
1001 node.allocated = false;
1002 ret = i915_vma_put_fence(vma);
1004 i915_vma_unpin(vma);
1009 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
1012 GEM_BUG_ON(!node.allocated);
1015 ret = i915_gem_object_set_to_gtt_domain(obj, false);
1019 mutex_unlock(&i915->drm.struct_mutex);
1021 user_data = u64_to_user_ptr(args->data_ptr);
1022 remain = args->size;
1023 offset = args->offset;
1025 while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
1032 u32 page_base = node.start;
1033 unsigned page_offset = offset_in_page(offset);
1034 unsigned page_length = PAGE_SIZE - page_offset;
1035 page_length = remain < page_length ? remain : page_length;
1036 if (node.allocated) {
1038 ggtt->vm.insert_page(&ggtt->vm,
1039 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
1040 node.start, I915_CACHE_NONE, 0);
1043 page_base += offset & PAGE_MASK;
1046 if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
1047 user_data, page_length)) {
1052 remain -= page_length;
1053 user_data += page_length;
1054 offset += page_length;
1057 mutex_lock(&i915->drm.struct_mutex);
1059 if (node.allocated) {
1061 ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
1062 remove_mappable_node(&node);
1064 i915_vma_unpin(vma);
1067 intel_runtime_pm_put(i915, wakeref);
1068 mutex_unlock(&i915->drm.struct_mutex);
/**
 * Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
1082 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
1083 struct drm_file *file)
1085 struct drm_i915_gem_pread *args = data;
1086 struct drm_i915_gem_object *obj;
1089 if (args->size == 0)
1092 if (!access_ok(u64_to_user_ptr(args->data_ptr),
1096 obj = i915_gem_object_lookup(file, args->handle);
1100 /* Bounds check source. */
1101 if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
1106 trace_i915_gem_object_pread(obj, args->offset, args->size);
1108 ret = i915_gem_object_wait(obj,
1109 I915_WAIT_INTERRUPTIBLE,
1110 MAX_SCHEDULE_TIMEOUT);
1114 ret = i915_gem_object_pin_pages(obj);
1118 ret = i915_gem_shmem_pread(obj, args);
1119 if (ret == -EFAULT || ret == -ENODEV)
1120 ret = i915_gem_gtt_pread(obj, args);
1122 i915_gem_object_unpin_pages(obj);
1124 i915_gem_object_put(obj);
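/*
 * Illustrative only: userspace sketch of a pread of the first 4096 bytes of
 * an object into a local buffer (fd and handle assumed valid; definitions
 * from include/uapi/drm/i915_drm.h).
 *
 *	char buf[4096];
 *	struct drm_i915_gem_pread pread = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size = sizeof(buf),
 *		.data_ptr = (uintptr_t)buf,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread))
 *		perror("I915_GEM_PREAD");
 */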
/* This is the fast write path which cannot handle
 * page faults in the source data
 */
1133 ggtt_write(struct io_mapping *mapping,
1134 loff_t base, int offset,
1135 char __user *user_data, int length)
1137 void __iomem *vaddr;
1138 unsigned long unwritten;
1140 /* We can use the cpu mem copy function because this is X86. */
1141 vaddr = io_mapping_map_atomic_wc(mapping, base);
1142 unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
1144 io_mapping_unmap_atomic(vaddr);
1146 vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
1147 unwritten = copy_from_user((void __force *)vaddr + offset,
1149 io_mapping_unmap(vaddr);
/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 * @obj: i915 GEM object
 * @args: pwrite arguments structure
 */
1162 i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
1163 const struct drm_i915_gem_pwrite *args)
1165 struct drm_i915_private *i915 = to_i915(obj->base.dev);
1166 struct i915_ggtt *ggtt = &i915->ggtt;
1167 intel_wakeref_t wakeref;
1168 struct drm_mm_node node;
1169 struct i915_vma *vma;
1171 void __user *user_data;
1174 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1178 if (i915_gem_object_has_struct_page(obj)) {
		/*
		 * Avoid waking the device up if we can fallback, as
		 * waking/resuming is very slow (worst-case 10-100 ms
		 * depending on PCI sleeps and our own resume time).
		 * This easily dwarfs any performance advantage from
		 * using the cache bypass of indirect GGTT access.
		 */
		wakeref = intel_runtime_pm_get_if_in_use(i915);
1192 /* No backing pages, no fallback, we must force GGTT access */
1193 wakeref = intel_runtime_pm_get(i915);
1196 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
1201 node.start = i915_ggtt_offset(vma);
1202 node.allocated = false;
1203 ret = i915_vma_put_fence(vma);
1205 i915_vma_unpin(vma);
1210 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
1213 GEM_BUG_ON(!node.allocated);
1216 ret = i915_gem_object_set_to_gtt_domain(obj, true);
1220 mutex_unlock(&i915->drm.struct_mutex);
1222 intel_fb_obj_invalidate(obj, ORIGIN_CPU);
1224 user_data = u64_to_user_ptr(args->data_ptr);
1225 offset = args->offset;
1226 remain = args->size;
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
1234 u32 page_base = node.start;
1235 unsigned int page_offset = offset_in_page(offset);
1236 unsigned int page_length = PAGE_SIZE - page_offset;
1237 page_length = remain < page_length ? remain : page_length;
1238 if (node.allocated) {
1239 wmb(); /* flush the write before we modify the GGTT */
1240 ggtt->vm.insert_page(&ggtt->vm,
1241 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
1242 node.start, I915_CACHE_NONE, 0);
1243 wmb(); /* flush modifications to the GGTT (insert_page) */
1245 page_base += offset & PAGE_MASK;
		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 * If the object is non-shmem backed, we retry again with the
		 * path that handles page fault.
		 */
1253 if (ggtt_write(&ggtt->iomap, page_base, page_offset,
1254 user_data, page_length)) {
1259 remain -= page_length;
1260 user_data += page_length;
1261 offset += page_length;
1263 intel_fb_obj_flush(obj, ORIGIN_CPU);
1265 mutex_lock(&i915->drm.struct_mutex);
1267 if (node.allocated) {
1269 ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
1270 remove_mappable_node(&node);
1272 i915_vma_unpin(vma);
1275 intel_runtime_pm_put(i915, wakeref);
1277 mutex_unlock(&i915->drm.struct_mutex);
/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set.
 */
1287 shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
1288 bool needs_clflush_before,
1289 bool needs_clflush_after)
1296 if (needs_clflush_before)
1297 drm_clflush_virt_range(vaddr + offset, len);
1299 ret = __copy_from_user(vaddr + offset, user_data, len);
1300 if (!ret && needs_clflush_after)
1301 drm_clflush_virt_range(vaddr + offset, len);
1305 return ret ? -EFAULT : 0;
1309 i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
1310 const struct drm_i915_gem_pwrite *args)
1312 struct drm_i915_private *i915 = to_i915(obj->base.dev);
1313 void __user *user_data;
1315 unsigned int partial_cacheline_write;
1316 unsigned int needs_clflush;
1317 unsigned int offset, idx;
1320 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1324 ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
1325 mutex_unlock(&i915->drm.struct_mutex);
	/* If we don't overwrite a cacheline completely we need to be
	 * careful to have up-to-date data by first clflushing. Don't
	 * overcomplicate things and flush the entire page.
	 */
1333 partial_cacheline_write = 0;
1334 if (needs_clflush & CLFLUSH_BEFORE)
1335 partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
1337 user_data = u64_to_user_ptr(args->data_ptr);
1338 remain = args->size;
1339 offset = offset_in_page(args->offset);
1340 for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
1341 struct page *page = i915_gem_object_get_page(obj, idx);
1342 unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
1344 ret = shmem_pwrite(page, offset, length, user_data,
1345 (offset | length) & partial_cacheline_write,
1346 needs_clflush & CLFLUSH_AFTER);
1351 user_data += length;
1355 intel_fb_obj_flush(obj, ORIGIN_CPU);
1356 i915_gem_obj_finish_shmem_access(obj);
/**
 * Writes data to the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
1369 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1370 struct drm_file *file)
1372 struct drm_i915_gem_pwrite *args = data;
1373 struct drm_i915_gem_object *obj;
1376 if (args->size == 0)
1379 if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
1382 obj = i915_gem_object_lookup(file, args->handle);
1386 /* Bounds check destination. */
1387 if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
1392 /* Writes not allowed into this read-only object */
1393 if (i915_gem_object_is_readonly(obj)) {
1398 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
1401 if (obj->ops->pwrite)
1402 ret = obj->ops->pwrite(obj, args);
1406 ret = i915_gem_object_wait(obj,
1407 I915_WAIT_INTERRUPTIBLE |
1409 MAX_SCHEDULE_TIMEOUT);
1413 ret = i915_gem_object_pin_pages(obj);
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
1424 if (!i915_gem_object_has_struct_page(obj) ||
1425 cpu_write_needs_clflush(obj))
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case.
		 */
1430 ret = i915_gem_gtt_pwrite_fast(obj, args);
1432 if (ret == -EFAULT || ret == -ENOSPC) {
1433 if (obj->phys_handle)
1434 ret = i915_gem_phys_pwrite(obj, args, file);
1436 ret = i915_gem_shmem_pwrite(obj, args);
1439 i915_gem_object_unpin_pages(obj);
1441 i915_gem_object_put(obj);
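/*
 * Illustrative only: userspace sketch of the matching pwrite, copying a
 * local buffer into the start of an object (fd, handle and buf assumed
 * valid; definitions from include/uapi/drm/i915_drm.h).
 *
 *	struct drm_i915_gem_pwrite pwrite = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size = sizeof(buf),
 *		.data_ptr = (uintptr_t)buf,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite))
 *		perror("I915_GEM_PWRITE");
 */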
1445 static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
1447 struct drm_i915_private *i915 = to_i915(obj->base.dev);
1448 struct list_head *list;
1449 struct i915_vma *vma;
1451 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
1453 mutex_lock(&i915->ggtt.vm.mutex);
1454 for_each_ggtt_vma(vma, obj) {
1455 if (!drm_mm_node_allocated(&vma->node))
1458 list_move_tail(&vma->vm_link, &vma->vm->bound_list);
1460 mutex_unlock(&i915->ggtt.vm.mutex);
1462 spin_lock(&i915->mm.obj_lock);
1463 list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
1464 list_move_tail(&obj->mm.link, list);
1465 spin_unlock(&i915->mm.obj_lock);
/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
1476 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1477 struct drm_file *file)
1479 struct drm_i915_gem_set_domain *args = data;
1480 struct drm_i915_gem_object *obj;
1481 u32 read_domains = args->read_domains;
1482 u32 write_domain = args->write_domain;
1485 /* Only handle setting domains to types used by the CPU. */
1486 if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
	/*
	 * Having something in the write domain implies it's in the read
	 * domain, and only that read domain. Enforce that in the request.
	 */
1493 if (write_domain && read_domains != write_domain)
1499 obj = i915_gem_object_lookup(file, args->handle);
	/*
	 * Already in the desired write domain? Nothing for us to do!
	 *
	 * We apply a little bit of cunning here to catch a broader set of
	 * no-ops. If obj->write_domain is set, we must be in the same
	 * obj->read_domains, and only that domain. Therefore, if that
	 * obj->write_domain matches the request read_domains, we are
	 * already in the same read/write domain and can skip the operation,
	 * without having to further check the requested write_domain.
	 */
1513 if (READ_ONCE(obj->write_domain) == read_domains) {
	/*
	 * Try to flush the object off the GPU without holding the lock.
	 * We will repeat the flush holding the lock in the normal manner
	 * to catch cases where we are gazumped.
	 */
1523 err = i915_gem_object_wait(obj,
1524 I915_WAIT_INTERRUPTIBLE |
1525 I915_WAIT_PRIORITY |
1526 (write_domain ? I915_WAIT_ALL : 0),
1527 MAX_SCHEDULE_TIMEOUT);
	/*
	 * Proxy objects do not control access to the backing storage, ergo
	 * they cannot be used as a means to manipulate the cache domain
	 * tracking for that backing storage. The proxy object is always
	 * considered to be outside of any cache domain.
	 */
1537 if (i915_gem_object_is_proxy(obj)) {
	/*
	 * Flush and acquire obj->pages so that we are coherent through
	 * direct access in memory with previous cached writes through
	 * shmemfs and that our cache domain tracking remains valid.
	 * For example, if the obj->filp was moved to swap without us
	 * being notified and releasing the pages, we would mistakenly
	 * continue to assume that the obj remained out of the CPU cached
	 * domain.
	 */
1555 err = i915_mutex_lock_interruptible(dev);
1559 if (read_domains & I915_GEM_DOMAIN_WC)
1560 err = i915_gem_object_set_to_wc_domain(obj, write_domain);
1561 else if (read_domains & I915_GEM_DOMAIN_GTT)
1562 err = i915_gem_object_set_to_gtt_domain(obj, write_domain);
1564 err = i915_gem_object_set_to_cpu_domain(obj, write_domain);
1566 /* And bump the LRU for this access */
1567 i915_gem_object_bump_inactive_ggtt(obj);
1569 mutex_unlock(&dev->struct_mutex);
1571 if (write_domain != 0)
1572 intel_fb_obj_invalidate(obj,
1573 fb_write_origin(obj, write_domain));
1576 i915_gem_object_unpin_pages(obj);
1578 i915_gem_object_put(obj);
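/*
 * Illustrative only: userspace sketch of moving an object into the CPU
 * domain for writing before touching a CPU mmap of it (fd and handle
 * assumed valid; definitions from include/uapi/drm/i915_drm.h).
 *
 *	struct drm_i915_gem_set_domain set_domain = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_CPU,
 *		.write_domain = I915_GEM_DOMAIN_CPU,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain))
 *		perror("I915_GEM_SET_DOMAIN");
 */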
/**
 * Called when user space has done writes to this buffer
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
1589 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1590 struct drm_file *file)
1592 struct drm_i915_gem_sw_finish *args = data;
1593 struct drm_i915_gem_object *obj;
1595 obj = i915_gem_object_lookup(file, args->handle);
	/*
	 * Proxy objects are barred from CPU access, so there is no
	 * need to ban sw_finish as it is a nop.
	 */
1604 /* Pinned buffers may be scanout, so flush the cache */
1605 i915_gem_object_flush_if_display(obj);
1606 i915_gem_object_put(obj);
1612 __vma_matches(struct vm_area_struct *vma, struct file *filp,
1613 unsigned long addr, unsigned long size)
1615 if (vma->vm_file != filp)
1618 return vma->vm_start == addr &&
1619 (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
/**
 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
 *			 it is mapped to.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 *
 * IMPORTANT:
 *
 * DRM driver writers who look at this function as an example for how to do GEM
 * mmap support, please don't implement mmap support like here. The modern way
 * to implement DRM mmap support is with an mmap offset ioctl (like
 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
 * That way debug tooling like valgrind will understand what's going on, hiding
 * the mmap call in a driver private ioctl will break that. The i915 driver only
 * does cpu mmaps this way because we didn't know better.
 */
1643 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1644 struct drm_file *file)
1646 struct drm_i915_gem_mmap *args = data;
1647 struct drm_i915_gem_object *obj;
1650 if (args->flags & ~(I915_MMAP_WC))
1653 if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
1656 obj = i915_gem_object_lookup(file, args->handle);
	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
1663 if (!obj->base.filp) {
1668 if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
1673 addr = vm_mmap(obj->base.filp, 0, args->size,
1674 PROT_READ | PROT_WRITE, MAP_SHARED,
1676 if (IS_ERR_VALUE(addr))
1679 if (args->flags & I915_MMAP_WC) {
1680 struct mm_struct *mm = current->mm;
1681 struct vm_area_struct *vma;
1683 if (down_write_killable(&mm->mmap_sem)) {
1687 vma = find_vma(mm, addr);
1688 if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
1690 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1693 up_write(&mm->mmap_sem);
1694 if (IS_ERR_VALUE(addr))
1697 /* This may race, but that's ok, it only gets set */
1698 WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
1700 i915_gem_object_put(obj);
1702 args->addr_ptr = (u64)addr;
1706 i915_gem_object_put(obj);
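/*
 * Illustrative only: userspace sketch of a write-combined CPU mmap via this
 * ioctl (fd, handle and obj_size assumed valid; I915_MMAP_WC requires PAT
 * support, as checked above).
 *
 *	struct drm_i915_gem_mmap mmap_arg = {
 *		.handle = handle,
 *		.size = obj_size,
 *		.flags = I915_MMAP_WC,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg) == 0) {
 *		void *ptr = (void *)(uintptr_t)mmap_arg.addr_ptr;
 *
 *		memset(ptr, 0, obj_size);
 *		munmap(ptr, obj_size);
 *	}
 */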
1710 static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
1712 return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
/**
 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
 *
 * A history of the GTT mmap interface:
 *
 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
 *     be aligned and suitable for fencing, and still fit into the available
 *     mappable space left by the pinned display objects. A classic problem
 *     we called the page-fault-of-doom where we would ping-pong between
 *     two objects that could not fit inside the GTT and so the memcpy
 *     would page one object in at the expense of the other between every
 *     single byte.
 *
 * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
 *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
 *     object is too large for the available space (or simply too large
 *     for the mappable aperture!), a view is created instead and faulted
 *     into userspace. (This view is aligned and sized appropriately for
 *     fenced access.)
 *
 * 2 - Recognise WC as a separate cache domain so that we can flush the
 *     delayed writes via GTT before performing direct access via WC.
 *
 * 3 - Remove implicit set-domain(GTT) and synchronisation on initial
 *     pagefault; swapin remains transparent.
 *
 * Restrictions:
 *
 *  * snoopable objects cannot be accessed via the GTT. It can cause machine
 *    hangs on some architectures, corruption on others. An attempt to service
 *    a GTT page fault from a snoopable object will generate a SIGBUS.
 *
 *  * the object must be able to fit into RAM (physical memory, though not
 *    limited to the mappable aperture).
 *
 * Caveats:
 *
 *  * a new GTT page fault will synchronize rendering from the GPU and flush
 *    all data to system memory. Subsequent access will not be synchronized.
 *
 *  * all mappings are revoked on runtime device suspend.
 *
 *  * there are only 8, 16 or 32 fence registers to share between all users
 *    (older machines require fence register for display and blitter access
 *    as well). Contention of the fence registers will cause the previous users
 *    to be unmapped and any new access will generate new page faults.
 *
 *  * running out of memory while servicing a fault may generate a SIGBUS,
 *    rather than the expected SIGSEGV.
 */
int i915_gem_mmap_gtt_version(void)
{
	return 3;
}
1771 static inline struct i915_ggtt_view
1772 compute_partial_view(const struct drm_i915_gem_object *obj,
1773 pgoff_t page_offset,
1776 struct i915_ggtt_view view;
1778 if (i915_gem_object_is_tiled(obj))
1779 chunk = roundup(chunk, tile_row_pages(obj));
1781 view.type = I915_GGTT_VIEW_PARTIAL;
1782 view.partial.offset = rounddown(page_offset, chunk);
1784 min_t(unsigned int, chunk,
1785 (obj->base.size >> PAGE_SHIFT) - view.partial.offset);
1787 /* If the partial covers the entire object, just create a normal VMA. */
1788 if (chunk >= obj->base.size >> PAGE_SHIFT)
1789 view.type = I915_GGTT_VIEW_NORMAL;
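	/*
	 * Worked example (illustrative): with MIN_CHUNK_PAGES = 256 (1MiB of
	 * 4KiB pages) and an untiled 4096-page object, a fault on page 1000
	 * yields view.partial.offset = rounddown(1000, 256) = 768 and
	 * view.partial.size = min(256, 4096 - 768) = 256 pages, i.e. a 1MiB
	 * window covering the faulting address.
	 */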
/**
 * i915_gem_fault - fault a page into the GTT
 * @vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace. The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room. So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 *
 * The current feature set supported by i915_gem_fault() and thus GTT mmaps
 * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
 */
1812 vm_fault_t i915_gem_fault(struct vm_fault *vmf)
1814 #define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
1815 struct vm_area_struct *area = vmf->vma;
1816 struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
1817 struct drm_device *dev = obj->base.dev;
1818 struct drm_i915_private *dev_priv = to_i915(dev);
1819 struct i915_ggtt *ggtt = &dev_priv->ggtt;
1820 bool write = area->vm_flags & VM_WRITE;
1821 intel_wakeref_t wakeref;
1822 struct i915_vma *vma;
1823 pgoff_t page_offset;
1827 /* Sanity check that we allow writing into this object */
1828 if (i915_gem_object_is_readonly(obj) && write)
1829 return VM_FAULT_SIGBUS;
1831 /* We don't use vmf->pgoff since that has the fake offset */
1832 page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
1834 trace_i915_gem_object_fault(obj, page_offset, true, write);
1836 ret = i915_gem_object_pin_pages(obj);
1840 wakeref = intel_runtime_pm_get(dev_priv);
1842 srcu = i915_reset_trylock(dev_priv);
1848 ret = i915_mutex_lock_interruptible(dev);
1852 /* Access to snoopable pages through the GTT is incoherent. */
1853 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
1858 /* Now pin it into the GTT as needed */
1859 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
1864 /* Use a partial view if it is bigger than available space */
1865 struct i915_ggtt_view view =
1866 compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
1869 flags = PIN_MAPPABLE;
1870 if (view.type == I915_GGTT_VIEW_NORMAL)
1871 flags |= PIN_NONBLOCK; /* avoid warnings for pinned */
		/*
		 * Userspace is now writing through an untracked VMA, abandon
		 * all hope that the hardware is able to track future writes.
		 */
1877 obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
1879 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
1880 if (IS_ERR(vma) && !view.type) {
1881 flags = PIN_MAPPABLE;
1882 view.type = I915_GGTT_VIEW_PARTIAL;
1883 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
1891 ret = i915_vma_pin_fence(vma);
1895 /* Finally, remap it using the new GTT offset */
1896 ret = remap_io_mapping(area,
1897 area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
1898 (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
1899 min_t(u64, vma->size, area->vm_end - area->vm_start),
1904 /* Mark as being mmapped into userspace for later revocation */
1905 assert_rpm_wakelock_held(dev_priv);
1906 if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
1907 list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
1908 GEM_BUG_ON(!obj->userfault_count);
1910 i915_vma_set_ggtt_write(vma);
1913 i915_vma_unpin_fence(vma);
1915 __i915_vma_unpin(vma);
1917 mutex_unlock(&dev->struct_mutex);
1919 i915_reset_unlock(dev_priv, srcu);
1921 intel_runtime_pm_put(dev_priv, wakeref);
1922 i915_gem_object_unpin_pages(obj);
		/*
		 * We eat errors when the gpu is terminally wedged to avoid
		 * userspace unduly crashing (gl has no provisions for mmaps to
		 * fail). But any other -EIO isn't ours (e.g. swap in failure)
		 * and so needs to be reported.
		 */
1932 if (!i915_terminally_wedged(dev_priv))
1933 return VM_FAULT_SIGBUS;
1934 /* else: fall through */
		/*
		 * EAGAIN means the gpu is hung and we'll wait for the error
		 * handler to reset everything when re-faulting in
		 * i915_mutex_lock_interruptible.
		 */
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
1949 return VM_FAULT_NOPAGE;
1951 return VM_FAULT_OOM;
1954 return VM_FAULT_SIGBUS;
1956 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
1957 return VM_FAULT_SIGBUS;
1961 static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
1963 struct i915_vma *vma;
1965 GEM_BUG_ON(!obj->userfault_count);
1967 obj->userfault_count = 0;
1968 list_del(&obj->userfault_link);
1969 drm_vma_node_unmap(&obj->base.vma_node,
1970 obj->base.dev->anon_inode->i_mapping);
1972 for_each_ggtt_vma(vma, obj)
1973 i915_vma_unset_userfault(vma);
/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
1991 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1993 struct drm_i915_private *i915 = to_i915(obj->base.dev);
1994 intel_wakeref_t wakeref;
	/* Serialisation between user GTT access and our code depends upon
	 * revoking the CPU's PTE whilst the mutex is held. The next user
	 * pagefault then has to wait until we release the mutex.
	 *
	 * Note that RPM complicates somewhat by adding an additional
	 * requirement that operations to the GGTT be made holding the RPM
	 * wakeref.
	 */
2004 lockdep_assert_held(&i915->drm.struct_mutex);
2005 wakeref = intel_runtime_pm_get(i915);
2007 if (!obj->userfault_count)
2010 __i915_gem_object_release_mmap(obj);
	/* Ensure that the CPU's PTE are revoked and there are not outstanding
	 * memory transactions from userspace before we return. The TLB
	 * flushing implied above by changing the PTE above *should* be
	 * sufficient, an extra barrier here just provides us with a bit
	 * of paranoid documentation about our requirement to serialise
	 * memory writes before touching registers / GSM.
	 */
	wmb();
2022 intel_runtime_pm_put(i915, wakeref);
2025 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
2027 struct drm_i915_gem_object *obj, *on;
	/*
	 * Only called during RPM suspend. All users of the userfault_list
	 * must be holding an RPM wakeref to ensure that this can not
	 * run concurrently with themselves (and use the struct_mutex for
	 * protection between themselves).
	 */
2037 list_for_each_entry_safe(obj, on,
2038 &dev_priv->mm.userfault_list, userfault_link)
2039 __i915_gem_object_release_mmap(obj);
	/* The fence will be lost when the device powers down. If any were
	 * in use by hardware (i.e. they are pinned), we should not be powering
	 * down! All other fences will be reacquired by the user upon waking.
	 */
2045 for (i = 0; i < dev_priv->num_fence_regs; i++) {
2046 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
		/* Ideally we want to assert that the fence register is not
		 * live at this point (i.e. that no piece of code will be
		 * trying to write through fence + GTT, as that both violates
		 * our tracking of activity and associated locking/barriers,
		 * but also is illegal given that the hw is powered down).
		 *
		 * Previously we used reg->pin_count as a "liveness" indicator.
		 * That is not sufficient, and we need a more fine-grained
		 * tool if we want to have a sanity check here.
		 */
2062 GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
2067 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
2069 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2072 err = drm_gem_create_mmap_offset(&obj->base);
2076 /* Attempt to reap some mmap space from dead objects */
2078 err = i915_gem_wait_for_idle(dev_priv,
2079 I915_WAIT_INTERRUPTIBLE,
2080 MAX_SCHEDULE_TIMEOUT);
2084 i915_gem_drain_freed_objects(dev_priv);
2085 err = drm_gem_create_mmap_offset(&obj->base);
2089 } while (flush_delayed_work(&dev_priv->gt.retire_work));
2094 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
2096 drm_gem_free_mmap_offset(&obj->base);
2100 i915_gem_mmap_gtt(struct drm_file *file,
2101 struct drm_device *dev,
2105 struct drm_i915_gem_object *obj;
2108 obj = i915_gem_object_lookup(file, handle);
2112 ret = i915_gem_object_create_mmap_offset(obj);
2114 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
2116 i915_gem_object_put(obj);
/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
2136 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
2137 struct drm_file *file)
2139 struct drm_i915_gem_mmap_gtt *args = data;
2141 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
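	/*
	 * Illustrative only: userspace sketch of the full GTT-mmap flow
	 * described above, assuming fd, handle and obj_size are valid. The
	 * ioctl returns a fake offset which is then passed to mmap() on the
	 * DRM fd itself.
	 *
	 *	struct drm_i915_gem_mmap_gtt mmap_gtt = {
	 *		.handle = handle,
	 *	};
	 *	void *ptr;
	 *
	 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_gtt))
	 *		return -1;
	 *
	 *	ptr = mmap(NULL, obj_size, PROT_READ | PROT_WRITE, MAP_SHARED,
	 *		   fd, mmap_gtt.offset);
	 */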
2144 /* Immediately discard the backing storage */
2146 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
2148 i915_gem_object_free_mmap_offset(obj);
2150 if (obj->base.filp == NULL)
	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
2158 shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
2159 obj->mm.madv = __I915_MADV_PURGED;
2160 obj->mm.pages = ERR_PTR(-EFAULT);
2163 /* Try to discard unwanted pages */
2164 void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
2166 struct address_space *mapping;
2168 lockdep_assert_held(&obj->mm.lock);
2169 GEM_BUG_ON(i915_gem_object_has_pages(obj));
2171 switch (obj->mm.madv) {
2172 case I915_MADV_DONTNEED:
2173 i915_gem_object_truncate(obj);
2174 case __I915_MADV_PURGED:
2178 if (obj->base.filp == NULL)
2181 mapping = obj->base.filp->f_mapping,
2182 invalidate_mapping_pages(mapping, 0, (loff_t)-1);
/*
 * Move pages to appropriate lru and release the pagevec, decrementing the
 * ref count of those pages.
 */
2189 static void check_release_pagevec(struct pagevec *pvec)
2191 check_move_unevictable_pages(pvec);
2192 __pagevec_release(pvec);
2197 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
2198 struct sg_table *pages)
2200 struct sgt_iter sgt_iter;
2201 struct pagevec pvec;
2204 __i915_gem_object_release_shmem(obj, pages, true);
2206 i915_gem_gtt_finish_pages(obj, pages);
2208 if (i915_gem_object_needs_bit17_swizzle(obj))
2209 i915_gem_object_save_bit_17_swizzle(obj, pages);
2211 mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping);
2213 pagevec_init(&pvec);
2214 for_each_sgt_page(page, sgt_iter, pages) {
2216 set_page_dirty(page);
2218 if (obj->mm.madv == I915_MADV_WILLNEED)
2219 mark_page_accessed(page);
2221 if (!pagevec_add(&pvec, page))
2222 check_release_pagevec(&pvec);
2224 if (pagevec_count(&pvec))
2225 check_release_pagevec(&pvec);
2226 obj->mm.dirty = false;
2228 sg_free_table(pages);
2232 static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
2234 struct radix_tree_iter iter;
2238 radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
2239 radix_tree_delete(&obj->mm.get_page.radix, iter.index);
2243 static struct sg_table *
2244 __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
2246 struct drm_i915_private *i915 = to_i915(obj->base.dev);
2247 struct sg_table *pages;
2249 pages = fetch_and_zero(&obj->mm.pages);
2250 if (IS_ERR_OR_NULL(pages))
2253 spin_lock(&i915->mm.obj_lock);
2254 list_del(&obj->mm.link);
2255 spin_unlock(&i915->mm.obj_lock);
2257 if (obj->mm.mapping) {
2260 ptr = page_mask_bits(obj->mm.mapping);
2261 if (is_vmalloc_addr(ptr))
2264 kunmap(kmap_to_page(ptr));
2266 obj->mm.mapping = NULL;
2269 __i915_gem_object_reset_page_iter(obj);
2270 obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
2275 int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
2276 enum i915_mm_subclass subclass)
2278 struct sg_table *pages;
2281 if (i915_gem_object_has_pinned_pages(obj))
2284 GEM_BUG_ON(obj->bind_count);
2286 /* May be called by shrinker from within get_pages() (on another bo) */
2287 mutex_lock_nested(&obj->mm.lock, subclass);
2288 if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
	/*
	 * ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early.
	 */
2298 pages = __i915_gem_object_unset_pages(obj);
	/*
	 * XXX Temporary hijinx to avoid updating all backends to handle
	 * NULL pages. In the future, when we have more asynchronous
	 * get_pages backends we should be better able to handle the
	 * cancellation of the async task in a more uniform manner.
	 */
2306 if (!pages && !i915_gem_object_needs_async_cancel(obj))
2307 pages = ERR_PTR(-EINVAL);
2310 obj->ops->put_pages(obj, pages);
2314 mutex_unlock(&obj->mm.lock);
2319 bool i915_sg_trim(struct sg_table *orig_st)
2321 struct sg_table new_st;
2322 struct scatterlist *sg, *new_sg;
2325 if (orig_st->nents == orig_st->orig_nents)
2328 if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
2331 new_sg = new_st.sgl;
2332 for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
2333 sg_set_page(new_sg, sg_page(sg), sg->length, 0);
2334 sg_dma_address(new_sg) = sg_dma_address(sg);
2335 sg_dma_len(new_sg) = sg_dma_len(sg);
2337 new_sg = sg_next(new_sg);
2339 GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */
2341 sg_free_table(orig_st);
2347 static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2349 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2350 const unsigned long page_count = obj->base.size / PAGE_SIZE;
2352 struct address_space *mapping;
2353 struct sg_table *st;
2354 struct scatterlist *sg;
2355 struct sgt_iter sgt_iter;
2357 unsigned long last_pfn = 0; /* suppress gcc warning */
2358 unsigned int max_segment = i915_sg_segment_size();
2359 unsigned int sg_page_sizes;
2360 struct pagevec pvec;
	/*
	 * Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache.
	 */
2370 GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
	/*
	 * If there's no chance of allocating enough pages for the whole
	 * object, bail early.
	 */
2376 if (page_count > totalram_pages())
2379 st = kmalloc(sizeof(*st), GFP_KERNEL);
2384 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
2390 * Get the list of pages out of our struct file. They'll be pinned
2391 * at this point until we release them.
2393 * Fail silently without starting the shrinker
2395 mapping = obj->base.filp->f_mapping;
2396 mapping_set_unevictable(mapping);
2397 noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
2398 noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
2403 for (i = 0; i < page_count; i++) {
2404 const unsigned int shrink[] = {
2405 I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
2408 gfp_t gfp = noreclaim;
2412 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2417 ret = PTR_ERR(page);
2421 i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++);
2424 * We've tried hard to allocate the memory by reaping
2425 * our own buffer, now let the real VM do its job and
2426 * go down in flames if truly OOM.
2428 * However, since graphics tend to be disposable,
2429 * defer the oom here by reporting the ENOMEM back to userspace.
2433 /* reclaim and warn, but no oom */
2434 gfp = mapping_gfp_mask(mapping);
2437 * Our bo are always dirty and so we require
2438 * kswapd to reclaim our pages (direct reclaim
2439 * does not effectively begin pageout of our
2440 * buffers on its own). However, direct reclaim
2441 * only waits for kswapd when under allocation
2442 * congestion. So as a result __GFP_RECLAIM is
2443 * unreliable and fails to actually reclaim our
2444 * dirty pages -- unless you try over and over
2445 * again with !__GFP_NORETRY. However, we still
2446 * want to fail this allocation rather than
2447 * trigger the out-of-memory killer and for
2448 * this we want __GFP_RETRY_MAYFAIL.
2450 gfp |= __GFP_RETRY_MAYFAIL;
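			/*
			 * Editor's summary of the fallback order sketched
			 * above (hedged, as parts of the loop are elided
			 * here): try a cheap no-reclaim allocation first,
			 * then shrink our own bound/unbound/purgeable
			 * buffers, and only then retry with full direct
			 * reclaim plus __GFP_RETRY_MAYFAIL so that a genuine
			 * failure returns ENOMEM rather than invoking the
			 * OOM killer.
			 */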
2455 sg->length >= max_segment ||
2456 page_to_pfn(page) != last_pfn + 1) {
2458 sg_page_sizes |= sg->length;
2462 sg_set_page(sg, page, PAGE_SIZE, 0);
2464 sg->length += PAGE_SIZE;
2466 last_pfn = page_to_pfn(page);
2468 /* Check that the i965g/gm workaround works. */
2469 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
2471 if (sg) { /* loop terminated early; short sg table */
2472 sg_page_sizes |= sg->length;
2476 /* Trim unused sg entries to avoid wasting memory. */
2479 ret = i915_gem_gtt_prepare_pages(obj, st);
2482 * DMA remapping failed? One possible cause is that
2483 * it could not reserve enough large entries, asking
2484 * for PAGE_SIZE chunks instead may be helpful.
2486 if (max_segment > PAGE_SIZE) {
2487 for_each_sgt_page(page, sgt_iter, st)
2491 max_segment = PAGE_SIZE;
2494 dev_warn(&dev_priv->drm.pdev->dev,
2495 "Failed to DMA remap %lu pages\n",
2501 if (i915_gem_object_needs_bit17_swizzle(obj))
2502 i915_gem_object_do_bit_17_swizzle(obj, st);
2504 __i915_gem_object_set_pages(obj, st, sg_page_sizes);
2511 mapping_clear_unevictable(mapping);
2512 pagevec_init(&pvec);
2513 for_each_sgt_page(page, sgt_iter, st) {
2514 if (!pagevec_add(&pvec, page))
2515 check_release_pagevec(&pvec);
2517 if (pagevec_count(&pvec))
2518 check_release_pagevec(&pvec);
2523 * shmemfs first checks if there is enough memory to allocate the page
2524 * and reports ENOSPC should there be insufficient, along with the usual
2525 * ENOMEM for a genuine allocation failure.
2527 * We use ENOSPC in our driver to mean that we have run out of aperture
2528 * space and so want to translate the error from shmemfs back to our
2529 * usual understanding of ENOMEM.
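	/*
	 * A minimal sketch of the translation described above (the exact
	 * error unwind is elided here):
	 *
	 *	if (ret == -ENOSPC)
	 *		ret = -ENOMEM;
	 */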
2537 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
2538 struct sg_table *pages,
2539 unsigned int sg_page_sizes)
2541 struct drm_i915_private *i915 = to_i915(obj->base.dev);
2542 unsigned long supported = INTEL_INFO(i915)->page_sizes;
2545 lockdep_assert_held(&obj->mm.lock);
2547 /* Make the pages coherent with the GPU (flushing any swapin). */
2548 if (obj->cache_dirty) {
2549 obj->write_domain = 0;
2550 if (i915_gem_object_has_struct_page(obj))
2551 drm_clflush_sg(pages);
2552 obj->cache_dirty = false;
2555 obj->mm.get_page.sg_pos = pages->sgl;
2556 obj->mm.get_page.sg_idx = 0;
2558 obj->mm.pages = pages;
2560 if (i915_gem_object_is_tiled(obj) &&
2561 i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
2562 GEM_BUG_ON(obj->mm.quirked);
2563 __i915_gem_object_pin_pages(obj);
2564 obj->mm.quirked = true;
2567 GEM_BUG_ON(!sg_page_sizes);
2568 obj->mm.page_sizes.phys = sg_page_sizes;
2571 * Calculate the supported page-sizes which fit into the given
2572 * sg_page_sizes. This will give us the page-sizes which we may be able
2573 * to use opportunistically when later inserting into the GTT. For
2574 * example if phys=2G, then in theory we should be able to use 1G, 2M,
2575 * 64K or 4K pages, although in practice this will depend on a number of other factors.
2578 obj->mm.page_sizes.sg = 0;
2579 for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
2580 if (obj->mm.page_sizes.phys & ~0u << i)
2581 obj->mm.page_sizes.sg |= BIT(i);
2583 GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
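	/*
	 * Worked example (editor's illustration): with supported = 2M | 64K |
	 * 4K and phys = 2M | 4K, every supported size no larger than the
	 * biggest segment qualifies, so page_sizes.sg becomes 2M | 64K | 4K.
	 */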
2585 spin_lock(&i915->mm.obj_lock);
2586 list_add(&obj->mm.link, &i915->mm.unbound_list);
2587 spin_unlock(&i915->mm.obj_lock);
2590 static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2594 if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
2595 DRM_DEBUG("Attempting to obtain a purgeable object\n");
2599 err = obj->ops->get_pages(obj);
2600 GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));
2605 /* Ensure that the associated pages are gathered from the backing storage
2606 * and pinned into our object. i915_gem_object_pin_pages() may be called
2607 * multiple times before they are released by a single call to
2608 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
2609 * either as a result of memory pressure (reaping pages under the shrinker)
2610 * or as the object is itself released.
2612 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2616 err = mutex_lock_interruptible(&obj->mm.lock);
2620 if (unlikely(!i915_gem_object_has_pages(obj))) {
2621 GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
2623 err = ____i915_gem_object_get_pages(obj);
2627 smp_mb__before_atomic();
2629 atomic_inc(&obj->mm.pages_pin_count);
2632 mutex_unlock(&obj->mm.lock);
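/*
 * Illustrative pairing of the pin/unpin contract described above (a sketch,
 * not taken from this file); each successful pin must eventually be matched
 * by an unpin before the shrinker may reap the pages:
 *
 *	if (i915_gem_object_pin_pages(obj) == 0) {
 *		... operate on obj->mm.pages ...
 *		i915_gem_object_unpin_pages(obj);
 *	}
 */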
2636 /* The 'mapping' part of i915_gem_object_pin_map() below */
2637 static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
2638 enum i915_map_type type)
2640 unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
2641 struct sg_table *sgt = obj->mm.pages;
2642 struct sgt_iter sgt_iter;
2644 struct page *stack_pages[32];
2645 struct page **pages = stack_pages;
2646 unsigned long i = 0;
2650 /* A single page can always be kmapped */
2651 if (n_pages == 1 && type == I915_MAP_WB)
2652 return kmap(sg_page(sgt->sgl));
2654 if (n_pages > ARRAY_SIZE(stack_pages)) {
2655 /* Too big for stack -- allocate temporary array instead */
2656 pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
2661 for_each_sgt_page(page, sgt_iter, sgt)
2664 /* Check that we have the expected number of pages */
2665 GEM_BUG_ON(i != n_pages);
2670 /* fallthrough to use PAGE_KERNEL anyway */
2672 pgprot = PAGE_KERNEL;
2675 pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
2678 addr = vmap(pages, n_pages, 0, pgprot);
2680 if (pages != stack_pages)
2686 /* get, pin, and map the pages of the object into kernel space */
2687 void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
2688 enum i915_map_type type)
2690 enum i915_map_type has_type;
2695 if (unlikely(!i915_gem_object_has_struct_page(obj)))
2696 return ERR_PTR(-ENXIO);
2698 ret = mutex_lock_interruptible(&obj->mm.lock);
2700 return ERR_PTR(ret);
2702 pinned = !(type & I915_MAP_OVERRIDE);
2703 type &= ~I915_MAP_OVERRIDE;
2705 if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
2706 if (unlikely(!i915_gem_object_has_pages(obj))) {
2707 GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
2709 ret = ____i915_gem_object_get_pages(obj);
2713 smp_mb__before_atomic();
2715 atomic_inc(&obj->mm.pages_pin_count);
2718 GEM_BUG_ON(!i915_gem_object_has_pages(obj));
2720 ptr = page_unpack_bits(obj->mm.mapping, &has_type);
2721 if (ptr && has_type != type) {
2727 if (is_vmalloc_addr(ptr))
2730 kunmap(kmap_to_page(ptr));
2732 ptr = obj->mm.mapping = NULL;
2736 ptr = i915_gem_object_map(obj, type);
2742 obj->mm.mapping = page_pack_bits(ptr, type);
2746 mutex_unlock(&obj->mm.lock);
2750 atomic_dec(&obj->mm.pages_pin_count);
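/*
 * Illustrative use of the mapping API above (a sketch only; data and len are
 * placeholders): pin_map returns a kernel virtual address or an ERR_PTR(),
 * and the pointer must not be used after the matching
 * i915_gem_object_unpin_map():
 *
 *	void *vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, data, len);
 *	i915_gem_object_unpin_map(obj);
 */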
2756 void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
2757 unsigned long offset,
2760 enum i915_map_type has_type;
2763 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
2764 GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
2765 offset, size, obj->base.size));
2767 obj->mm.dirty = true;
2769 if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
2772 ptr = page_unpack_bits(obj->mm.mapping, &has_type);
2773 if (has_type == I915_MAP_WC)
2776 drm_clflush_virt_range(ptr + offset, size);
2777 if (size == obj->base.size) {
2778 obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
2779 obj->cache_dirty = false;
2784 i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
2785 const struct drm_i915_gem_pwrite *arg)
2787 struct address_space *mapping = obj->base.filp->f_mapping;
2788 char __user *user_data = u64_to_user_ptr(arg->data_ptr);
2792 /* Before we instantiate/pin the backing store for our use, we
2793 * can prepopulate the shmemfs filp efficiently using a write into
2794 * the pagecache. We avoid the penalty of instantiating all the
2795 * pages, important if the user is just writing to a few and never
2796 * uses the object on the GPU, and using a direct write into shmemfs
2797 * allows it to avoid the cost of retrieving a page (either swapin
2798 * or clearing-before-use) before it is overwritten.
2800 if (i915_gem_object_has_pages(obj))
2803 if (obj->mm.madv != I915_MADV_WILLNEED)
2806 /* Before the pages are instantiated the object is treated as being
2807 * in the CPU domain. The pages will be clflushed as required before
2808 * use, and we can freely write into the pages directly. If userspace
2809 * races pwrite with any other operation, corruption will ensue -
2810 * that is userspace's prerogative!
2814 offset = arg->offset;
2815 pg = offset_in_page(offset);
2818 unsigned int len, unwritten;
2823 len = PAGE_SIZE - pg;
2827 err = pagecache_write_begin(obj->base.filp, mapping,
2834 unwritten = copy_from_user(vaddr + pg, user_data, len);
2837 err = pagecache_write_end(obj->base.filp, mapping,
2838 offset, len, len - unwritten,
2856 i915_gem_retire_work_handler(struct work_struct *work)
2858 struct drm_i915_private *dev_priv =
2859 container_of(work, typeof(*dev_priv), gt.retire_work.work);
2860 struct drm_device *dev = &dev_priv->drm;
2862 /* Come back later if the device is busy... */
2863 if (mutex_trylock(&dev->struct_mutex)) {
2864 i915_retire_requests(dev_priv);
2865 mutex_unlock(&dev->struct_mutex);
2869 * Keep the retire handler running until we are finally idle.
2870 * We do not need to do this test under locking as in the worst-case
2871 * we queue the retire worker once too often.
2873 if (READ_ONCE(dev_priv->gt.awake))
2874 queue_delayed_work(dev_priv->wq,
2875 &dev_priv->gt.retire_work,
2876 round_jiffies_up_relative(HZ));
2879 static bool switch_to_kernel_context_sync(struct drm_i915_private *i915,
2885 * Even if we fail to switch, give whatever is running a small chance
2886 * to save itself before we report the failure. Yes, this may be a
2887 * false positive due to e.g. ENOMEM, caveat emptor!
2889 if (i915_gem_switch_to_kernel_context(i915, mask))
2892 if (i915_gem_wait_for_idle(i915,
2894 I915_WAIT_FOR_IDLE_BOOST,
2895 I915_GEM_IDLE_TIMEOUT))
2899 if (i915_modparams.reset) { /* XXX hide warning from gem_eio */
2900 dev_err(i915->drm.dev,
2901 "Failed to idle engines, declaring wedged!\n");
2905 /* Forcibly cancel outstanding work and leave the gpu quiet. */
2906 i915_gem_set_wedged(i915);
2909 i915_retire_requests(i915); /* ensure we flush after wedging */
2913 static bool load_power_context(struct drm_i915_private *i915)
2915 /* Force loading the kernel context on all engines */
2916 if (!switch_to_kernel_context_sync(i915, ALL_ENGINES))
2920 * Immediately park the GPU so that we enable powersaving and
2921 * treat it as idle. The next time we issue a request, we will
2922 * unpark and start using the engine->pinned_default_state, otherwise
2923 * it is in limbo and an early reset may fail.
2925 __i915_gem_park(i915);
2931 i915_gem_idle_work_handler(struct work_struct *work)
2933 struct drm_i915_private *i915 =
2934 container_of(work, typeof(*i915), gt.idle_work.work);
2935 bool rearm_hangcheck;
2937 if (!READ_ONCE(i915->gt.awake))
2940 if (READ_ONCE(i915->gt.active_requests))
2944 cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);
2946 if (!mutex_trylock(&i915->drm.struct_mutex)) {
2947 /* Currently busy, come back later */
2948 mod_delayed_work(i915->wq,
2949 &i915->gt.idle_work,
2950 msecs_to_jiffies(50));
2955 * Flush out the last user context, leaving only the pinned
2956 * kernel context resident. Should anything unfortunate happen
2957 * while we are idle (such as the GPU being power cycled), no users will be harmed.
2960 if (!work_pending(&i915->gt.idle_work.work) &&
2961 !i915->gt.active_requests) {
2962 ++i915->gt.active_requests; /* don't requeue idle */
2964 switch_to_kernel_context_sync(i915, i915->gt.active_engines);
2966 if (!--i915->gt.active_requests) {
2967 __i915_gem_park(i915);
2968 rearm_hangcheck = false;
2972 mutex_unlock(&i915->drm.struct_mutex);
2975 if (rearm_hangcheck) {
2976 GEM_BUG_ON(!i915->gt.awake);
2977 i915_queue_hangcheck(i915);
2981 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
2983 struct drm_i915_private *i915 = to_i915(gem->dev);
2984 struct drm_i915_gem_object *obj = to_intel_bo(gem);
2985 struct drm_i915_file_private *fpriv = file->driver_priv;
2986 struct i915_lut_handle *lut, *ln;
2988 mutex_lock(&i915->drm.struct_mutex);
2990 list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
2991 struct i915_gem_context *ctx = lut->ctx;
2992 struct i915_vma *vma;
2994 GEM_BUG_ON(ctx->file_priv == ERR_PTR(-EBADF));
2995 if (ctx->file_priv != fpriv)
2998 vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
2999 GEM_BUG_ON(vma->obj != obj);
3001 /* We allow the process to have multiple handles to the same
3002 * vma, in the same fd namespace, by virtue of flink/open.
3004 GEM_BUG_ON(!vma->open_count);
3005 if (!--vma->open_count && !i915_vma_is_ggtt(vma))
3006 i915_vma_close(vma);
3008 list_del(&lut->obj_link);
3009 list_del(&lut->ctx_link);
3011 i915_lut_handle_free(lut);
3012 __i915_gem_object_release_unless_active(obj);
3015 mutex_unlock(&i915->drm.struct_mutex);
3018 static unsigned long to_wait_timeout(s64 timeout_ns)
3021 return MAX_SCHEDULE_TIMEOUT;
3023 if (timeout_ns == 0)
3026 return nsecs_to_jiffies_timeout(timeout_ns);
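/*
 * Editor's note on the conversion above: a negative timeout_ns waits forever
 * (MAX_SCHEDULE_TIMEOUT), zero polls without sleeping, and a positive value
 * is converted to jiffies for the scheduler.
 */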
3030 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
3031 * @dev: drm device pointer
3032 * @data: ioctl data blob
3033 * @file: drm file pointer
3035 * Returns 0 if successful, else an error is returned with the remaining time in
3036 * the timeout parameter.
3037 * -ETIME: object is still busy after timeout
3038 * -ERESTARTSYS: signal interrupted the wait
3039 * -ENOENT: object doesn't exist
3040 * Also possible, but rare:
3041 * -EAGAIN: incomplete, restart syscall
3043 * -ENODEV: Internal IRQ fail
3044 * -E?: The add request failed
3046 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
3047 * non-zero timeout parameter the wait ioctl will wait for the given number of
3048 * nanoseconds on an object becoming unbusy. Since the wait itself does so
3049 * without holding struct_mutex the object may become re-busied before this
3050 * function completes. A similar but shorter race condition exists in the busy ioctl.
3054 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3056 struct drm_i915_gem_wait *args = data;
3057 struct drm_i915_gem_object *obj;
3061 if (args->flags != 0)
3064 obj = i915_gem_object_lookup(file, args->bo_handle);
3068 start = ktime_get();
3070 ret = i915_gem_object_wait(obj,
3071 I915_WAIT_INTERRUPTIBLE |
3072 I915_WAIT_PRIORITY |
3074 to_wait_timeout(args->timeout_ns));
3076 if (args->timeout_ns > 0) {
3077 args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
3078 if (args->timeout_ns < 0)
3079 args->timeout_ns = 0;
3082 * Apparently ktime isn't accurate enough and occasionally has a
3083 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
3084 * things up to make the test happy. We allow up to 1 jiffy.
3086 * This is a regression from the timespec->ktime conversion.
3088 if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
3089 args->timeout_ns = 0;
3091 /* Asked to wait beyond the jiffie/scheduler precision? */
3092 if (ret == -ETIME && args->timeout_ns)
3096 i915_gem_object_put(obj);
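/*
 * Illustrative userspace invocation of the ioctl above (a hedged sketch using
 * the libdrm wrapper; fd and handle are assumed to be an open DRM fd and a
 * GEM handle, a negative timeout_ns waits indefinitely, and ret follows the
 * error codes listed in the kerneldoc):
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = -1,
 *	};
 *	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 */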
3100 static int wait_for_engines(struct drm_i915_private *i915)
3102 if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) {
3103 dev_err(i915->drm.dev,
3104 "Failed to idle engines, declaring wedged!\n");
3106 i915_gem_set_wedged(i915);
3114 wait_for_timelines(struct drm_i915_private *i915,
3115 unsigned int flags, long timeout)
3117 struct i915_gt_timelines *gt = &i915->gt.timelines;
3118 struct i915_timeline *tl;
3120 if (!READ_ONCE(i915->gt.active_requests))
3123 mutex_lock(>->mutex);
3124 list_for_each_entry(tl, >->active_list, link) {
3125 struct i915_request *rq;
3127 rq = i915_active_request_get_unlocked(&tl->last_request);
3131 mutex_unlock(>->mutex);
3136 * Switching to the kernel context is often used as a synchronous
3137 * step prior to idling, e.g. in suspend for flushing all
3138 * current operations to memory before sleeping. These we
3139 * want to complete as quickly as possible to avoid prolonged
3140 * stalls, so allow the gpu to boost to maximum clocks.
3142 if (flags & I915_WAIT_FOR_IDLE_BOOST)
3145 timeout = i915_request_wait(rq, flags, timeout);
3146 i915_request_put(rq);
3150 /* restart after reacquiring the lock */
3151 mutex_lock(>->mutex);
3152 tl = list_entry(>->active_list, typeof(*tl), link);
3154 mutex_unlock(>->mutex);
3159 int i915_gem_wait_for_idle(struct drm_i915_private *i915,
3160 unsigned int flags, long timeout)
3162 GEM_TRACE("flags=%x (%s), timeout=%ld%s\n",
3163 flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked",
3164 timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "");
3166 /* If the device is asleep, we have no requests outstanding */
3167 if (!READ_ONCE(i915->gt.awake))
3170 timeout = wait_for_timelines(i915, flags, timeout);
3174 if (flags & I915_WAIT_LOCKED) {
3177 lockdep_assert_held(&i915->drm.struct_mutex);
3179 err = wait_for_engines(i915);
3183 i915_retire_requests(i915);
3189 static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
3192 * We manually flush the CPU domain so that we can override and
3193 * force the flush for the display, and perform it asynchronously.
3195 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
3196 if (obj->cache_dirty)
3197 i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
3198 obj->write_domain = 0;
3201 void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
3203 if (!READ_ONCE(obj->pin_global))
3206 mutex_lock(&obj->base.dev->struct_mutex);
3207 __i915_gem_object_flush_for_display(obj);
3208 mutex_unlock(&obj->base.dev->struct_mutex);
3212 * Moves a single object to the WC read, and possibly write domain.
3213 * @obj: object to act on
3214 * @write: ask for write access or read only
3216 * This function returns when the move is complete, including waiting on
3220 i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
3224 lockdep_assert_held(&obj->base.dev->struct_mutex);
3226 ret = i915_gem_object_wait(obj,
3227 I915_WAIT_INTERRUPTIBLE |
3229 (write ? I915_WAIT_ALL : 0),
3230 MAX_SCHEDULE_TIMEOUT);
3234 if (obj->write_domain == I915_GEM_DOMAIN_WC)
3237 /* Flush and acquire obj->pages so that we are coherent through
3238 * direct access in memory with previous cached writes through
3239 * shmemfs and that our cache domain tracking remains valid.
3240 * For example, if the obj->filp was moved to swap without us
3241 * being notified and releasing the pages, we would mistakenly
3242 * continue to assume that the obj remained out of the CPU cached domain.
3245 ret = i915_gem_object_pin_pages(obj);
3249 flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);
3251 /* Serialise direct access to this object with the barriers for
3252 * coherent writes from the GPU, by effectively invalidating the
3253 * WC domain upon first access.
3255 if ((obj->read_domains & I915_GEM_DOMAIN_WC) == 0)
3258 /* It should now be out of any other write domains, and we can update
3259 * the domain values for our changes.
3261 GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_WC) != 0);
3262 obj->read_domains |= I915_GEM_DOMAIN_WC;
3264 obj->read_domains = I915_GEM_DOMAIN_WC;
3265 obj->write_domain = I915_GEM_DOMAIN_WC;
3266 obj->mm.dirty = true;
3269 i915_gem_object_unpin_pages(obj);
3274 * Moves a single object to the GTT read, and possibly write domain.
3275 * @obj: object to act on
3276 * @write: ask for write access or read only
3278 * This function returns when the move is complete, including waiting on
3282 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3286 lockdep_assert_held(&obj->base.dev->struct_mutex);
3288 ret = i915_gem_object_wait(obj,
3289 I915_WAIT_INTERRUPTIBLE |
3291 (write ? I915_WAIT_ALL : 0),
3292 MAX_SCHEDULE_TIMEOUT);
3296 if (obj->write_domain == I915_GEM_DOMAIN_GTT)
3299 /* Flush and acquire obj->pages so that we are coherent through
3300 * direct access in memory with previous cached writes through
3301 * shmemfs and that our cache domain tracking remains valid.
3302 * For example, if the obj->filp was moved to swap without us
3303 * being notified and releasing the pages, we would mistakenly
3304 * continue to assume that the obj remained out of the CPU cached domain.
3307 ret = i915_gem_object_pin_pages(obj);
3311 flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);
3313 /* Serialise direct access to this object with the barriers for
3314 * coherent writes from the GPU, by effectively invalidating the
3315 * GTT domain upon first access.
3317 if ((obj->read_domains & I915_GEM_DOMAIN_GTT) == 0)
3320 /* It should now be out of any other write domains, and we can update
3321 * the domain values for our changes.
3323 GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3324 obj->read_domains |= I915_GEM_DOMAIN_GTT;
3326 obj->read_domains = I915_GEM_DOMAIN_GTT;
3327 obj->write_domain = I915_GEM_DOMAIN_GTT;
3328 obj->mm.dirty = true;
3331 i915_gem_object_unpin_pages(obj);
3336 * Changes the cache-level of an object across all VMA.
3337 * @obj: object to act on
3338 * @cache_level: new cache level to set for the object
3340 * After this function returns, the object will be in the new cache-level
3341 * across all GTT and the contents of the backing storage will be coherent,
3342 * with respect to the new cache-level. In order to keep the backing storage
3343 * coherent for all users, we only allow a single cache level to be set
3344 * globally on the object and prevent it from being changed whilst the
3345 * hardware is reading from the object. That is if the object is currently
3346 * on the scanout it will be set to uncached (or equivalent display
3347 * cache coherency) and all non-MOCS GPU access will also be uncached so
3348 * that all direct access to the scanout remains coherent.
3350 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3351 enum i915_cache_level cache_level)
3353 struct i915_vma *vma;
3356 lockdep_assert_held(&obj->base.dev->struct_mutex);
3358 if (obj->cache_level == cache_level)
3361 /* Inspect the list of currently bound VMA and unbind any that would
3362 * be invalid given the new cache-level. This is principally to
3363 * catch the issue of the CS prefetch crossing page boundaries and
3364 * reading an invalid PTE on older architectures.
3367 list_for_each_entry(vma, &obj->vma.list, obj_link) {
3368 if (!drm_mm_node_allocated(&vma->node))
3371 if (i915_vma_is_pinned(vma)) {
3372 DRM_DEBUG("can not change the cache level of pinned objects\n");
3376 if (!i915_vma_is_closed(vma) &&
3377 i915_gem_valid_gtt_space(vma, cache_level))
3380 ret = i915_vma_unbind(vma);
3384 /* As unbinding may affect other elements in the
3385 * obj->vma_list (due to side-effects from retiring
3386 * an active vma), play safe and restart the iterator.
3391 /* We can reuse the existing drm_mm nodes but need to change the
3392 * cache-level on the PTE. We could simply unbind them all and
3393 * rebind with the correct cache-level on next use. However since
3394 * we already have a valid slot, dma mapping, pages etc, we may as well
3395 * rewrite the PTE in the belief that doing so tramples upon less
3396 * state and so involves less work.
3398 if (obj->bind_count) {
3399 /* Before we change the PTE, the GPU must not be accessing it.
3400 * If we wait upon the object, we know that all the bound
3401 * VMA are no longer active.
3403 ret = i915_gem_object_wait(obj,
3404 I915_WAIT_INTERRUPTIBLE |
3407 MAX_SCHEDULE_TIMEOUT);
3411 if (!HAS_LLC(to_i915(obj->base.dev)) &&
3412 cache_level != I915_CACHE_NONE) {
3413 /* Access to snoopable pages through the GTT is
3414 * incoherent and on some machines causes a hard
3415 * lockup. Relinquish the CPU mmapping to force
3416 * userspace to refault in the pages and we can
3417 * then double check if the GTT mapping is still
3418 * valid for that pointer access.
3420 i915_gem_release_mmap(obj);
3422 /* As we no longer need a fence for GTT access,
3423 * we can relinquish it now (and so prevent having
3424 * to steal a fence from someone else on the next
3425 * fence request). Note GPU activity would have
3426 * dropped the fence as all snoopable access is
3427 * supposed to be linear.
3429 for_each_ggtt_vma(vma, obj) {
3430 ret = i915_vma_put_fence(vma);
3435 /* We either have incoherent backing store and
3436 * so no GTT access or the architecture is fully
3437 * coherent. In such cases, existing GTT mmaps
3438 * ignore the cache bit in the PTE and we can
3439 * rewrite it without confusing the GPU or having
3440 * to force userspace to fault back in its mmaps.
3444 list_for_each_entry(vma, &obj->vma.list, obj_link) {
3445 if (!drm_mm_node_allocated(&vma->node))
3448 ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
3454 list_for_each_entry(vma, &obj->vma.list, obj_link)
3455 vma->node.color = cache_level;
3456 i915_gem_object_set_cache_coherency(obj, cache_level);
3457 obj->cache_dirty = true; /* Always invalidate stale cachelines */
3462 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3463 struct drm_file *file)
3465 struct drm_i915_gem_caching *args = data;
3466 struct drm_i915_gem_object *obj;
3470 obj = i915_gem_object_lookup_rcu(file, args->handle);
3476 switch (obj->cache_level) {
3477 case I915_CACHE_LLC:
3478 case I915_CACHE_L3_LLC:
3479 args->caching = I915_CACHING_CACHED;
3483 args->caching = I915_CACHING_DISPLAY;
3487 args->caching = I915_CACHING_NONE;
3495 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3496 struct drm_file *file)
3498 struct drm_i915_private *i915 = to_i915(dev);
3499 struct drm_i915_gem_caching *args = data;
3500 struct drm_i915_gem_object *obj;
3501 enum i915_cache_level level;
3504 switch (args->caching) {
3505 case I915_CACHING_NONE:
3506 level = I915_CACHE_NONE;
3508 case I915_CACHING_CACHED:
3510 * Due to a HW issue on BXT A stepping, GPU stores via a
3511 * snooped mapping may leave stale data in a corresponding CPU
3512 * cacheline, whereas normally such cachelines would get
3515 if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
3518 level = I915_CACHE_LLC;
3520 case I915_CACHING_DISPLAY:
3521 level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
3527 obj = i915_gem_object_lookup(file, args->handle);
3532 * The caching mode of proxy object is handled by its generator, and
3533 * not allowed to be changed by userspace.
3535 if (i915_gem_object_is_proxy(obj)) {
3540 if (obj->cache_level == level)
3543 ret = i915_gem_object_wait(obj,
3544 I915_WAIT_INTERRUPTIBLE,
3545 MAX_SCHEDULE_TIMEOUT);
3549 ret = i915_mutex_lock_interruptible(dev);
3553 ret = i915_gem_object_set_cache_level(obj, level);
3554 mutex_unlock(&dev->struct_mutex);
3557 i915_gem_object_put(obj);
3562 * Prepare buffer for display plane (scanout, cursors, etc). Can be called from
3563 * an uninterruptible phase (modesetting) and allows any flushes to be pipelined
3564 * (for pageflips). We only flush the caches while preparing the buffer for
3565 * display, the callers are responsible for frontbuffer flush.
3568 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3570 const struct i915_ggtt_view *view,
3573 struct i915_vma *vma;
3576 lockdep_assert_held(&obj->base.dev->struct_mutex);
3578 /* Mark the global pin early so that we account for the
3579 * display coherency whilst setting up the cache domains.
3583 /* The display engine is not coherent with the LLC cache on gen6. As
3584 * a result, we make sure that the pinning that is about to occur is
3585 * done with uncached PTEs. This is the lowest common denominator for all chipsets.
3588 * However for gen6+, we could do better by using the GFDT bit instead
3589 * of uncaching, which would allow us to flush all the LLC-cached data
3590 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3592 ret = i915_gem_object_set_cache_level(obj,
3593 HAS_WT(to_i915(obj->base.dev)) ?
3594 I915_CACHE_WT : I915_CACHE_NONE);
3597 goto err_unpin_global;
3600 /* As the user may map the buffer once pinned in the display plane
3601 * (e.g. libkms for the bootup splash), we have to ensure that we
3602 * always use map_and_fenceable for all scanout buffers. However,
3603 * it may simply be too big to fit into mappable, in which case
3604 * put it anyway and hope that userspace can cope (but always first
3605 * try to preserve the existing ABI).
3607 vma = ERR_PTR(-ENOSPC);
3608 if ((flags & PIN_MAPPABLE) == 0 &&
3609 (!view || view->type == I915_GGTT_VIEW_NORMAL))
3610 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
3615 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
3617 goto err_unpin_global;
3619 vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
3621 __i915_gem_object_flush_for_display(obj);
3623 /* It should now be out of any other write domains, and we can update
3624 * the domain values for our changes.
3626 obj->read_domains |= I915_GEM_DOMAIN_GTT;
3636 i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
3638 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
3640 if (WARN_ON(vma->obj->pin_global == 0))
3643 if (--vma->obj->pin_global == 0)
3644 vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
3646 /* Bump the LRU to try and avoid premature eviction whilst flipping */
3647 i915_gem_object_bump_inactive_ggtt(vma->obj);
3649 i915_vma_unpin(vma);
3653 * Moves a single object to the CPU read, and possibly write domain.
3654 * @obj: object to act on
3655 * @write: requesting write or read-only access
3657 * This function returns when the move is complete, including waiting on
3661 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3665 lockdep_assert_held(&obj->base.dev->struct_mutex);
3667 ret = i915_gem_object_wait(obj,
3668 I915_WAIT_INTERRUPTIBLE |
3670 (write ? I915_WAIT_ALL : 0),
3671 MAX_SCHEDULE_TIMEOUT);
3675 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
3677 /* Flush the CPU cache if it's still invalid. */
3678 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3679 i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
3680 obj->read_domains |= I915_GEM_DOMAIN_CPU;
3683 /* It should now be out of any other write domains, and we can update
3684 * the domain values for our changes.
3686 GEM_BUG_ON(obj->write_domain & ~I915_GEM_DOMAIN_CPU);
3688 /* If we're writing through the CPU, then the GPU read domains will
3689 * need to be invalidated at next use.
3692 __start_cpu_write(obj);
3697 /* Throttle our rendering by waiting until the ring has completed our requests
3698 * emitted over 20 msec ago.
3700 * Note that if we were to use the current jiffies each time around the loop,
3701 * we wouldn't escape the function with any frames outstanding if the time to
3702 * render a frame was over 20ms.
3704 * This should get us reasonable parallelism between CPU and GPU but also
3705 * relatively low latency when blocking on a particular request to finish.
3708 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3710 struct drm_i915_private *dev_priv = to_i915(dev);
3711 struct drm_i915_file_private *file_priv = file->driver_priv;
3712 unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
3713 struct i915_request *request, *target = NULL;
3716 /* ABI: return -EIO if already wedged */
3717 ret = i915_terminally_wedged(dev_priv);
3721 spin_lock(&file_priv->mm.lock);
3722 list_for_each_entry(request, &file_priv->mm.request_list, client_link) {
3723 if (time_after_eq(request->emitted_jiffies, recent_enough))
3727 list_del(&target->client_link);
3728 target->file_priv = NULL;
3734 i915_request_get(target);
3735 spin_unlock(&file_priv->mm.lock);
3740 ret = i915_request_wait(target,
3741 I915_WAIT_INTERRUPTIBLE,
3742 MAX_SCHEDULE_TIMEOUT);
3743 i915_request_put(target);
3745 return ret < 0 ? ret : 0;
3749 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
3750 const struct i915_ggtt_view *view,
3755 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3756 struct i915_address_space *vm = &dev_priv->ggtt.vm;
3757 struct i915_vma *vma;
3760 lockdep_assert_held(&obj->base.dev->struct_mutex);
3762 if (flags & PIN_MAPPABLE &&
3763 (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
3764 /* If the required space is larger than the available
3765 * aperture, we will not be able to find a slot for the
3766 * object and unbinding the object now will be in
3767 * vain. Worse, doing so may cause us to ping-pong
3768 * the object in and out of the Global GTT and
3769 * waste a lot of cycles under the mutex.
3771 if (obj->base.size > dev_priv->ggtt.mappable_end)
3772 return ERR_PTR(-E2BIG);
3774 /* If NONBLOCK is set the caller is optimistically
3775 * trying to cache the full object within the mappable
3776 * aperture, and *must* have a fallback in place for
3777 * situations where we cannot bind the object. We
3778 * can be a little more lax here and use the fallback
3779 * more often to avoid costly migrations of ourselves
3780 * and other objects within the aperture.
3782 * Half-the-aperture is used as a simple heuristic.
3783 * More interesting would be to search for a free
3784 * block prior to making the commitment to unbind.
3785 * That caters for the self-harm case, and with a
3786 * little more heuristics (e.g. NOFAULT, NOEVICT)
3787 * we could try to minimise harm to others.
3789 if (flags & PIN_NONBLOCK &&
3790 obj->base.size > dev_priv->ggtt.mappable_end / 2)
3791 return ERR_PTR(-ENOSPC);
3794 vma = i915_vma_instance(obj, vm, view);
3798 if (i915_vma_misplaced(vma, size, alignment, flags)) {
3799 if (flags & PIN_NONBLOCK) {
3800 if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
3801 return ERR_PTR(-ENOSPC);
3803 if (flags & PIN_MAPPABLE &&
3804 vma->fence_size > dev_priv->ggtt.mappable_end / 2)
3805 return ERR_PTR(-ENOSPC);
3808 WARN(i915_vma_is_pinned(vma),
3809 "bo is already pinned in ggtt with incorrect alignment:"
3810 " offset=%08x, req.alignment=%llx,"
3811 " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
3812 i915_ggtt_offset(vma), alignment,
3813 !!(flags & PIN_MAPPABLE),
3814 i915_vma_is_map_and_fenceable(vma));
3815 ret = i915_vma_unbind(vma);
3817 return ERR_PTR(ret);
3820 ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
3822 return ERR_PTR(ret);
3827 static __always_inline unsigned int __busy_read_flag(unsigned int id)
3829 if (id == I915_ENGINE_CLASS_INVALID)
3832 GEM_BUG_ON(id >= 16);
3833 return 0x10000 << id;
3836 static __always_inline unsigned int __busy_write_id(unsigned int id)
3839 * The uABI guarantees an active writer is also amongst the read
3840 * engines. This would be true if we accessed the activity tracking
3841 * under the lock, but as we perform the lookup of the object and
3842 * its activity locklessly we can not guarantee that the last_write
3843 * being active implies that we have set the same engine flag from
3844 * last_read - hence we always set both read and write busy for last_write.
3847 if (id == I915_ENGINE_CLASS_INVALID)
3850 return (id + 1) | __busy_read_flag(id);
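/*
 * Editor's note on the encoding used by the two helpers above: read activity
 * is reported as a per-engine-class flag in the upper 16 bits
 * (0x10000 << class), while the single writer is reported as (class + 1) in
 * the low 16 bits and mirrored into the read flags. For example, a buffer
 * last written by the render class (0) and also read by the video class (2)
 * reports busy = 0x10001 | 0x40000.
 */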
3853 static __always_inline unsigned int
3854 __busy_set_if_active(const struct dma_fence *fence,
3855 unsigned int (*flag)(unsigned int id))
3857 const struct i915_request *rq;
3860 * We have to check the current hw status of the fence as the uABI
3861 * guarantees forward progress. We could rely on the idle worker
3862 * to eventually flush us, but to minimise latency just ask the hardware.
3865 * Note we only report on the status of native fences.
3867 if (!dma_fence_is_i915(fence))
3870 /* opencode to_request() in order to avoid const warnings */
3871 rq = container_of(fence, const struct i915_request, fence);
3872 if (i915_request_completed(rq))
3875 return flag(rq->engine->uabi_class);
3878 static __always_inline unsigned int
3879 busy_check_reader(const struct dma_fence *fence)
3881 return __busy_set_if_active(fence, __busy_read_flag);
3884 static __always_inline unsigned int
3885 busy_check_writer(const struct dma_fence *fence)
3890 return __busy_set_if_active(fence, __busy_write_id);
3894 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3895 struct drm_file *file)
3897 struct drm_i915_gem_busy *args = data;
3898 struct drm_i915_gem_object *obj;
3899 struct reservation_object_list *list;
3905 obj = i915_gem_object_lookup_rcu(file, args->handle);
3910 * A discrepancy here is that we do not report the status of
3911 * non-i915 fences, i.e. even though we may report the object as idle,
3912 * a call to set-domain may still stall waiting for foreign rendering.
3913 * This also means that wait-ioctl may report an object as busy,
3914 * where busy-ioctl considers it idle.
3916 * We trade the ability to warn of foreign fences to report on which
3917 * i915 engines are active for the object.
3919 * Alternatively, we can trade that extra information on read/write
3922 * !reservation_object_test_signaled_rcu(obj->resv, true);
3923 * to report the overall busyness. This is what the wait-ioctl does.
3927 seq = raw_read_seqcount(&obj->resv->seq);
3929 /* Translate the exclusive fence to the READ *and* WRITE engine */
3930 args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl));
3932 /* Translate shared fences to READ set of engines */
3933 list = rcu_dereference(obj->resv->fence);
3935 unsigned int shared_count = list->shared_count, i;
3937 for (i = 0; i < shared_count; ++i) {
3938 struct dma_fence *fence =
3939 rcu_dereference(list->shared[i]);
3941 args->busy |= busy_check_reader(fence);
3945 if (args->busy && read_seqcount_retry(&obj->resv->seq, seq))
3955 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3956 struct drm_file *file_priv)
3958 return i915_gem_ring_throttle(dev, file_priv);
3962 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3963 struct drm_file *file_priv)
3965 struct drm_i915_private *dev_priv = to_i915(dev);
3966 struct drm_i915_gem_madvise *args = data;
3967 struct drm_i915_gem_object *obj;
3970 switch (args->madv) {
3971 case I915_MADV_DONTNEED:
3972 case I915_MADV_WILLNEED:
3978 obj = i915_gem_object_lookup(file_priv, args->handle);
3982 err = mutex_lock_interruptible(&obj->mm.lock);
3986 if (i915_gem_object_has_pages(obj) &&
3987 i915_gem_object_is_tiled(obj) &&
3988 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
3989 if (obj->mm.madv == I915_MADV_WILLNEED) {
3990 GEM_BUG_ON(!obj->mm.quirked);
3991 __i915_gem_object_unpin_pages(obj);
3992 obj->mm.quirked = false;
3994 if (args->madv == I915_MADV_WILLNEED) {
3995 GEM_BUG_ON(obj->mm.quirked);
3996 __i915_gem_object_pin_pages(obj);
3997 obj->mm.quirked = true;
4001 if (obj->mm.madv != __I915_MADV_PURGED)
4002 obj->mm.madv = args->madv;
4004 /* if the object is no longer attached, discard its backing storage */
4005 if (obj->mm.madv == I915_MADV_DONTNEED &&
4006 !i915_gem_object_has_pages(obj))
4007 i915_gem_object_truncate(obj);
4009 args->retained = obj->mm.madv != __I915_MADV_PURGED;
4010 mutex_unlock(&obj->mm.lock);
4013 i915_gem_object_put(obj);
4018 frontbuffer_retire(struct i915_active_request *active,
4019 struct i915_request *request)
4021 struct drm_i915_gem_object *obj =
4022 container_of(active, typeof(*obj), frontbuffer_write);
4024 intel_fb_obj_flush(obj, ORIGIN_CS);
4027 void i915_gem_object_init(struct drm_i915_gem_object *obj,
4028 const struct drm_i915_gem_object_ops *ops)
4030 mutex_init(&obj->mm.lock);
4032 spin_lock_init(&obj->vma.lock);
4033 INIT_LIST_HEAD(&obj->vma.list);
4035 INIT_LIST_HEAD(&obj->lut_list);
4036 INIT_LIST_HEAD(&obj->batch_pool_link);
4038 init_rcu_head(&obj->rcu);
4042 reservation_object_init(&obj->__builtin_resv);
4043 obj->resv = &obj->__builtin_resv;
4045 obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
4046 i915_active_request_init(&obj->frontbuffer_write,
4047 NULL, frontbuffer_retire);
4049 obj->mm.madv = I915_MADV_WILLNEED;
4050 INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
4051 mutex_init(&obj->mm.get_page.lock);
4053 i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
4056 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4057 .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
4058 I915_GEM_OBJECT_IS_SHRINKABLE,
4060 .get_pages = i915_gem_object_get_pages_gtt,
4061 .put_pages = i915_gem_object_put_pages_gtt,
4063 .pwrite = i915_gem_object_pwrite_gtt,
4066 static int i915_gem_object_create_shmem(struct drm_device *dev,
4067 struct drm_gem_object *obj,
4070 struct drm_i915_private *i915 = to_i915(dev);
4071 unsigned long flags = VM_NORESERVE;
4074 drm_gem_private_object_init(dev, obj, size);
4077 filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
4080 filp = shmem_file_setup("i915", size, flags);
4083 return PTR_ERR(filp);
4090 struct drm_i915_gem_object *
4091 i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
4093 struct drm_i915_gem_object *obj;
4094 struct address_space *mapping;
4095 unsigned int cache_level;
4099 /* There is a prevalence of the assumption that we fit the object's
4100 * page count inside a 32bit _signed_ variable. Let's document this and
4101 * catch if we ever need to fix it. In the meantime, if you do spot
4102 * such a local variable, please consider fixing!
4104 if (size >> PAGE_SHIFT > INT_MAX)
4105 return ERR_PTR(-E2BIG);
4107 if (overflows_type(size, obj->base.size))
4108 return ERR_PTR(-E2BIG);
4110 obj = i915_gem_object_alloc();
4112 return ERR_PTR(-ENOMEM);
4114 ret = i915_gem_object_create_shmem(&dev_priv->drm, &obj->base, size);
4118 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4119 if (IS_I965GM(dev_priv) || IS_I965G(dev_priv)) {
4120 /* 965gm cannot relocate objects above 4GiB. */
4121 mask &= ~__GFP_HIGHMEM;
4122 mask |= __GFP_DMA32;
4125 mapping = obj->base.filp->f_mapping;
4126 mapping_set_gfp_mask(mapping, mask);
4127 GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
4129 i915_gem_object_init(obj, &i915_gem_object_ops);
4131 obj->write_domain = I915_GEM_DOMAIN_CPU;
4132 obj->read_domains = I915_GEM_DOMAIN_CPU;
4134 if (HAS_LLC(dev_priv))
4135 /* On some devices, we can have the GPU use the LLC (the CPU
4136 * cache) for about a 10% performance improvement
4137 * compared to uncached. Graphics requests other than
4138 * display scanout are coherent with the CPU in
4139 * accessing this cache. This means in this mode we
4140 * don't need to clflush on the CPU side, and on the
4141 * GPU side we only need to flush internal caches to
4142 * get data visible to the CPU.
4144 * However, we maintain the display planes as UC, and so
4145 * need to rebind when first used as such.
4147 cache_level = I915_CACHE_LLC;
4149 cache_level = I915_CACHE_NONE;
4151 i915_gem_object_set_cache_coherency(obj, cache_level);
4153 trace_i915_gem_object_create(obj);
4158 i915_gem_object_free(obj);
4159 return ERR_PTR(ret);
4162 static bool discard_backing_storage(struct drm_i915_gem_object *obj)
4164 /* If we are the last user of the backing storage (be it shmemfs
4165 * pages or stolen etc), we know that the pages are going to be
4166 * immediately released. In this case, we can then skip copying
4167 * back the contents from the GPU.
4170 if (obj->mm.madv != I915_MADV_WILLNEED)
4173 if (obj->base.filp == NULL)
4176 /* At first glance, this looks racy, but then again so would be
4177 * userspace racing mmap against close. However, the first external
4178 * reference to the filp can only be obtained through the
4179 * i915_gem_mmap_ioctl() which safeguards us against the user
4180 * acquiring such a reference whilst we are in the middle of
4181 * freeing the object.
4183 return atomic_long_read(&obj->base.filp->f_count) == 1;
4186 static void __i915_gem_free_objects(struct drm_i915_private *i915,
4187 struct llist_node *freed)
4189 struct drm_i915_gem_object *obj, *on;
4190 intel_wakeref_t wakeref;
4192 wakeref = intel_runtime_pm_get(i915);
4193 llist_for_each_entry_safe(obj, on, freed, freed) {
4194 struct i915_vma *vma, *vn;
4196 trace_i915_gem_object_destroy(obj);
4198 mutex_lock(&i915->drm.struct_mutex);
4200 GEM_BUG_ON(i915_gem_object_is_active(obj));
4201 list_for_each_entry_safe(vma, vn, &obj->vma.list, obj_link) {
4202 GEM_BUG_ON(i915_vma_is_active(vma));
4203 vma->flags &= ~I915_VMA_PIN_MASK;
4204 i915_vma_destroy(vma);
4206 GEM_BUG_ON(!list_empty(&obj->vma.list));
4207 GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma.tree));
4209 /* This serializes freeing with the shrinker. Since the free
4210 * is delayed, first by RCU then by the workqueue, we want the
4211 * shrinker to be able to free pages of unreferenced objects,
4212 * or else we may oom whilst there are plenty of deferred
4215 if (i915_gem_object_has_pages(obj)) {
4216 spin_lock(&i915->mm.obj_lock);
4217 list_del_init(&obj->mm.link);
4218 spin_unlock(&i915->mm.obj_lock);
4221 mutex_unlock(&i915->drm.struct_mutex);
4223 GEM_BUG_ON(obj->bind_count);
4224 GEM_BUG_ON(obj->userfault_count);
4225 GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
4226 GEM_BUG_ON(!list_empty(&obj->lut_list));
4228 if (obj->ops->release)
4229 obj->ops->release(obj);
4231 if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
4232 atomic_set(&obj->mm.pages_pin_count, 0);
4233 __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
4234 GEM_BUG_ON(i915_gem_object_has_pages(obj));
4236 if (obj->base.import_attach)
4237 drm_prime_gem_destroy(&obj->base, NULL);
4239 reservation_object_fini(&obj->__builtin_resv);
4240 drm_gem_object_release(&obj->base);
4241 i915_gem_info_remove_obj(i915, obj->base.size);
4243 bitmap_free(obj->bit_17);
4244 i915_gem_object_free(obj);
4246 GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
4247 atomic_dec(&i915->mm.free_count);
4252 intel_runtime_pm_put(i915, wakeref);
4255 static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
4257 struct llist_node *freed;
4259 /* Free the oldest, most stale object to keep the free_list short */
4261 if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */
4262 /* Only one consumer of llist_del_first() allowed */
4263 spin_lock(&i915->mm.free_lock);
4264 freed = llist_del_first(&i915->mm.free_list);
4265 spin_unlock(&i915->mm.free_lock);
4267 if (unlikely(freed)) {
4269 __i915_gem_free_objects(i915, freed);
4273 static void __i915_gem_free_work(struct work_struct *work)
4275 struct drm_i915_private *i915 =
4276 container_of(work, struct drm_i915_private, mm.free_work);
4277 struct llist_node *freed;
4280 * All file-owned VMA should have been released by this point through
4281 * i915_gem_close_object(), or earlier by i915_gem_context_close().
4282 * However, the object may also be bound into the global GTT (e.g.
4283 * older GPUs without per-process support, or for direct access through
4284 * the GTT either for the user or for scanout). Those VMA still need to be unbound now.
4288 spin_lock(&i915->mm.free_lock);
4289 while ((freed = llist_del_all(&i915->mm.free_list))) {
4290 spin_unlock(&i915->mm.free_lock);
4292 __i915_gem_free_objects(i915, freed);
4296 spin_lock(&i915->mm.free_lock);
4298 spin_unlock(&i915->mm.free_lock);
4301 static void __i915_gem_free_object_rcu(struct rcu_head *head)
4303 struct drm_i915_gem_object *obj =
4304 container_of(head, typeof(*obj), rcu);
4305 struct drm_i915_private *i915 = to_i915(obj->base.dev);
4308 * We reuse obj->rcu for the freed list, so we had better not treat
4309 * it like a rcu_head from this point forwards. And we expect all
4310 * objects to be freed via this path.
4312 destroy_rcu_head(&obj->rcu);
4315 * Since we require blocking on struct_mutex to unbind the freed
4316 * object from the GPU before releasing resources back to the
4317 * system, we can not do that directly from the RCU callback (which may
4318 * be a softirq context), but must instead then defer that work onto a
4319 * kthread. We use the RCU callback rather than move the freed object
4320 * directly onto the work queue so that we can mix between using the
4321 * worker and performing frees directly from subsequent allocations for
4322 * crude but effective memory throttling.
4324 if (llist_add(&obj->freed, &i915->mm.free_list))
4325 queue_work(i915->wq, &i915->mm.free_work);
4328 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4330 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4332 if (obj->mm.quirked)
4333 __i915_gem_object_unpin_pages(obj);
4335 if (discard_backing_storage(obj))
4336 obj->mm.madv = I915_MADV_DONTNEED;
4339 * Before we free the object, make sure any pure RCU-only
4340 * read-side critical sections are complete, e.g.
4341 * i915_gem_busy_ioctl(). For the corresponding synchronized
4342 * lookup see i915_gem_object_lookup_rcu().
4344 atomic_inc(&to_i915(obj->base.dev)->mm.free_count);
4345 call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
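/*
 * Editor's summary of the deferred free path above: the final
 * i915_gem_free_object() queues the object on an RCU callback; once the grace
 * period expires, __i915_gem_free_object_rcu() pushes it onto
 * i915->mm.free_list, and either the mm.free_work worker or a direct call to
 * i915_gem_flush_free_objects() finally tears it down under struct_mutex in
 * __i915_gem_free_objects().
 */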
4348 void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
4350 lockdep_assert_held(&obj->base.dev->struct_mutex);
4352 if (!i915_gem_object_has_active_reference(obj) &&
4353 i915_gem_object_is_active(obj))
4354 i915_gem_object_set_active_reference(obj);
4356 i915_gem_object_put(obj);
4359 void i915_gem_sanitize(struct drm_i915_private *i915)
4361 intel_wakeref_t wakeref;
4365 wakeref = intel_runtime_pm_get(i915);
4366 intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
4369 * As we have just resumed the machine and woken the device up from
4370 * deep PCI sleep (presumably D3_cold), assume the HW has been reset
4371 * back to defaults, recovering from whatever wedged state we left it
4372 * in and so worth trying to use the device once more.
4374 if (i915_terminally_wedged(i915))
4375 i915_gem_unset_wedged(i915);
4378 * If we inherit context state from the BIOS or earlier occupants
4379 * of the GPU, the GPU may be in an inconsistent state when we
4380 * try to take over. The only way to remove the earlier state
4381 * is by resetting. However, resetting on earlier gen is tricky as
4382 * it may impact the display and we are uncertain about the stability
4383 * of the reset, so this could be applied to even earlier gen.
4385 intel_engines_sanitize(i915, false);
4387 intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
4388 intel_runtime_pm_put(i915, wakeref);
4390 mutex_lock(&i915->drm.struct_mutex);
4391 i915_gem_contexts_lost(i915);
4392 mutex_unlock(&i915->drm.struct_mutex);
4395 void i915_gem_suspend(struct drm_i915_private *i915)
4397 intel_wakeref_t wakeref;
4401 wakeref = intel_runtime_pm_get(i915);
4403 flush_workqueue(i915->wq);
4405 mutex_lock(&i915->drm.struct_mutex);
4408 * We have to flush all the executing contexts to main memory so
4409 * that they can be saved in the hibernation image. To ensure the last
4410 * context image is coherent, we have to switch away from it. That
4411 * leaves the i915->kernel_context still active when
4412 * we actually suspend, and its image in memory may not match the GPU
4413 * state. Fortunately, the kernel_context is disposable and we do
4414 * not rely on its state.
4416 switch_to_kernel_context_sync(i915, i915->gt.active_engines);
4418 mutex_unlock(&i915->drm.struct_mutex);
4419 i915_reset_flush(i915);
4421 drain_delayed_work(&i915->gt.retire_work);
4424 * As the idle_work is rearming if it detects a race, play safe and
4425 * repeat the flush until it is definitely idle.
4427 drain_delayed_work(&i915->gt.idle_work);
4430 * Assert that we successfully flushed all the work and
4431 * reset the GPU back to its idle, low power state.
4433 GEM_BUG_ON(i915->gt.awake);
4435 intel_uc_suspend(i915);
4437 intel_runtime_pm_put(i915, wakeref);
4440 void i915_gem_suspend_late(struct drm_i915_private *i915)
4442 struct drm_i915_gem_object *obj;
4443 struct list_head *phases[] = {
4444 &i915->mm.unbound_list,
4445 &i915->mm.bound_list,
4450 * Neither the BIOS, ourselves nor any other kernel
4451 * expects the system to be in execlists mode on startup,
4452 * so we need to reset the GPU back to legacy mode. And the only
4453 * known way to disable logical contexts is through a GPU reset.
4455 * So in order to leave the system in a known default configuration,
4456 * always reset the GPU upon unload and suspend. Afterwards we then
4457 * clean up the GEM state tracking, flushing off the requests and
4458 * leaving the system in a known idle state.
4460 * Note that it is of the utmost importance that the GPU is idle and
4461 * all stray writes are flushed *before* we dismantle the backing
4462 * storage for the pinned objects.
4464 * However, since we are uncertain that resetting the GPU on older
4465 * machines is a good idea, we don't - just in case it leaves the
4466 * machine in an unusable condition.
4469 mutex_lock(&i915->drm.struct_mutex);
4470 for (phase = phases; *phase; phase++) {
4471 list_for_each_entry(obj, *phase, mm.link)
4472 WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
4474 mutex_unlock(&i915->drm.struct_mutex);
4476 intel_uc_sanitize(i915);
4477 i915_gem_sanitize(i915);
4480 void i915_gem_resume(struct drm_i915_private *i915)
4484 WARN_ON(i915->gt.awake);
4486 mutex_lock(&i915->drm.struct_mutex);
4487 intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
4489 i915_gem_restore_gtt_mappings(i915);
4490 i915_gem_restore_fences(i915);
4493 * As we didn't flush the kernel context before suspend, we cannot
4494 * guarantee that the context image is complete. So let's just reset
4495 * it and start again.
4497 i915->gt.resume(i915);
4499 if (i915_gem_init_hw(i915))
4502 intel_uc_resume(i915);
4504 /* Always reload a context for powersaving. */
4505 if (!load_power_context(i915))
4509 intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
4510 mutex_unlock(&i915->drm.struct_mutex);
4514 if (!i915_reset_failed(i915)) {
4515 dev_err(i915->drm.dev,
4516 "Failed to re-initialize GPU, declaring it wedged!\n");
4517 i915_gem_set_wedged(i915);
4522 void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
4524 if (INTEL_GEN(dev_priv) < 5 ||
4525 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4528 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4529 DISP_TILE_SURFACE_SWIZZLING);
4531 if (IS_GEN(dev_priv, 5))
4534 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4535 if (IS_GEN(dev_priv, 6))
4536 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4537 else if (IS_GEN(dev_priv, 7))
4538 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4539 else if (IS_GEN(dev_priv, 8))
4540 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4545 static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
4547 I915_WRITE(RING_CTL(base), 0);
4548 I915_WRITE(RING_HEAD(base), 0);
4549 I915_WRITE(RING_TAIL(base), 0);
4550 I915_WRITE(RING_START(base), 0);
4553 static void init_unused_rings(struct drm_i915_private *dev_priv)
4555 if (IS_I830(dev_priv)) {
4556 init_unused_ring(dev_priv, PRB1_BASE);
4557 init_unused_ring(dev_priv, SRB0_BASE);
4558 init_unused_ring(dev_priv, SRB1_BASE);
4559 init_unused_ring(dev_priv, SRB2_BASE);
4560 init_unused_ring(dev_priv, SRB3_BASE);
4561 } else if (IS_GEN(dev_priv, 2)) {
4562 init_unused_ring(dev_priv, SRB0_BASE);
4563 init_unused_ring(dev_priv, SRB1_BASE);
4564 } else if (IS_GEN(dev_priv, 3)) {
4565 init_unused_ring(dev_priv, PRB1_BASE);
4566 init_unused_ring(dev_priv, PRB2_BASE);
4570 static int __i915_gem_restart_engines(void *data)
4572 struct drm_i915_private *i915 = data;
4573 struct intel_engine_cs *engine;
4574 enum intel_engine_id id;
4577 for_each_engine(engine, i915, id) {
4578 err = engine->init_hw(engine);
4580 DRM_ERROR("Failed to restart %s (%d)\n",
4586 intel_engines_set_scheduler_caps(i915);
4591 int i915_gem_init_hw(struct drm_i915_private *dev_priv)
4595 dev_priv->gt.last_init_time = ktime_get();
4597 /* Double layer security blanket, see i915_gem_init() */
4598 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
4600 if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
4601 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4603 if (IS_HASWELL(dev_priv))
4604 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
4605 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4607 /* Apply the GT workarounds... */
4608 intel_gt_apply_workarounds(dev_priv);
4609 /* ...and determine whether they are sticking. */
4610 intel_gt_verify_workarounds(dev_priv, "init");
4612 i915_gem_init_swizzling(dev_priv);
4615 * At least 830 can leave some of the unused rings
4616 * "active" (ie. head != tail) after resume which
4617 * will prevent c3 entry. Makes sure all unused rings
4620 init_unused_rings(dev_priv);
4622 BUG_ON(!dev_priv->kernel_context);
4623 ret = i915_terminally_wedged(dev_priv);
4627 ret = i915_ppgtt_init_hw(dev_priv);
4629 DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
4633 ret = intel_wopcm_init_hw(&dev_priv->wopcm);
4635 DRM_ERROR("Enabling WOPCM failed (%d)\n", ret);
4639 /* We can't enable contexts until all firmware is loaded */
4640 ret = intel_uc_init_hw(dev_priv);
4642 DRM_ERROR("Enabling uc failed (%d)\n", ret);
4646 intel_mocs_init_l3cc_table(dev_priv);
4648 /* Only when the HW is re-initialised can we replay the requests */
4649 ret = __i915_gem_restart_engines(dev_priv);
4653 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
4658 intel_uc_fini_hw(dev_priv);
4660 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
4665 static int __intel_engines_record_defaults(struct drm_i915_private *i915)
4667 struct i915_gem_context *ctx;
4668 struct intel_engine_cs *engine;
4669 enum intel_engine_id id;
4673 * As we reset the GPU during very early sanitisation, the current
4674 * register state on the GPU should reflect its default values.
4675 * We load a context onto the hw (with restore-inhibit), then switch
4676 * over to a second context to save that default register state. We
4677 * can then prime every new context with that state so they all start
4678 * from the same default HW values.
4681 ctx = i915_gem_context_create_kernel(i915, 0);
4683 return PTR_ERR(ctx);
4685 for_each_engine(engine, i915, id) {
4686 struct i915_request *rq;
4688 rq = i915_request_alloc(engine, ctx);
4695 if (engine->init_context)
4696 err = engine->init_context(rq);
4698 i915_request_add(rq);
4703 /* Flush the default context image to memory, and enable powersaving. */
4704 if (!load_power_context(i915)) {
4709 for_each_engine(engine, i915, id) {
4710 struct intel_context *ce;
4711 struct i915_vma *state;
4714 ce = intel_context_lookup(ctx, engine);
4722 GEM_BUG_ON(intel_context_is_pinned(ce));
4725 * As we will hold a reference to the logical state, it will
4726 * not be torn down with the context, and importantly the
4727 * object will hold onto its vma (making it possible for a
4728 * stray GTT write to corrupt our defaults). Unmap the vma
4729 * from the GTT to prevent such accidents and reclaim the space.
4732 err = i915_vma_unbind(state);
4736 err = i915_gem_object_set_to_cpu_domain(state->obj, false);
4740 engine->default_state = i915_gem_object_get(state->obj);
4741 i915_gem_object_set_cache_coherency(engine->default_state,
4744 /* Check we can acquire the image of the context state */
4745 vaddr = i915_gem_object_pin_map(engine->default_state,
4747 if (IS_ERR(vaddr)) {
4748 err = PTR_ERR(vaddr);
4752 i915_gem_object_unpin_map(engine->default_state);
4755 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
4756 unsigned int found = intel_engines_has_context_isolation(i915);
4759 * Make sure that classes with multiple engine instances all
4760 * share the same basic configuration.
4762 for_each_engine(engine, i915, id) {
4763 unsigned int bit = BIT(engine->uabi_class);
4764 unsigned int expected = engine->default_state ? bit : 0;
4766 if ((found & bit) != expected) {
4767 DRM_ERROR("mismatching default context state for class %d on engine %s\n",
4768 engine->uabi_class, engine->name);
4774 i915_gem_context_set_closed(ctx);
4775 i915_gem_context_put(ctx);
4780 * If we have to abandon now, we expect the engines to be idle
4781 * and ready to be torn down. The quickest way we can accomplish
4782 * this is by declaring ourselves wedged.
4784 i915_gem_set_wedged(i915);
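/*
 * Allocate the GT scratch page, preferring stolen memory and falling back
 * to an internal object, and pin it high in the global GTT so it can serve
 * as a safe target for stray accesses.
 */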
4789 i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
4791 struct drm_i915_gem_object *obj;
4792 struct i915_vma *vma;
4795 obj = i915_gem_object_create_stolen(i915, size);
4797 obj = i915_gem_object_create_internal(i915, size);
4799 DRM_ERROR("Failed to allocate scratch page\n");
4800 return PTR_ERR(obj);
4803 vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
4809 ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
4813 i915->gt.scratch = vma;
4817 i915_gem_object_put(obj);
4821 static void i915_gem_fini_scratch(struct drm_i915_private *i915)
4823 i915_vma_unpin_and_release(&i915->gt.scratch, 0);
4826 int i915_gem_init(struct drm_i915_private *dev_priv)
4830 /* We need to fall back to 4K pages if the host doesn't support huge GTT. */
4831 if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
4832 mkwrite_device_info(dev_priv)->page_sizes =
4833 I915_GTT_PAGE_SIZE_4K;
4835 dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
4837 if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
4838 dev_priv->gt.resume = intel_lr_context_resume;
4839 dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
4841 dev_priv->gt.resume = intel_legacy_submission_resume;
4842 dev_priv->gt.cleanup_engine = intel_engine_cleanup;
4845 i915_timelines_init(dev_priv);
4847 ret = i915_gem_init_userptr(dev_priv);
4851 ret = intel_uc_init_misc(dev_priv);
4855 ret = intel_wopcm_init(&dev_priv->wopcm);
4859 /* This is just a security blanket to placate dragons.
4860 * On some systems, we very sporadically observe that the first TLBs
4861 * used by the CS may be stale, despite us poking the TLB reset. If
4862 * we hold the forcewake during initialisation these problems
4863 * just magically go away.
4865 mutex_lock(&dev_priv->drm.struct_mutex);
4866 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
4868 ret = i915_gem_init_ggtt(dev_priv);
4870 GEM_BUG_ON(ret == -EIO);
4874 ret = i915_gem_init_scratch(dev_priv,
4875 IS_GEN(dev_priv, 2) ? SZ_256K : PAGE_SIZE);
4877 GEM_BUG_ON(ret == -EIO);
4881 ret = i915_gem_contexts_init(dev_priv);
4883 GEM_BUG_ON(ret == -EIO);
4887 ret = intel_engines_init(dev_priv);
4889 GEM_BUG_ON(ret == -EIO);
4893 intel_init_gt_powersave(dev_priv);
4895 ret = intel_uc_init(dev_priv);
4899 ret = i915_gem_init_hw(dev_priv);
4904 * Despite its name, intel_init_clock_gating() applies not only display
4905 * clock gating workarounds but also GT mmio workarounds and the occasional
4906 * GT power context workaround. Worse, sometimes it includes a context
4907 * register workaround which we need to apply before we record the
4908 * default HW state for all contexts.
4910 * FIXME: break up the workarounds and apply them at the right time!
4912 intel_init_clock_gating(dev_priv);
4914 ret = __intel_engines_record_defaults(dev_priv);
4918 if (i915_inject_load_failure()) {
4923 if (i915_inject_load_failure()) {
4928 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
4929 mutex_unlock(&dev_priv->drm.struct_mutex);
4934 * Unwinding is complicated by the fact that we want to handle -EIO to mean
4935 * disable GPU submission but keep KMS alive. We want to mark the
4936 * HW as irrevocably wedged, but keep enough state around that the
4937 * driver doesn't explode during runtime.
4940 mutex_unlock(&dev_priv->drm.struct_mutex);
4942 i915_gem_suspend(dev_priv);
4943 i915_gem_suspend_late(dev_priv);
4945 i915_gem_drain_workqueue(dev_priv);
4947 mutex_lock(&dev_priv->drm.struct_mutex);
4948 intel_uc_fini_hw(dev_priv);
4950 intel_uc_fini(dev_priv);
4953 intel_cleanup_gt_powersave(dev_priv);
4954 i915_gem_cleanup_engines(dev_priv);
4958 i915_gem_contexts_fini(dev_priv);
4960 i915_gem_fini_scratch(dev_priv);
4963 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
4964 mutex_unlock(&dev_priv->drm.struct_mutex);
4967 intel_uc_fini_misc(dev_priv);
4970 i915_gem_cleanup_userptr(dev_priv);
4971 i915_timelines_fini(dev_priv);
4975 mutex_lock(&dev_priv->drm.struct_mutex);
4978 * Allow engine initialisation to fail by marking the GPU as
4979 * wedged. But we only want to do this when the GPU is angry;
4980 * for all other failures, such as an allocation failure, bail.
4982 if (!i915_reset_failed(dev_priv)) {
4983 i915_load_error(dev_priv,
4984 "Failed to initialize GPU, declaring it wedged!\n");
4985 i915_gem_set_wedged(dev_priv);
4988 /* Minimal basic recovery for KMS */
4989 ret = i915_ggtt_enable_hw(dev_priv);
4990 i915_gem_restore_gtt_mappings(dev_priv);
4991 i915_gem_restore_fences(dev_priv);
4992 intel_init_clock_gating(dev_priv);
4994 mutex_unlock(&dev_priv->drm.struct_mutex);
4997 i915_gem_drain_freed_objects(dev_priv);
5001 void i915_gem_fini(struct drm_i915_private *dev_priv)
5003 i915_gem_suspend_late(dev_priv);
5004 intel_disable_gt_powersave(dev_priv);
5006 /* Flush any outstanding unpin_work. */
5007 i915_gem_drain_workqueue(dev_priv);
5009 mutex_lock(&dev_priv->drm.struct_mutex);
5010 intel_uc_fini_hw(dev_priv);
5011 intel_uc_fini(dev_priv);
5012 i915_gem_cleanup_engines(dev_priv);
5013 i915_gem_contexts_fini(dev_priv);
5014 i915_gem_fini_scratch(dev_priv);
5015 mutex_unlock(&dev_priv->drm.struct_mutex);
5017 intel_wa_list_free(&dev_priv->gt_wa_list);
5019 intel_cleanup_gt_powersave(dev_priv);
5021 intel_uc_fini_misc(dev_priv);
5022 i915_gem_cleanup_userptr(dev_priv);
5023 i915_timelines_fini(dev_priv);
5025 i915_gem_drain_freed_objects(dev_priv);
5027 WARN_ON(!list_empty(&dev_priv->contexts.list));
5030 void i915_gem_init_mmio(struct drm_i915_private *i915)
5032 i915_gem_sanitize(i915);
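/* Tear down every engine through the submission backend's cleanup hook. */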
5036 i915_gem_cleanup_engines(struct drm_i915_private *dev_priv)
5038 struct intel_engine_cs *engine;
5039 enum intel_engine_id id;
5041 for_each_engine(engine, dev_priv, id)
5042 dev_priv->gt.cleanup_engine(engine);
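/*
 * Work out how many fence registers this platform provides (a vGPU may
 * grant us fewer), zero them, and record the detected bit-6 swizzle mode.
 */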
5046 i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
5050 if (INTEL_GEN(dev_priv) >= 7 && !IS_VALLEYVIEW(dev_priv) &&
5051 !IS_CHERRYVIEW(dev_priv))
5052 dev_priv->num_fence_regs = 32;
5053 else if (INTEL_GEN(dev_priv) >= 4 ||
5054 IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
5055 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
5056 dev_priv->num_fence_regs = 16;
5058 dev_priv->num_fence_regs = 8;
5060 if (intel_vgpu_active(dev_priv))
5061 dev_priv->num_fence_regs =
5062 I915_READ(vgtif_reg(avail_rs.fence_num));
5064 /* Initialize fence registers to zero */
5065 for (i = 0; i < dev_priv->num_fence_regs; i++) {
5066 struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
5068 fence->i915 = dev_priv;
5070 list_add_tail(&fence->link, &dev_priv->mm.fence_list);
5072 i915_gem_restore_fences(dev_priv);
5074 i915_gem_detect_bit_6_swizzle(dev_priv);
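/* One-time initialisation of the GEM memory-management locks, lists and workers. */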
5077 static void i915_gem_init__mm(struct drm_i915_private *i915)
5079 spin_lock_init(&i915->mm.object_stat_lock);
5080 spin_lock_init(&i915->mm.obj_lock);
5081 spin_lock_init(&i915->mm.free_lock);
5083 init_llist_head(&i915->mm.free_list);
5085 INIT_LIST_HEAD(&i915->mm.unbound_list);
5086 INIT_LIST_HEAD(&i915->mm.bound_list);
5087 INIT_LIST_HEAD(&i915->mm.fence_list);
5088 INIT_LIST_HEAD(&i915->mm.userfault_list);
5090 INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
5093 int i915_gem_init_early(struct drm_i915_private *dev_priv)
5097 INIT_LIST_HEAD(&dev_priv->gt.active_rings);
5098 INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
5100 i915_gem_init__mm(dev_priv);
5102 INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
5103 i915_gem_retire_work_handler);
5104 INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
5105 i915_gem_idle_work_handler);
5106 init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
5107 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
5108 mutex_init(&dev_priv->gpu_error.wedge_mutex);
5109 init_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);
5111 atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
5113 spin_lock_init(&dev_priv->fb_tracking.lock);
5115 err = i915_gemfs_init(dev_priv);
5117 DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", err);
5122 void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
5124 i915_gem_drain_freed_objects(dev_priv);
5125 GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
5126 GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
5127 WARN_ON(dev_priv->mm.object_count);
5129 cleanup_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);
5131 i915_gemfs_fini(dev_priv);
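/* Called when freezing for hibernation, to trim how much memory must be snapshotted. */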
5134 int i915_gem_freeze(struct drm_i915_private *dev_priv)
5136 /* Discard all purgeable objects and let userspace recover those as
5137 * required after resuming.
5139 i915_gem_shrink_all(dev_priv);
5144 int i915_gem_freeze_late(struct drm_i915_private *i915)
5146 struct drm_i915_gem_object *obj;
5147 struct list_head *phases[] = {
5148 &i915->mm.unbound_list,
5149 &i915->mm.bound_list,
5154 * Called just before we write the hibernation image.
5156 * We need to update the domain tracking to reflect that the CPU
5157 * will be accessing all the pages to create and restore from the
5158 * hibernation, and so upon restoration those pages will be in the CPU domain.
5161 * To make sure the hibernation image contains the latest state,
5162 * we update that state just before writing out the image.
5164 * To try to reduce the hibernation image, we manually shrink
5165 * the objects as well; see i915_gem_freeze().
5168 i915_gem_shrink(i915, -1UL, NULL, I915_SHRINK_UNBOUND);
5169 i915_gem_drain_freed_objects(i915);
5171 mutex_lock(&i915->drm.struct_mutex);
5172 for (phase = phases; *phase; phase++) {
5173 list_for_each_entry(obj, *phase, mm.link)
5174 WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true));
5176 mutex_unlock(&i915->drm.struct_mutex);
5181 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
5183 struct drm_i915_file_private *file_priv = file->driver_priv;
5184 struct i915_request *request;
5186 /* Clean up our request list when the client is going away, so that
5187 * later retire_requests won't dereference our soon-to-be-gone file_priv.
5190 spin_lock(&file_priv->mm.lock);
5191 list_for_each_entry(request, &file_priv->mm.request_list, client_link)
5192 request->file_priv = NULL;
5193 spin_unlock(&file_priv->mm.lock);
5196 int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
5198 struct drm_i915_file_private *file_priv;
5203 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
5207 file->driver_priv = file_priv;
5208 file_priv->dev_priv = i915;
5209 file_priv->file = file;
5211 spin_lock_init(&file_priv->mm.lock);
5212 INIT_LIST_HEAD(&file_priv->mm.request_list);
5214 file_priv->bsd_engine = -1;
5215 file_priv->hang_timestamp = jiffies;
5217 ret = i915_gem_context_open(i915, file);
5225 * i915_gem_track_fb - update frontbuffer tracking
5226 * @old: current GEM buffer for the frontbuffer slots
5227 * @new: new GEM buffer for the frontbuffer slots
5228 * @frontbuffer_bits: bitmask of frontbuffer slots
5230 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
5231 * from @old and setting them in @new. Both @old and @new can be NULL.
5233 void i915_gem_track_fb(struct drm_i915_gem_object *old,
5234 struct drm_i915_gem_object *new,
5235 unsigned frontbuffer_bits)
5237 /* Control of individual bits within the mask is guarded by
5238 * the owning plane->mutex, i.e. we can never see concurrent
5239 * manipulation of individual bits. But since the bitfield as a whole
5240 * is updated using RMW, we need to use atomics in order to update the bits.
5243 BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
5244 BITS_PER_TYPE(atomic_t));
5247 WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
5248 atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
5252 WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
5253 atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
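/*
 * Illustrative sketch of a caller (the real callers live in the display
 * code and the exact plane/fb accessors may differ):
 *
 *	i915_gem_track_fb(intel_fb_obj(old_state->fb),
 *			  intel_fb_obj(new_state->fb),
 *			  to_intel_plane(plane)->frontbuffer_bit);
 */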
5257 /* Allocate a new GEM object and fill it with the supplied data */
5258 struct drm_i915_gem_object *
5259 i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
5260 const void *data, size_t size)
5262 struct drm_i915_gem_object *obj;
5267 obj = i915_gem_object_create(dev_priv, round_up(size, PAGE_SIZE));
5271 GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
5273 file = obj->base.filp;
5276 unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
5278 void *pgdata, *vaddr;
5280 err = pagecache_write_begin(file, file->f_mapping,
5287 memcpy(vaddr, data, len);
5290 err = pagecache_write_end(file, file->f_mapping,
5304 i915_gem_object_put(obj);
5305 return ERR_PTR(err);
5308 struct scatterlist *
5309 i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
5311 unsigned int *offset)
5313 struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
5314 struct scatterlist *sg;
5315 unsigned int idx, count;
5318 GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
5319 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
5321 /* As we iterate forward through the sg, we record each entry in a
5322 * radixtree for quick repeated (backwards) lookups. If we have seen
5323 * this index previously, we will have an entry for it.
5325 * Initial lookup is O(N), but this is amortized to O(1) for
5326 * sequential page access (where each new request is consecutive
5327 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
5328 * i.e. O(1) with a large constant!
5330 if (n < READ_ONCE(iter->sg_idx))
5333 mutex_lock(&iter->lock);
5335 /* We prefer to reuse the last sg so that repeated lookups of this
5336 * (or the subsequent) sg are fast - comparing against the last
5337 * sg is faster than going through the radixtree.
5342 count = __sg_page_count(sg);
5344 while (idx + count <= n) {
5349 /* If we cannot allocate and insert this entry, or the
5350 * individual pages from this range, cancel updating the
5351 * sg_idx so that on this lookup we are forced to linearly
5352 * scan onwards, but on future lookups we will try the
5353 * insertion again (in which case we need to be careful of
5354 * the error return reporting that we have already inserted this entry).
5357 ret = radix_tree_insert(&iter->radix, idx, sg);
5358 if (ret && ret != -EEXIST)
5361 entry = xa_mk_value(idx);
5362 for (i = 1; i < count; i++) {
5363 ret = radix_tree_insert(&iter->radix, idx + i, entry);
5364 if (ret && ret != -EEXIST)
5369 sg = ____sg_next(sg);
5370 count = __sg_page_count(sg);
5377 mutex_unlock(&iter->lock);
5379 if (unlikely(n < idx)) /* insertion completed by another thread */
5382 /* In case we failed to insert the entry into the radixtree, we need
5383 * to look beyond the current sg.
5385 while (idx + count <= n) {
5387 sg = ____sg_next(sg);
5388 count = __sg_page_count(sg);
5397 sg = radix_tree_lookup(&iter->radix, n);
5400 /* If this index is in the middle of a multi-page sg entry,
5401 * the radix tree will contain a value entry that points
5402 * to the start of that range. We will return the pointer to
5403 * the base page and the offset of this page within the sg entry.
5407 if (unlikely(xa_is_value(sg))) {
5408 unsigned long base = xa_to_value(sg);
5410 sg = radix_tree_lookup(&iter->radix, base);
5422 i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
5424 struct scatterlist *sg;
5425 unsigned int offset;
5427 GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
5429 sg = i915_gem_object_get_sg(obj, n, &offset);
5430 return nth_page(sg_page(sg), offset);
5433 /* Like i915_gem_object_get_page(), but mark the returned page dirty */
5435 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
5440 page = i915_gem_object_get_page(obj, n);
5442 set_page_dirty(page);
5448 i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
5451 struct scatterlist *sg;
5452 unsigned int offset;
5454 sg = i915_gem_object_get_sg(obj, n, &offset);
5455 return sg_dma_address(sg) + (offset << PAGE_SHIFT);
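/*
 * Swap a shmem-backed object over to a contiguous physical allocation
 * (e.g. for legacy cursor/overlay hardware that needs physical addresses),
 * replacing its backing store and ops vtable.
 */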
5458 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
5460 struct sg_table *pages;
5463 if (align > obj->base.size)
5466 if (obj->ops == &i915_gem_phys_ops)
5469 if (obj->ops != &i915_gem_object_ops)
5472 err = i915_gem_object_unbind(obj);
5476 mutex_lock(&obj->mm.lock);
5478 if (obj->mm.madv != I915_MADV_WILLNEED) {
5483 if (obj->mm.quirked) {
5488 if (obj->mm.mapping) {
5493 pages = __i915_gem_object_unset_pages(obj);
5495 obj->ops = &i915_gem_phys_ops;
5497 err = ____i915_gem_object_get_pages(obj);
5501 /* Perma-pin (until release) the physical set of pages */
5502 __i915_gem_object_pin_pages(obj);
5504 if (!IS_ERR_OR_NULL(pages))
5505 i915_gem_object_ops.put_pages(obj, pages);
5506 mutex_unlock(&obj->mm.lock);
5510 obj->ops = &i915_gem_object_ops;
5511 if (!IS_ERR_OR_NULL(pages)) {
5512 unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);
5514 __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
5517 mutex_unlock(&obj->mm.lock);
5521 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
5522 #include "selftests/scatterlist.c"
5523 #include "selftests/mock_gem_device.c"
5524 #include "selftests/huge_gem_object.c"
5525 #include "selftests/huge_pages.c"
5526 #include "selftests/i915_gem_object.c"
5527 #include "selftests/i915_gem_coherency.c"
5528 #include "selftests/i915_gem.c"