1 /*
2 * Copyright © 2008-2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
28 #include <drm/drm_vma_manager.h>
29 #include <drm/i915_drm.h>
30 #include <linux/dma-fence-array.h>
31 #include <linux/kthread.h>
32 #include <linux/reservation.h>
33 #include <linux/shmem_fs.h>
34 #include <linux/slab.h>
35 #include <linux/stop_machine.h>
36 #include <linux/swap.h>
37 #include <linux/pci.h>
38 #include <linux/dma-buf.h>
40 #include "i915_drv.h"
41 #include "i915_gem_clflush.h"
42 #include "i915_gemfs.h"
43 #include "i915_reset.h"
44 #include "i915_trace.h"
45 #include "i915_vgpu.h"
47 #include "intel_drv.h"
48 #include "intel_frontbuffer.h"
49 #include "intel_mocs.h"
50 #include "intel_workarounds.h"
52 static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
54 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
55 {
56 if (obj->cache_dirty)
57 return false;
59 if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
60 return true;
62 return obj->pin_global; /* currently in use by HW, keep flushed */
63 }
65 static int
66 insert_mappable_node(struct i915_ggtt *ggtt,
67 struct drm_mm_node *node, u32 size)
68 {
69 memset(node, 0, sizeof(*node));
70 return drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
71 size, 0, I915_COLOR_UNEVICTABLE,
72 0, ggtt->mappable_end,
73 DRM_MM_INSERT_LOW);
74 }
76 static void
77 remove_mappable_node(struct drm_mm_node *node)
78 {
79 drm_mm_remove_node(node);
80 }
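/*
 * Note: the two helpers above carve a temporary, page-sized drm_mm node out
 * of the mappable range of the GGTT. They back the pread/pwrite slow paths
 * further down, which fall back to mapping the object one page at a time
 * through the aperture when it cannot be pinned in mappable space whole.
 */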
82 /* some bookkeeping */
83 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
84 u64 size)
85 {
86 spin_lock(&dev_priv->mm.object_stat_lock);
87 dev_priv->mm.object_count++;
88 dev_priv->mm.object_memory += size;
89 spin_unlock(&dev_priv->mm.object_stat_lock);
90 }
92 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
93 u64 size)
94 {
95 spin_lock(&dev_priv->mm.object_stat_lock);
96 dev_priv->mm.object_count--;
97 dev_priv->mm.object_memory -= size;
98 spin_unlock(&dev_priv->mm.object_stat_lock);
99 }
102 i915_gem_wait_for_error(struct i915_gpu_error *error)
109 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
110 * userspace. If it takes that long something really bad is going on and
111 * we should simply try to bail out and fail as gracefully as possible.
113 ret = wait_event_interruptible_timeout(error->reset_queue,
114 !i915_reset_backoff(error),
117 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
119 } else if (ret < 0) {
126 int i915_mutex_lock_interruptible(struct drm_device *dev)
128 struct drm_i915_private *dev_priv = to_i915(dev);
131 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
135 ret = mutex_lock_interruptible(&dev->struct_mutex);
142 static u32 __i915_gem_park(struct drm_i915_private *i915)
144 intel_wakeref_t wakeref;
148 lockdep_assert_held(&i915->drm.struct_mutex);
149 GEM_BUG_ON(i915->gt.active_requests);
150 GEM_BUG_ON(!list_empty(&i915->gt.active_rings));
153 return I915_EPOCH_INVALID;
155 GEM_BUG_ON(i915->gt.epoch == I915_EPOCH_INVALID);
158 * Be paranoid and flush a concurrent interrupt to make sure
159 * we don't reactivate any irq tasklets after parking.
161 * FIXME: Note that even though we have waited for execlists to be idle,
162 * there may still be an in-flight interrupt even though the CSB
163 * is now empty. synchronize_irq() makes sure that a residual interrupt
164 * is completed before we continue, but it doesn't prevent the HW from
165 * raising a spurious interrupt later. To complete the shield we should
166 * coordinate disabling the CS irq with flushing the interrupts.
168 synchronize_irq(i915->drm.irq);
170 intel_engines_park(i915);
171 i915_timelines_park(i915);
173 i915_pmu_gt_parked(i915);
174 i915_vma_parked(i915);
176 wakeref = fetch_and_zero(&i915->gt.awake);
177 GEM_BUG_ON(!wakeref);
179 if (INTEL_GEN(i915) >= 6)
182 intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);
184 return i915->gt.epoch;
187 void i915_gem_park(struct drm_i915_private *i915)
191 lockdep_assert_held(&i915->drm.struct_mutex);
192 GEM_BUG_ON(i915->gt.active_requests);
197 /* Defer the actual call to __i915_gem_park() to prevent ping-pongs */
198 mod_delayed_work(i915->wq, &i915->gt.idle_work, msecs_to_jiffies(100));
201 void i915_gem_unpark(struct drm_i915_private *i915)
205 lockdep_assert_held(&i915->drm.struct_mutex);
206 GEM_BUG_ON(!i915->gt.active_requests);
207 assert_rpm_wakelock_held(i915);
213 * It seems that the DMC likes to transition between the DC states a lot
214 * when there are no connected displays (no active power domains) during
215 * command submission.
217 * This activity has a negative impact on the performance of the chip with
218 * huge latencies observed in the interrupt handler and elsewhere.
220 * Work around it by grabbing a GT IRQ power domain whilst there is any
221 * GT activity, preventing any DC state transitions.
223 i915->gt.awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
224 GEM_BUG_ON(!i915->gt.awake);
226 if (unlikely(++i915->gt.epoch == 0)) /* keep 0 as invalid */
229 intel_enable_gt_powersave(i915);
230 i915_update_gfx_val(i915);
231 if (INTEL_GEN(i915) >= 6)
233 i915_pmu_gt_unparked(i915);
235 intel_engines_unpark(i915);
237 i915_queue_hangcheck(i915);
239 queue_delayed_work(i915->wq,
240 &i915->gt.retire_work,
241 round_jiffies_up_relative(HZ));
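/*
 * Park/unpark bracket all GPU activity: i915_gem_unpark() above takes the
 * GT IRQ power-domain wakeref stashed in i915->gt.awake, bumps the epoch and
 * kicks the hangcheck and retire workers, while __i915_gem_park() releases
 * that wakeref again once the deferred idle work finds no active requests.
 */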
245 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
246 struct drm_file *file)
248 struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
249 struct drm_i915_gem_get_aperture *args = data;
250 struct i915_vma *vma;
253 mutex_lock(&ggtt->vm.mutex);
255 pinned = ggtt->vm.reserved;
256 list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
257 if (i915_vma_is_pinned(vma))
258 pinned += vma->node.size;
260 mutex_unlock(&ggtt->vm.mutex);
262 args->aper_size = ggtt->vm.total;
263 args->aper_available_size = args->aper_size - pinned;
268 static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
270 struct address_space *mapping = obj->base.filp->f_mapping;
271 drm_dma_handle_t *phys;
273 struct scatterlist *sg;
278 if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
281 /* Always aligning to the object size allows a single allocation
282 * to handle all possible callers, and given typical object sizes,
283 * the alignment of the buddy allocation will naturally match.
285 phys = drm_pci_alloc(obj->base.dev,
286 roundup_pow_of_two(obj->base.size),
287 roundup_pow_of_two(obj->base.size));
292 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
296 page = shmem_read_mapping_page(mapping, i);
302 src = kmap_atomic(page);
303 memcpy(vaddr, src, PAGE_SIZE);
304 drm_clflush_virt_range(vaddr, PAGE_SIZE);
311 i915_gem_chipset_flush(to_i915(obj->base.dev));
313 st = kmalloc(sizeof(*st), GFP_KERNEL);
319 if (sg_alloc_table(st, 1, GFP_KERNEL)) {
327 sg->length = obj->base.size;
329 sg_dma_address(sg) = phys->busaddr;
330 sg_dma_len(sg) = obj->base.size;
332 obj->phys_handle = phys;
334 __i915_gem_object_set_pages(obj, st, sg->length);
339 drm_pci_free(obj->base.dev, phys);
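/*
 * The phys backing constructed above snapshots the object: every shmem page
 * is copied into a single contiguous buffer from drm_pci_alloc() and a
 * one-entry sg table is pointed at phys->busaddr, so the object appears to
 * the hardware as one physically contiguous allocation.
 */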
344 static void __start_cpu_write(struct drm_i915_gem_object *obj)
346 obj->read_domains = I915_GEM_DOMAIN_CPU;
347 obj->write_domain = I915_GEM_DOMAIN_CPU;
348 if (cpu_write_needs_clflush(obj))
349 obj->cache_dirty = true;
353 __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
354 struct sg_table *pages,
357 GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
359 if (obj->mm.madv == I915_MADV_DONTNEED)
360 obj->mm.dirty = false;
363 (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
364 !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
365 drm_clflush_sg(pages);
367 __start_cpu_write(obj);
371 i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
372 struct sg_table *pages)
374 __i915_gem_object_release_shmem(obj, pages, false);
377 struct address_space *mapping = obj->base.filp->f_mapping;
378 char *vaddr = obj->phys_handle->vaddr;
381 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
385 page = shmem_read_mapping_page(mapping, i);
389 dst = kmap_atomic(page);
390 drm_clflush_virt_range(vaddr, PAGE_SIZE);
391 memcpy(dst, vaddr, PAGE_SIZE);
394 set_page_dirty(page);
395 if (obj->mm.madv == I915_MADV_WILLNEED)
396 mark_page_accessed(page);
400 obj->mm.dirty = false;
403 sg_free_table(pages);
406 drm_pci_free(obj->base.dev, obj->phys_handle);
410 i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
412 i915_gem_object_unpin_pages(obj);
415 static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
416 .get_pages = i915_gem_object_get_pages_phys,
417 .put_pages = i915_gem_object_put_pages_phys,
418 .release = i915_gem_object_release_phys,
421 static const struct drm_i915_gem_object_ops i915_gem_object_ops;
423 int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
425 struct i915_vma *vma;
426 LIST_HEAD(still_in_list);
429 lockdep_assert_held(&obj->base.dev->struct_mutex);
431 /* Closed vma are removed from the obj->vma_list - but they may
432 * still have an active binding on the object. To remove those we
433 * must wait for all rendering to complete to the object (as unbinding
434 * must anyway), and retire the requests.
436 ret = i915_gem_object_set_to_cpu_domain(obj, false);
440 spin_lock(&obj->vma.lock);
441 while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
444 list_move_tail(&vma->obj_link, &still_in_list);
445 spin_unlock(&obj->vma.lock);
447 ret = i915_vma_unbind(vma);
449 spin_lock(&obj->vma.lock);
451 list_splice(&still_in_list, &obj->vma.list);
452 spin_unlock(&obj->vma.lock);
458 i915_gem_object_wait_fence(struct dma_fence *fence,
461 struct intel_rps_client *rps_client)
463 struct i915_request *rq;
465 BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);
467 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
470 if (!dma_fence_is_i915(fence))
471 return dma_fence_wait_timeout(fence,
472 flags & I915_WAIT_INTERRUPTIBLE,
475 rq = to_request(fence);
476 if (i915_request_completed(rq))
480 * This client is about to stall waiting for the GPU. In many cases
481 * this is undesirable and limits the throughput of the system, as
482 * many clients cannot continue processing user input/output whilst
483 * blocked. RPS autotuning may take tens of milliseconds to respond
484 * to the GPU load and thus incurs additional latency for the client.
485 * We can circumvent that by promoting the GPU frequency to maximum
486 * before we wait. This makes the GPU throttle up much more quickly
487 * (good for benchmarks and user experience, e.g. window animations),
488 * but at a cost of spending more power processing the workload
489 * (bad for battery). Not all clients even want their results
490 * immediately and for them we should just let the GPU select its own
491 * frequency to maximise efficiency. To prevent a single client from
492 * forcing the clocks too high for the whole system, we only allow
493 * each client to waitboost once in a busy period.
495 if (rps_client && !i915_request_started(rq)) {
496 if (INTEL_GEN(rq->i915) >= 6)
497 gen6_rps_boost(rq, rps_client);
500 timeout = i915_request_wait(rq, flags, timeout);
503 if (flags & I915_WAIT_LOCKED && i915_request_completed(rq))
504 i915_request_retire_upto(rq);
510 i915_gem_object_wait_reservation(struct reservation_object *resv,
513 struct intel_rps_client *rps_client)
515 unsigned int seq = __read_seqcount_begin(&resv->seq);
516 struct dma_fence *excl;
517 bool prune_fences = false;
519 if (flags & I915_WAIT_ALL) {
520 struct dma_fence **shared;
521 unsigned int count, i;
524 ret = reservation_object_get_fences_rcu(resv,
525 &excl, &count, &shared);
529 for (i = 0; i < count; i++) {
530 timeout = i915_gem_object_wait_fence(shared[i],
536 dma_fence_put(shared[i]);
539 for (; i < count; i++)
540 dma_fence_put(shared[i]);
544 * If both shared fences and an exclusive fence exist,
545 * then by construction the shared fences must be later
546 * than the exclusive fence. If we successfully wait for
547 * all the shared fences, we know that the exclusive fence
548 * must also be signaled. If all the shared fences are
549 * signaled, we can prune the array and recover the
550 * floating references on the fences/requests.
552 prune_fences = count && timeout >= 0;
554 excl = reservation_object_get_excl_rcu(resv);
557 if (excl && timeout >= 0)
558 timeout = i915_gem_object_wait_fence(excl, flags, timeout,
564 * Opportunistically prune the fences iff we know they have *all* been
565 * signaled and that the reservation object has not been changed (i.e.
566 * no new fences have been added).
568 if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) {
569 if (reservation_object_trylock(resv)) {
570 if (!__read_seqcount_retry(&resv->seq, seq))
571 reservation_object_add_excl_fence(resv, NULL);
572 reservation_object_unlock(resv);
579 static void __fence_set_priority(struct dma_fence *fence,
580 const struct i915_sched_attr *attr)
582 struct i915_request *rq;
583 struct intel_engine_cs *engine;
585 if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence))
588 rq = to_request(fence);
592 rcu_read_lock(); /* RCU serialisation for set-wedged protection */
593 if (engine->schedule)
594 engine->schedule(rq, attr);
596 local_bh_enable(); /* kick the tasklets if queues were reprioritised */
599 static void fence_set_priority(struct dma_fence *fence,
600 const struct i915_sched_attr *attr)
602 /* Recurse once into a fence-array */
603 if (dma_fence_is_array(fence)) {
604 struct dma_fence_array *array = to_dma_fence_array(fence);
607 for (i = 0; i < array->num_fences; i++)
608 __fence_set_priority(array->fences[i], attr);
610 __fence_set_priority(fence, attr);
615 i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
617 const struct i915_sched_attr *attr)
619 struct dma_fence *excl;
621 if (flags & I915_WAIT_ALL) {
622 struct dma_fence **shared;
623 unsigned int count, i;
626 ret = reservation_object_get_fences_rcu(obj->resv,
627 &excl, &count, &shared);
631 for (i = 0; i < count; i++) {
632 fence_set_priority(shared[i], attr);
633 dma_fence_put(shared[i]);
638 excl = reservation_object_get_excl_rcu(obj->resv);
642 fence_set_priority(excl, attr);
649 * Waits for rendering to the object to be completed
650 * @obj: i915 gem object
651 * @flags: how to wait (under a lock, for all rendering or just for writes etc)
652 * @timeout: how long to wait
653 * @rps_client: client (user process) to charge for any waitboosting
656 i915_gem_object_wait(struct drm_i915_gem_object *obj,
659 struct intel_rps_client *rps_client)
662 GEM_BUG_ON(timeout < 0);
664 timeout = i915_gem_object_wait_reservation(obj->resv,
667 return timeout < 0 ? timeout : 0;
670 static struct intel_rps_client *to_rps_client(struct drm_file *file)
672 struct drm_i915_file_private *fpriv = file->driver_priv;
674 return &fpriv->rps_client;
678 i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
679 struct drm_i915_gem_pwrite *args,
680 struct drm_file *file)
682 void *vaddr = obj->phys_handle->vaddr + args->offset;
683 char __user *user_data = u64_to_user_ptr(args->data_ptr);
685 /* We manually control the domain here and pretend that it
686 * remains coherent, i.e. in the GTT domain, like shmem_pwrite.
688 intel_fb_obj_invalidate(obj, ORIGIN_CPU);
689 if (copy_from_user(vaddr, user_data, args->size))
692 drm_clflush_virt_range(vaddr, args->size);
693 i915_gem_chipset_flush(to_i915(obj->base.dev));
695 intel_fb_obj_flush(obj, ORIGIN_CPU);
699 void *i915_gem_object_alloc(struct drm_i915_private *dev_priv)
701 return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
704 void i915_gem_object_free(struct drm_i915_gem_object *obj)
706 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
707 kmem_cache_free(dev_priv->objects, obj);
711 i915_gem_create(struct drm_file *file,
712 struct drm_i915_private *dev_priv,
716 struct drm_i915_gem_object *obj;
720 size = roundup(size, PAGE_SIZE);
724 /* Allocate the new object */
725 obj = i915_gem_object_create(dev_priv, size);
729 ret = drm_gem_handle_create(file, &obj->base, &handle);
730 /* drop reference from allocate - handle holds it now */
731 i915_gem_object_put(obj);
740 i915_gem_dumb_create(struct drm_file *file,
741 struct drm_device *dev,
742 struct drm_mode_create_dumb *args)
744 /* have to work out size/pitch and return them */
745 args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
746 args->size = args->pitch * args->height;
747 return i915_gem_create(file, to_i915(dev),
748 args->size, &args->handle);
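/*
 * Worked example for the dumb-buffer sizing above: a 1920-pixel-wide, 32bpp
 * buffer (4 bytes per pixel) gets pitch = ALIGN(1920 * 4, 64) = 7680 bytes
 * and size = pitch * height; the 64-byte alignment is the pitch granularity
 * this ioctl guarantees to callers.
 */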
751 static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
752 {
753 return !(obj->cache_level == I915_CACHE_NONE ||
754 obj->cache_level == I915_CACHE_WT);
755 }
758 * Creates a new mm object and returns a handle to it.
759 * @dev: drm device pointer
760 * @data: ioctl data blob
761 * @file: drm file pointer
764 i915_gem_create_ioctl(struct drm_device *dev, void *data,
765 struct drm_file *file)
767 struct drm_i915_private *dev_priv = to_i915(dev);
768 struct drm_i915_gem_create *args = data;
770 i915_gem_flush_free_objects(dev_priv);
772 return i915_gem_create(file, dev_priv,
773 args->size, &args->handle);
776 static inline enum fb_op_origin
777 fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
779 return (domain == I915_GEM_DOMAIN_GTT ?
780 obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
783 void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
785 intel_wakeref_t wakeref;
788 * No actual flushing is required for the GTT write domain for reads
789 * from the GTT domain. Writes to it "immediately" go to main memory
790 * as far as we know, so there's no chipset flush. It also doesn't
791 * land in the GPU render cache.
793 * However, we do have to enforce the order so that all writes through
794 * the GTT land before any writes to the device, such as updates to
795 * the GATT itself.
797 * We also have to wait a bit for the writes to land from the GTT.
798 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
799 * timing. This issue has only been observed when switching quickly
800 * between GTT writes and CPU reads from inside the kernel on recent hw,
801 * and it appears to only affect discrete GTT blocks (i.e. on LLC
802 * system agents we cannot reproduce this behaviour, until Cannonlake
808 if (INTEL_INFO(dev_priv)->has_coherent_ggtt)
811 i915_gem_chipset_flush(dev_priv);
813 with_intel_runtime_pm(dev_priv, wakeref) {
814 spin_lock_irq(&dev_priv->uncore.lock);
816 POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE));
818 spin_unlock_irq(&dev_priv->uncore.lock);
823 flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
825 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
826 struct i915_vma *vma;
828 if (!(obj->write_domain & flush_domains))
831 switch (obj->write_domain) {
832 case I915_GEM_DOMAIN_GTT:
833 i915_gem_flush_ggtt_writes(dev_priv);
835 intel_fb_obj_flush(obj,
836 fb_write_origin(obj, I915_GEM_DOMAIN_GTT));
838 for_each_ggtt_vma(vma, obj) {
842 i915_vma_unset_ggtt_write(vma);
846 case I915_GEM_DOMAIN_WC:
850 case I915_GEM_DOMAIN_CPU:
851 i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
854 case I915_GEM_DOMAIN_RENDER:
855 if (gpu_write_needs_clflush(obj))
856 obj->cache_dirty = true;
860 obj->write_domain = 0;
864 * Pins the specified object's pages and synchronizes the object with
865 * GPU accesses. Sets needs_clflush to non-zero if the caller should
866 * flush the object from the CPU cache.
868 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
869 unsigned int *needs_clflush)
873 lockdep_assert_held(&obj->base.dev->struct_mutex);
876 if (!i915_gem_object_has_struct_page(obj))
879 ret = i915_gem_object_wait(obj,
880 I915_WAIT_INTERRUPTIBLE |
882 MAX_SCHEDULE_TIMEOUT,
887 ret = i915_gem_object_pin_pages(obj);
891 if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ ||
892 !static_cpu_has(X86_FEATURE_CLFLUSH)) {
893 ret = i915_gem_object_set_to_cpu_domain(obj, false);
900 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
902 /* If we're not in the cpu read domain, set ourselves into the gtt
903 * read domain and manually flush cachelines (if required). This
904 * optimizes for the case when the gpu will dirty the data
905 * anyway again before the next pread happens.
907 if (!obj->cache_dirty &&
908 !(obj->read_domains & I915_GEM_DOMAIN_CPU))
909 *needs_clflush = CLFLUSH_BEFORE;
912 /* return with the pages pinned */
916 i915_gem_object_unpin_pages(obj);
920 int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
921 unsigned int *needs_clflush)
925 lockdep_assert_held(&obj->base.dev->struct_mutex);
928 if (!i915_gem_object_has_struct_page(obj))
931 ret = i915_gem_object_wait(obj,
932 I915_WAIT_INTERRUPTIBLE |
935 MAX_SCHEDULE_TIMEOUT,
940 ret = i915_gem_object_pin_pages(obj);
944 if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE ||
945 !static_cpu_has(X86_FEATURE_CLFLUSH)) {
946 ret = i915_gem_object_set_to_cpu_domain(obj, true);
953 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
955 /* If we're not in the cpu write domain, set ourselves into the
956 * gtt write domain and manually flush cachelines (as required).
957 * This optimizes for the case when the gpu will use the data
958 * right away and we therefore have to clflush anyway.
960 if (!obj->cache_dirty) {
961 *needs_clflush |= CLFLUSH_AFTER;
964 * Same trick applies to invalidate partially written
965 * cachelines read before writing.
967 if (!(obj->read_domains & I915_GEM_DOMAIN_CPU))
968 *needs_clflush |= CLFLUSH_BEFORE;
972 intel_fb_obj_invalidate(obj, ORIGIN_CPU);
973 obj->mm.dirty = true;
974 /* return with the pages pinned */
978 i915_gem_object_unpin_pages(obj);
983 shmem_pread(struct page *page, int offset, int len, char __user *user_data,
992 drm_clflush_virt_range(vaddr + offset, len);
994 ret = __copy_to_user(user_data, vaddr + offset, len);
998 return ret ? -EFAULT : 0;
1002 i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
1003 struct drm_i915_gem_pread *args)
1005 char __user *user_data;
1007 unsigned int needs_clflush;
1008 unsigned int idx, offset;
1011 ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
1015 ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
1016 mutex_unlock(&obj->base.dev->struct_mutex);
1020 remain = args->size;
1021 user_data = u64_to_user_ptr(args->data_ptr);
1022 offset = offset_in_page(args->offset);
1023 for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
1024 struct page *page = i915_gem_object_get_page(obj, idx);
1025 unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
1027 ret = shmem_pread(page, offset, length, user_data,
1033 user_data += length;
1037 i915_gem_obj_finish_shmem_access(obj);
1042 gtt_user_read(struct io_mapping *mapping,
1043 loff_t base, int offset,
1044 char __user *user_data, int length)
1046 void __iomem *vaddr;
1047 unsigned long unwritten;
1049 /* We can use the cpu mem copy function because this is X86. */
1050 vaddr = io_mapping_map_atomic_wc(mapping, base);
1051 unwritten = __copy_to_user_inatomic(user_data,
1052 (void __force *)vaddr + offset,
1054 io_mapping_unmap_atomic(vaddr);
1056 vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
1057 unwritten = copy_to_user(user_data,
1058 (void __force *)vaddr + offset,
1060 io_mapping_unmap(vaddr);
1066 i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
1067 const struct drm_i915_gem_pread *args)
1069 struct drm_i915_private *i915 = to_i915(obj->base.dev);
1070 struct i915_ggtt *ggtt = &i915->ggtt;
1071 intel_wakeref_t wakeref;
1072 struct drm_mm_node node;
1073 struct i915_vma *vma;
1074 void __user *user_data;
1078 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1082 wakeref = intel_runtime_pm_get(i915);
1083 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
1088 node.start = i915_ggtt_offset(vma);
1089 node.allocated = false;
1090 ret = i915_vma_put_fence(vma);
1092 i915_vma_unpin(vma);
1097 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
1100 GEM_BUG_ON(!node.allocated);
1103 ret = i915_gem_object_set_to_gtt_domain(obj, false);
1107 mutex_unlock(&i915->drm.struct_mutex);
1109 user_data = u64_to_user_ptr(args->data_ptr);
1110 remain = args->size;
1111 offset = args->offset;
1113 while (remain > 0) {
1114 /* Operation in this page
1116 * page_base = page offset within aperture
1117 * page_offset = offset within page
1118 * page_length = bytes to copy for this page
1120 u32 page_base = node.start;
1121 unsigned page_offset = offset_in_page(offset);
1122 unsigned page_length = PAGE_SIZE - page_offset;
1123 page_length = remain < page_length ? remain : page_length;
1124 if (node.allocated) {
1126 ggtt->vm.insert_page(&ggtt->vm,
1127 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
1128 node.start, I915_CACHE_NONE, 0);
1131 page_base += offset & PAGE_MASK;
1134 if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
1135 user_data, page_length)) {
1140 remain -= page_length;
1141 user_data += page_length;
1142 offset += page_length;
1145 mutex_lock(&i915->drm.struct_mutex);
1147 if (node.allocated) {
1149 ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
1150 remove_mappable_node(&node);
1152 i915_vma_unpin(vma);
1155 intel_runtime_pm_put(i915, wakeref);
1156 mutex_unlock(&i915->drm.struct_mutex);
1162 * Reads data from the object referenced by handle.
1163 * @dev: drm device pointer
1164 * @data: ioctl data blob
1165 * @file: drm file pointer
1167 * On error, the contents of *data are undefined.
1170 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
1171 struct drm_file *file)
1173 struct drm_i915_gem_pread *args = data;
1174 struct drm_i915_gem_object *obj;
1177 if (args->size == 0)
1180 if (!access_ok(u64_to_user_ptr(args->data_ptr),
1184 obj = i915_gem_object_lookup(file, args->handle);
1188 /* Bounds check source. */
1189 if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
1194 trace_i915_gem_object_pread(obj, args->offset, args->size);
1196 ret = i915_gem_object_wait(obj,
1197 I915_WAIT_INTERRUPTIBLE,
1198 MAX_SCHEDULE_TIMEOUT,
1199 to_rps_client(file));
1203 ret = i915_gem_object_pin_pages(obj);
1207 ret = i915_gem_shmem_pread(obj, args);
1208 if (ret == -EFAULT || ret == -ENODEV)
1209 ret = i915_gem_gtt_pread(obj, args);
1211 i915_gem_object_unpin_pages(obj);
1213 i915_gem_object_put(obj);
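/*
 * Summary of the pread flow above: the shmem path is tried first; if it
 * cannot reach the backing store (-EFAULT) or the object has no struct
 * pages (-ENODEV), we retry through the GGTT, mapping the object (or single
 * pages of it) through the mappable aperture instead.
 */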
1217 /* This is the fast write path which cannot handle
1218 * page faults in the source data
1222 ggtt_write(struct io_mapping *mapping,
1223 loff_t base, int offset,
1224 char __user *user_data, int length)
1226 void __iomem *vaddr;
1227 unsigned long unwritten;
1229 /* We can use the cpu mem copy function because this is X86. */
1230 vaddr = io_mapping_map_atomic_wc(mapping, base);
1231 unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
1233 io_mapping_unmap_atomic(vaddr);
1235 vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
1236 unwritten = copy_from_user((void __force *)vaddr + offset,
1238 io_mapping_unmap(vaddr);
1245 * This is the fast pwrite path, where we copy the data directly from the
1246 * user into the GTT, uncached.
1247 * @obj: i915 GEM object
1248 * @args: pwrite arguments structure
1251 i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
1252 const struct drm_i915_gem_pwrite *args)
1254 struct drm_i915_private *i915 = to_i915(obj->base.dev);
1255 struct i915_ggtt *ggtt = &i915->ggtt;
1256 intel_wakeref_t wakeref;
1257 struct drm_mm_node node;
1258 struct i915_vma *vma;
1260 void __user *user_data;
1263 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1267 if (i915_gem_object_has_struct_page(obj)) {
1269 * Avoid waking the device up if we can fall back, as
1270 * waking/resuming is very slow (worst-case 10-100 ms
1271 * depending on PCI sleeps and our own resume time).
1272 * This easily dwarfs any performance advantage from
1273 * using the cache bypass of indirect GGTT access.
1275 wakeref = intel_runtime_pm_get_if_in_use(i915);
1281 /* No backing pages, no fallback, we must force GGTT access */
1282 wakeref = intel_runtime_pm_get(i915);
1285 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
1290 node.start = i915_ggtt_offset(vma);
1291 node.allocated = false;
1292 ret = i915_vma_put_fence(vma);
1294 i915_vma_unpin(vma);
1299 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
1302 GEM_BUG_ON(!node.allocated);
1305 ret = i915_gem_object_set_to_gtt_domain(obj, true);
1309 mutex_unlock(&i915->drm.struct_mutex);
1311 intel_fb_obj_invalidate(obj, ORIGIN_CPU);
1313 user_data = u64_to_user_ptr(args->data_ptr);
1314 offset = args->offset;
1315 remain = args->size;
1317 /* Operation in this page
1319 * page_base = page offset within aperture
1320 * page_offset = offset within page
1321 * page_length = bytes to copy for this page
1323 u32 page_base = node.start;
1324 unsigned int page_offset = offset_in_page(offset);
1325 unsigned int page_length = PAGE_SIZE - page_offset;
1326 page_length = remain < page_length ? remain : page_length;
1327 if (node.allocated) {
1328 wmb(); /* flush the write before we modify the GGTT */
1329 ggtt->vm.insert_page(&ggtt->vm,
1330 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
1331 node.start, I915_CACHE_NONE, 0);
1332 wmb(); /* flush modifications to the GGTT (insert_page) */
1334 page_base += offset & PAGE_MASK;
1336 /* If we get a fault while copying data, then (presumably) our
1337 * source page isn't available. Return the error and we'll
1338 * retry in the slow path.
1339 * If the object is non-shmem backed, we retry again with the
1340 * path that handles page faults.
1342 if (ggtt_write(&ggtt->iomap, page_base, page_offset,
1343 user_data, page_length)) {
1348 remain -= page_length;
1349 user_data += page_length;
1350 offset += page_length;
1352 intel_fb_obj_flush(obj, ORIGIN_CPU);
1354 mutex_lock(&i915->drm.struct_mutex);
1356 if (node.allocated) {
1358 ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
1359 remove_mappable_node(&node);
1361 i915_vma_unpin(vma);
1364 intel_runtime_pm_put(i915, wakeref);
1366 mutex_unlock(&i915->drm.struct_mutex);
1370 /* Per-page copy function for the shmem pwrite fastpath.
1371 * Flushes invalid cachelines before writing to the target if
1372 * needs_clflush_before is set and flushes out any written cachelines after
1373 * writing if needs_clflush_after is set.
1376 shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
1377 bool needs_clflush_before,
1378 bool needs_clflush_after)
1385 if (needs_clflush_before)
1386 drm_clflush_virt_range(vaddr + offset, len);
1388 ret = __copy_from_user(vaddr + offset, user_data, len);
1389 if (!ret && needs_clflush_after)
1390 drm_clflush_virt_range(vaddr + offset, len);
1394 return ret ? -EFAULT : 0;
1398 i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
1399 const struct drm_i915_gem_pwrite *args)
1401 struct drm_i915_private *i915 = to_i915(obj->base.dev);
1402 void __user *user_data;
1404 unsigned int partial_cacheline_write;
1405 unsigned int needs_clflush;
1406 unsigned int offset, idx;
1409 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1413 ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
1414 mutex_unlock(&i915->drm.struct_mutex);
1418 /* If we don't overwrite a cacheline completely we need to be
1419 * careful to have up-to-date data by first clflushing. Don't
1420 * overcomplicate things and flush the entire page.
1422 partial_cacheline_write = 0;
1423 if (needs_clflush & CLFLUSH_BEFORE)
1424 partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
1426 user_data = u64_to_user_ptr(args->data_ptr);
1427 remain = args->size;
1428 offset = offset_in_page(args->offset);
1429 for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
1430 struct page *page = i915_gem_object_get_page(obj, idx);
1431 unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
1433 ret = shmem_pwrite(page, offset, length, user_data,
1434 (offset | length) & partial_cacheline_write,
1435 needs_clflush & CLFLUSH_AFTER);
1440 user_data += length;
1444 intel_fb_obj_flush(obj, ORIGIN_CPU);
1445 i915_gem_obj_finish_shmem_access(obj);
1450 * Writes data to the object referenced by handle.
1452 * @data: ioctl data blob
1455 * On error, the contents of the buffer that were to be modified are undefined.
1458 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1459 struct drm_file *file)
1461 struct drm_i915_gem_pwrite *args = data;
1462 struct drm_i915_gem_object *obj;
1465 if (args->size == 0)
1468 if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
1471 obj = i915_gem_object_lookup(file, args->handle);
1475 /* Bounds check destination. */
1476 if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
1481 /* Writes not allowed into this read-only object */
1482 if (i915_gem_object_is_readonly(obj)) {
1487 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
1490 if (obj->ops->pwrite)
1491 ret = obj->ops->pwrite(obj, args);
1495 ret = i915_gem_object_wait(obj,
1496 I915_WAIT_INTERRUPTIBLE |
1498 MAX_SCHEDULE_TIMEOUT,
1499 to_rps_client(file));
1503 ret = i915_gem_object_pin_pages(obj);
1508 /* We can only do the GTT pwrite on untiled buffers, as otherwise
1509 * it would end up going through the fenced access, and we'll get
1510 * different detiling behavior between reading and writing.
1511 * pread/pwrite currently are reading and writing from the CPU
1512 * perspective, requiring manual detiling by the client.
1514 if (!i915_gem_object_has_struct_page(obj) ||
1515 cpu_write_needs_clflush(obj))
1516 /* Note that the gtt paths might fail with non-page-backed user
1517 * pointers (e.g. gtt mappings when moving data between
1518 * textures). Fall back to the shmem path in that case.
1520 ret = i915_gem_gtt_pwrite_fast(obj, args);
1522 if (ret == -EFAULT || ret == -ENOSPC) {
1523 if (obj->phys_handle)
1524 ret = i915_gem_phys_pwrite(obj, args, file);
1526 ret = i915_gem_shmem_pwrite(obj, args);
1529 i915_gem_object_unpin_pages(obj);
1531 i915_gem_object_put(obj);
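/*
 * Summary of the pwrite flow above: objects providing their own pwrite
 * backend (obj->ops->pwrite) are handled there first; otherwise the GTT
 * fast path is attempted for objects without struct pages or that need a
 * clflush, and on -EFAULT/-ENOSPC we fall back to the phys handler or the
 * shmem path.
 */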
1535 static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
1537 struct drm_i915_private *i915 = to_i915(obj->base.dev);
1538 struct list_head *list;
1539 struct i915_vma *vma;
1541 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
1543 mutex_lock(&i915->ggtt.vm.mutex);
1544 for_each_ggtt_vma(vma, obj) {
1545 if (!drm_mm_node_allocated(&vma->node))
1548 list_move_tail(&vma->vm_link, &vma->vm->bound_list);
1550 mutex_unlock(&i915->ggtt.vm.mutex);
1552 spin_lock(&i915->mm.obj_lock);
1553 list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
1554 list_move_tail(&obj->mm.link, list);
1555 spin_unlock(&i915->mm.obj_lock);
1559 * Called when user space prepares to use an object with the CPU, either
1560 * through the mmap ioctl's mapping or a GTT mapping.
1562 * @data: ioctl data blob
1566 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1567 struct drm_file *file)
1569 struct drm_i915_gem_set_domain *args = data;
1570 struct drm_i915_gem_object *obj;
1571 u32 read_domains = args->read_domains;
1572 u32 write_domain = args->write_domain;
1575 /* Only handle setting domains to types used by the CPU. */
1576 if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
1579 /* Having something in the write domain implies it's in the read
1580 * domain, and only that read domain. Enforce that in the request.
1582 if (write_domain != 0 && read_domains != write_domain)
1585 obj = i915_gem_object_lookup(file, args->handle);
1589 /* Try to flush the object off the GPU without holding the lock.
1590 * We will repeat the flush holding the lock in the normal manner
1591 * to catch cases where we are gazumped.
1593 err = i915_gem_object_wait(obj,
1594 I915_WAIT_INTERRUPTIBLE |
1595 I915_WAIT_PRIORITY |
1596 (write_domain ? I915_WAIT_ALL : 0),
1597 MAX_SCHEDULE_TIMEOUT,
1598 to_rps_client(file));
1603 * Proxy objects do not control access to the backing storage, ergo
1604 * they cannot be used as a means to manipulate the cache domain
1605 * tracking for that backing storage. The proxy object is always
1606 * considered to be outside of any cache domain.
1608 if (i915_gem_object_is_proxy(obj)) {
1614 * Flush and acquire obj->pages so that we are coherent through
1615 * direct access in memory with previous cached writes through
1616 * shmemfs and that our cache domain tracking remains valid.
1617 * For example, if the obj->filp was moved to swap without us
1618 * being notified and releasing the pages, we would mistakenly
1619 * continue to assume that the obj remained out of the CPU cached
1622 err = i915_gem_object_pin_pages(obj);
1626 err = i915_mutex_lock_interruptible(dev);
1630 if (read_domains & I915_GEM_DOMAIN_WC)
1631 err = i915_gem_object_set_to_wc_domain(obj, write_domain);
1632 else if (read_domains & I915_GEM_DOMAIN_GTT)
1633 err = i915_gem_object_set_to_gtt_domain(obj, write_domain);
1635 err = i915_gem_object_set_to_cpu_domain(obj, write_domain);
1637 /* And bump the LRU for this access */
1638 i915_gem_object_bump_inactive_ggtt(obj);
1640 mutex_unlock(&dev->struct_mutex);
1642 if (write_domain != 0)
1643 intel_fb_obj_invalidate(obj,
1644 fb_write_origin(obj, write_domain));
1647 i915_gem_object_unpin_pages(obj);
1649 i915_gem_object_put(obj);
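/*
 * Illustrative userspace usage of the set-domain ioctl above (sketch only,
 * assuming libdrm's drmIoctl() wrapper): before touching an mmap'ed buffer
 * with the CPU, move it to the CPU domain so any stale GPU/GTT writes are
 * flushed first:
 *
 *	struct drm_i915_gem_set_domain arg = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_CPU,
 *		.write_domain = I915_GEM_DOMAIN_CPU,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &arg);
 */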
1654 * Called when user space has done writes to this buffer
1656 * @data: ioctl data blob
1660 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1661 struct drm_file *file)
1663 struct drm_i915_gem_sw_finish *args = data;
1664 struct drm_i915_gem_object *obj;
1666 obj = i915_gem_object_lookup(file, args->handle);
1671 * Proxy objects are barred from CPU access, so there is no
1672 * need to ban sw_finish as it is a nop.
1675 /* Pinned buffers may be scanout, so flush the cache */
1676 i915_gem_object_flush_if_display(obj);
1677 i915_gem_object_put(obj);
1683 __vma_matches(struct vm_area_struct *vma, struct file *filp,
1684 unsigned long addr, unsigned long size)
1686 if (vma->vm_file != filp)
1689 return vma->vm_start == addr && (vma->vm_end - vma->vm_start) == size;
1693 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
1696 * @data: ioctl data blob
1699 * While the mapping holds a reference on the contents of the object, it doesn't
1700 * imply a ref on the object itself.
1704 * DRM driver writers who look at this function as an example for how to do GEM
1705 * mmap support, please don't implement mmap support like here. The modern way
1706 * to implement DRM mmap support is with an mmap offset ioctl (like
1707 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
1708 * That way debug tooling like valgrind will understand what's going on, hiding
1709 * the mmap call in a driver private ioctl will break that. The i915 driver only
1710 * does cpu mmaps this way because we didn't know better.
1713 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1714 struct drm_file *file)
1716 struct drm_i915_gem_mmap *args = data;
1717 struct drm_i915_gem_object *obj;
1720 if (args->flags & ~(I915_MMAP_WC))
1723 if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
1726 obj = i915_gem_object_lookup(file, args->handle);
1730 /* prime objects have no backing filp to GEM mmap
1731 * pages from.
1733 if (!obj->base.filp) {
1734 i915_gem_object_put(obj);
1738 addr = vm_mmap(obj->base.filp, 0, args->size,
1739 PROT_READ | PROT_WRITE, MAP_SHARED,
1741 if (IS_ERR_VALUE(addr))
1744 if (args->flags & I915_MMAP_WC) {
1745 struct mm_struct *mm = current->mm;
1746 struct vm_area_struct *vma;
1748 if (down_write_killable(&mm->mmap_sem)) {
1749 i915_gem_object_put(obj);
1752 vma = find_vma(mm, addr);
1753 if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
1755 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1758 up_write(&mm->mmap_sem);
1759 if (IS_ERR_VALUE(addr))
1762 /* This may race, but that's ok, it only gets set */
1763 WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
1765 i915_gem_object_put(obj);
1767 args->addr_ptr = (u64)addr;
1772 i915_gem_object_put(obj);
1777 static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
1779 return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
1783 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
1785 * A history of the GTT mmap interface:
1787 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
1788 * be aligned and suitable for fencing, and still fit into the available
1789 * mappable space left by the pinned display objects. A classic problem
1790 * we called the page-fault-of-doom where we would ping-pong between
1791 * two objects that could not fit inside the GTT and so the memcpy
1792 * would page one object in at the expense of the other between every
1795 * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
1796 * as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
1797 * object is too large for the available space (or simply too large
1798 * for the mappable aperture!), a view is created instead and faulted
1799 * into userspace. (This view is aligned and sized appropriately for
1800 * fenced access.)
1802 * 2 - Recognise WC as a separate cache domain so that we can flush the
1803 * delayed writes via GTT before performing direct access via WC.
1807 * * snoopable objects cannot be accessed via the GTT. It can cause machine
1808 * hangs on some architectures, corruption on others. An attempt to service
1809 * a GTT page fault from a snoopable object will generate a SIGBUS.
1811 * * the object must be able to fit into RAM (physical memory, though not
1812 * limited to the mappable aperture).
1817 * * a new GTT page fault will synchronize rendering from the GPU and flush
1818 * all data to system memory. Subsequent access will not be synchronized.
1820 * * all mappings are revoked on runtime device suspend.
1822 * * there are only 8, 16 or 32 fence registers to share between all users
1823 * (older machines require a fence register for display and blitter access
1824 * as well). Contention of the fence registers will cause the previous users
1825 * to be unmapped and any new access will generate new page faults.
1827 * * running out of memory while servicing a fault may generate a SIGBUS,
1828 * rather than the expected SIGSEGV.
1830 int i915_gem_mmap_gtt_version(void)
1835 static inline struct i915_ggtt_view
1836 compute_partial_view(const struct drm_i915_gem_object *obj,
1837 pgoff_t page_offset,
1840 struct i915_ggtt_view view;
1842 if (i915_gem_object_is_tiled(obj))
1843 chunk = roundup(chunk, tile_row_pages(obj));
1845 view.type = I915_GGTT_VIEW_PARTIAL;
1846 view.partial.offset = rounddown(page_offset, chunk);
1848 min_t(unsigned int, chunk,
1849 (obj->base.size >> PAGE_SHIFT) - view.partial.offset);
1851 /* If the partial covers the entire object, just create a normal VMA. */
1852 if (chunk >= obj->base.size >> PAGE_SHIFT)
1853 view.type = I915_GGTT_VIEW_NORMAL;
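/*
 * The partial view computed above covers the MIN_CHUNK_PAGES-aligned chunk
 * containing the faulting page, rounded out to whole tile rows for tiled
 * objects, so a single fault maps a naturally aligned chunk rather than one
 * page at a time.
 */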
1859 * i915_gem_fault - fault a page into the GTT
1862 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1863 * from userspace. The fault handler takes care of binding the object to
1864 * the GTT (if needed), allocating and programming a fence register (again,
1865 * only if needed based on whether the old reg is still valid or the object
1866 * is tiled) and inserting a new PTE into the faulting process.
1868 * Note that the faulting process may involve evicting existing objects
1869 * from the GTT and/or fence registers to make room. So performance may
1870 * suffer if the GTT working set is large or there are few fence registers
1871 * left.
1873 * The current feature set supported by i915_gem_fault() and thus GTT mmaps
1874 * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
1876 vm_fault_t i915_gem_fault(struct vm_fault *vmf)
1878 #define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
1879 struct vm_area_struct *area = vmf->vma;
1880 struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
1881 struct drm_device *dev = obj->base.dev;
1882 struct drm_i915_private *dev_priv = to_i915(dev);
1883 struct i915_ggtt *ggtt = &dev_priv->ggtt;
1884 bool write = area->vm_flags & VM_WRITE;
1885 intel_wakeref_t wakeref;
1886 struct i915_vma *vma;
1887 pgoff_t page_offset;
1890 /* Sanity check that we allow writing into this object */
1891 if (i915_gem_object_is_readonly(obj) && write)
1892 return VM_FAULT_SIGBUS;
1894 /* We don't use vmf->pgoff since that has the fake offset */
1895 page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
1897 trace_i915_gem_object_fault(obj, page_offset, true, write);
1899 /* Try to flush the object off the GPU first without holding the lock.
1900 * Upon acquiring the lock, we will perform our sanity checks and then
1901 * repeat the flush holding the lock in the normal manner to catch cases
1902 * where we are gazumped.
1904 ret = i915_gem_object_wait(obj,
1905 I915_WAIT_INTERRUPTIBLE,
1906 MAX_SCHEDULE_TIMEOUT,
1911 ret = i915_gem_object_pin_pages(obj);
1915 wakeref = intel_runtime_pm_get(dev_priv);
1917 ret = i915_mutex_lock_interruptible(dev);
1921 /* Access to snoopable pages through the GTT is incoherent. */
1922 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
1928 /* Now pin it into the GTT as needed */
1929 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
1934 /* Use a partial view if it is bigger than available space */
1935 struct i915_ggtt_view view =
1936 compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
1939 flags = PIN_MAPPABLE;
1940 if (view.type == I915_GGTT_VIEW_NORMAL)
1941 flags |= PIN_NONBLOCK; /* avoid warnings for pinned */
1944 * Userspace is now writing through an untracked VMA, abandon
1945 * all hope that the hardware is able to track future writes.
1947 obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
1949 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
1950 if (IS_ERR(vma) && !view.type) {
1951 flags = PIN_MAPPABLE;
1952 view.type = I915_GGTT_VIEW_PARTIAL;
1953 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
1961 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1965 ret = i915_vma_pin_fence(vma);
1969 /* Finally, remap it using the new GTT offset */
1970 ret = remap_io_mapping(area,
1971 area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
1972 (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
1973 min_t(u64, vma->size, area->vm_end - area->vm_start),
1978 /* Mark as being mmapped into userspace for later revocation */
1979 assert_rpm_wakelock_held(dev_priv);
1980 if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
1981 list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
1982 GEM_BUG_ON(!obj->userfault_count);
1984 i915_vma_set_ggtt_write(vma);
1987 i915_vma_unpin_fence(vma);
1989 __i915_vma_unpin(vma);
1991 mutex_unlock(&dev->struct_mutex);
1993 intel_runtime_pm_put(dev_priv, wakeref);
1994 i915_gem_object_unpin_pages(obj);
1999 * We eat errors when the gpu is terminally wedged to avoid
2000 * userspace unduly crashing (gl has no provisions for mmaps to
2001 * fail). But any other -EIO isn't ours (e.g. swap in failure)
2002 * and so needs to be reported.
2004 if (!i915_terminally_wedged(&dev_priv->gpu_error))
2005 return VM_FAULT_SIGBUS;
2006 /* else: fall through */
2009 * EAGAIN means the gpu is hung and we'll wait for the error
2010 * handler to reset everything when re-faulting in
2011 * i915_mutex_lock_interruptible.
2018 * EBUSY is ok: this just means that another thread
2019 * already did the job.
2021 return VM_FAULT_NOPAGE;
2023 return VM_FAULT_OOM;
2026 return VM_FAULT_SIGBUS;
2028 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
2029 return VM_FAULT_SIGBUS;
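/*
 * Rough map of the error handling above: -EIO is eaten when the GPU is
 * terminally wedged (otherwise it is reported as SIGBUS), retryable errors
 * such as -EAGAIN and -EBUSY become VM_FAULT_NOPAGE and simply re-fault,
 * -ENOMEM maps to VM_FAULT_OOM, and anything else is reported as SIGBUS.
 */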
2033 static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
2035 struct i915_vma *vma;
2037 GEM_BUG_ON(!obj->userfault_count);
2039 obj->userfault_count = 0;
2040 list_del(&obj->userfault_link);
2041 drm_vma_node_unmap(&obj->base.vma_node,
2042 obj->base.dev->anon_inode->i_mapping);
2044 for_each_ggtt_vma(vma, obj)
2045 i915_vma_unset_userfault(vma);
2049 * i915_gem_release_mmap - remove physical page mappings
2050 * @obj: obj in question
2052 * Preserve the reservation of the mmapping with the DRM core code, but
2053 * relinquish ownership of the pages back to the system.
2055 * It is vital that we remove the page mapping if we have mapped a tiled
2056 * object through the GTT and then lose the fence register due to
2057 * resource pressure. Similarly if the object has been moved out of the
2058 * aperture, then pages mapped into userspace must be revoked. Removing the
2059 * mapping will then trigger a page fault on the next user access, allowing
2060 * fixup by i915_gem_fault().
2063 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
2065 struct drm_i915_private *i915 = to_i915(obj->base.dev);
2066 intel_wakeref_t wakeref;
2068 /* Serialisation between user GTT access and our code depends upon
2069 * revoking the CPU's PTE whilst the mutex is held. The next user
2070 * pagefault then has to wait until we release the mutex.
2072 * Note that RPM complicates somewhat by adding an additional
2073 * requirement that operations to the GGTT be made holding the RPM
2074 * wakeref.
2076 lockdep_assert_held(&i915->drm.struct_mutex);
2077 wakeref = intel_runtime_pm_get(i915);
2079 if (!obj->userfault_count)
2082 __i915_gem_object_release_mmap(obj);
2084 /* Ensure that the CPU's PTEs are revoked and there are no outstanding
2085 * memory transactions from userspace before we return. The TLB
2086 * flushing implied by changing the PTE above *should* be
2087 * sufficient; an extra barrier here just provides us with a bit
2088 * of paranoid documentation about our requirement to serialise
2089 * memory writes before touching registers / GSM.
2094 intel_runtime_pm_put(i915, wakeref);
2097 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
2099 struct drm_i915_gem_object *obj, *on;
2103 * Only called during RPM suspend. All users of the userfault_list
2104 * must be holding an RPM wakeref to ensure that this can not
2105 * run concurrently with themselves (and use the struct_mutex for
2106 * protection between themselves).
2109 list_for_each_entry_safe(obj, on,
2110 &dev_priv->mm.userfault_list, userfault_link)
2111 __i915_gem_object_release_mmap(obj);
2113 /* The fence will be lost when the device powers down. If any were
2114 * in use by hardware (i.e. they are pinned), we should not be powering
2115 * down! All other fences will be reacquired by the user upon waking.
2117 for (i = 0; i < dev_priv->num_fence_regs; i++) {
2118 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2120 /* Ideally we want to assert that the fence register is not
2121 * live at this point (i.e. that no piece of code will be
2122 * trying to write through fence + GTT, as that both violates
2123 * our tracking of activity and associated locking/barriers,
2124 * but also is illegal given that the hw is powered down).
2126 * Previously we used reg->pin_count as a "liveness" indicator.
2127 * That is not sufficient, and we need a more fine-grained
2128 * tool if we want to have a sanity check here.
2134 GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
2139 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
2141 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2144 err = drm_gem_create_mmap_offset(&obj->base);
2148 /* Attempt to reap some mmap space from dead objects */
2150 err = i915_gem_wait_for_idle(dev_priv,
2151 I915_WAIT_INTERRUPTIBLE,
2152 MAX_SCHEDULE_TIMEOUT);
2156 i915_gem_drain_freed_objects(dev_priv);
2157 err = drm_gem_create_mmap_offset(&obj->base);
2161 } while (flush_delayed_work(&dev_priv->gt.retire_work));
2166 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
2168 drm_gem_free_mmap_offset(&obj->base);
2172 i915_gem_mmap_gtt(struct drm_file *file,
2173 struct drm_device *dev,
2177 struct drm_i915_gem_object *obj;
2180 obj = i915_gem_object_lookup(file, handle);
2184 ret = i915_gem_object_create_mmap_offset(obj);
2186 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
2188 i915_gem_object_put(obj);
2193 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
2195 * @data: GTT mapping ioctl data
2196 * @file: GEM object info
2198 * Simply returns the fake offset to userspace so it can mmap it.
2199 * The mmap call will end up in drm_gem_mmap(), which will set things
2200 * up so we can get faults in the handler above.
2202 * The fault handler will take care of binding the object into the GTT
2203 * (since it may have been evicted to make room for something), allocating
2204 * a fence register, and mapping the appropriate aperture address into
2205 * userspace.
2208 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
2209 struct drm_file *file)
2211 struct drm_i915_gem_mmap_gtt *args = data;
2213 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
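/*
 * Illustrative userspace flow for the GTT-mmap ioctl above (sketch only,
 * assuming libdrm's drmIoctl() wrapper): fetch the fake offset, then mmap
 * the DRM fd at that offset to get a mapping serviced by i915_gem_fault():
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, arg.offset);
 */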
2216 /* Immediately discard the backing storage */
2218 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
2220 i915_gem_object_free_mmap_offset(obj);
2222 if (obj->base.filp == NULL)
2225 /* Our goal here is to return as much of the memory as
2226 * is possible back to the system as we are called from OOM.
2227 * To do this we must instruct the shmfs to drop all of its
2228 * backing pages, *now*.
2230 shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
2231 obj->mm.madv = __I915_MADV_PURGED;
2232 obj->mm.pages = ERR_PTR(-EFAULT);
2235 /* Try to discard unwanted pages */
2236 void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
2238 struct address_space *mapping;
2240 lockdep_assert_held(&obj->mm.lock);
2241 GEM_BUG_ON(i915_gem_object_has_pages(obj));
2243 switch (obj->mm.madv) {
2244 case I915_MADV_DONTNEED:
2245 i915_gem_object_truncate(obj);
2246 case __I915_MADV_PURGED:
2250 if (obj->base.filp == NULL)
2253 mapping = obj->base.filp->f_mapping;
2254 invalidate_mapping_pages(mapping, 0, (loff_t)-1);
2258 * Move pages to appropriate lru and release the pagevec, decrementing the
2259 * ref count of those pages.
2261 static void check_release_pagevec(struct pagevec *pvec)
2263 check_move_unevictable_pages(pvec);
2264 __pagevec_release(pvec);
2269 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
2270 struct sg_table *pages)
2272 struct sgt_iter sgt_iter;
2273 struct pagevec pvec;
2276 __i915_gem_object_release_shmem(obj, pages, true);
2278 i915_gem_gtt_finish_pages(obj, pages);
2280 if (i915_gem_object_needs_bit17_swizzle(obj))
2281 i915_gem_object_save_bit_17_swizzle(obj, pages);
2283 mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping);
2285 pagevec_init(&pvec);
2286 for_each_sgt_page(page, sgt_iter, pages) {
2288 set_page_dirty(page);
2290 if (obj->mm.madv == I915_MADV_WILLNEED)
2291 mark_page_accessed(page);
2293 if (!pagevec_add(&pvec, page))
2294 check_release_pagevec(&pvec);
2296 if (pagevec_count(&pvec))
2297 check_release_pagevec(&pvec);
2298 obj->mm.dirty = false;
2300 sg_free_table(pages);
2304 static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
2306 struct radix_tree_iter iter;
2310 radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
2311 radix_tree_delete(&obj->mm.get_page.radix, iter.index);
2315 static struct sg_table *
2316 __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
2318 struct drm_i915_private *i915 = to_i915(obj->base.dev);
2319 struct sg_table *pages;
2321 pages = fetch_and_zero(&obj->mm.pages);
2322 if (IS_ERR_OR_NULL(pages))
2325 spin_lock(&i915->mm.obj_lock);
2326 list_del(&obj->mm.link);
2327 spin_unlock(&i915->mm.obj_lock);
2329 if (obj->mm.mapping) {
2332 ptr = page_mask_bits(obj->mm.mapping);
2333 if (is_vmalloc_addr(ptr))
2336 kunmap(kmap_to_page(ptr));
2338 obj->mm.mapping = NULL;
2341 __i915_gem_object_reset_page_iter(obj);
2342 obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
2347 int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
2348 enum i915_mm_subclass subclass)
2350 struct sg_table *pages;
2353 if (i915_gem_object_has_pinned_pages(obj))
2356 GEM_BUG_ON(obj->bind_count);
2358 /* May be called by shrinker from within get_pages() (on another bo) */
2359 mutex_lock_nested(&obj->mm.lock, subclass);
2360 if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
2366 * ->put_pages might need to allocate memory for the bit17 swizzle
2367 * array, hence protect them from being reaped by removing them from gtt
2370 pages = __i915_gem_object_unset_pages(obj);
2373 * XXX Temporary hijinx to avoid updating all backends to handle
2374 * NULL pages. In the future, when we have more asynchronous
2375 * get_pages backends we should be better able to handle the
2376 * cancellation of the async task in a more uniform manner.
2378 if (!pages && !i915_gem_object_needs_async_cancel(obj))
2379 pages = ERR_PTR(-EINVAL);
2382 obj->ops->put_pages(obj, pages);
2386 mutex_unlock(&obj->mm.lock);
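/*
 * Repack a scatterlist that was allocated with more entries than it ended up
 * using (orig_nents > nents) into a tightly sized table, copying the page and
 * DMA information across. Returns true if the table was replaced.
 */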
2391 bool i915_sg_trim(struct sg_table *orig_st)
2393 struct sg_table new_st;
2394 struct scatterlist *sg, *new_sg;
2397 if (orig_st->nents == orig_st->orig_nents)
2400 if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
2403 new_sg = new_st.sgl;
2404 for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
2405 sg_set_page(new_sg, sg_page(sg), sg->length, 0);
2406 sg_dma_address(new_sg) = sg_dma_address(sg);
2407 sg_dma_len(new_sg) = sg_dma_len(sg);
2409 new_sg = sg_next(new_sg);
2411 GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */
2413 sg_free_table(orig_st);
2419 static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2421 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2422 const unsigned long page_count = obj->base.size / PAGE_SIZE;
2424 struct address_space *mapping;
2425 struct sg_table *st;
2426 struct scatterlist *sg;
2427 struct sgt_iter sgt_iter;
2429 unsigned long last_pfn = 0; /* suppress gcc warning */
2430 unsigned int max_segment = i915_sg_segment_size();
2431 unsigned int sg_page_sizes;
2432 struct pagevec pvec;
2437 * Assert that the object is not currently in any GPU domain. As it
2438 * wasn't in the GTT, there shouldn't be any way it could have been in
2441 GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
2442 GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
2445 * If there's no chance of allocating enough pages for the whole
2446 * object, bail early.
2448 if (page_count > totalram_pages())
2451 st = kmalloc(sizeof(*st), GFP_KERNEL);
2456 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
2462 * Get the list of pages out of our struct file. They'll be pinned
2463 * at this point until we release them.
2465 * Fail silently without starting the shrinker
2467 mapping = obj->base.filp->f_mapping;
2468 mapping_set_unevictable(mapping);
2469 noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
2470 noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
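/*
 * Allocation strategy: first try cheap, no-reclaim allocations; if those
 * fail, progressively shrink our own buffers, and only as a last resort fall
 * back to the full reclaim path (still declining to wake the OOM killer).
 */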
2475 for (i = 0; i < page_count; i++) {
2476 const unsigned int shrink[] = {
2477 I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
2480 gfp_t gfp = noreclaim;
2484 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2485 if (likely(!IS_ERR(page)))
2489 ret = PTR_ERR(page);
2493 i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++);
2496 * We've tried hard to allocate the memory by reaping
2497 * our own buffer, now let the real VM do its job and
2498 * go down in flames if truly OOM.
2500 * However, since graphics tend to be disposable,
2501 * defer the oom here by reporting the ENOMEM back
2505 /* reclaim and warn, but no oom */
2506 gfp = mapping_gfp_mask(mapping);
2509 * Our bo are always dirty and so we require
2510 * kswapd to reclaim our pages (direct reclaim
2511 * does not effectively begin pageout of our
2512 * buffers on its own). However, direct reclaim
2513 * only waits for kswapd when under allocation
2514 * congestion. So as a result __GFP_RECLAIM is
2515 * unreliable and fails to actually reclaim our
2516 * dirty pages -- unless you try over and over
2517 * again with !__GFP_NORETRY. However, we still
2518 * want to fail this allocation rather than
2519 * trigger the out-of-memory killer and for
2520 * this we want __GFP_RETRY_MAYFAIL.
2522 gfp |= __GFP_RETRY_MAYFAIL;
2527 sg->length >= max_segment ||
2528 page_to_pfn(page) != last_pfn + 1) {
2530 sg_page_sizes |= sg->length;
2534 sg_set_page(sg, page, PAGE_SIZE, 0);
2536 sg->length += PAGE_SIZE;
2538 last_pfn = page_to_pfn(page);
2540 /* Check that the i965g/gm workaround works. */
2541 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
2543 if (sg) { /* loop terminated early; short sg table */
2544 sg_page_sizes |= sg->length;
2548 /* Trim unused sg entries to avoid wasting memory. */
2551 ret = i915_gem_gtt_prepare_pages(obj, st);
2554 * DMA remapping failed? One possible cause is that
2555 * it could not reserve enough large entries, asking
2556 * for PAGE_SIZE chunks instead may be helpful.
2558 if (max_segment > PAGE_SIZE) {
2559 for_each_sgt_page(page, sgt_iter, st)
2563 max_segment = PAGE_SIZE;
2566 dev_warn(&dev_priv->drm.pdev->dev,
2567 "Failed to DMA remap %lu pages\n",
2573 if (i915_gem_object_needs_bit17_swizzle(obj))
2574 i915_gem_object_do_bit_17_swizzle(obj, st);
2576 __i915_gem_object_set_pages(obj, st, sg_page_sizes);
2583 mapping_clear_unevictable(mapping);
2584 pagevec_init(&pvec);
2585 for_each_sgt_page(page, sgt_iter, st) {
2586 if (!pagevec_add(&pvec, page))
2587 check_release_pagevec(&pvec);
2589 if (pagevec_count(&pvec))
2590 check_release_pagevec(&pvec);
2595 * shmemfs first checks if there is enough memory to allocate the page
2596 * and reports ENOSPC should there be insufficient, along with the usual
2597 * ENOMEM for a genuine allocation failure.
2599 * We use ENOSPC in our driver to mean that we have run out of aperture
2600 * space and so want to translate the error from shmemfs back to our
2601 * usual understanding of ENOMEM.
2609 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
2610 struct sg_table *pages,
2611 unsigned int sg_page_sizes)
2613 struct drm_i915_private *i915 = to_i915(obj->base.dev);
2614 unsigned long supported = INTEL_INFO(i915)->page_sizes;
2617 lockdep_assert_held(&obj->mm.lock);
2619 obj->mm.get_page.sg_pos = pages->sgl;
2620 obj->mm.get_page.sg_idx = 0;
2622 obj->mm.pages = pages;
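/*
 * On machines with L-shaped memory (bit17 swizzling) a page that is swapped
 * out and back in may come back with a different bit17 in its physical
 * address and so corrupt tiled data; keep the pages of tiled objects pinned
 * to avoid that.
 */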
2624 if (i915_gem_object_is_tiled(obj) &&
2625 i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
2626 GEM_BUG_ON(obj->mm.quirked);
2627 __i915_gem_object_pin_pages(obj);
2628 obj->mm.quirked = true;
2631 GEM_BUG_ON(!sg_page_sizes);
2632 obj->mm.page_sizes.phys = sg_page_sizes;
2635 * Calculate the supported page-sizes which fit into the given
2636 * sg_page_sizes. This will give us the page-sizes which we may be able
2637 * to use opportunistically when later inserting into the GTT. For
2638 * example if phys=2G, then in theory we should be able to use 1G, 2M,
2639 * 64K or 4K pages, although in practice this will depend on a number of
2642 obj->mm.page_sizes.sg = 0;
2643 for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
2644 if (obj->mm.page_sizes.phys & ~0u << i)
2645 obj->mm.page_sizes.sg |= BIT(i);
2647 GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
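/*
 * For example, phys = 64K | 4K gives sg = 64K | 4K above, whereas
 * phys = 2M alone gives sg = 2M | 64K | 4K when the platform supports all
 * three page sizes.
 */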
2649 spin_lock(&i915->mm.obj_lock);
2650 list_add(&obj->mm.link, &i915->mm.unbound_list);
2651 spin_unlock(&i915->mm.obj_lock);
2654 static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2658 if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
2659 DRM_DEBUG("Attempting to obtain a purgeable object\n");
2663 err = obj->ops->get_pages(obj);
2664 GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));
2669 /* Ensure that the associated pages are gathered from the backing storage
2670 * and pinned into our object. i915_gem_object_pin_pages() may be called
2671 * multiple times before they are released by a single call to
2672 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
2673 * either as a result of memory pressure (reaping pages under the shrinker)
2674 * or as the object is itself released.
2676 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2680 err = mutex_lock_interruptible(&obj->mm.lock);
2684 if (unlikely(!i915_gem_object_has_pages(obj))) {
2685 GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
2687 err = ____i915_gem_object_get_pages(obj);
2691 smp_mb__before_atomic();
2693 atomic_inc(&obj->mm.pages_pin_count);
2696 mutex_unlock(&obj->mm.lock);
2700 /* The 'mapping' part of i915_gem_object_pin_map() below */
2701 static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
2702 enum i915_map_type type)
2704 unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
2705 struct sg_table *sgt = obj->mm.pages;
2706 struct sgt_iter sgt_iter;
2708 struct page *stack_pages[32];
2709 struct page **pages = stack_pages;
2710 unsigned long i = 0;
2714 /* A single page can always be kmapped */
2715 if (n_pages == 1 && type == I915_MAP_WB)
2716 return kmap(sg_page(sgt->sgl));
2718 if (n_pages > ARRAY_SIZE(stack_pages)) {
2719 /* Too big for stack -- allocate temporary array instead */
2720 pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
2725 for_each_sgt_page(page, sgt_iter, sgt)
2728 /* Check that we have the expected number of pages */
2729 GEM_BUG_ON(i != n_pages);
2734 /* fallthrough to use PAGE_KERNEL anyway */
2736 pgprot = PAGE_KERNEL;
2739 pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
2742 addr = vmap(pages, n_pages, 0, pgprot);
2744 if (pages != stack_pages)
2750 /* get, pin, and map the pages of the object into kernel space */
2751 void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
2752 enum i915_map_type type)
2754 enum i915_map_type has_type;
2759 if (unlikely(!i915_gem_object_has_struct_page(obj)))
2760 return ERR_PTR(-ENXIO);
2762 ret = mutex_lock_interruptible(&obj->mm.lock);
2764 return ERR_PTR(ret);
2766 pinned = !(type & I915_MAP_OVERRIDE);
2767 type &= ~I915_MAP_OVERRIDE;
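/*
 * I915_MAP_OVERRIDE asks for an existing mapping of a different type to be
 * torn down and replaced; without it the request is refused if someone else
 * may still be using the old mapping.
 */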
2769 if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
2770 if (unlikely(!i915_gem_object_has_pages(obj))) {
2771 GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
2773 ret = ____i915_gem_object_get_pages(obj);
2777 smp_mb__before_atomic();
2779 atomic_inc(&obj->mm.pages_pin_count);
2782 GEM_BUG_ON(!i915_gem_object_has_pages(obj));
2784 ptr = page_unpack_bits(obj->mm.mapping, &has_type);
2785 if (ptr && has_type != type) {
2791 if (is_vmalloc_addr(ptr))
2794 kunmap(kmap_to_page(ptr));
2796 ptr = obj->mm.mapping = NULL;
2800 ptr = i915_gem_object_map(obj, type);
2806 obj->mm.mapping = page_pack_bits(ptr, type);
2810 mutex_unlock(&obj->mm.lock);
2814 atomic_dec(&obj->mm.pages_pin_count);
2821 i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
2822 const struct drm_i915_gem_pwrite *arg)
2824 struct address_space *mapping = obj->base.filp->f_mapping;
2825 char __user *user_data = u64_to_user_ptr(arg->data_ptr);
2829 /* Before we instantiate/pin the backing store for our use, we
2830 * can prepopulate the shmemfs filp efficiently using a write into
2831 * the pagecache. We avoid the penalty of instantiating all the
2832 * pages, important if the user is just writing to a few and never
2833 * uses the object on the GPU, and using a direct write into shmemfs
2834 * allows it to avoid the cost of retrieving a page (either swapin
2835 * or clearing-before-use) before it is overwritten.
2837 if (i915_gem_object_has_pages(obj))
2840 if (obj->mm.madv != I915_MADV_WILLNEED)
2843 /* Before the pages are instantiated the object is treated as being
2844 * in the CPU domain. The pages will be clflushed as required before
2845 * use, and we can freely write into the pages directly. If userspace
2846 * races pwrite with any other operation, corruption will ensue -
2847 * that is userspace's prerogative!
2851 offset = arg->offset;
2852 pg = offset_in_page(offset);
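/*
 * Copy the user buffer through the shmemfs pagecache one page at a time;
 * pagecache_write_begin()/pagecache_write_end() let the filesystem handle
 * page allocation, uptodate handling and dirtying for us.
 */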
2855 unsigned int len, unwritten;
2860 len = PAGE_SIZE - pg;
2864 err = pagecache_write_begin(obj->base.filp, mapping,
2871 unwritten = copy_from_user(vaddr + pg, user_data, len);
2874 err = pagecache_write_end(obj->base.filp, mapping,
2875 offset, len, len - unwritten,
2892 static bool match_ring(struct i915_request *rq)
2894 struct drm_i915_private *dev_priv = rq->i915;
2895 u32 ring = I915_READ(RING_START(rq->engine->mmio_base));
2897 return ring == i915_ggtt_offset(rq->ring->vma);
2900 struct i915_request *
2901 i915_gem_find_active_request(struct intel_engine_cs *engine)
2903 struct i915_request *request, *active = NULL;
2904 unsigned long flags;
2907 * We are called by the error capture, reset and to dump engine
2908 * state at random points in time. In particular, note that neither is
2909 * crucially ordered with an interrupt. After a hang, the GPU is dead
2910 * and we assume that no more writes can happen (we waited long enough
2911 * for all writes that were in transaction to be flushed) - adding an
2912 * extra delay for a recent interrupt is pointless. Hence, we do
2913 * not need an engine->irq_seqno_barrier() before the seqno reads.
2914 * At all other times, we must assume the GPU is still running, but
2915 * we only care about the snapshot of this moment.
2917 spin_lock_irqsave(&engine->timeline.lock, flags);
2918 list_for_each_entry(request, &engine->timeline.requests, link) {
2919 if (i915_request_completed(request))
2922 if (!i915_request_started(request))
2925 /* More than one preemptible request may match! */
2926 if (!match_ring(request))
2932 spin_unlock_irqrestore(&engine->timeline.lock, flags);
2938 i915_gem_retire_work_handler(struct work_struct *work)
2940 struct drm_i915_private *dev_priv =
2941 container_of(work, typeof(*dev_priv), gt.retire_work.work);
2942 struct drm_device *dev = &dev_priv->drm;
2944 /* Come back later if the device is busy... */
2945 if (mutex_trylock(&dev->struct_mutex)) {
2946 i915_retire_requests(dev_priv);
2947 mutex_unlock(&dev->struct_mutex);
2951 * Keep the retire handler running until we are finally idle.
2952 * We do not need to do this test under locking as in the worst-case
2953 * we queue the retire worker once too often.
2955 if (READ_ONCE(dev_priv->gt.awake))
2956 queue_delayed_work(dev_priv->wq,
2957 &dev_priv->gt.retire_work,
2958 round_jiffies_up_relative(HZ));
2961 static void shrink_caches(struct drm_i915_private *i915)
2964 * kmem_cache_shrink() discards empty slabs and reorders partially
2965 * filled slabs to prioritise allocating from the mostly full slabs,
2966 * with the aim of reducing fragmentation.
2968 kmem_cache_shrink(i915->priorities);
2969 kmem_cache_shrink(i915->dependencies);
2970 kmem_cache_shrink(i915->requests);
2971 kmem_cache_shrink(i915->luts);
2972 kmem_cache_shrink(i915->vmas);
2973 kmem_cache_shrink(i915->objects);
2976 struct sleep_rcu_work {
2978 struct rcu_head rcu;
2979 struct work_struct work;
2981 struct drm_i915_private *i915;
2986 same_epoch(struct drm_i915_private *i915, unsigned int epoch)
2989 * There is a small chance that the epoch wrapped since we started
2990 * sleeping. If we assume that epoch is at least a u32, then it will
2991 * take at least 2^32 * 100ms for it to wrap, or about 13.6 years.
2993 return epoch == READ_ONCE(i915->gt.epoch);
2996 static void __sleep_work(struct work_struct *work)
2998 struct sleep_rcu_work *s = container_of(work, typeof(*s), work);
2999 struct drm_i915_private *i915 = s->i915;
3000 unsigned int epoch = s->epoch;
3003 if (same_epoch(i915, epoch))
3004 shrink_caches(i915);
3007 static void __sleep_rcu(struct rcu_head *rcu)
3009 struct sleep_rcu_work *s = container_of(rcu, typeof(*s), rcu);
3010 struct drm_i915_private *i915 = s->i915;
3012 destroy_rcu_head(&s->rcu);
3014 if (same_epoch(i915, s->epoch)) {
3015 INIT_WORK(&s->work, __sleep_work);
3016 queue_work(i915->wq, &s->work);
3023 new_requests_since_last_retire(const struct drm_i915_private *i915)
3025 return (READ_ONCE(i915->gt.active_requests) ||
3026 work_pending(&i915->gt.idle_work.work));
3029 static void assert_kernel_context_is_current(struct drm_i915_private *i915)
3031 struct intel_engine_cs *engine;
3032 enum intel_engine_id id;
3034 if (i915_terminally_wedged(&i915->gpu_error))
3037 GEM_BUG_ON(i915->gt.active_requests);
3038 for_each_engine(engine, i915, id) {
3039 GEM_BUG_ON(__i915_active_request_peek(&engine->timeline.last_request));
3040 GEM_BUG_ON(engine->last_retired_context !=
3041 to_intel_context(i915->kernel_context, engine));
3046 i915_gem_idle_work_handler(struct work_struct *work)
3048 struct drm_i915_private *dev_priv =
3049 container_of(work, typeof(*dev_priv), gt.idle_work.work);
3050 unsigned int epoch = I915_EPOCH_INVALID;
3051 bool rearm_hangcheck;
3053 if (!READ_ONCE(dev_priv->gt.awake))
3056 if (READ_ONCE(dev_priv->gt.active_requests))
3060 * Flush out the last user context, leaving only the pinned
3061 * kernel context resident. When we are idling on the kernel_context,
3062 * no more new requests (with a context switch) are emitted and we
3063 * can finally rest. A consequence is that the idle work handler is
3064 * always called at least twice before idling (and if the system is
3065 * idle that implies a round trip through the retire worker).
3067 mutex_lock(&dev_priv->drm.struct_mutex);
3068 i915_gem_switch_to_kernel_context(dev_priv);
3069 mutex_unlock(&dev_priv->drm.struct_mutex);
3071 GEM_TRACE("active_requests=%d (after switch-to-kernel-context)\n",
3072 READ_ONCE(dev_priv->gt.active_requests));
3075 * Wait for last execlists context complete, but bail out in case a
3076 * new request is submitted. As we don't trust the hardware, we
3077 * continue on if the wait times out. This is necessary to allow
3078 * the machine to suspend even if the hardware dies, and we will
3079 * try to recover in resume (after depriving the hardware of power,
3080 * it may be in a better mood).
3082 __wait_for(if (new_requests_since_last_retire(dev_priv)) return,
3083 intel_engines_are_idle(dev_priv),
3084 I915_IDLE_ENGINES_TIMEOUT * 1000,
3088 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
3090 if (!mutex_trylock(&dev_priv->drm.struct_mutex)) {
3091 /* Currently busy, come back later */
3092 mod_delayed_work(dev_priv->wq,
3093 &dev_priv->gt.idle_work,
3094 msecs_to_jiffies(50));
3099 * New request retired after this work handler started, extend active
3100 * period until next instance of the work.
3102 if (new_requests_since_last_retire(dev_priv))
3105 epoch = __i915_gem_park(dev_priv);
3107 assert_kernel_context_is_current(dev_priv);
3109 rearm_hangcheck = false;
3111 mutex_unlock(&dev_priv->drm.struct_mutex);
3114 if (rearm_hangcheck) {
3115 GEM_BUG_ON(!dev_priv->gt.awake);
3116 i915_queue_hangcheck(dev_priv);
3120 * When we are idle, it is an opportune time to reap our caches.
3121 * However, we have many objects that utilise RCU and the ordered
3122 * i915->wq that this work is executing on. To try and flush any
3123 * pending frees now we are idle, we first wait for an RCU grace
3124 * period, and then queue a task (that will run last on the wq) to
3125 * shrink and re-optimize the caches.
3127 if (same_epoch(dev_priv, epoch)) {
3128 struct sleep_rcu_work *s = kmalloc(sizeof(*s), GFP_KERNEL);
3130 init_rcu_head(&s->rcu);
3133 call_rcu(&s->rcu, __sleep_rcu);
3138 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
3140 struct drm_i915_private *i915 = to_i915(gem->dev);
3141 struct drm_i915_gem_object *obj = to_intel_bo(gem);
3142 struct drm_i915_file_private *fpriv = file->driver_priv;
3143 struct i915_lut_handle *lut, *ln;
3145 mutex_lock(&i915->drm.struct_mutex);
3147 list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
3148 struct i915_gem_context *ctx = lut->ctx;
3149 struct i915_vma *vma;
3151 GEM_BUG_ON(ctx->file_priv == ERR_PTR(-EBADF));
3152 if (ctx->file_priv != fpriv)
3155 vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
3156 GEM_BUG_ON(vma->obj != obj);
3158 /* We allow the process to have multiple handles to the same
3159 * vma, in the same fd namespace, by virtue of flink/open.
3161 GEM_BUG_ON(!vma->open_count);
3162 if (!--vma->open_count && !i915_vma_is_ggtt(vma))
3163 i915_vma_close(vma);
3165 list_del(&lut->obj_link);
3166 list_del(&lut->ctx_link);
3168 kmem_cache_free(i915->luts, lut);
3169 __i915_gem_object_release_unless_active(obj);
3172 mutex_unlock(&i915->drm.struct_mutex);
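/*
 * Map the ioctl's signed nanosecond timeout onto the scheduler's jiffie
 * based wait: a negative value means wait forever, zero means poll without
 * blocking.
 */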
3175 static unsigned long to_wait_timeout(s64 timeout_ns)
3178 return MAX_SCHEDULE_TIMEOUT;
3180 if (timeout_ns == 0)
3183 return nsecs_to_jiffies_timeout(timeout_ns);
3187 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
3188 * @dev: drm device pointer
3189 * @data: ioctl data blob
3190 * @file: drm file pointer
3192 * Returns 0 if successful, else an error is returned with the remaining time in
3193 * the timeout parameter.
3194 * -ETIME: object is still busy after timeout
3195 * -ERESTARTSYS: signal interrupted the wait
3196 * -ENOENT: object doesn't exist
3197 * Also possible, but rare:
3198 * -EAGAIN: incomplete, restart syscall
3200 * -ENODEV: Internal IRQ fail
3201 * -E?: The add request failed
3203 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
3204 * non-zero timeout parameter the wait ioctl will wait for the given number of
3205 * nanoseconds on an object becoming unbusy. Since the wait itself does so
3206 * without holding struct_mutex the object may become re-busied before this
3207 * function completes. A similar but shorter race condition exists in the busy
3211 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3213 struct drm_i915_gem_wait *args = data;
3214 struct drm_i915_gem_object *obj;
3218 if (args->flags != 0)
3221 obj = i915_gem_object_lookup(file, args->bo_handle);
3225 start = ktime_get();
3227 ret = i915_gem_object_wait(obj,
3228 I915_WAIT_INTERRUPTIBLE |
3229 I915_WAIT_PRIORITY |
3231 to_wait_timeout(args->timeout_ns),
3232 to_rps_client(file));
3234 if (args->timeout_ns > 0) {
3235 args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
3236 if (args->timeout_ns < 0)
3237 args->timeout_ns = 0;
3240 * Apparently ktime isn't accurate enough and occasionally has a
3241 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
3242 * things up to make the test happy. We allow up to 1 jiffy.
3244 * This is a regression from the timespec->ktime conversion.
3246 if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
3247 args->timeout_ns = 0;
3249 /* Asked to wait beyond the jiffie/scheduler precision? */
3250 if (ret == -ETIME && args->timeout_ns)
3254 i915_gem_object_put(obj);
3258 static int wait_for_engines(struct drm_i915_private *i915)
3260 if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) {
3261 dev_err(i915->drm.dev,
3262 "Failed to idle engines, declaring wedged!\n");
3264 i915_gem_set_wedged(i915);
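/*
 * Wait for the last request on every active timeline to complete, dropping
 * the timeline lock while sleeping and restarting the walk from the head of
 * the list once it is reacquired.
 */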
3272 wait_for_timelines(struct drm_i915_private *i915,
3273 unsigned int flags, long timeout)
3275 struct i915_gt_timelines *gt = &i915->gt.timelines;
3276 struct i915_timeline *tl;
3278 if (!READ_ONCE(i915->gt.active_requests))
3281 mutex_lock(&gt->mutex);
3282 list_for_each_entry(tl, &gt->active_list, link) {
3283 struct i915_request *rq;
3285 rq = i915_active_request_get_unlocked(&tl->last_request);
3289 mutex_unlock(&gt->mutex);
3294 * Switching to the kernel context is often used as a synchronous
3295 * step prior to idling, e.g. in suspend for flushing all
3296 * current operations to memory before sleeping. These we
3297 * want to complete as quickly as possible to avoid prolonged
3298 * stalls, so allow the gpu to boost to maximum clocks.
3300 if (flags & I915_WAIT_FOR_IDLE_BOOST)
3301 gen6_rps_boost(rq, NULL);
3303 timeout = i915_request_wait(rq, flags, timeout);
3304 i915_request_put(rq);
3308 /* restart after reacquiring the lock */
3309 mutex_lock(&gt->mutex);
3310 tl = list_entry(&gt->active_list, typeof(*tl), link);
3312 mutex_unlock(&gt->mutex);
3317 int i915_gem_wait_for_idle(struct drm_i915_private *i915,
3318 unsigned int flags, long timeout)
3320 GEM_TRACE("flags=%x (%s), timeout=%ld%s\n",
3321 flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked",
3322 timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "");
3324 /* If the device is asleep, we have no requests outstanding */
3325 if (!READ_ONCE(i915->gt.awake))
3328 timeout = wait_for_timelines(i915, flags, timeout);
3332 if (flags & I915_WAIT_LOCKED) {
3335 lockdep_assert_held(&i915->drm.struct_mutex);
3337 if (GEM_SHOW_DEBUG() && !timeout) {
3338 /* Presume that timeout was non-zero to begin with! */
3339 dev_warn(&i915->drm.pdev->dev,
3340 "Missed idle-completion interrupt!\n");
3344 err = wait_for_engines(i915);
3348 i915_retire_requests(i915);
3349 GEM_BUG_ON(i915->gt.active_requests);
3355 static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
3358 * We manually flush the CPU domain so that we can override and
3359 * force the flush for the display, and perform it asynchronously.
3361 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
3362 if (obj->cache_dirty)
3363 i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
3364 obj->write_domain = 0;
3367 void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
3369 if (!READ_ONCE(obj->pin_global))
3372 mutex_lock(&obj->base.dev->struct_mutex);
3373 __i915_gem_object_flush_for_display(obj);
3374 mutex_unlock(&obj->base.dev->struct_mutex);
3378 * Moves a single object to the WC read, and possibly write domain.
3379 * @obj: object to act on
3380 * @write: ask for write access or read only
3382 * This function returns when the move is complete, including waiting on
3386 i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
3390 lockdep_assert_held(&obj->base.dev->struct_mutex);
3392 ret = i915_gem_object_wait(obj,
3393 I915_WAIT_INTERRUPTIBLE |
3395 (write ? I915_WAIT_ALL : 0),
3396 MAX_SCHEDULE_TIMEOUT,
3401 if (obj->write_domain == I915_GEM_DOMAIN_WC)
3404 /* Flush and acquire obj->pages so that we are coherent through
3405 * direct access in memory with previous cached writes through
3406 * shmemfs and that our cache domain tracking remains valid.
3407 * For example, if the obj->filp was moved to swap without us
3408 * being notified and releasing the pages, we would mistakenly
3409 * continue to assume that the obj remained out of the CPU cached
3412 ret = i915_gem_object_pin_pages(obj);
3416 flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);
3418 /* Serialise direct access to this object with the barriers for
3419 * coherent writes from the GPU, by effectively invalidating the
3420 * WC domain upon first access.
3422 if ((obj->read_domains & I915_GEM_DOMAIN_WC) == 0)
3425 /* It should now be out of any other write domains, and we can update
3426 * the domain values for our changes.
3428 GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_WC) != 0);
3429 obj->read_domains |= I915_GEM_DOMAIN_WC;
3431 obj->read_domains = I915_GEM_DOMAIN_WC;
3432 obj->write_domain = I915_GEM_DOMAIN_WC;
3433 obj->mm.dirty = true;
3436 i915_gem_object_unpin_pages(obj);
3441 * Moves a single object to the GTT read, and possibly write domain.
3442 * @obj: object to act on
3443 * @write: ask for write access or read only
3445 * This function returns when the move is complete, including waiting on
3449 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3453 lockdep_assert_held(&obj->base.dev->struct_mutex);
3455 ret = i915_gem_object_wait(obj,
3456 I915_WAIT_INTERRUPTIBLE |
3458 (write ? I915_WAIT_ALL : 0),
3459 MAX_SCHEDULE_TIMEOUT,
3464 if (obj->write_domain == I915_GEM_DOMAIN_GTT)
3467 /* Flush and acquire obj->pages so that we are coherent through
3468 * direct access in memory with previous cached writes through
3469 * shmemfs and that our cache domain tracking remains valid.
3470 * For example, if the obj->filp was moved to swap without us
3471 * being notified and releasing the pages, we would mistakenly
3472 * continue to assume that the obj remained out of the CPU cached
3475 ret = i915_gem_object_pin_pages(obj);
3479 flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);
3481 /* Serialise direct access to this object with the barriers for
3482 * coherent writes from the GPU, by effectively invalidating the
3483 * GTT domain upon first access.
3485 if ((obj->read_domains & I915_GEM_DOMAIN_GTT) == 0)
3488 /* It should now be out of any other write domains, and we can update
3489 * the domain values for our changes.
3491 GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3492 obj->read_domains |= I915_GEM_DOMAIN_GTT;
3494 obj->read_domains = I915_GEM_DOMAIN_GTT;
3495 obj->write_domain = I915_GEM_DOMAIN_GTT;
3496 obj->mm.dirty = true;
3499 i915_gem_object_unpin_pages(obj);
3504 * Changes the cache-level of an object across all VMA.
3505 * @obj: object to act on
3506 * @cache_level: new cache level to set for the object
3508 * After this function returns, the object will be in the new cache-level
3509 * across all GTT and the contents of the backing storage will be coherent,
3510 * with respect to the new cache-level. In order to keep the backing storage
3511 * coherent for all users, we only allow a single cache level to be set
3512 * globally on the object and prevent it from being changed whilst the
3513 * hardware is reading from the object. That is if the object is currently
3514 * on the scanout it will be set to uncached (or equivalent display
3515 * cache coherency) and all non-MOCS GPU access will also be uncached so
3516 * that all direct access to the scanout remains coherent.
3518 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3519 enum i915_cache_level cache_level)
3521 struct i915_vma *vma;
3524 lockdep_assert_held(&obj->base.dev->struct_mutex);
3526 if (obj->cache_level == cache_level)
3529 /* Inspect the list of currently bound VMA and unbind any that would
3530 * be invalid given the new cache-level. This is principally to
3531 * catch the issue of the CS prefetch crossing page boundaries and
3532 * reading an invalid PTE on older architectures.
3535 list_for_each_entry(vma, &obj->vma.list, obj_link) {
3536 if (!drm_mm_node_allocated(&vma->node))
3539 if (i915_vma_is_pinned(vma)) {
3540 DRM_DEBUG("can not change the cache level of pinned objects\n");
3544 if (!i915_vma_is_closed(vma) &&
3545 i915_gem_valid_gtt_space(vma, cache_level))
3548 ret = i915_vma_unbind(vma);
3552 /* As unbinding may affect other elements in the
3553 * obj->vma_list (due to side-effects from retiring
3554 * an active vma), play safe and restart the iterator.
3559 /* We can reuse the existing drm_mm nodes but need to change the
3560 * cache-level on the PTE. We could simply unbind them all and
3561 * rebind with the correct cache-level on next use. However since
3562 * we already have a valid slot, dma mapping, pages etc, we may as well
3563 * rewrite the PTE in the belief that doing so tramples upon less
3564 * state and so involves less work.
3566 if (obj->bind_count) {
3567 /* Before we change the PTE, the GPU must not be accessing it.
3568 * If we wait upon the object, we know that all the bound
3569 * VMA are no longer active.
3571 ret = i915_gem_object_wait(obj,
3572 I915_WAIT_INTERRUPTIBLE |
3575 MAX_SCHEDULE_TIMEOUT,
3580 if (!HAS_LLC(to_i915(obj->base.dev)) &&
3581 cache_level != I915_CACHE_NONE) {
3582 /* Access to snoopable pages through the GTT is
3583 * incoherent and on some machines causes a hard
3584 * lockup. Relinquish the CPU mmapping to force
3585 * userspace to refault in the pages and we can
3586 * then double check if the GTT mapping is still
3587 * valid for that pointer access.
3589 i915_gem_release_mmap(obj);
3591 /* As we no longer need a fence for GTT access,
3592 * we can relinquish it now (and so prevent having
3593 * to steal a fence from someone else on the next
3594 * fence request). Note GPU activity would have
3595 * dropped the fence as all snoopable access is
3596 * supposed to be linear.
3598 for_each_ggtt_vma(vma, obj) {
3599 ret = i915_vma_put_fence(vma);
3604 /* We either have incoherent backing store and
3605 * so no GTT access or the architecture is fully
3606 * coherent. In such cases, existing GTT mmaps
3607 * ignore the cache bit in the PTE and we can
3608 * rewrite it without confusing the GPU or having
3609 * to force userspace to fault back in its mmaps.
3613 list_for_each_entry(vma, &obj->vma.list, obj_link) {
3614 if (!drm_mm_node_allocated(&vma->node))
3617 ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
3623 list_for_each_entry(vma, &obj->vma.list, obj_link)
3624 vma->node.color = cache_level;
3625 i915_gem_object_set_cache_coherency(obj, cache_level);
3626 obj->cache_dirty = true; /* Always invalidate stale cachelines */
3631 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3632 struct drm_file *file)
3634 struct drm_i915_gem_caching *args = data;
3635 struct drm_i915_gem_object *obj;
3639 obj = i915_gem_object_lookup_rcu(file, args->handle);
3645 switch (obj->cache_level) {
3646 case I915_CACHE_LLC:
3647 case I915_CACHE_L3_LLC:
3648 args->caching = I915_CACHING_CACHED;
3652 args->caching = I915_CACHING_DISPLAY;
3656 args->caching = I915_CACHING_NONE;
3664 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3665 struct drm_file *file)
3667 struct drm_i915_private *i915 = to_i915(dev);
3668 struct drm_i915_gem_caching *args = data;
3669 struct drm_i915_gem_object *obj;
3670 enum i915_cache_level level;
3673 switch (args->caching) {
3674 case I915_CACHING_NONE:
3675 level = I915_CACHE_NONE;
3677 case I915_CACHING_CACHED:
3679 * Due to a HW issue on BXT A stepping, GPU stores via a
3680 * snooped mapping may leave stale data in a corresponding CPU
3681 * cacheline, whereas normally such cachelines would get
3684 if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
3687 level = I915_CACHE_LLC;
3689 case I915_CACHING_DISPLAY:
3690 level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
3696 obj = i915_gem_object_lookup(file, args->handle);
3701 * The caching mode of proxy object is handled by its generator, and
3702 * not allowed to be changed by userspace.
3704 if (i915_gem_object_is_proxy(obj)) {
3709 if (obj->cache_level == level)
3712 ret = i915_gem_object_wait(obj,
3713 I915_WAIT_INTERRUPTIBLE,
3714 MAX_SCHEDULE_TIMEOUT,
3715 to_rps_client(file));
3719 ret = i915_mutex_lock_interruptible(dev);
3723 ret = i915_gem_object_set_cache_level(obj, level);
3724 mutex_unlock(&dev->struct_mutex);
3727 i915_gem_object_put(obj);
3732 * Prepare buffer for display plane (scanout, cursors, etc). Can be called from
3733 * an uninterruptible phase (modesetting) and allows any flushes to be pipelined
3734 * (for pageflips). We only flush the caches while preparing the buffer for
3735 * display, the callers are responsible for frontbuffer flush.
3738 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3740 const struct i915_ggtt_view *view,
3743 struct i915_vma *vma;
3746 lockdep_assert_held(&obj->base.dev->struct_mutex);
3748 /* Mark the global pin early so that we account for the
3749 * display coherency whilst setting up the cache domains.
3753 /* The display engine is not coherent with the LLC cache on gen6. As
3754 * a result, we make sure that the pinning that is about to occur is
3755 * done with uncached PTEs. This is lowest common denominator for all
3758 * However for gen6+, we could do better by using the GFDT bit instead
3759 * of uncaching, which would allow us to flush all the LLC-cached data
3760 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3762 ret = i915_gem_object_set_cache_level(obj,
3763 HAS_WT(to_i915(obj->base.dev)) ?
3764 I915_CACHE_WT : I915_CACHE_NONE);
3767 goto err_unpin_global;
3770 /* As the user may map the buffer once pinned in the display plane
3771 * (e.g. libkms for the bootup splash), we have to ensure that we
3772 * always use map_and_fenceable for all scanout buffers. However,
3773 * it may simply be too big to fit into mappable, in which case
3774 * put it anyway and hope that userspace can cope (but always first
3775 * try to preserve the existing ABI).
3777 vma = ERR_PTR(-ENOSPC);
3778 if ((flags & PIN_MAPPABLE) == 0 &&
3779 (!view || view->type == I915_GGTT_VIEW_NORMAL))
3780 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
3785 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
3787 goto err_unpin_global;
3789 vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
3791 __i915_gem_object_flush_for_display(obj);
3793 /* It should now be out of any other write domains, and we can update
3794 * the domain values for our changes.
3796 obj->read_domains |= I915_GEM_DOMAIN_GTT;
3806 i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
3808 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
3810 if (WARN_ON(vma->obj->pin_global == 0))
3813 if (--vma->obj->pin_global == 0)
3814 vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
3816 /* Bump the LRU to try and avoid premature eviction whilst flipping */
3817 i915_gem_object_bump_inactive_ggtt(vma->obj);
3819 i915_vma_unpin(vma);
3823 * Moves a single object to the CPU read, and possibly write domain.
3824 * @obj: object to act on
3825 * @write: requesting write or read-only access
3827 * This function returns when the move is complete, including waiting on
3831 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3835 lockdep_assert_held(&obj->base.dev->struct_mutex);
3837 ret = i915_gem_object_wait(obj,
3838 I915_WAIT_INTERRUPTIBLE |
3840 (write ? I915_WAIT_ALL : 0),
3841 MAX_SCHEDULE_TIMEOUT,
3846 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
3848 /* Flush the CPU cache if it's still invalid. */
3849 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3850 i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
3851 obj->read_domains |= I915_GEM_DOMAIN_CPU;
3854 /* It should now be out of any other write domains, and we can update
3855 * the domain values for our changes.
3857 GEM_BUG_ON(obj->write_domain & ~I915_GEM_DOMAIN_CPU);
3859 /* If we're writing through the CPU, then the GPU read domains will
3860 * need to be invalidated at next use.
3863 __start_cpu_write(obj);
3868 /* Throttle our rendering by waiting until the ring has completed our requests
3869 * emitted over 20 msec ago.
3871 * Note that if we were to use the current jiffies each time around the loop,
3872 * we wouldn't escape the function with any frames outstanding if the time to
3873 * render a frame was over 20ms.
3875 * This should get us reasonable parallelism between CPU and GPU but also
3876 * relatively low latency when blocking on a particular request to finish.
3879 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3881 struct drm_i915_private *dev_priv = to_i915(dev);
3882 struct drm_i915_file_private *file_priv = file->driver_priv;
3883 unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
3884 struct i915_request *request, *target = NULL;
3887 /* ABI: return -EIO if already wedged */
3888 if (i915_terminally_wedged(&dev_priv->gpu_error))
3891 spin_lock(&file_priv->mm.lock);
3892 list_for_each_entry(request, &file_priv->mm.request_list, client_link) {
3893 if (time_after_eq(request->emitted_jiffies, recent_enough))
3897 list_del(&target->client_link);
3898 target->file_priv = NULL;
3904 i915_request_get(target);
3905 spin_unlock(&file_priv->mm.lock);
3910 ret = i915_request_wait(target,
3911 I915_WAIT_INTERRUPTIBLE,
3912 MAX_SCHEDULE_TIMEOUT);
3913 i915_request_put(target);
3915 return ret < 0 ? ret : 0;
3919 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
3920 const struct i915_ggtt_view *view,
3925 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3926 struct i915_address_space *vm = &dev_priv->ggtt.vm;
3927 struct i915_vma *vma;
3930 lockdep_assert_held(&obj->base.dev->struct_mutex);
3932 if (flags & PIN_MAPPABLE &&
3933 (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
3934 /* If the required space is larger than the available
3935 * aperture, we will not be able to find a slot for the
3936 * object and unbinding the object now will be in
3937 * vain. Worse, doing so may cause us to ping-pong
3938 * the object in and out of the Global GTT and
3939 * waste a lot of cycles under the mutex.
3941 if (obj->base.size > dev_priv->ggtt.mappable_end)
3942 return ERR_PTR(-E2BIG);
3944 /* If NONBLOCK is set the caller is optimistically
3945 * trying to cache the full object within the mappable
3946 * aperture, and *must* have a fallback in place for
3947 * situations where we cannot bind the object. We
3948 * can be a little more lax here and use the fallback
3949 * more often to avoid costly migrations of ourselves
3950 * and other objects within the aperture.
3952 * Half-the-aperture is used as a simple heuristic.
3953 * More interesting would be to do a search for a free
3954 * block prior to making the commitment to unbind.
3955 * That caters for the self-harm case, and with a
3956 * little more heuristics (e.g. NOFAULT, NOEVICT)
3957 * we could try to minimise harm to others.
3959 if (flags & PIN_NONBLOCK &&
3960 obj->base.size > dev_priv->ggtt.mappable_end / 2)
3961 return ERR_PTR(-ENOSPC);
3964 vma = i915_vma_instance(obj, vm, view);
3965 if (unlikely(IS_ERR(vma)))
3968 if (i915_vma_misplaced(vma, size, alignment, flags)) {
3969 if (flags & PIN_NONBLOCK) {
3970 if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
3971 return ERR_PTR(-ENOSPC);
3973 if (flags & PIN_MAPPABLE &&
3974 vma->fence_size > dev_priv->ggtt.mappable_end / 2)
3975 return ERR_PTR(-ENOSPC);
3978 WARN(i915_vma_is_pinned(vma),
3979 "bo is already pinned in ggtt with incorrect alignment:"
3980 " offset=%08x, req.alignment=%llx,"
3981 " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
3982 i915_ggtt_offset(vma), alignment,
3983 !!(flags & PIN_MAPPABLE),
3984 i915_vma_is_map_and_fenceable(vma));
3985 ret = i915_vma_unbind(vma);
3987 return ERR_PTR(ret);
3990 ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
3992 return ERR_PTR(ret);
3997 static __always_inline unsigned int __busy_read_flag(unsigned int id)
3999 /* Note that we could alias engines in the execbuf API, but
4000 * that would be very unwise as it prevents userspace from
4001 * fine control over engine selection. Ahem.
4003 * This should be something like EXEC_MAX_ENGINE instead of
4006 BUILD_BUG_ON(I915_NUM_ENGINES > 16);
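/*
 * e.g. engine id 0 reports as 0x10000 and id 1 as 0x20000, leaving the low
 * 16 bits of the busy flags to carry the write engine id below.
 */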
4007 return 0x10000 << id;
4010 static __always_inline unsigned int __busy_write_id(unsigned int id)
4012 /* The uABI guarantees an active writer is also amongst the read
4013 * engines. This would be true if we accessed the activity tracking
4014 * under the lock, but as we perform the lookup of the object and
4015 * its activity locklessly we can not guarantee that the last_write
4016 * being active implies that we have set the same engine flag from
4017 * last_read - hence we always set both read and write busy for
4020 return id | __busy_read_flag(id);
4023 static __always_inline unsigned int
4024 __busy_set_if_active(const struct dma_fence *fence,
4025 unsigned int (*flag)(unsigned int id))
4027 struct i915_request *rq;
4029 /* We have to check the current hw status of the fence as the uABI
4030 * guarantees forward progress. We could rely on the idle worker
4031 * to eventually flush us, but to minimise latency just ask the
4034 * Note we only report on the status of native fences.
4036 if (!dma_fence_is_i915(fence))
4039 /* opencode to_request() in order to avoid const warnings */
4040 rq = container_of(fence, struct i915_request, fence);
4041 if (i915_request_completed(rq))
4044 return flag(rq->engine->uabi_id);
4047 static __always_inline unsigned int
4048 busy_check_reader(const struct dma_fence *fence)
4050 return __busy_set_if_active(fence, __busy_read_flag);
4053 static __always_inline unsigned int
4054 busy_check_writer(const struct dma_fence *fence)
4059 return __busy_set_if_active(fence, __busy_write_id);
4063 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4064 struct drm_file *file)
4066 struct drm_i915_gem_busy *args = data;
4067 struct drm_i915_gem_object *obj;
4068 struct reservation_object_list *list;
4074 obj = i915_gem_object_lookup_rcu(file, args->handle);
4078 /* A discrepancy here is that we do not report the status of
4079 * non-i915 fences, i.e. even though we may report the object as idle,
4080 * a call to set-domain may still stall waiting for foreign rendering.
4081 * This also means that wait-ioctl may report an object as busy,
4082 * where busy-ioctl considers it idle.
4084 * We trade the ability to warn of foreign fences to report on which
4085 * i915 engines are active for the object.
4087 * Alternatively, we can trade that extra information on read/write
4090 * !reservation_object_test_signaled_rcu(obj->resv, true);
4091 * to report the overall busyness. This is what the wait-ioctl does.
4095 seq = raw_read_seqcount(&obj->resv->seq);
4097 /* Translate the exclusive fence to the READ *and* WRITE engine */
4098 args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl));
4100 /* Translate shared fences to READ set of engines */
4101 list = rcu_dereference(obj->resv->fence);
4103 unsigned int shared_count = list->shared_count, i;
4105 for (i = 0; i < shared_count; ++i) {
4106 struct dma_fence *fence =
4107 rcu_dereference(list->shared[i]);
4109 args->busy |= busy_check_reader(fence);
4113 if (args->busy && read_seqcount_retry(&obj->resv->seq, seq))
4123 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4124 struct drm_file *file_priv)
4126 return i915_gem_ring_throttle(dev, file_priv);
4130 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4131 struct drm_file *file_priv)
4133 struct drm_i915_private *dev_priv = to_i915(dev);
4134 struct drm_i915_gem_madvise *args = data;
4135 struct drm_i915_gem_object *obj;
4138 switch (args->madv) {
4139 case I915_MADV_DONTNEED:
4140 case I915_MADV_WILLNEED:
4146 obj = i915_gem_object_lookup(file_priv, args->handle);
4150 err = mutex_lock_interruptible(&obj->mm.lock);
4154 if (i915_gem_object_has_pages(obj) &&
4155 i915_gem_object_is_tiled(obj) &&
4156 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
4157 if (obj->mm.madv == I915_MADV_WILLNEED) {
4158 GEM_BUG_ON(!obj->mm.quirked);
4159 __i915_gem_object_unpin_pages(obj);
4160 obj->mm.quirked = false;
4162 if (args->madv == I915_MADV_WILLNEED) {
4163 GEM_BUG_ON(obj->mm.quirked);
4164 __i915_gem_object_pin_pages(obj);
4165 obj->mm.quirked = true;
4169 if (obj->mm.madv != __I915_MADV_PURGED)
4170 obj->mm.madv = args->madv;
4172 /* if the object is no longer attached, discard its backing storage */
4173 if (obj->mm.madv == I915_MADV_DONTNEED &&
4174 !i915_gem_object_has_pages(obj))
4175 i915_gem_object_truncate(obj);
4177 args->retained = obj->mm.madv != __I915_MADV_PURGED;
4178 mutex_unlock(&obj->mm.lock);
4181 i915_gem_object_put(obj);
4186 frontbuffer_retire(struct i915_active_request *active,
4187 struct i915_request *request)
4189 struct drm_i915_gem_object *obj =
4190 container_of(active, typeof(*obj), frontbuffer_write);
4192 intel_fb_obj_flush(obj, ORIGIN_CS);
4195 void i915_gem_object_init(struct drm_i915_gem_object *obj,
4196 const struct drm_i915_gem_object_ops *ops)
4198 mutex_init(&obj->mm.lock);
4200 spin_lock_init(&obj->vma.lock);
4201 INIT_LIST_HEAD(&obj->vma.list);
4203 INIT_LIST_HEAD(&obj->lut_list);
4204 INIT_LIST_HEAD(&obj->batch_pool_link);
4206 init_rcu_head(&obj->rcu);
4210 reservation_object_init(&obj->__builtin_resv);
4211 obj->resv = &obj->__builtin_resv;
4213 obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
4214 i915_active_request_init(&obj->frontbuffer_write,
4215 NULL, frontbuffer_retire);
4217 obj->mm.madv = I915_MADV_WILLNEED;
4218 INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
4219 mutex_init(&obj->mm.get_page.lock);
4221 i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
4224 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4225 .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
4226 I915_GEM_OBJECT_IS_SHRINKABLE,
4228 .get_pages = i915_gem_object_get_pages_gtt,
4229 .put_pages = i915_gem_object_put_pages_gtt,
4231 .pwrite = i915_gem_object_pwrite_gtt,
4234 static int i915_gem_object_create_shmem(struct drm_device *dev,
4235 struct drm_gem_object *obj,
4238 struct drm_i915_private *i915 = to_i915(dev);
4239 unsigned long flags = VM_NORESERVE;
4242 drm_gem_private_object_init(dev, obj, size);
4245 filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
4248 filp = shmem_file_setup("i915", size, flags);
4251 return PTR_ERR(filp);
4258 struct drm_i915_gem_object *
4259 i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
4261 struct drm_i915_gem_object *obj;
4262 struct address_space *mapping;
4263 unsigned int cache_level;
4267 /* There is a prevalence of the assumption that we fit the object's
4268 * page count inside a 32bit _signed_ variable. Let's document this and
4269 * catch if we ever need to fix it. In the meantime, if you do spot
4270 * such a local variable, please consider fixing!
4272 if (size >> PAGE_SHIFT > INT_MAX)
4273 return ERR_PTR(-E2BIG);
4275 if (overflows_type(size, obj->base.size))
4276 return ERR_PTR(-E2BIG);
4278 obj = i915_gem_object_alloc(dev_priv);
4280 return ERR_PTR(-ENOMEM);
4282 ret = i915_gem_object_create_shmem(&dev_priv->drm, &obj->base, size);
4286 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4287 if (IS_I965GM(dev_priv) || IS_I965G(dev_priv)) {
4288 /* 965gm cannot relocate objects above 4GiB. */
4289 mask &= ~__GFP_HIGHMEM;
4290 mask |= __GFP_DMA32;
4293 mapping = obj->base.filp->f_mapping;
4294 mapping_set_gfp_mask(mapping, mask);
4295 GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
4297 i915_gem_object_init(obj, &i915_gem_object_ops);
4299 obj->write_domain = I915_GEM_DOMAIN_CPU;
4300 obj->read_domains = I915_GEM_DOMAIN_CPU;
4302 if (HAS_LLC(dev_priv))
4303 /* On some devices, we can have the GPU use the LLC (the CPU
4304 * cache) for about a 10% performance improvement
4305 * compared to uncached. Graphics requests other than
4306 * display scanout are coherent with the CPU in
4307 * accessing this cache. This means in this mode we
4308 * don't need to clflush on the CPU side, and on the
4309 * GPU side we only need to flush internal caches to
4310 * get data visible to the CPU.
4312 * However, we maintain the display planes as UC, and so
4313 * need to rebind when first used as such.
4315 cache_level = I915_CACHE_LLC;
4317 cache_level = I915_CACHE_NONE;
4319 i915_gem_object_set_cache_coherency(obj, cache_level);
4321 trace_i915_gem_object_create(obj);
4326 i915_gem_object_free(obj);
4327 return ERR_PTR(ret);
4330 static bool discard_backing_storage(struct drm_i915_gem_object *obj)
4332 /* If we are the last user of the backing storage (be it shmemfs
4333 * pages or stolen etc), we know that the pages are going to be
4334 * immediately released. In this case, we can then skip copying
4335 * back the contents from the GPU.
4338 if (obj->mm.madv != I915_MADV_WILLNEED)
4341 if (obj->base.filp == NULL)
4344 /* At first glance, this looks racy, but then again so would be
4345 * userspace racing mmap against close. However, the first external
4346 * reference to the filp can only be obtained through the
4347 * i915_gem_mmap_ioctl() which safeguards us against the user
4348 * acquiring such a reference whilst we are in the middle of
4349 * freeing the object.
4351 return atomic_long_read(&obj->base.filp->f_count) == 1;
4354 static void __i915_gem_free_objects(struct drm_i915_private *i915,
4355 struct llist_node *freed)
4357 struct drm_i915_gem_object *obj, *on;
4358 intel_wakeref_t wakeref;
4360 wakeref = intel_runtime_pm_get(i915);
4361 llist_for_each_entry_safe(obj, on, freed, freed) {
4362 struct i915_vma *vma, *vn;
4364 trace_i915_gem_object_destroy(obj);
4366 mutex_lock(&i915->drm.struct_mutex);
4368 GEM_BUG_ON(i915_gem_object_is_active(obj));
4369 list_for_each_entry_safe(vma, vn, &obj->vma.list, obj_link) {
4370 GEM_BUG_ON(i915_vma_is_active(vma));
4371 vma->flags &= ~I915_VMA_PIN_MASK;
4372 i915_vma_destroy(vma);
4374 GEM_BUG_ON(!list_empty(&obj->vma.list));
4375 GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma.tree));
4377 /* This serializes freeing with the shrinker. Since the free
4378 * is delayed, first by RCU then by the workqueue, we want the
4379 * shrinker to be able to free pages of unreferenced objects,
4380 * or else we may oom whilst there are plenty of deferred
4383 if (i915_gem_object_has_pages(obj)) {
4384 spin_lock(&i915->mm.obj_lock);
4385 list_del_init(&obj->mm.link);
4386 spin_unlock(&i915->mm.obj_lock);
4389 mutex_unlock(&i915->drm.struct_mutex);
4391 GEM_BUG_ON(obj->bind_count);
4392 GEM_BUG_ON(obj->userfault_count);
4393 GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
4394 GEM_BUG_ON(!list_empty(&obj->lut_list));
4396 if (obj->ops->release)
4397 obj->ops->release(obj);
4399 if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
4400 atomic_set(&obj->mm.pages_pin_count, 0);
4401 __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
4402 GEM_BUG_ON(i915_gem_object_has_pages(obj));
4404 if (obj->base.import_attach)
4405 drm_prime_gem_destroy(&obj->base, NULL);
4407 reservation_object_fini(&obj->__builtin_resv);
4408 drm_gem_object_release(&obj->base);
4409 i915_gem_info_remove_obj(i915, obj->base.size);
4412 i915_gem_object_free(obj);
4414 GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
4415 atomic_dec(&i915->mm.free_count);
4420 intel_runtime_pm_put(i915, wakeref);
4423 static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
4425 struct llist_node *freed;
4427 /* Free the oldest, most stale object to keep the free_list short */
4429 if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */
4430 /* Only one consumer of llist_del_first() allowed */
4431 spin_lock(&i915->mm.free_lock);
4432 freed = llist_del_first(&i915->mm.free_list);
4433 spin_unlock(&i915->mm.free_lock);
4435 if (unlikely(freed)) {
4437 __i915_gem_free_objects(i915, freed);
4441 static void __i915_gem_free_work(struct work_struct *work)
4443 struct drm_i915_private *i915 =
4444 container_of(work, struct drm_i915_private, mm.free_work);
4445 struct llist_node *freed;
4448 * All file-owned VMA should have been released by this point through
4449 * i915_gem_close_object(), or earlier by i915_gem_context_close().
4450 * However, the object may also be bound into the global GTT (e.g.
4451 * older GPUs without per-process support, or for direct access through
4452 * the GTT either for the user or for scanout). Those VMA still need to
4456 spin_lock(&i915->mm.free_lock);
4457 while ((freed = llist_del_all(&i915->mm.free_list))) {
4458 spin_unlock(&i915->mm.free_lock);
4460 __i915_gem_free_objects(i915, freed);
4464 spin_lock(&i915->mm.free_lock);
4466 spin_unlock(&i915->mm.free_lock);
4469 static void __i915_gem_free_object_rcu(struct rcu_head *head)
4471 struct drm_i915_gem_object *obj =
4472 container_of(head, typeof(*obj), rcu);
4473 struct drm_i915_private *i915 = to_i915(obj->base.dev);
4476 * We reuse obj->rcu for the freed list, so we had better not treat
4477 * it like a rcu_head from this point forwards. And we expect all
4478 * objects to be freed via this path.
4480 destroy_rcu_head(&obj->rcu);
4483 * Since we require blocking on struct_mutex to unbind the freed
4484 * object from the GPU before releasing resources back to the
4485 * system, we can not do that directly from the RCU callback (which may
4486 * be a softirq context), but must instead then defer that work onto a
4487 * kthread. We use the RCU callback rather than move the freed object
4488 * directly onto the work queue so that we can mix between using the
4489 * worker and performing frees directly from subsequent allocations for
4490 * crude but effective memory throttling.
4492 if (llist_add(&obj->freed, &i915->mm.free_list))
4493 queue_work(i915->wq, &i915->mm.free_work);
4496 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4498 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4500 if (obj->mm.quirked)
4501 __i915_gem_object_unpin_pages(obj);
4503 if (discard_backing_storage(obj))
4504 obj->mm.madv = I915_MADV_DONTNEED;
4507 * Before we free the object, make sure any pure RCU-only
4508 * read-side critical sections are complete, e.g.
4509 * i915_gem_busy_ioctl(). For the corresponding synchronized
4510 * lookup see i915_gem_object_lookup_rcu().
4512 atomic_inc(&to_i915(obj->base.dev)->mm.free_count);
4513 call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
4516 void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
4518 lockdep_assert_held(&obj->base.dev->struct_mutex);
4520 if (!i915_gem_object_has_active_reference(obj) &&
4521 i915_gem_object_is_active(obj))
4522 i915_gem_object_set_active_reference(obj);
4524 i915_gem_object_put(obj);
4527 void i915_gem_sanitize(struct drm_i915_private *i915)
4529 intel_wakeref_t wakeref;
4533 wakeref = intel_runtime_pm_get(i915);
4534 intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
4537 * As we have just resumed the machine and woken the device up from
4538 * deep PCI sleep (presumably D3_cold), assume the HW has been reset
4539 * back to defaults, recovering from whatever wedged state we left it
4540 * in and so worth trying to use the device once more.
4542 if (i915_terminally_wedged(&i915->gpu_error))
4543 i915_gem_unset_wedged(i915);
4546 * If we inherit context state from the BIOS or earlier occupants
4547 * of the GPU, the GPU may be in an inconsistent state when we
4548 * try to take over. The only way to remove the earlier state
4549 * is by resetting. However, resetting on earlier gen is tricky as
4550 * it may impact the display and we are uncertain about the stability
4551 * of the reset, so this could be applied to even earlier gen.
4553 intel_engines_sanitize(i915, false);
4555 intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
4556 intel_runtime_pm_put(i915, wakeref);
4558 mutex_lock(&i915->drm.struct_mutex);
4559 i915_gem_contexts_lost(i915);
4560 mutex_unlock(&i915->drm.struct_mutex);
4563 int i915_gem_suspend(struct drm_i915_private *i915)
4565 intel_wakeref_t wakeref;
4570 wakeref = intel_runtime_pm_get(i915);
4571 intel_suspend_gt_powersave(i915);
4573 flush_workqueue(i915->wq);
4575 mutex_lock(&i915->drm.struct_mutex);
4578 * We have to flush all the executing contexts to main memory so
4579 * that they can be saved in the hibernation image. To ensure the last
4580 * context image is coherent, we have to switch away from it. That
4581 * leaves the i915->kernel_context still active when
4582 * we actually suspend, and its image in memory may not match the GPU
4583 * state. Fortunately, the kernel_context is disposable and we do
4584 * not rely on its state.
4586 if (!i915_terminally_wedged(&i915->gpu_error)) {
4587 ret = i915_gem_switch_to_kernel_context(i915);
4591 ret = i915_gem_wait_for_idle(i915,
4592 I915_WAIT_INTERRUPTIBLE |
4594 I915_WAIT_FOR_IDLE_BOOST,
4595 MAX_SCHEDULE_TIMEOUT);
4596 if (ret && ret != -EIO)
4599 assert_kernel_context_is_current(i915);
4601 i915_retire_requests(i915); /* ensure we flush after wedging */
4603 mutex_unlock(&i915->drm.struct_mutex);
4604 i915_reset_flush(i915);
4606 drain_delayed_work(&i915->gt.retire_work);
4609 * As the idle_work rearms itself if it detects a race, play safe and
4610 * repeat the flush until it is definitely idle.
4612 drain_delayed_work(&i915->gt.idle_work);
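/*
 * For reference, draining a self-rearming delayed work boils down to the
 * loop below. This is only a sketch of the idea; the exact shape and
 * location of the driver's own drain_delayed_work() helper are assumed here,
 * not quoted: keep flushing until the work has neither run nor re-armed.
 *
 *	static void drain_rearming_work(struct delayed_work *dw)
 *	{
 *		do {
 *			while (flush_delayed_work(dw))
 *				;
 *		} while (delayed_work_pending(dw));
 *	}
 */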
4614 intel_uc_suspend(i915);
4617 * Assert that we successfully flushed all the work and
4618 * reset the GPU back to its idle, low power state.
4620 WARN_ON(i915->gt.awake);
4621 if (WARN_ON(!intel_engines_are_idle(i915)))
4622 i915_gem_set_wedged(i915); /* no hope, discard everything */
4624 intel_runtime_pm_put(i915, wakeref);
4628 mutex_unlock(&i915->drm.struct_mutex);
4629 intel_runtime_pm_put(i915, wakeref);
4633 void i915_gem_suspend_late(struct drm_i915_private *i915)
4635 struct drm_i915_gem_object *obj;
4636 struct list_head *phases[] = {
4637 &i915->mm.unbound_list,
4638 &i915->mm.bound_list,
4643 * Neither the BIOS, ourselves nor any other kernel
4644 * expects the system to be in execlists mode on startup,
4645 * so we need to reset the GPU back to legacy mode. And the only
4646 * known way to disable logical contexts is through a GPU reset.
4648 * So in order to leave the system in a known default configuration,
4649 * always reset the GPU upon unload and suspend. Afterwards we then
4650 * clean up the GEM state tracking, flushing off the requests and
4651 * leaving the system in a known idle state.
4653 * Note that it is of the utmost importance that the GPU is idle and
4654 * all stray writes are flushed *before* we dismantle the backing
4655 * storage for the pinned objects.
4657 * However, since we are uncertain that resetting the GPU on older
4658 * machines is a good idea, we don't - just in case it leaves the
4659 * machine in an unusable condition.
4662 mutex_lock(&i915->drm.struct_mutex);
4663 for (phase = phases; *phase; phase++) {
4664 list_for_each_entry(obj, *phase, mm.link)
4665 WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
4667 mutex_unlock(&i915->drm.struct_mutex);
4669 intel_uc_sanitize(i915);
4670 i915_gem_sanitize(i915);
4673 void i915_gem_resume(struct drm_i915_private *i915)
4677 WARN_ON(i915->gt.awake);
4679 mutex_lock(&i915->drm.struct_mutex);
4680 intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
4682 i915_gem_restore_gtt_mappings(i915);
4683 i915_gem_restore_fences(i915);
4686 * As we didn't flush the kernel context before suspend, we cannot
4687 * guarantee that the context image is complete. So let's just reset
4688 * it and start again.
4690 i915->gt.resume(i915);
4692 if (i915_gem_init_hw(i915))
4695 intel_uc_resume(i915);
4697 /* Always reload a context for powersaving. */
4698 if (i915_gem_switch_to_kernel_context(i915))
4702 intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
4703 mutex_unlock(&i915->drm.struct_mutex);
4707 if (!i915_terminally_wedged(&i915->gpu_error)) {
4708 DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
4709 i915_gem_set_wedged(i915);
4714 void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
4716 if (INTEL_GEN(dev_priv) < 5 ||
4717 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4720 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4721 DISP_TILE_SURFACE_SWIZZLING);
4723 if (IS_GEN(dev_priv, 5))
4726 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4727 if (IS_GEN(dev_priv, 6))
4728 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4729 else if (IS_GEN(dev_priv, 7))
4730 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4731 else if (IS_GEN(dev_priv, 8))
4732 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4737 static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
4739 I915_WRITE(RING_CTL(base), 0);
4740 I915_WRITE(RING_HEAD(base), 0);
4741 I915_WRITE(RING_TAIL(base), 0);
4742 I915_WRITE(RING_START(base), 0);
4745 static void init_unused_rings(struct drm_i915_private *dev_priv)
4747 if (IS_I830(dev_priv)) {
4748 init_unused_ring(dev_priv, PRB1_BASE);
4749 init_unused_ring(dev_priv, SRB0_BASE);
4750 init_unused_ring(dev_priv, SRB1_BASE);
4751 init_unused_ring(dev_priv, SRB2_BASE);
4752 init_unused_ring(dev_priv, SRB3_BASE);
4753 } else if (IS_GEN(dev_priv, 2)) {
4754 init_unused_ring(dev_priv, SRB0_BASE);
4755 init_unused_ring(dev_priv, SRB1_BASE);
4756 } else if (IS_GEN(dev_priv, 3)) {
4757 init_unused_ring(dev_priv, PRB1_BASE);
4758 init_unused_ring(dev_priv, PRB2_BASE);
4762 static int __i915_gem_restart_engines(void *data)
4764 struct drm_i915_private *i915 = data;
4765 struct intel_engine_cs *engine;
4766 enum intel_engine_id id;
4769 for_each_engine(engine, i915, id) {
4770 err = engine->init_hw(engine);
4772 DRM_ERROR("Failed to restart %s (%d)\n",
4781 int i915_gem_init_hw(struct drm_i915_private *dev_priv)
4785 dev_priv->gt.last_init_time = ktime_get();
4787 /* Double layer security blanket, see i915_gem_init() */
4788 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4790 if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
4791 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4793 if (IS_HASWELL(dev_priv))
4794 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
4795 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4797 /* Apply the GT workarounds... */
4798 intel_gt_apply_workarounds(dev_priv);
4799 /* ...and determine whether they are sticking. */
4800 intel_gt_verify_workarounds(dev_priv, "init");
4802 i915_gem_init_swizzling(dev_priv);
4805 * At least 830 can leave some of the unused rings
4806 * "active" (ie. head != tail) after resume which
4807 * will prevent c3 entry. Makes sure all unused rings
4810 init_unused_rings(dev_priv);
4812 BUG_ON(!dev_priv->kernel_context);
4813 if (i915_terminally_wedged(&dev_priv->gpu_error)) {
4818 ret = i915_ppgtt_init_hw(dev_priv);
4820 DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
4824 ret = intel_wopcm_init_hw(&dev_priv->wopcm);
4826 DRM_ERROR("Enabling WOPCM failed (%d)\n", ret);
4830 /* We can't enable contexts until all firmware is loaded */
4831 ret = intel_uc_init_hw(dev_priv);
4833 DRM_ERROR("Enabling uc failed (%d)\n", ret);
4837 intel_mocs_init_l3cc_table(dev_priv);
4839 /* Only when the HW is re-initialised can we replay the requests */
4840 ret = __i915_gem_restart_engines(dev_priv);
4844 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4849 intel_uc_fini_hw(dev_priv);
4851 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4856 static int __intel_engines_record_defaults(struct drm_i915_private *i915)
4858 struct i915_gem_context *ctx;
4859 struct intel_engine_cs *engine;
4860 enum intel_engine_id id;
4864 * As we reset the GPU during very early sanitisation, the current
4865 * register state on the GPU should reflect its default values.
4866 * We load a context onto the hw (with restore-inhibit), then switch
4867 * over to a second context to save that default register state. We
4868 * can then prime every new context with that state so they all start
4869 * from the same default HW values.
4872 ctx = i915_gem_context_create_kernel(i915, 0);
4874 return PTR_ERR(ctx);
4876 for_each_engine(engine, i915, id) {
4877 struct i915_request *rq;
4879 rq = i915_request_alloc(engine, ctx);
4886 if (engine->init_context)
4887 err = engine->init_context(rq);
4889 i915_request_add(rq);
4894 err = i915_gem_switch_to_kernel_context(i915);
4898 if (i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED, HZ / 5)) {
4899 i915_gem_set_wedged(i915);
4900 err = -EIO; /* Caller will declare us wedged */
4904 assert_kernel_context_is_current(i915);
4907 * Immediately park the GPU so that we enable powersaving and
4908 * treat it as idle. The next time we issue a request, we will
4909 * unpark and start using the engine->pinned_default_state, otherwise
4910 * it is in limbo and an early reset may fail.
4912 __i915_gem_park(i915);
4914 for_each_engine(engine, i915, id) {
4915 struct i915_vma *state;
4918 GEM_BUG_ON(to_intel_context(ctx, engine)->pin_count);
4920 state = to_intel_context(ctx, engine)->state;
4925 * As we will hold a reference to the logical state, it will
4926 * not be torn down with the context, and importantly the
4927 * object will hold onto its vma (making it possible for a
4928 * stray GTT write to corrupt our defaults). Unmap the vma
4929 * from the GTT to prevent such accidents and reclaim the space.
4932 err = i915_vma_unbind(state);
4936 err = i915_gem_object_set_to_cpu_domain(state->obj, false);
4940 engine->default_state = i915_gem_object_get(state->obj);
4942 /* Check we can acquire the image of the context state */
4943 vaddr = i915_gem_object_pin_map(engine->default_state,
4945 if (IS_ERR(vaddr)) {
4946 err = PTR_ERR(vaddr);
4950 i915_gem_object_unpin_map(engine->default_state);
4953 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
4954 unsigned int found = intel_engines_has_context_isolation(i915);
4957 * Make sure that classes with multiple engine instances all
4958 * share the same basic configuration.
4960 for_each_engine(engine, i915, id) {
4961 unsigned int bit = BIT(engine->uabi_class);
4962 unsigned int expected = engine->default_state ? bit : 0;
4964 if ((found & bit) != expected) {
4965 DRM_ERROR("mismatching default context state for class %d on engine %s\n",
4966 engine->uabi_class, engine->name);
4972 i915_gem_context_set_closed(ctx);
4973 i915_gem_context_put(ctx);
4978 * If we have to abandon now, we expect the engines to be idle
4979 * and ready to be torn-down. First try to flush any remaining
4980 * requests, ensure we are pointing at the kernel context and then remove it.
4983 if (WARN_ON(i915_gem_switch_to_kernel_context(i915)))
4986 if (WARN_ON(i915_gem_wait_for_idle(i915,
4988 MAX_SCHEDULE_TIMEOUT)))
4991 i915_gem_contexts_lost(i915);
4996 i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
4998 struct drm_i915_gem_object *obj;
4999 struct i915_vma *vma;
5002 obj = i915_gem_object_create_stolen(i915, size);
5004 obj = i915_gem_object_create_internal(i915, size);
5006 DRM_ERROR("Failed to allocate scratch page\n");
5007 return PTR_ERR(obj);
5010 vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
5016 ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
5020 i915->gt.scratch = vma;
5024 i915_gem_object_put(obj);
5028 static void i915_gem_fini_scratch(struct drm_i915_private *i915)
5030 i915_vma_unpin_and_release(&i915->gt.scratch, 0);
5033 int i915_gem_init(struct drm_i915_private *dev_priv)
5037 /* We need to fallback to 4K pages if host doesn't support huge gtt. */
5038 if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
5039 mkwrite_device_info(dev_priv)->page_sizes =
5040 I915_GTT_PAGE_SIZE_4K;
5042 dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
5044 if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
5045 dev_priv->gt.resume = intel_lr_context_resume;
5046 dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
5048 dev_priv->gt.resume = intel_legacy_submission_resume;
5049 dev_priv->gt.cleanup_engine = intel_engine_cleanup;
5052 i915_timelines_init(dev_priv);
5054 ret = i915_gem_init_userptr(dev_priv);
5058 ret = intel_uc_init_misc(dev_priv);
5062 ret = intel_wopcm_init(&dev_priv->wopcm);
5066 /* This is just a security blanket to placate dragons.
5067 * On some systems, we very sporadically observe that the first TLBs
5068 * used by the CS may be stale, despite us poking the TLB reset. If
5069 * we hold the forcewake during initialisation these problems
5070 * just magically go away.
5072 mutex_lock(&dev_priv->drm.struct_mutex);
5073 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5075 ret = i915_gem_init_ggtt(dev_priv);
5077 GEM_BUG_ON(ret == -EIO);
5081 ret = i915_gem_init_scratch(dev_priv,
5082 IS_GEN(dev_priv, 2) ? SZ_256K : PAGE_SIZE);
5084 GEM_BUG_ON(ret == -EIO);
5088 ret = i915_gem_contexts_init(dev_priv);
5090 GEM_BUG_ON(ret == -EIO);
5094 ret = intel_engines_init(dev_priv);
5096 GEM_BUG_ON(ret == -EIO);
5100 intel_init_gt_powersave(dev_priv);
5102 ret = intel_uc_init(dev_priv);
5106 ret = i915_gem_init_hw(dev_priv);
5111 * Despite its name, intel_init_clock_gating applies display clock
5112 * gating workarounds, GT mmio workarounds and the occasional
5113 * GT power context workaround. Worse, sometimes it includes a context
5114 * register workaround which we need to apply before we record the
5115 * default HW state for all contexts.
5117 * FIXME: break up the workarounds and apply them at the right time!
5119 intel_init_clock_gating(dev_priv);
5121 ret = __intel_engines_record_defaults(dev_priv);
5125 if (i915_inject_load_failure()) {
5130 if (i915_inject_load_failure()) {
5135 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5136 mutex_unlock(&dev_priv->drm.struct_mutex);
5141 * Unwinding is complicated by the fact that we want to handle -EIO to
5142 * mean disable GPU submission but keep KMS alive. We want to mark the
5143 * HW as irreversibly wedged, but keep enough state around that the
5144 * driver doesn't explode during runtime.
5147 mutex_unlock(&dev_priv->drm.struct_mutex);
5149 WARN_ON(i915_gem_suspend(dev_priv));
5150 i915_gem_suspend_late(dev_priv);
5152 i915_gem_drain_workqueue(dev_priv);
5154 mutex_lock(&dev_priv->drm.struct_mutex);
5155 intel_uc_fini_hw(dev_priv);
5157 intel_uc_fini(dev_priv);
5160 intel_cleanup_gt_powersave(dev_priv);
5161 i915_gem_cleanup_engines(dev_priv);
5165 i915_gem_contexts_fini(dev_priv);
5167 i915_gem_fini_scratch(dev_priv);
5170 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5171 mutex_unlock(&dev_priv->drm.struct_mutex);
5174 intel_uc_fini_misc(dev_priv);
5177 i915_gem_cleanup_userptr(dev_priv);
5178 i915_timelines_fini(dev_priv);
5182 mutex_lock(&dev_priv->drm.struct_mutex);
5185 * Allow engine initialisation to fail by marking the GPU as
5186 * wedged. But we only want to do this where the GPU is angry,
5187 * for all other failures, such as an allocation failure, bail.
5189 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
5190 i915_load_error(dev_priv,
5191 "Failed to initialize GPU, declaring it wedged!\n");
5192 i915_gem_set_wedged(dev_priv);
5195 /* Minimal basic recovery for KMS */
5196 ret = i915_ggtt_enable_hw(dev_priv);
5197 i915_gem_restore_gtt_mappings(dev_priv);
5198 i915_gem_restore_fences(dev_priv);
5199 intel_init_clock_gating(dev_priv);
5201 mutex_unlock(&dev_priv->drm.struct_mutex);
5204 i915_gem_drain_freed_objects(dev_priv);
5208 void i915_gem_fini(struct drm_i915_private *dev_priv)
5210 i915_gem_suspend_late(dev_priv);
5211 intel_disable_gt_powersave(dev_priv);
5213 /* Flush any outstanding unpin_work. */
5214 i915_gem_drain_workqueue(dev_priv);
5216 mutex_lock(&dev_priv->drm.struct_mutex);
5217 intel_uc_fini_hw(dev_priv);
5218 intel_uc_fini(dev_priv);
5219 i915_gem_cleanup_engines(dev_priv);
5220 i915_gem_contexts_fini(dev_priv);
5221 i915_gem_fini_scratch(dev_priv);
5222 mutex_unlock(&dev_priv->drm.struct_mutex);
5224 intel_wa_list_free(&dev_priv->gt_wa_list);
5226 intel_cleanup_gt_powersave(dev_priv);
5228 intel_uc_fini_misc(dev_priv);
5229 i915_gem_cleanup_userptr(dev_priv);
5230 i915_timelines_fini(dev_priv);
5232 i915_gem_drain_freed_objects(dev_priv);
5234 WARN_ON(!list_empty(&dev_priv->contexts.list));
5237 void i915_gem_init_mmio(struct drm_i915_private *i915)
5239 i915_gem_sanitize(i915);
5243 i915_gem_cleanup_engines(struct drm_i915_private *dev_priv)
5245 struct intel_engine_cs *engine;
5246 enum intel_engine_id id;
5248 for_each_engine(engine, dev_priv, id)
5249 dev_priv->gt.cleanup_engine(engine);
5253 i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
5257 if (INTEL_GEN(dev_priv) >= 7 && !IS_VALLEYVIEW(dev_priv) &&
5258 !IS_CHERRYVIEW(dev_priv))
5259 dev_priv->num_fence_regs = 32;
5260 else if (INTEL_GEN(dev_priv) >= 4 ||
5261 IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
5262 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
5263 dev_priv->num_fence_regs = 16;
5265 dev_priv->num_fence_regs = 8;
5267 if (intel_vgpu_active(dev_priv))
5268 dev_priv->num_fence_regs =
5269 I915_READ(vgtif_reg(avail_rs.fence_num));
5271 /* Initialize fence registers to zero */
5272 for (i = 0; i < dev_priv->num_fence_regs; i++) {
5273 struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
5275 fence->i915 = dev_priv;
5277 list_add_tail(&fence->link, &dev_priv->mm.fence_list);
5279 i915_gem_restore_fences(dev_priv);
5281 i915_gem_detect_bit_6_swizzle(dev_priv);
5284 static void i915_gem_init__mm(struct drm_i915_private *i915)
5286 spin_lock_init(&i915->mm.object_stat_lock);
5287 spin_lock_init(&i915->mm.obj_lock);
5288 spin_lock_init(&i915->mm.free_lock);
5290 init_llist_head(&i915->mm.free_list);
5292 INIT_LIST_HEAD(&i915->mm.unbound_list);
5293 INIT_LIST_HEAD(&i915->mm.bound_list);
5294 INIT_LIST_HEAD(&i915->mm.fence_list);
5295 INIT_LIST_HEAD(&i915->mm.userfault_list);
5297 INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
5300 int i915_gem_init_early(struct drm_i915_private *dev_priv)
5304 dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
5305 if (!dev_priv->objects)
5308 dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
5309 if (!dev_priv->vmas)
5312 dev_priv->luts = KMEM_CACHE(i915_lut_handle, 0);
5313 if (!dev_priv->luts)
5316 dev_priv->requests = KMEM_CACHE(i915_request,
5317 SLAB_HWCACHE_ALIGN |
5318 SLAB_RECLAIM_ACCOUNT |
5319 SLAB_TYPESAFE_BY_RCU);
5320 if (!dev_priv->requests)
5323 dev_priv->dependencies = KMEM_CACHE(i915_dependency,
5324 SLAB_HWCACHE_ALIGN |
5325 SLAB_RECLAIM_ACCOUNT);
5326 if (!dev_priv->dependencies)
5329 dev_priv->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
5330 if (!dev_priv->priorities)
5331 goto err_dependencies;
5333 INIT_LIST_HEAD(&dev_priv->gt.active_rings);
5334 INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
5336 i915_gem_init__mm(dev_priv);
5338 INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
5339 i915_gem_retire_work_handler);
5340 INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
5341 i915_gem_idle_work_handler);
5342 init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
5343 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
5344 mutex_init(&dev_priv->gpu_error.wedge_mutex);
5346 atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
5348 spin_lock_init(&dev_priv->fb_tracking.lock);
5350 err = i915_gemfs_init(dev_priv);
5352 DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", err);
5357 kmem_cache_destroy(dev_priv->dependencies);
5359 kmem_cache_destroy(dev_priv->requests);
5361 kmem_cache_destroy(dev_priv->luts);
5363 kmem_cache_destroy(dev_priv->vmas);
5365 kmem_cache_destroy(dev_priv->objects);
5370 void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
5372 i915_gem_drain_freed_objects(dev_priv);
5373 GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
5374 GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
5375 WARN_ON(dev_priv->mm.object_count);
5377 kmem_cache_destroy(dev_priv->priorities);
5378 kmem_cache_destroy(dev_priv->dependencies);
5379 kmem_cache_destroy(dev_priv->requests);
5380 kmem_cache_destroy(dev_priv->luts);
5381 kmem_cache_destroy(dev_priv->vmas);
5382 kmem_cache_destroy(dev_priv->objects);
5384 /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
5387 i915_gemfs_fini(dev_priv);
5390 int i915_gem_freeze(struct drm_i915_private *dev_priv)
5392 /* Discard all purgeable objects, let userspace recover those as
5393 * required after resuming.
5395 i915_gem_shrink_all(dev_priv);
5400 int i915_gem_freeze_late(struct drm_i915_private *i915)
5402 struct drm_i915_gem_object *obj;
5403 struct list_head *phases[] = {
5404 &i915->mm.unbound_list,
5405 &i915->mm.bound_list,
5410 * Called just before we write the hibernation image.
5412 * We need to update the domain tracking to reflect that the CPU
5413 * will be accessing all the pages to create and restore from the
5414 * hibernation, and so upon restoration those pages will be in the CPU domain.
5417 * To make sure the hibernation image contains the latest state,
5418 * we update that state just before writing out the image.
5420 * To try and reduce the hibernation image, we manually shrink
5421 * the objects as well, see i915_gem_freeze()
5424 i915_gem_shrink(i915, -1UL, NULL, I915_SHRINK_UNBOUND);
5425 i915_gem_drain_freed_objects(i915);
5427 mutex_lock(&i915->drm.struct_mutex);
5428 for (phase = phases; *phase; phase++) {
5429 list_for_each_entry(obj, *phase, mm.link)
5430 WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true));
5432 mutex_unlock(&i915->drm.struct_mutex);
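/*
 * A note on the loop above (the same idiom appears in
 * i915_gem_suspend_late()): the NULL-terminated array of list heads lets a
 * single loop walk both the bound and unbound object lists. A standalone
 * sketch with hypothetical names:
 *
 *	struct list_head *phases[] = { &list_a, &list_b, NULL }, **phase;
 *	struct item *it;
 *
 *	for (phase = phases; *phase; phase++)
 *		list_for_each_entry(it, *phase, link)
 *			do_something(it);
 */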
5437 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
5439 struct drm_i915_file_private *file_priv = file->driver_priv;
5440 struct i915_request *request;
5442 /* Clean up our request list when the client is going away, so that
5443 * later retire_requests won't dereference our soon-to-be-gone file_priv.
5446 spin_lock(&file_priv->mm.lock);
5447 list_for_each_entry(request, &file_priv->mm.request_list, client_link)
5448 request->file_priv = NULL;
5449 spin_unlock(&file_priv->mm.lock);
5452 int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
5454 struct drm_i915_file_private *file_priv;
5459 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
5463 file->driver_priv = file_priv;
5464 file_priv->dev_priv = i915;
5465 file_priv->file = file;
5467 spin_lock_init(&file_priv->mm.lock);
5468 INIT_LIST_HEAD(&file_priv->mm.request_list);
5470 file_priv->bsd_engine = -1;
5471 file_priv->hang_timestamp = jiffies;
5473 ret = i915_gem_context_open(i915, file);
5481 * i915_gem_track_fb - update frontbuffer tracking
5482 * @old: current GEM buffer for the frontbuffer slots
5483 * @new: new GEM buffer for the frontbuffer slots
5484 * @frontbuffer_bits: bitmask of frontbuffer slots
5486 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
5487 * from @old and setting them in @new. Both @old and @new can be NULL.
5489 void i915_gem_track_fb(struct drm_i915_gem_object *old,
5490 struct drm_i915_gem_object *new,
5491 unsigned frontbuffer_bits)
5493 /* Control of individual bits within the mask is guarded by
5494 * the owning plane->mutex, i.e. we can never see concurrent
5495 * manipulation of individual bits. But since the bitfield as a whole
5496 * is updated using RMW, we need to use atomics in order to update the bits.
5499 BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
5500 BITS_PER_TYPE(atomic_t));
5503 WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
5504 atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
5508 WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
5509 atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
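/*
 * Illustrative call site (hypothetical, not from this file): when a plane
 * flips from one framebuffer to another, the bits for that plane move from
 * the old backing object to the new one, e.g.
 *
 *	i915_gem_track_fb(old_fb_obj, new_fb_obj,
 *			  INTEL_FRONTBUFFER(pipe, plane_id));
 *
 * where the frontbuffer-bit macro and its arguments are assumptions about
 * the caller, and either object may be NULL for a plane being enabled or
 * disabled.
 */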
5513 /* Allocate a new GEM object and fill it with the supplied data */
5514 struct drm_i915_gem_object *
5515 i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
5516 const void *data, size_t size)
5518 struct drm_i915_gem_object *obj;
5523 obj = i915_gem_object_create(dev_priv, round_up(size, PAGE_SIZE));
5527 GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
5529 file = obj->base.filp;
5532 unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
5534 void *pgdata, *vaddr;
5536 err = pagecache_write_begin(file, file->f_mapping,
5543 memcpy(vaddr, data, len);
5546 err = pagecache_write_end(file, file->f_mapping,
5560 i915_gem_object_put(obj);
5561 return ERR_PTR(err);
5564 struct scatterlist *
5565 i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
5567 unsigned int *offset)
5569 struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
5570 struct scatterlist *sg;
5571 unsigned int idx, count;
5574 GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
5575 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
5577 /* As we iterate forward through the sg, we record each entry in a
5578 * radixtree for quick repeated (backwards) lookups. If we have seen
5579 * this index previously, we will have an entry for it.
5581 * Initial lookup is O(N), but this is amortized to O(1) for
5582 * sequential page access (where each new request is consecutive
5583 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
5584 * i.e. O(1) with a large constant!
5586 if (n < READ_ONCE(iter->sg_idx))
5589 mutex_lock(&iter->lock);
5591 /* We prefer to reuse the last sg so that repeated lookups of this
5592 * (or the subsequent) sg are fast - comparing against the last
5593 * sg is faster than going through the radixtree.
5598 count = __sg_page_count(sg);
5600 while (idx + count <= n) {
5605 /* If we cannot allocate and insert this entry, or the
5606 * individual pages from this range, cancel updating the
5607 * sg_idx so that on this lookup we are forced to linearly
5608 * scan onwards, but on future lookups we will try the
5609 * insertion again (in which case we need to be careful of
5610 * the error return reporting that we have already inserted the entry.)
5613 ret = radix_tree_insert(&iter->radix, idx, sg);
5614 if (ret && ret != -EEXIST)
5617 entry = xa_mk_value(idx);
5618 for (i = 1; i < count; i++) {
5619 ret = radix_tree_insert(&iter->radix, idx + i, entry);
5620 if (ret && ret != -EEXIST)
5625 sg = ____sg_next(sg);
5626 count = __sg_page_count(sg);
5633 mutex_unlock(&iter->lock);
5635 if (unlikely(n < idx)) /* insertion completed by another thread */
5638 /* In case we failed to insert the entry into the radixtree, we need
5639 * to look beyond the current sg.
5641 while (idx + count <= n) {
5643 sg = ____sg_next(sg);
5644 count = __sg_page_count(sg);
5653 sg = radix_tree_lookup(&iter->radix, n);
5656 /* If this index is in the middle of a multi-page sg entry,
5657 * the radix tree will contain a value entry that points
5658 * to the start of that range. We will return the pointer to
5659 * the base page and the offset of this page within the sg entry.
5663 if (unlikely(xa_is_value(sg))) {
5664 unsigned long base = xa_to_value(sg);
5666 sg = radix_tree_lookup(&iter->radix, base);
5678 i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
5680 struct scatterlist *sg;
5681 unsigned int offset;
5683 GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
5685 sg = i915_gem_object_get_sg(obj, n, &offset);
5686 return nth_page(sg_page(sg), offset);
5689 /* Like i915_gem_object_get_page(), but mark the returned page dirty */
5691 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
5696 page = i915_gem_object_get_page(obj, n);
5698 set_page_dirty(page);
5704 i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
5707 struct scatterlist *sg;
5708 unsigned int offset;
5710 sg = i915_gem_object_get_sg(obj, n, &offset);
5711 return sg_dma_address(sg) + (offset << PAGE_SHIFT);
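/*
 * Illustrative use of the lookup helpers above (not part of the driver):
 * walking every page of an object while its pages are pinned. Sequential
 * access like this hits the amortised-O(1) fast path described in
 * i915_gem_object_get_sg(); random access falls back to the radix tree.
 *
 *	unsigned long n, npages = obj->base.size >> PAGE_SHIFT;
 *
 *	for (n = 0; n < npages; n++) {
 *		struct page *page = i915_gem_object_get_page(obj, n);
 *		dma_addr_t addr = i915_gem_object_get_dma_address(obj, n);
 *
 *		// what is done with page/addr here is up to the
 *		// hypothetical caller
 *	}
 */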
5714 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
5716 struct sg_table *pages;
5719 if (align > obj->base.size)
5722 if (obj->ops == &i915_gem_phys_ops)
5725 if (obj->ops != &i915_gem_object_ops)
5728 err = i915_gem_object_unbind(obj);
5732 mutex_lock(&obj->mm.lock);
5734 if (obj->mm.madv != I915_MADV_WILLNEED) {
5739 if (obj->mm.quirked) {
5744 if (obj->mm.mapping) {
5749 pages = __i915_gem_object_unset_pages(obj);
5751 obj->ops = &i915_gem_phys_ops;
5753 err = ____i915_gem_object_get_pages(obj);
5757 /* Perma-pin (until release) the physical set of pages */
5758 __i915_gem_object_pin_pages(obj);
5760 if (!IS_ERR_OR_NULL(pages))
5761 i915_gem_object_ops.put_pages(obj, pages);
5762 mutex_unlock(&obj->mm.lock);
5766 obj->ops = &i915_gem_object_ops;
5767 if (!IS_ERR_OR_NULL(pages)) {
5768 unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);
5770 __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
5773 mutex_unlock(&obj->mm.lock);
5777 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
5778 #include "selftests/scatterlist.c"
5779 #include "selftests/mock_gem_device.c"
5780 #include "selftests/huge_gem_object.c"
5781 #include "selftests/huge_pages.c"
5782 #include "selftests/i915_gem_object.c"
5783 #include "selftests/i915_gem_coherency.c"
5784 #include "selftests/i915_gem.c"