/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

#include "i915_gem_clflush.h"
#include "i915_gemfs.h"
#include "i915_reset.h"
#include "i915_trace.h"
#include "i915_vgpu.h"

#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_mocs.h"
#include "intel_workarounds.h"
static void i915_gem_flush_free_objects(struct drm_i915_private *i915);

static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
		return true;

	return obj->pin_global; /* currently in use by HW, keep flushed */
}

static int
insert_mappable_node(struct i915_ggtt *ggtt,
		     struct drm_mm_node *node, u32 size)
{
	memset(node, 0, sizeof(*node));
	return drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
					   size, 0, I915_COLOR_UNEVICTABLE,
					   0, ggtt->mappable_end,
					   DRM_MM_INSERT_LOW);
}

static void
remove_mappable_node(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}
82 /* some bookkeeping */
83 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
86 spin_lock(&dev_priv->mm.object_stat_lock);
87 dev_priv->mm.object_count++;
88 dev_priv->mm.object_memory += size;
89 spin_unlock(&dev_priv->mm.object_stat_lock);
92 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
95 spin_lock(&dev_priv->mm.object_stat_lock);
96 dev_priv->mm.object_count--;
97 dev_priv->mm.object_memory -= size;
98 spin_unlock(&dev_priv->mm.object_stat_lock);
102 i915_gem_wait_for_error(struct i915_gpu_error *error)
	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long, something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
113 ret = wait_event_interruptible_timeout(error->reset_queue,
114 !i915_reset_backoff(error),
117 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
119 } else if (ret < 0) {
126 int i915_mutex_lock_interruptible(struct drm_device *dev)
128 struct drm_i915_private *dev_priv = to_i915(dev);
131 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
135 ret = mutex_lock_interruptible(&dev->struct_mutex);
142 static u32 __i915_gem_park(struct drm_i915_private *i915)
144 intel_wakeref_t wakeref;
148 lockdep_assert_held(&i915->drm.struct_mutex);
149 GEM_BUG_ON(i915->gt.active_requests);
150 GEM_BUG_ON(!list_empty(&i915->gt.active_rings));
153 return I915_EPOCH_INVALID;
155 GEM_BUG_ON(i915->gt.epoch == I915_EPOCH_INVALID);
158 * Be paranoid and flush a concurrent interrupt to make sure
159 * we don't reactivate any irq tasklets after parking.
161 * FIXME: Note that even though we have waited for execlists to be idle,
162 * there may still be an in-flight interrupt even though the CSB
163 * is now empty. synchronize_irq() makes sure that a residual interrupt
164 * is completed before we continue, but it doesn't prevent the HW from
165 * raising a spurious interrupt later. To complete the shield we should
166 * coordinate disabling the CS irq with flushing the interrupts.
168 synchronize_irq(i915->drm.irq);
170 intel_engines_park(i915);
171 i915_timelines_park(i915);
173 i915_pmu_gt_parked(i915);
174 i915_vma_parked(i915);
176 wakeref = fetch_and_zero(&i915->gt.awake);
177 GEM_BUG_ON(!wakeref);
179 if (INTEL_GEN(i915) >= 6)
182 intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);
184 return i915->gt.epoch;
187 void i915_gem_park(struct drm_i915_private *i915)
191 lockdep_assert_held(&i915->drm.struct_mutex);
192 GEM_BUG_ON(i915->gt.active_requests);
197 /* Defer the actual call to __i915_gem_park() to prevent ping-pongs */
198 mod_delayed_work(i915->wq, &i915->gt.idle_work, msecs_to_jiffies(100));
201 void i915_gem_unpark(struct drm_i915_private *i915)
205 lockdep_assert_held(&i915->drm.struct_mutex);
206 GEM_BUG_ON(!i915->gt.active_requests);
207 assert_rpm_wakelock_held(i915);
213 * It seems that the DMC likes to transition between the DC states a lot
214 * when there are no connected displays (no active power domains) during
215 * command submission.
	 * This activity has a negative impact on the performance of the chip,
	 * with huge latencies observed in the interrupt handler and elsewhere.
220 * Work around it by grabbing a GT IRQ power domain whilst there is any
221 * GT activity, preventing any DC state transitions.
223 i915->gt.awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
224 GEM_BUG_ON(!i915->gt.awake);
226 if (unlikely(++i915->gt.epoch == 0)) /* keep 0 as invalid */
229 intel_enable_gt_powersave(i915);
230 i915_update_gfx_val(i915);
231 if (INTEL_GEN(i915) >= 6)
233 i915_pmu_gt_unparked(i915);
235 intel_engines_unpark(i915);
237 i915_queue_hangcheck(i915);
239 queue_delayed_work(i915->wq,
240 &i915->gt.retire_work,
241 round_jiffies_up_relative(HZ));
245 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
246 struct drm_file *file)
248 struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
249 struct drm_i915_gem_get_aperture *args = data;
250 struct i915_vma *vma;
253 mutex_lock(&ggtt->vm.mutex);
255 pinned = ggtt->vm.reserved;
256 list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
257 if (i915_vma_is_pinned(vma))
258 pinned += vma->node.size;
260 mutex_unlock(&ggtt->vm.mutex);
262 args->aper_size = ggtt->vm.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}
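/*
 * Illustrative sketch (editor's addition, not part of this file): how
 * userspace might consume the get_aperture ioctl above.  Assumes a DRM fd
 * and the uapi header <drm/i915_drm.h>; error handling is reduced to the
 * return code.
 */
#if 0	/* userspace example, kept out of the kernel build */
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int example_query_aperture(int drm_fd, __u64 *total, __u64 *avail)
{
	struct drm_i915_gem_get_aperture arg = {};

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &arg))
		return -1;

	*total = arg.aper_size;			/* ggtt->vm.total */
	*avail = arg.aper_available_size;	/* total minus pinned vma */
	return 0;
}
#endif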
268 static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
270 struct address_space *mapping = obj->base.filp->f_mapping;
271 drm_dma_handle_t *phys;
273 struct scatterlist *sg;
278 if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
	/* Always aligning to the object size allows a single allocation
	 * to handle all possible callers, and given typical object sizes,
	 * the alignment of the buddy allocation will naturally match.
	 */
285 phys = drm_pci_alloc(obj->base.dev,
286 roundup_pow_of_two(obj->base.size),
287 roundup_pow_of_two(obj->base.size));
292 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
296 page = shmem_read_mapping_page(mapping, i);
302 src = kmap_atomic(page);
303 memcpy(vaddr, src, PAGE_SIZE);
304 drm_clflush_virt_range(vaddr, PAGE_SIZE);
311 i915_gem_chipset_flush(to_i915(obj->base.dev));
313 st = kmalloc(sizeof(*st), GFP_KERNEL);
319 if (sg_alloc_table(st, 1, GFP_KERNEL)) {
327 sg->length = obj->base.size;
329 sg_dma_address(sg) = phys->busaddr;
330 sg_dma_len(sg) = obj->base.size;
332 obj->phys_handle = phys;
334 __i915_gem_object_set_pages(obj, st, sg->length);
339 drm_pci_free(obj->base.dev, phys);
344 static void __start_cpu_write(struct drm_i915_gem_object *obj)
346 obj->read_domains = I915_GEM_DOMAIN_CPU;
347 obj->write_domain = I915_GEM_DOMAIN_CPU;
348 if (cpu_write_needs_clflush(obj))
349 obj->cache_dirty = true;
353 __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
354 struct sg_table *pages,
357 GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
359 if (obj->mm.madv == I915_MADV_DONTNEED)
360 obj->mm.dirty = false;
363 (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
364 !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
365 drm_clflush_sg(pages);
367 __start_cpu_write(obj);
371 i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
372 struct sg_table *pages)
374 __i915_gem_object_release_shmem(obj, pages, false);
377 struct address_space *mapping = obj->base.filp->f_mapping;
378 char *vaddr = obj->phys_handle->vaddr;
381 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
385 page = shmem_read_mapping_page(mapping, i);
389 dst = kmap_atomic(page);
390 drm_clflush_virt_range(vaddr, PAGE_SIZE);
391 memcpy(dst, vaddr, PAGE_SIZE);
394 set_page_dirty(page);
395 if (obj->mm.madv == I915_MADV_WILLNEED)
396 mark_page_accessed(page);
400 obj->mm.dirty = false;
403 sg_free_table(pages);
406 drm_pci_free(obj->base.dev, obj->phys_handle);
410 i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
412 i915_gem_object_unpin_pages(obj);
415 static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
416 .get_pages = i915_gem_object_get_pages_phys,
417 .put_pages = i915_gem_object_put_pages_phys,
418 .release = i915_gem_object_release_phys,
421 static const struct drm_i915_gem_object_ops i915_gem_object_ops;
423 int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
425 struct i915_vma *vma;
426 LIST_HEAD(still_in_list);
429 lockdep_assert_held(&obj->base.dev->struct_mutex);
431 /* Closed vma are removed from the obj->vma_list - but they may
432 * still have an active binding on the object. To remove those we
433 * must wait for all rendering to complete to the object (as unbinding
434 * must anyway), and retire the requests.
436 ret = i915_gem_object_set_to_cpu_domain(obj, false);
440 spin_lock(&obj->vma.lock);
441 while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
444 list_move_tail(&vma->obj_link, &still_in_list);
445 spin_unlock(&obj->vma.lock);
447 ret = i915_vma_unbind(vma);
449 spin_lock(&obj->vma.lock);
451 list_splice(&still_in_list, &obj->vma.list);
452 spin_unlock(&obj->vma.lock);
458 i915_gem_object_wait_fence(struct dma_fence *fence,
461 struct intel_rps_client *rps_client)
463 struct i915_request *rq;
465 BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);
467 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
470 if (!dma_fence_is_i915(fence))
471 return dma_fence_wait_timeout(fence,
472 flags & I915_WAIT_INTERRUPTIBLE,
475 rq = to_request(fence);
476 if (i915_request_completed(rq))
480 * This client is about to stall waiting for the GPU. In many cases
481 * this is undesirable and limits the throughput of the system, as
482 * many clients cannot continue processing user input/output whilst
483 * blocked. RPS autotuning may take tens of milliseconds to respond
484 * to the GPU load and thus incurs additional latency for the client.
485 * We can circumvent that by promoting the GPU frequency to maximum
486 * before we wait. This makes the GPU throttle up much more quickly
487 * (good for benchmarks and user experience, e.g. window animations),
488 * but at a cost of spending more power processing the workload
489 * (bad for battery). Not all clients even want their results
490 * immediately and for them we should just let the GPU select its own
491 * frequency to maximise efficiency. To prevent a single client from
492 * forcing the clocks too high for the whole system, we only allow
493 * each client to waitboost once in a busy period.
495 if (rps_client && !i915_request_started(rq)) {
496 if (INTEL_GEN(rq->i915) >= 6)
497 gen6_rps_boost(rq, rps_client);
500 timeout = i915_request_wait(rq, flags, timeout);
503 if (flags & I915_WAIT_LOCKED && i915_request_completed(rq))
504 i915_request_retire_upto(rq);
510 i915_gem_object_wait_reservation(struct reservation_object *resv,
513 struct intel_rps_client *rps_client)
515 unsigned int seq = __read_seqcount_begin(&resv->seq);
516 struct dma_fence *excl;
517 bool prune_fences = false;
519 if (flags & I915_WAIT_ALL) {
520 struct dma_fence **shared;
521 unsigned int count, i;
524 ret = reservation_object_get_fences_rcu(resv,
525 &excl, &count, &shared);
529 for (i = 0; i < count; i++) {
530 timeout = i915_gem_object_wait_fence(shared[i],
536 dma_fence_put(shared[i]);
539 for (; i < count; i++)
540 dma_fence_put(shared[i]);
		/*
		 * If both shared fences and an exclusive fence exist,
		 * then by construction the shared fences must be later
		 * than the exclusive fence. If we successfully wait for
		 * all the shared fences, we know that the exclusive fence
		 * must also be signaled. If all the shared fences are
		 * signaled, we can prune the array and recover the
		 * floating references on the fences/requests.
		 */
552 prune_fences = count && timeout >= 0;
554 excl = reservation_object_get_excl_rcu(resv);
557 if (excl && timeout >= 0)
558 timeout = i915_gem_object_wait_fence(excl, flags, timeout,
564 * Opportunistically prune the fences iff we know they have *all* been
565 * signaled and that the reservation object has not been changed (i.e.
566 * no new fences have been added).
568 if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) {
569 if (reservation_object_trylock(resv)) {
570 if (!__read_seqcount_retry(&resv->seq, seq))
571 reservation_object_add_excl_fence(resv, NULL);
572 reservation_object_unlock(resv);
579 static void __fence_set_priority(struct dma_fence *fence,
580 const struct i915_sched_attr *attr)
582 struct i915_request *rq;
583 struct intel_engine_cs *engine;
585 if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence))
588 rq = to_request(fence);
592 rcu_read_lock(); /* RCU serialisation for set-wedged protection */
593 if (engine->schedule)
594 engine->schedule(rq, attr);
596 local_bh_enable(); /* kick the tasklets if queues were reprioritised */
599 static void fence_set_priority(struct dma_fence *fence,
600 const struct i915_sched_attr *attr)
602 /* Recurse once into a fence-array */
603 if (dma_fence_is_array(fence)) {
604 struct dma_fence_array *array = to_dma_fence_array(fence);
607 for (i = 0; i < array->num_fences; i++)
608 __fence_set_priority(array->fences[i], attr);
610 __fence_set_priority(fence, attr);
615 i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
617 const struct i915_sched_attr *attr)
619 struct dma_fence *excl;
621 if (flags & I915_WAIT_ALL) {
622 struct dma_fence **shared;
623 unsigned int count, i;
626 ret = reservation_object_get_fences_rcu(obj->resv,
627 &excl, &count, &shared);
631 for (i = 0; i < count; i++) {
632 fence_set_priority(shared[i], attr);
633 dma_fence_put(shared[i]);
638 excl = reservation_object_get_excl_rcu(obj->resv);
642 fence_set_priority(excl, attr);
649 * Waits for rendering to the object to be completed
650 * @obj: i915 gem object
651 * @flags: how to wait (under a lock, for all rendering or just for writes etc)
652 * @timeout: how long to wait
653 * @rps_client: client (user process) to charge for any waitboosting
656 i915_gem_object_wait(struct drm_i915_gem_object *obj,
659 struct intel_rps_client *rps_client)
662 GEM_BUG_ON(timeout < 0);
664 timeout = i915_gem_object_wait_reservation(obj->resv,
667 return timeout < 0 ? timeout : 0;
670 static struct intel_rps_client *to_rps_client(struct drm_file *file)
672 struct drm_i915_file_private *fpriv = file->driver_priv;
674 return &fpriv->rps_client;
678 i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
679 struct drm_i915_gem_pwrite *args,
680 struct drm_file *file)
682 void *vaddr = obj->phys_handle->vaddr + args->offset;
683 char __user *user_data = u64_to_user_ptr(args->data_ptr);
685 /* We manually control the domain here and pretend that it
686 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
688 intel_fb_obj_invalidate(obj, ORIGIN_CPU);
689 if (copy_from_user(vaddr, user_data, args->size))
692 drm_clflush_virt_range(vaddr, args->size);
693 i915_gem_chipset_flush(to_i915(obj->base.dev));
695 intel_fb_obj_flush(obj, ORIGIN_CPU);
699 void *i915_gem_object_alloc(struct drm_i915_private *dev_priv)
701 return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
704 void i915_gem_object_free(struct drm_i915_gem_object *obj)
706 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
707 kmem_cache_free(dev_priv->objects, obj);
711 i915_gem_create(struct drm_file *file,
712 struct drm_i915_private *dev_priv,
716 struct drm_i915_gem_object *obj;
720 size = roundup(size, PAGE_SIZE);
724 /* Allocate the new object */
725 obj = i915_gem_object_create(dev_priv, size);
729 ret = drm_gem_handle_create(file, &obj->base, &handle);
730 /* drop reference from allocate - handle holds it now */
731 i915_gem_object_put(obj);
740 i915_gem_dumb_create(struct drm_file *file,
741 struct drm_device *dev,
742 struct drm_mode_create_dumb *args)
744 /* have to work out size/pitch and return them */
745 args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
746 args->size = args->pitch * args->height;
	return i915_gem_create(file, to_i915(dev),
			       args->size, &args->handle);
}
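/*
 * Worked example (editor's addition): for a 1920x1080 dumb buffer at
 * 32 bpp, DIV_ROUND_UP(32, 8) = 4 bytes per pixel, so
 *   pitch = ALIGN(1920 * 4, 64) = 7680 bytes
 *   size  = 7680 * 1080       = 8294400 bytes
 * and i915_gem_create() then rounds the size up to PAGE_SIZE before the
 * object is allocated.
 */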
751 static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
753 return !(obj->cache_level == I915_CACHE_NONE ||
754 obj->cache_level == I915_CACHE_WT);
758 * Creates a new mm object and returns a handle to it.
759 * @dev: drm device pointer
760 * @data: ioctl data blob
761 * @file: drm file pointer
764 i915_gem_create_ioctl(struct drm_device *dev, void *data,
765 struct drm_file *file)
767 struct drm_i915_private *dev_priv = to_i915(dev);
768 struct drm_i915_gem_create *args = data;
770 i915_gem_flush_free_objects(dev_priv);
772 return i915_gem_create(file, dev_priv,
773 args->size, &args->handle);
776 static inline enum fb_op_origin
777 fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
779 return (domain == I915_GEM_DOMAIN_GTT ?
780 obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
783 void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
785 intel_wakeref_t wakeref;
788 * No actual flushing is required for the GTT write domain for reads
789 * from the GTT domain. Writes to it "immediately" go to main memory
790 * as far as we know, so there's no chipset flush. It also doesn't
791 * land in the GPU render cache.
793 * However, we do have to enforce the order so that all writes through
794 * the GTT land before any writes to the device, such as updates to
797 * We also have to wait a bit for the writes to land from the GTT.
798 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
799 * timing. This issue has only been observed when switching quickly
800 * between GTT writes and CPU reads from inside the kernel on recent hw,
801 * and it appears to only affect discrete GTT blocks (i.e. on LLC
802 * system agents we cannot reproduce this behaviour, until Cannonlake
808 if (INTEL_INFO(dev_priv)->has_coherent_ggtt)
811 i915_gem_chipset_flush(dev_priv);
813 with_intel_runtime_pm(dev_priv, wakeref) {
814 spin_lock_irq(&dev_priv->uncore.lock);
816 POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE));
818 spin_unlock_irq(&dev_priv->uncore.lock);
823 flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
825 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
826 struct i915_vma *vma;
828 if (!(obj->write_domain & flush_domains))
831 switch (obj->write_domain) {
832 case I915_GEM_DOMAIN_GTT:
833 i915_gem_flush_ggtt_writes(dev_priv);
835 intel_fb_obj_flush(obj,
836 fb_write_origin(obj, I915_GEM_DOMAIN_GTT));
838 for_each_ggtt_vma(vma, obj) {
842 i915_vma_unset_ggtt_write(vma);
846 case I915_GEM_DOMAIN_WC:
850 case I915_GEM_DOMAIN_CPU:
851 i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
854 case I915_GEM_DOMAIN_RENDER:
855 if (gpu_write_needs_clflush(obj))
856 obj->cache_dirty = true;
860 obj->write_domain = 0;
864 * Pins the specified object's pages and synchronizes the object with
865 * GPU accesses. Sets needs_clflush to non-zero if the caller should
866 * flush the object from the CPU cache.
868 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
869 unsigned int *needs_clflush)
873 lockdep_assert_held(&obj->base.dev->struct_mutex);
876 if (!i915_gem_object_has_struct_page(obj))
879 ret = i915_gem_object_wait(obj,
880 I915_WAIT_INTERRUPTIBLE |
882 MAX_SCHEDULE_TIMEOUT,
887 ret = i915_gem_object_pin_pages(obj);
891 if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ ||
892 !static_cpu_has(X86_FEATURE_CLFLUSH)) {
893 ret = i915_gem_object_set_to_cpu_domain(obj, false);
900 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
902 /* If we're not in the cpu read domain, set ourself into the gtt
903 * read domain and manually flush cachelines (if required). This
904 * optimizes for the case when the gpu will dirty the data
905 * anyway again before the next pread happens.
907 if (!obj->cache_dirty &&
908 !(obj->read_domains & I915_GEM_DOMAIN_CPU))
909 *needs_clflush = CLFLUSH_BEFORE;
912 /* return with the pages pinned */
916 i915_gem_object_unpin_pages(obj);
920 int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
921 unsigned int *needs_clflush)
925 lockdep_assert_held(&obj->base.dev->struct_mutex);
928 if (!i915_gem_object_has_struct_page(obj))
931 ret = i915_gem_object_wait(obj,
932 I915_WAIT_INTERRUPTIBLE |
935 MAX_SCHEDULE_TIMEOUT,
940 ret = i915_gem_object_pin_pages(obj);
944 if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE ||
945 !static_cpu_has(X86_FEATURE_CLFLUSH)) {
946 ret = i915_gem_object_set_to_cpu_domain(obj, true);
953 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
955 /* If we're not in the cpu write domain, set ourself into the
956 * gtt write domain and manually flush cachelines (as required).
957 * This optimizes for the case when the gpu will use the data
958 * right away and we therefore have to clflush anyway.
960 if (!obj->cache_dirty) {
961 *needs_clflush |= CLFLUSH_AFTER;
964 * Same trick applies to invalidate partially written
965 * cachelines read before writing.
967 if (!(obj->read_domains & I915_GEM_DOMAIN_CPU))
968 *needs_clflush |= CLFLUSH_BEFORE;
972 intel_fb_obj_invalidate(obj, ORIGIN_CPU);
973 obj->mm.dirty = true;
974 /* return with the pages pinned */
978 i915_gem_object_unpin_pages(obj);
983 shmem_pread(struct page *page, int offset, int len, char __user *user_data,
992 drm_clflush_virt_range(vaddr + offset, len);
994 ret = __copy_to_user(user_data, vaddr + offset, len);
998 return ret ? -EFAULT : 0;
1002 i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
1003 struct drm_i915_gem_pread *args)
1005 char __user *user_data;
1007 unsigned int needs_clflush;
1008 unsigned int idx, offset;
1011 ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
1015 ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
1016 mutex_unlock(&obj->base.dev->struct_mutex);
1020 remain = args->size;
1021 user_data = u64_to_user_ptr(args->data_ptr);
1022 offset = offset_in_page(args->offset);
1023 for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
1024 struct page *page = i915_gem_object_get_page(obj, idx);
1025 unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
1027 ret = shmem_pread(page, offset, length, user_data,
1033 user_data += length;
1037 i915_gem_obj_finish_shmem_access(obj);
1042 gtt_user_read(struct io_mapping *mapping,
1043 loff_t base, int offset,
1044 char __user *user_data, int length)
1046 void __iomem *vaddr;
1047 unsigned long unwritten;
1049 /* We can use the cpu mem copy function because this is X86. */
1050 vaddr = io_mapping_map_atomic_wc(mapping, base);
1051 unwritten = __copy_to_user_inatomic(user_data,
1052 (void __force *)vaddr + offset,
1054 io_mapping_unmap_atomic(vaddr);
1056 vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
1057 unwritten = copy_to_user(user_data,
1058 (void __force *)vaddr + offset,
1060 io_mapping_unmap(vaddr);
1066 i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
1067 const struct drm_i915_gem_pread *args)
1069 struct drm_i915_private *i915 = to_i915(obj->base.dev);
1070 struct i915_ggtt *ggtt = &i915->ggtt;
1071 intel_wakeref_t wakeref;
1072 struct drm_mm_node node;
1073 struct i915_vma *vma;
1074 void __user *user_data;
1078 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1082 wakeref = intel_runtime_pm_get(i915);
1083 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
1088 node.start = i915_ggtt_offset(vma);
1089 node.allocated = false;
1090 ret = i915_vma_put_fence(vma);
1092 i915_vma_unpin(vma);
1097 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
1100 GEM_BUG_ON(!node.allocated);
1103 ret = i915_gem_object_set_to_gtt_domain(obj, false);
1107 mutex_unlock(&i915->drm.struct_mutex);
1109 user_data = u64_to_user_ptr(args->data_ptr);
1110 remain = args->size;
1111 offset = args->offset;
1113 while (remain > 0) {
1114 /* Operation in this page
1116 * page_base = page offset within aperture
1117 * page_offset = offset within page
1118 * page_length = bytes to copy for this page
1120 u32 page_base = node.start;
1121 unsigned page_offset = offset_in_page(offset);
1122 unsigned page_length = PAGE_SIZE - page_offset;
1123 page_length = remain < page_length ? remain : page_length;
1124 if (node.allocated) {
1126 ggtt->vm.insert_page(&ggtt->vm,
1127 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
1128 node.start, I915_CACHE_NONE, 0);
1131 page_base += offset & PAGE_MASK;
1134 if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
1135 user_data, page_length)) {
1140 remain -= page_length;
1141 user_data += page_length;
1142 offset += page_length;
1145 mutex_lock(&i915->drm.struct_mutex);
1147 if (node.allocated) {
1149 ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
1150 remove_mappable_node(&node);
1152 i915_vma_unpin(vma);
1155 intel_runtime_pm_put(i915, wakeref);
1156 mutex_unlock(&i915->drm.struct_mutex);
1162 * Reads data from the object referenced by handle.
1163 * @dev: drm device pointer
1164 * @data: ioctl data blob
1165 * @file: drm file pointer
1167 * On error, the contents of *data are undefined.
1170 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
1171 struct drm_file *file)
1173 struct drm_i915_gem_pread *args = data;
1174 struct drm_i915_gem_object *obj;
1177 if (args->size == 0)
1180 if (!access_ok(u64_to_user_ptr(args->data_ptr),
1184 obj = i915_gem_object_lookup(file, args->handle);
1188 /* Bounds check source. */
1189 if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
1194 trace_i915_gem_object_pread(obj, args->offset, args->size);
1196 ret = i915_gem_object_wait(obj,
1197 I915_WAIT_INTERRUPTIBLE,
1198 MAX_SCHEDULE_TIMEOUT,
1199 to_rps_client(file));
1203 ret = i915_gem_object_pin_pages(obj);
1207 ret = i915_gem_shmem_pread(obj, args);
1208 if (ret == -EFAULT || ret == -ENODEV)
1209 ret = i915_gem_gtt_pread(obj, args);
1211 i915_gem_object_unpin_pages(obj);
1213 i915_gem_object_put(obj);
1217 /* This is the fast write path which cannot handle
1218 * page faults in the source data
1222 ggtt_write(struct io_mapping *mapping,
1223 loff_t base, int offset,
1224 char __user *user_data, int length)
1226 void __iomem *vaddr;
1227 unsigned long unwritten;
1229 /* We can use the cpu mem copy function because this is X86. */
1230 vaddr = io_mapping_map_atomic_wc(mapping, base);
1231 unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
1233 io_mapping_unmap_atomic(vaddr);
1235 vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
1236 unwritten = copy_from_user((void __force *)vaddr + offset,
1238 io_mapping_unmap(vaddr);
1245 * This is the fast pwrite path, where we copy the data directly from the
1246 * user into the GTT, uncached.
1247 * @obj: i915 GEM object
1248 * @args: pwrite arguments structure
1251 i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
1252 const struct drm_i915_gem_pwrite *args)
1254 struct drm_i915_private *i915 = to_i915(obj->base.dev);
1255 struct i915_ggtt *ggtt = &i915->ggtt;
1256 intel_wakeref_t wakeref;
1257 struct drm_mm_node node;
1258 struct i915_vma *vma;
1260 void __user *user_data;
1263 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1267 if (i915_gem_object_has_struct_page(obj)) {
1269 * Avoid waking the device up if we can fallback, as
1270 * waking/resuming is very slow (worst-case 10-100 ms
1271 * depending on PCI sleeps and our own resume time).
1272 * This easily dwarfs any performance advantage from
1273 * using the cache bypass of indirect GGTT access.
1275 wakeref = intel_runtime_pm_get_if_in_use(i915);
1281 /* No backing pages, no fallback, we must force GGTT access */
1282 wakeref = intel_runtime_pm_get(i915);
1285 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
1290 node.start = i915_ggtt_offset(vma);
1291 node.allocated = false;
1292 ret = i915_vma_put_fence(vma);
1294 i915_vma_unpin(vma);
1299 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
1302 GEM_BUG_ON(!node.allocated);
1305 ret = i915_gem_object_set_to_gtt_domain(obj, true);
1309 mutex_unlock(&i915->drm.struct_mutex);
1311 intel_fb_obj_invalidate(obj, ORIGIN_CPU);
1313 user_data = u64_to_user_ptr(args->data_ptr);
1314 offset = args->offset;
1315 remain = args->size;
1317 /* Operation in this page
1319 * page_base = page offset within aperture
1320 * page_offset = offset within page
1321 * page_length = bytes to copy for this page
1323 u32 page_base = node.start;
1324 unsigned int page_offset = offset_in_page(offset);
1325 unsigned int page_length = PAGE_SIZE - page_offset;
1326 page_length = remain < page_length ? remain : page_length;
1327 if (node.allocated) {
1328 wmb(); /* flush the write before we modify the GGTT */
1329 ggtt->vm.insert_page(&ggtt->vm,
1330 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
1331 node.start, I915_CACHE_NONE, 0);
1332 wmb(); /* flush modifications to the GGTT (insert_page) */
1334 page_base += offset & PAGE_MASK;
1336 /* If we get a fault while copying data, then (presumably) our
1337 * source page isn't available. Return the error and we'll
1338 * retry in the slow path.
1339 * If the object is non-shmem backed, we retry again with the
1340 * path that handles page fault.
1342 if (ggtt_write(&ggtt->iomap, page_base, page_offset,
1343 user_data, page_length)) {
1348 remain -= page_length;
1349 user_data += page_length;
1350 offset += page_length;
1352 intel_fb_obj_flush(obj, ORIGIN_CPU);
1354 mutex_lock(&i915->drm.struct_mutex);
1356 if (node.allocated) {
1358 ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
1359 remove_mappable_node(&node);
1361 i915_vma_unpin(vma);
1364 intel_runtime_pm_put(i915, wakeref);
1366 mutex_unlock(&i915->drm.struct_mutex);
1370 /* Per-page copy function for the shmem pwrite fastpath.
1371 * Flushes invalid cachelines before writing to the target if
1372 * needs_clflush_before is set and flushes out any written cachelines after
1373 * writing if needs_clflush is set.
1376 shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
1377 bool needs_clflush_before,
1378 bool needs_clflush_after)
1385 if (needs_clflush_before)
1386 drm_clflush_virt_range(vaddr + offset, len);
1388 ret = __copy_from_user(vaddr + offset, user_data, len);
1389 if (!ret && needs_clflush_after)
1390 drm_clflush_virt_range(vaddr + offset, len);
1394 return ret ? -EFAULT : 0;
1398 i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
1399 const struct drm_i915_gem_pwrite *args)
1401 struct drm_i915_private *i915 = to_i915(obj->base.dev);
1402 void __user *user_data;
1404 unsigned int partial_cacheline_write;
1405 unsigned int needs_clflush;
1406 unsigned int offset, idx;
1409 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1413 ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
1414 mutex_unlock(&i915->drm.struct_mutex);
	/* If we don't overwrite a cacheline completely we need to be
	 * careful to have up-to-date data by first clflushing. Don't
	 * overcomplicate things and flush the entire page.
	 */
1422 partial_cacheline_write = 0;
1423 if (needs_clflush & CLFLUSH_BEFORE)
1424 partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
1426 user_data = u64_to_user_ptr(args->data_ptr);
1427 remain = args->size;
1428 offset = offset_in_page(args->offset);
1429 for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
1430 struct page *page = i915_gem_object_get_page(obj, idx);
1431 unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
1433 ret = shmem_pwrite(page, offset, length, user_data,
1434 (offset | length) & partial_cacheline_write,
1435 needs_clflush & CLFLUSH_AFTER);
1440 user_data += length;
1444 intel_fb_obj_flush(obj, ORIGIN_CPU);
	i915_gem_obj_finish_shmem_access(obj);
	return ret;
}
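/*
 * Worked example (editor's addition): with a 64-byte cacheline,
 * boot_cpu_data.x86_clflush_size - 1 = 63, so partial_cacheline_write is
 * the mask 0x3f.  A copy with offset = 128 and length = 256 has
 * (offset | length) & 0x3f == 0 and needs no pre-flush, whereas
 * offset = 130 or length = 100 leaves low bits set, marking the copy as
 * touching a partially written cacheline that must be clflushed first.
 */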
1450 * Writes data to the object referenced by handle.
1452 * @data: ioctl data blob
1455 * On error, the contents of the buffer that were to be modified are undefined.
1458 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1459 struct drm_file *file)
1461 struct drm_i915_gem_pwrite *args = data;
1462 struct drm_i915_gem_object *obj;
1465 if (args->size == 0)
1468 if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
1471 obj = i915_gem_object_lookup(file, args->handle);
1475 /* Bounds check destination. */
1476 if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
1481 /* Writes not allowed into this read-only object */
1482 if (i915_gem_object_is_readonly(obj)) {
1487 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
1490 if (obj->ops->pwrite)
1491 ret = obj->ops->pwrite(obj, args);
1495 ret = i915_gem_object_wait(obj,
1496 I915_WAIT_INTERRUPTIBLE |
1498 MAX_SCHEDULE_TIMEOUT,
1499 to_rps_client(file));
1503 ret = i915_gem_object_pin_pages(obj);
1508 /* We can only do the GTT pwrite on untiled buffers, as otherwise
1509 * it would end up going through the fenced access, and we'll get
1510 * different detiling behavior between reading and writing.
1511 * pread/pwrite currently are reading and writing from the CPU
1512 * perspective, requiring manual detiling by the client.
1514 if (!i915_gem_object_has_struct_page(obj) ||
1515 cpu_write_needs_clflush(obj))
1516 /* Note that the gtt paths might fail with non-page-backed user
1517 * pointers (e.g. gtt mappings when moving data between
1518 * textures). Fallback to the shmem path in that case.
1520 ret = i915_gem_gtt_pwrite_fast(obj, args);
1522 if (ret == -EFAULT || ret == -ENOSPC) {
1523 if (obj->phys_handle)
1524 ret = i915_gem_phys_pwrite(obj, args, file);
1526 ret = i915_gem_shmem_pwrite(obj, args);
1529 i915_gem_object_unpin_pages(obj);
1531 i915_gem_object_put(obj);
1535 static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
1537 struct drm_i915_private *i915 = to_i915(obj->base.dev);
1538 struct list_head *list;
1539 struct i915_vma *vma;
1541 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
1543 mutex_lock(&i915->ggtt.vm.mutex);
1544 for_each_ggtt_vma(vma, obj) {
1545 if (!drm_mm_node_allocated(&vma->node))
1548 list_move_tail(&vma->vm_link, &vma->vm->bound_list);
1550 mutex_unlock(&i915->ggtt.vm.mutex);
1552 spin_lock(&i915->mm.obj_lock);
1553 list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
1554 list_move_tail(&obj->mm.link, list);
1555 spin_unlock(&i915->mm.obj_lock);
1559 * Called when user space prepares to use an object with the CPU, either
1560 * through the mmap ioctl's mapping or a GTT mapping.
1562 * @data: ioctl data blob
1566 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1567 struct drm_file *file)
1569 struct drm_i915_gem_set_domain *args = data;
1570 struct drm_i915_gem_object *obj;
1571 u32 read_domains = args->read_domains;
1572 u32 write_domain = args->write_domain;
1575 /* Only handle setting domains to types used by the CPU. */
1576 if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
1579 /* Having something in the write domain implies it's in the read
1580 * domain, and only that read domain. Enforce that in the request.
1582 if (write_domain != 0 && read_domains != write_domain)
1585 obj = i915_gem_object_lookup(file, args->handle);
1589 /* Try to flush the object off the GPU without holding the lock.
1590 * We will repeat the flush holding the lock in the normal manner
1591 * to catch cases where we are gazumped.
1593 err = i915_gem_object_wait(obj,
1594 I915_WAIT_INTERRUPTIBLE |
1595 I915_WAIT_PRIORITY |
1596 (write_domain ? I915_WAIT_ALL : 0),
1597 MAX_SCHEDULE_TIMEOUT,
1598 to_rps_client(file));
1603 * Proxy objects do not control access to the backing storage, ergo
1604 * they cannot be used as a means to manipulate the cache domain
1605 * tracking for that backing storage. The proxy object is always
1606 * considered to be outside of any cache domain.
1608 if (i915_gem_object_is_proxy(obj)) {
1614 * Flush and acquire obj->pages so that we are coherent through
1615 * direct access in memory with previous cached writes through
1616 * shmemfs and that our cache domain tracking remains valid.
1617 * For example, if the obj->filp was moved to swap without us
1618 * being notified and releasing the pages, we would mistakenly
1619 * continue to assume that the obj remained out of the CPU cached
1622 err = i915_gem_object_pin_pages(obj);
1626 err = i915_mutex_lock_interruptible(dev);
1630 if (read_domains & I915_GEM_DOMAIN_WC)
1631 err = i915_gem_object_set_to_wc_domain(obj, write_domain);
1632 else if (read_domains & I915_GEM_DOMAIN_GTT)
1633 err = i915_gem_object_set_to_gtt_domain(obj, write_domain);
1635 err = i915_gem_object_set_to_cpu_domain(obj, write_domain);
1637 /* And bump the LRU for this access */
1638 i915_gem_object_bump_inactive_ggtt(obj);
1640 mutex_unlock(&dev->struct_mutex);
1642 if (write_domain != 0)
1643 intel_fb_obj_invalidate(obj,
1644 fb_write_origin(obj, write_domain));
1647 i915_gem_object_unpin_pages(obj);
1649 i915_gem_object_put(obj);
1654 * Called when user space has done writes to this buffer
1656 * @data: ioctl data blob
1660 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1661 struct drm_file *file)
1663 struct drm_i915_gem_sw_finish *args = data;
1664 struct drm_i915_gem_object *obj;
1666 obj = i915_gem_object_lookup(file, args->handle);
1671 * Proxy objects are barred from CPU access, so there is no
1672 * need to ban sw_finish as it is a nop.
1675 /* Pinned buffers may be scanout, so flush the cache */
1676 i915_gem_object_flush_if_display(obj);
1677 i915_gem_object_put(obj);
1683 __vma_matches(struct vm_area_struct *vma, struct file *filp,
1684 unsigned long addr, unsigned long size)
1686 if (vma->vm_file != filp)
1689 return vma->vm_start == addr && (vma->vm_end - vma->vm_start) == size;
1693 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
1696 * @data: ioctl data blob
1699 * While the mapping holds a reference on the contents of the object, it doesn't
1700 * imply a ref on the object itself.
 *
 * IMPORTANT:
 *
 * DRM driver writers who look at this function as an example for how to do GEM
 * mmap support, please don't implement mmap support like here. The modern way
 * to implement DRM mmap support is with an mmap offset ioctl (like
 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
 * That way debug tooling like valgrind will understand what's going on; hiding
 * the mmap call in a driver private ioctl will break that. The i915 driver only
 * does cpu mmaps this way because we didn't know better.
1713 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1714 struct drm_file *file)
1716 struct drm_i915_gem_mmap *args = data;
1717 struct drm_i915_gem_object *obj;
1720 if (args->flags & ~(I915_MMAP_WC))
1723 if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
1726 obj = i915_gem_object_lookup(file, args->handle);
1730 /* prime objects have no backing filp to GEM mmap
1733 if (!obj->base.filp) {
1734 i915_gem_object_put(obj);
1738 addr = vm_mmap(obj->base.filp, 0, args->size,
1739 PROT_READ | PROT_WRITE, MAP_SHARED,
1741 if (args->flags & I915_MMAP_WC) {
1742 struct mm_struct *mm = current->mm;
1743 struct vm_area_struct *vma;
1745 if (down_write_killable(&mm->mmap_sem)) {
1746 i915_gem_object_put(obj);
1749 vma = find_vma(mm, addr);
1750 if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
1752 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1755 up_write(&mm->mmap_sem);
1757 /* This may race, but that's ok, it only gets set */
1758 WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
1760 i915_gem_object_put(obj);
1761 if (IS_ERR((void *)addr))
	args->addr_ptr = (u64)addr;

	return 0;
}
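/*
 * Illustrative sketch (editor's addition, not part of this file): the legacy
 * CPU mmap path discussed above, as seen from userspace.  Assumes a DRM fd
 * and a GEM handle from a prior create ioctl.
 */
#if 0	/* userspace example, kept out of the kernel build */
#include <stddef.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static void *example_cpu_mmap(int drm_fd, __u32 handle, __u64 size)
{
	struct drm_i915_gem_mmap arg = {
		.handle = handle,
		.size = size,
		.flags = I915_MMAP_WC,	/* requires PAT, see the check above */
	};

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP, &arg))
		return NULL;

	/* The kernel has already performed vm_mmap() on our behalf. */
	return (void *)(unsigned long)arg.addr_ptr;
}
#endif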
1769 static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
1771 return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
1775 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
 *
 * A history of the GTT mmap interface:
 *
 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
 *     be aligned and suitable for fencing, and still fit into the available
 *     mappable space left by the pinned display objects. A classic problem
 *     we called the page-fault-of-doom where we would ping-pong between
 *     two objects that could not fit inside the GTT and so the memcpy
 *     would page one object in at the expense of the other between every
 *     access.
 *
 * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
 *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
 *     object is too large for the available space (or simply too large
 *     for the mappable aperture!), a view is created instead and faulted
 *     into userspace. (This view is aligned and sized appropriately for
 *     fenced access.)
 *
 * 2 - Recognise WC as a separate cache domain so that we can flush the
 *     delayed writes via GTT before performing direct access via WC.
 *
 * Restrictions:
 *
 *  * snoopable objects cannot be accessed via the GTT. It can cause machine
 *    hangs on some architectures, corruption on others. An attempt to service
 *    a GTT page fault from a snoopable object will generate a SIGBUS.
 *
 *  * the object must be able to fit into RAM (physical memory, though not
 *    limited to the mappable aperture).
 *
 *  * a new GTT page fault will synchronize rendering from the GPU and flush
 *    all data to system memory. Subsequent access will not be synchronized.
 *
 *  * all mappings are revoked on runtime device suspend.
 *
 *  * there are only 8, 16 or 32 fence registers to share between all users
 *    (older machines require fence register for display and blitter access
 *    as well). Contention of the fence registers will cause the previous users
 *    to be unmapped and any new access will generate new page faults.
 *
 *  * running out of memory while servicing a fault may generate a SIGBUS,
 *    rather than the expected SIGSEGV.
 */
1822 int i915_gem_mmap_gtt_version(void)
1827 static inline struct i915_ggtt_view
1828 compute_partial_view(const struct drm_i915_gem_object *obj,
1829 pgoff_t page_offset,
1832 struct i915_ggtt_view view;
1834 if (i915_gem_object_is_tiled(obj))
1835 chunk = roundup(chunk, tile_row_pages(obj));
1837 view.type = I915_GGTT_VIEW_PARTIAL;
1838 view.partial.offset = rounddown(page_offset, chunk);
1840 min_t(unsigned int, chunk,
1841 (obj->base.size >> PAGE_SHIFT) - view.partial.offset);
1843 /* If the partial covers the entire object, just create a normal VMA. */
1844 if (chunk >= obj->base.size >> PAGE_SHIFT)
		view.type = I915_GGTT_VIEW_NORMAL;

	return view;
}
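/*
 * Worked example (editor's addition): with MIN_CHUNK_PAGES = SZ_1M >>
 * PAGE_SHIFT = 256 pages (4K pages), a fault at page_offset = 1000 in an
 * untiled 8 MiB (2048 page) object yields
 *   view.partial.offset = rounddown(1000, 256)   = 768
 *   view.partial.size   = min(256, 2048 - 768)   = 256
 * i.e. a 1 MiB window around the faulting page is bound instead of the
 * whole object, since 256 < 2048 keeps the PARTIAL view type.
 */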
1851 * i915_gem_fault - fault a page into the GTT
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1855 * from userspace. The fault handler takes care of binding the object to
1856 * the GTT (if needed), allocating and programming a fence register (again,
1857 * only if needed based on whether the old reg is still valid or the object
1858 * is tiled) and inserting a new PTE into the faulting process.
1860 * Note that the faulting process may involve evicting existing objects
1861 * from the GTT and/or fence registers to make room. So performance may
1862 * suffer if the GTT working set is large or there are few fence registers
1865 * The current feature set supported by i915_gem_fault() and thus GTT mmaps
1866 * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
1868 vm_fault_t i915_gem_fault(struct vm_fault *vmf)
1870 #define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
1871 struct vm_area_struct *area = vmf->vma;
1872 struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
1873 struct drm_device *dev = obj->base.dev;
1874 struct drm_i915_private *dev_priv = to_i915(dev);
1875 struct i915_ggtt *ggtt = &dev_priv->ggtt;
1876 bool write = area->vm_flags & VM_WRITE;
1877 intel_wakeref_t wakeref;
1878 struct i915_vma *vma;
1879 pgoff_t page_offset;
1882 /* Sanity check that we allow writing into this object */
1883 if (i915_gem_object_is_readonly(obj) && write)
1884 return VM_FAULT_SIGBUS;
1886 /* We don't use vmf->pgoff since that has the fake offset */
1887 page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
1889 trace_i915_gem_object_fault(obj, page_offset, true, write);
1891 /* Try to flush the object off the GPU first without holding the lock.
1892 * Upon acquiring the lock, we will perform our sanity checks and then
1893 * repeat the flush holding the lock in the normal manner to catch cases
1894 * where we are gazumped.
1896 ret = i915_gem_object_wait(obj,
1897 I915_WAIT_INTERRUPTIBLE,
1898 MAX_SCHEDULE_TIMEOUT,
1903 ret = i915_gem_object_pin_pages(obj);
1907 wakeref = intel_runtime_pm_get(dev_priv);
1909 ret = i915_mutex_lock_interruptible(dev);
1913 /* Access to snoopable pages through the GTT is incoherent. */
1914 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
1920 /* Now pin it into the GTT as needed */
1921 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
1926 /* Use a partial view if it is bigger than available space */
1927 struct i915_ggtt_view view =
1928 compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
1931 flags = PIN_MAPPABLE;
1932 if (view.type == I915_GGTT_VIEW_NORMAL)
1933 flags |= PIN_NONBLOCK; /* avoid warnings for pinned */
1936 * Userspace is now writing through an untracked VMA, abandon
1937 * all hope that the hardware is able to track future writes.
1939 obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
1941 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
1942 if (IS_ERR(vma) && !view.type) {
1943 flags = PIN_MAPPABLE;
1944 view.type = I915_GGTT_VIEW_PARTIAL;
1945 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
1953 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1957 ret = i915_vma_pin_fence(vma);
1961 /* Finally, remap it using the new GTT offset */
1962 ret = remap_io_mapping(area,
1963 area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
1964 (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
1965 min_t(u64, vma->size, area->vm_end - area->vm_start),
1970 /* Mark as being mmapped into userspace for later revocation */
1971 assert_rpm_wakelock_held(dev_priv);
1972 if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
1973 list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
1974 GEM_BUG_ON(!obj->userfault_count);
1976 i915_vma_set_ggtt_write(vma);
1979 i915_vma_unpin_fence(vma);
1981 __i915_vma_unpin(vma);
1983 mutex_unlock(&dev->struct_mutex);
1985 intel_runtime_pm_put(dev_priv, wakeref);
1986 i915_gem_object_unpin_pages(obj);
1991 * We eat errors when the gpu is terminally wedged to avoid
1992 * userspace unduly crashing (gl has no provisions for mmaps to
1993 * fail). But any other -EIO isn't ours (e.g. swap in failure)
1994 * and so needs to be reported.
1996 if (!i915_terminally_wedged(&dev_priv->gpu_error))
1997 return VM_FAULT_SIGBUS;
1998 /* else: fall through */
2001 * EAGAIN means the gpu is hung and we'll wait for the error
2002 * handler to reset everything when re-faulting in
2003 * i915_mutex_lock_interruptible.
2010 * EBUSY is ok: this just means that another thread
2011 * already did the job.
2013 return VM_FAULT_NOPAGE;
2015 return VM_FAULT_OOM;
2018 return VM_FAULT_SIGBUS;
2020 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
2021 return VM_FAULT_SIGBUS;
2025 static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
2027 struct i915_vma *vma;
2029 GEM_BUG_ON(!obj->userfault_count);
2031 obj->userfault_count = 0;
2032 list_del(&obj->userfault_link);
2033 drm_vma_node_unmap(&obj->base.vma_node,
2034 obj->base.dev->anon_inode->i_mapping);
2036 for_each_ggtt_vma(vma, obj)
2037 i915_vma_unset_userfault(vma);
2041 * i915_gem_release_mmap - remove physical page mappings
2042 * @obj: obj in question
2044 * Preserve the reservation of the mmapping with the DRM core code, but
2045 * relinquish ownership of the pages back to the system.
2047 * It is vital that we remove the page mapping if we have mapped a tiled
2048 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
2051 * mapping will then trigger a page fault on the next user access, allowing
2052 * fixup by i915_gem_fault().
2055 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
2057 struct drm_i915_private *i915 = to_i915(obj->base.dev);
2058 intel_wakeref_t wakeref;
2060 /* Serialisation between user GTT access and our code depends upon
2061 * revoking the CPU's PTE whilst the mutex is held. The next user
2062 * pagefault then has to wait until we release the mutex.
2064 * Note that RPM complicates somewhat by adding an additional
2065 * requirement that operations to the GGTT be made holding the RPM
2068 lockdep_assert_held(&i915->drm.struct_mutex);
2069 wakeref = intel_runtime_pm_get(i915);
2071 if (!obj->userfault_count)
2074 __i915_gem_object_release_mmap(obj);
	/* Ensure that the CPU's PTEs are revoked and there are no outstanding
	 * memory transactions from userspace before we return. The TLB
	 * flushing implied by changing the PTE above *should* be
	 * sufficient; an extra barrier here just provides us with a bit
	 * of paranoid documentation about our requirement to serialise
	 * memory writes before touching registers / GSM.
	 */
	wmb();
2086 intel_runtime_pm_put(i915, wakeref);
2089 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
2091 struct drm_i915_gem_object *obj, *on;
2095 * Only called during RPM suspend. All users of the userfault_list
2096 * must be holding an RPM wakeref to ensure that this can not
2097 * run concurrently with themselves (and use the struct_mutex for
2098 * protection between themselves).
2101 list_for_each_entry_safe(obj, on,
2102 &dev_priv->mm.userfault_list, userfault_link)
2103 __i915_gem_object_release_mmap(obj);
2105 /* The fence will be lost when the device powers down. If any were
2106 * in use by hardware (i.e. they are pinned), we should not be powering
2107 * down! All other fences will be reacquired by the user upon waking.
2109 for (i = 0; i < dev_priv->num_fence_regs; i++) {
2110 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2112 /* Ideally we want to assert that the fence register is not
2113 * live at this point (i.e. that no piece of code will be
2114 * trying to write through fence + GTT, as that both violates
2115 * our tracking of activity and associated locking/barriers,
2116 * but also is illegal given that the hw is powered down).
2118 * Previously we used reg->pin_count as a "liveness" indicator.
2119 * That is not sufficient, and we need a more fine-grained
2120 * tool if we want to have a sanity check here.
2126 GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
2131 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
2133 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2136 err = drm_gem_create_mmap_offset(&obj->base);
2140 /* Attempt to reap some mmap space from dead objects */
2142 err = i915_gem_wait_for_idle(dev_priv,
2143 I915_WAIT_INTERRUPTIBLE,
2144 MAX_SCHEDULE_TIMEOUT);
2148 i915_gem_drain_freed_objects(dev_priv);
2149 err = drm_gem_create_mmap_offset(&obj->base);
2153 } while (flush_delayed_work(&dev_priv->gt.retire_work));
2158 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
2160 drm_gem_free_mmap_offset(&obj->base);
2164 i915_gem_mmap_gtt(struct drm_file *file,
2165 struct drm_device *dev,
2169 struct drm_i915_gem_object *obj;
2172 obj = i915_gem_object_lookup(file, handle);
2176 ret = i915_gem_object_create_mmap_offset(obj);
2178 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
2180 i915_gem_object_put(obj);
2185 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
2187 * @data: GTT mapping ioctl data
2188 * @file: GEM object info
2190 * Simply returns the fake offset to userspace so it can mmap it.
2191 * The mmap call will end up in drm_gem_mmap(), which will set things
2192 * up so we can get faults in the handler above.
2194 * The fault handler will take care of binding the object into the GTT
2195 * (since it may have been evicted to make room for something), allocating
2196 * a fence register, and mapping the appropriate aperture address into
2200 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
2201 struct drm_file *file)
2203 struct drm_i915_gem_mmap_gtt *args = data;
	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
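/*
 * Illustrative sketch (editor's addition, not part of this file): the
 * two-step GTT mmap described above, as seen from userspace.  The fake
 * offset returned by the ioctl is consumed by an ordinary mmap() on the
 * DRM fd, and the first access then goes through i915_gem_fault().
 */
#if 0	/* userspace example, kept out of the kernel build */
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static void *example_gtt_mmap(int drm_fd, __u32 handle, __u64 size)
{
	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
		return MAP_FAILED;

	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    drm_fd, arg.offset);
}
#endif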
2208 /* Immediately discard the backing storage */
2210 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
2212 i915_gem_object_free_mmap_offset(obj);
2214 if (obj->base.filp == NULL)
	/* Our goal here is to return as much memory as possible back to the
	 * system, as we are called from OOM. To do this we must instruct the
	 * shmfs to drop all of its backing pages, *now*.
	 */
2222 shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
2223 obj->mm.madv = __I915_MADV_PURGED;
2224 obj->mm.pages = ERR_PTR(-EFAULT);
2227 /* Try to discard unwanted pages */
2228 void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
2230 struct address_space *mapping;
2232 lockdep_assert_held(&obj->mm.lock);
2233 GEM_BUG_ON(i915_gem_object_has_pages(obj));
2235 switch (obj->mm.madv) {
2236 case I915_MADV_DONTNEED:
2237 i915_gem_object_truncate(obj);
2238 case __I915_MADV_PURGED:
2242 if (obj->base.filp == NULL)
2245 mapping = obj->base.filp->f_mapping,
2246 invalidate_mapping_pages(mapping, 0, (loff_t)-1);
2250 * Move pages to appropriate lru and release the pagevec, decrementing the
2251 * ref count of those pages.
2253 static void check_release_pagevec(struct pagevec *pvec)
2255 check_move_unevictable_pages(pvec);
2256 __pagevec_release(pvec);
2261 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
2262 struct sg_table *pages)
2264 struct sgt_iter sgt_iter;
2265 struct pagevec pvec;
2268 __i915_gem_object_release_shmem(obj, pages, true);
2270 i915_gem_gtt_finish_pages(obj, pages);
2272 if (i915_gem_object_needs_bit17_swizzle(obj))
2273 i915_gem_object_save_bit_17_swizzle(obj, pages);
2275 mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping);
2277 pagevec_init(&pvec);
2278 for_each_sgt_page(page, sgt_iter, pages) {
2280 set_page_dirty(page);
2282 if (obj->mm.madv == I915_MADV_WILLNEED)
2283 mark_page_accessed(page);
2285 if (!pagevec_add(&pvec, page))
2286 check_release_pagevec(&pvec);
2288 if (pagevec_count(&pvec))
2289 check_release_pagevec(&pvec);
2290 obj->mm.dirty = false;
2292 sg_free_table(pages);
2296 static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
2298 struct radix_tree_iter iter;
2302 radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
2303 radix_tree_delete(&obj->mm.get_page.radix, iter.index);
2307 static struct sg_table *
2308 __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
2310 struct drm_i915_private *i915 = to_i915(obj->base.dev);
2311 struct sg_table *pages;
2313 pages = fetch_and_zero(&obj->mm.pages);
2314 if (IS_ERR_OR_NULL(pages))
2317 spin_lock(&i915->mm.obj_lock);
2318 list_del(&obj->mm.link);
2319 spin_unlock(&i915->mm.obj_lock);
2321 if (obj->mm.mapping) {
2324 ptr = page_mask_bits(obj->mm.mapping);
2325 if (is_vmalloc_addr(ptr))
2328 kunmap(kmap_to_page(ptr));
2330 obj->mm.mapping = NULL;
2333 __i915_gem_object_reset_page_iter(obj);
2334 obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
2339 int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
2340 enum i915_mm_subclass subclass)
2342 struct sg_table *pages;
2345 if (i915_gem_object_has_pinned_pages(obj))
2348 GEM_BUG_ON(obj->bind_count);
2350 /* May be called by shrinker from within get_pages() (on another bo) */
2351 mutex_lock_nested(&obj->mm.lock, subclass);
2352 if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
2358 * ->put_pages might need to allocate memory for the bit17 swizzle
2359 * array, hence protect them from being reaped by removing them from gtt
2362 pages = __i915_gem_object_unset_pages(obj);
2365 * XXX Temporary hijinx to avoid updating all backends to handle
2366 * NULL pages. In the future, when we have more asynchronous
2367 * get_pages backends we should be better able to handle the
2368 * cancellation of the async task in a more uniform manner.
2370 if (!pages && !i915_gem_object_needs_async_cancel(obj))
2371 pages = ERR_PTR(-EINVAL);
2374 obj->ops->put_pages(obj, pages);
2378 mutex_unlock(&obj->mm.lock);
2383 bool i915_sg_trim(struct sg_table *orig_st)
2385 struct sg_table new_st;
2386 struct scatterlist *sg, *new_sg;
2389 if (orig_st->nents == orig_st->orig_nents)
2392 if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
2395 new_sg = new_st.sgl;
2396 for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
2397 sg_set_page(new_sg, sg_page(sg), sg->length, 0);
2398 sg_dma_address(new_sg) = sg_dma_address(sg);
2399 sg_dma_len(new_sg) = sg_dma_len(sg);
2401 new_sg = sg_next(new_sg);
2403 GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */
2405 sg_free_table(orig_st);
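/*
 * Worked example (illustrative): an object of 512 pages initially allocates
 * an sg_table with orig_nents == 512, but if shmem happened to return runs
 * of physically contiguous pages, coalescing in get_pages may leave only,
 * say, nents == 3 entries in use. i915_sg_trim() then allocates a 3-entry
 * table, copies the used entries (and their dma addresses) across and frees
 * the original, reclaiming the memory of the ~509 unused scatterlist slots.
 */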
2411 static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2413 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2414 const unsigned long page_count = obj->base.size / PAGE_SIZE;
2416 struct address_space *mapping;
2417 struct sg_table *st;
2418 struct scatterlist *sg;
2419 struct sgt_iter sgt_iter;
2421 unsigned long last_pfn = 0; /* suppress gcc warning */
2422 unsigned int max_segment = i915_sg_segment_size();
2423 unsigned int sg_page_sizes;
2424 struct pagevec pvec;
2429 * Assert that the object is not currently in any GPU domain. As it
2430 * wasn't in the GTT, there shouldn't be any way it could have been in
2433 GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
2434 GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
2437 * If there's no chance of allocating enough pages for the whole
2438 * object, bail early.
2440 if (page_count > totalram_pages())
2443 st = kmalloc(sizeof(*st), GFP_KERNEL);
2448 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
2454 * Get the list of pages out of our struct file. They'll be pinned
2455 * at this point until we release them.
2457 * Fail silently without starting the shrinker
2459 mapping = obj->base.filp->f_mapping;
2460 mapping_set_unevictable(mapping);
2461 noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
2462 noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
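/*
 * Hedged sketch of the escalation used by the allocation loop below:
 *
 *	1. ask shmem_read_mapping_page_gfp() with the no-reclaim gfp built
 *	   above, so the first attempt never blocks on reclaim;
 *	2. on failure, run i915_gem_shrink() with the next entry of the
 *	   shrink[] table (purgeable objects first) and retry;
 *	3. once the table is exhausted, retry with the mapping's full gfp
 *	   mask plus __GFP_RETRY_MAYFAIL, so a genuine out-of-memory
 *	   condition is reported as -ENOMEM instead of waking the OOM killer.
 */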
2467 for (i = 0; i < page_count; i++) {
2468 const unsigned int shrink[] = {
2469 I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
2472 gfp_t gfp = noreclaim;
2476 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2477 if (likely(!IS_ERR(page)))
2481 ret = PTR_ERR(page);
2485 i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++);
2488 * We've tried hard to allocate the memory by reaping
2489 * our own buffer, now let the real VM do its job and
2490 * go down in flames if truly OOM.
2492 * However, since graphics tend to be disposable,
2493 * defer the oom here by reporting the ENOMEM back
2497 /* reclaim and warn, but no oom */
2498 gfp = mapping_gfp_mask(mapping);
2501 * Our bo are always dirty and so we require
2502 * kswapd to reclaim our pages (direct reclaim
2503 * does not effectively begin pageout of our
2504 * buffers on its own). However, direct reclaim
2505 * only waits for kswapd when under allocation
2506 * congestion. So as a result __GFP_RECLAIM is
2507 * unreliable and fails to actually reclaim our
2508 * dirty pages -- unless you try over and over
2509 * again with !__GFP_NORETRY. However, we still
2510 * want to fail this allocation rather than
2511 * trigger the out-of-memory killer and for
2512 * this we want __GFP_RETRY_MAYFAIL.
2514 gfp |= __GFP_RETRY_MAYFAIL;
2519 sg->length >= max_segment ||
2520 page_to_pfn(page) != last_pfn + 1) {
2522 sg_page_sizes |= sg->length;
2526 sg_set_page(sg, page, PAGE_SIZE, 0);
2528 sg->length += PAGE_SIZE;
2530 last_pfn = page_to_pfn(page);
2532 /* Check that the i965g/gm workaround works. */
2533 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
2535 if (sg) { /* loop terminated early; short sg table */
2536 sg_page_sizes |= sg->length;
2540 /* Trim unused sg entries to avoid wasting memory. */
2543 ret = i915_gem_gtt_prepare_pages(obj, st);
2546 * DMA remapping failed? One possible cause is that
2547 * it could not reserve enough large entries, asking
2548 * for PAGE_SIZE chunks instead may be helpful.
2550 if (max_segment > PAGE_SIZE) {
2551 for_each_sgt_page(page, sgt_iter, st)
2555 max_segment = PAGE_SIZE;
2558 dev_warn(&dev_priv->drm.pdev->dev,
2559 "Failed to DMA remap %lu pages\n",
2565 if (i915_gem_object_needs_bit17_swizzle(obj))
2566 i915_gem_object_do_bit_17_swizzle(obj, st);
2568 __i915_gem_object_set_pages(obj, st, sg_page_sizes);
2575 mapping_clear_unevictable(mapping);
2576 pagevec_init(&pvec);
2577 for_each_sgt_page(page, sgt_iter, st) {
2578 if (!pagevec_add(&pvec, page))
2579 check_release_pagevec(&pvec);
2581 if (pagevec_count(&pvec))
2582 check_release_pagevec(&pvec);
2587 * shmemfs first checks if there is enough memory to allocate the page
2588 * and reports ENOSPC should there be insufficient, along with the usual
2589 * ENOMEM for a genuine allocation failure.
2591 * We use ENOSPC in our driver to mean that we have run out of aperture
2592 * space and so want to translate the error from shmemfs back to our
2593 * usual understanding of ENOMEM.
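/*
 * A minimal sketch of that translation, assuming ret holds the error
 * returned on the failure path above:
 *
 *	if (ret == -ENOSPC)
 *		ret = -ENOMEM;
 */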
2601 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
2602 struct sg_table *pages,
2603 unsigned int sg_page_sizes)
2605 struct drm_i915_private *i915 = to_i915(obj->base.dev);
2606 unsigned long supported = INTEL_INFO(i915)->page_sizes;
2609 lockdep_assert_held(&obj->mm.lock);
2611 obj->mm.get_page.sg_pos = pages->sgl;
2612 obj->mm.get_page.sg_idx = 0;
2614 obj->mm.pages = pages;
2616 if (i915_gem_object_is_tiled(obj) &&
2617 i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
2618 GEM_BUG_ON(obj->mm.quirked);
2619 __i915_gem_object_pin_pages(obj);
2620 obj->mm.quirked = true;
2623 GEM_BUG_ON(!sg_page_sizes);
2624 obj->mm.page_sizes.phys = sg_page_sizes;
2627 * Calculate the supported page-sizes which fit into the given
2628 * sg_page_sizes. This will give us the page-sizes which we may be able
2629 * to use opportunistically when later inserting into the GTT. For
2630 * example if phys=2G, then in theory we should be able to use 1G, 2M,
2631 * 64K or 4K pages, although in practice this will depend on a number of factors.
2634 obj->mm.page_sizes.sg = 0;
2635 for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
2636 if (obj->mm.page_sizes.phys & ~0u << i)
2637 obj->mm.page_sizes.sg |= BIT(i);
2639 GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
2641 spin_lock(&i915->mm.obj_lock);
2642 list_add(&obj->mm.link, &i915->mm.unbound_list);
2643 spin_unlock(&i915->mm.obj_lock);
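/*
 * Worked example (illustrative): with supported = 4K | 64K | 2M and an
 * object whose largest physical chunk is 2M (page_sizes.phys has bit 21
 * set), the loop above yields page_sizes.sg = 4K | 64K | 2M, since a 2M
 * chunk can always be mapped with any smaller supported page size. If the
 * largest chunk were only 64K, the result would be 4K | 64K and no 2M GTT
 * mappings would be attempted.
 */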
2646 static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2650 if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
2651 DRM_DEBUG("Attempting to obtain a purgeable object\n");
2655 err = obj->ops->get_pages(obj);
2656 GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));
2661 /* Ensure that the associated pages are gathered from the backing storage
2662 * and pinned into our object. i915_gem_object_pin_pages() may be called
2663 * multiple times before they are released by a single call to
2664 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
2665 * either as a result of memory pressure (reaping pages under the shrinker)
2666 * or as the object is itself released.
2668 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2672 err = mutex_lock_interruptible(&obj->mm.lock);
2676 if (unlikely(!i915_gem_object_has_pages(obj))) {
2677 GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
2679 err = ____i915_gem_object_get_pages(obj);
2683 smp_mb__before_atomic();
2685 atomic_inc(&obj->mm.pages_pin_count);
2688 mutex_unlock(&obj->mm.lock);
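/*
 * Illustrative usage (a hedged sketch, not a complete caller): page pinning
 * is reference counted, so every successful pin must be balanced by an
 * unpin before the shrinker may reclaim the backing store:
 *
 *	err = i915_gem_object_pin_pages(obj);
 *	if (err)
 *		return err;
 *	... access the backing store via obj->mm.pages ...
 *	i915_gem_object_unpin_pages(obj);
 */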
2692 /* The 'mapping' part of i915_gem_object_pin_map() below */
2693 static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
2694 enum i915_map_type type)
2696 unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
2697 struct sg_table *sgt = obj->mm.pages;
2698 struct sgt_iter sgt_iter;
2700 struct page *stack_pages[32];
2701 struct page **pages = stack_pages;
2702 unsigned long i = 0;
2706 /* A single page can always be kmapped */
2707 if (n_pages == 1 && type == I915_MAP_WB)
2708 return kmap(sg_page(sgt->sgl));
2710 if (n_pages > ARRAY_SIZE(stack_pages)) {
2711 /* Too big for stack -- allocate temporary array instead */
2712 pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
2717 for_each_sgt_page(page, sgt_iter, sgt)
2720 /* Check that we have the expected number of pages */
2721 GEM_BUG_ON(i != n_pages);
2726 /* fallthrough to use PAGE_KERNEL anyway */
2728 pgprot = PAGE_KERNEL;
2731 pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
2734 addr = vmap(pages, n_pages, 0, pgprot);
2736 if (pages != stack_pages)
2742 /* get, pin, and map the pages of the object into kernel space */
2743 void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
2744 enum i915_map_type type)
2746 enum i915_map_type has_type;
2751 if (unlikely(!i915_gem_object_has_struct_page(obj)))
2752 return ERR_PTR(-ENXIO);
2754 ret = mutex_lock_interruptible(&obj->mm.lock);
2756 return ERR_PTR(ret);
2758 pinned = !(type & I915_MAP_OVERRIDE);
2759 type &= ~I915_MAP_OVERRIDE;
2761 if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
2762 if (unlikely(!i915_gem_object_has_pages(obj))) {
2763 GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
2765 ret = ____i915_gem_object_get_pages(obj);
2769 smp_mb__before_atomic();
2771 atomic_inc(&obj->mm.pages_pin_count);
2774 GEM_BUG_ON(!i915_gem_object_has_pages(obj));
2776 ptr = page_unpack_bits(obj->mm.mapping, &has_type);
2777 if (ptr && has_type != type) {
2783 if (is_vmalloc_addr(ptr))
2786 kunmap(kmap_to_page(ptr));
2788 ptr = obj->mm.mapping = NULL;
2792 ptr = i915_gem_object_map(obj, type);
2798 obj->mm.mapping = page_pack_bits(ptr, type);
2802 mutex_unlock(&obj->mm.lock);
2806 atomic_dec(&obj->mm.pages_pin_count);
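/*
 * Illustrative usage (hedged; src and len are placeholders): a typical
 * kernel CPU access through pin_map, which pins the pages and returns a
 * kmap or vmap address in one step:
 *
 *	void *vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, src, len);
 *	i915_gem_object_unpin_map(obj);
 *
 * I915_MAP_WC would be chosen instead when the writes must not linger in
 * the CPU cache, e.g. for objects read by the GPU without snooping.
 */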
2813 i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
2814 const struct drm_i915_gem_pwrite *arg)
2816 struct address_space *mapping = obj->base.filp->f_mapping;
2817 char __user *user_data = u64_to_user_ptr(arg->data_ptr);
2821 /* Before we instantiate/pin the backing store for our use, we
2822 * can prepopulate the shmemfs filp efficiently using a write into
2823 * the pagecache. We avoid the penalty of instantiating all the
2824 * pages, important if the user is just writing to a few and never
2825 * uses the object on the GPU, and using a direct write into shmemfs
2826 * allows it to avoid the cost of retrieving a page (either swapin
2827 * or clearing-before-use) before it is overwritten.
2829 if (i915_gem_object_has_pages(obj))
2832 if (obj->mm.madv != I915_MADV_WILLNEED)
2835 /* Before the pages are instantiated the object is treated as being
2836 * in the CPU domain. The pages will be clflushed as required before
2837 * use, and we can freely write into the pages directly. If userspace
2838 * races pwrite with any other operation; corruption will ensue -
2839 * that is userspace's prerogative!
2843 offset = arg->offset;
2844 pg = offset_in_page(offset);
2847 unsigned int len, unwritten;
2852 len = PAGE_SIZE - pg;
2856 err = pagecache_write_begin(obj->base.filp, mapping,
2863 unwritten = copy_from_user(vaddr + pg, user_data, len);
2866 err = pagecache_write_end(obj->base.filp, mapping,
2867 offset, len, len - unwritten,
2884 static bool match_ring(struct i915_request *rq)
2886 struct drm_i915_private *dev_priv = rq->i915;
2887 u32 ring = I915_READ(RING_START(rq->engine->mmio_base));
2889 return ring == i915_ggtt_offset(rq->ring->vma);
2892 struct i915_request *
2893 i915_gem_find_active_request(struct intel_engine_cs *engine)
2895 struct i915_request *request, *active = NULL;
2896 unsigned long flags;
2899 * We are called by the error capture, reset and to dump engine
2900 * state at random points in time. In particular, note that neither is
2901 * crucially ordered with an interrupt. After a hang, the GPU is dead
2902 * and we assume that no more writes can happen (we waited long enough
2903 * for all writes that were in transaction to be flushed) - adding an
2904 * extra delay for a recent interrupt is pointless. Hence, we do
2905 * not need an engine->irq_seqno_barrier() before the seqno reads.
2906 * At all other times, we must assume the GPU is still running, but
2907 * we only care about the snapshot of this moment.
2909 spin_lock_irqsave(&engine->timeline.lock, flags);
2910 list_for_each_entry(request, &engine->timeline.requests, link) {
2911 if (i915_request_completed(request))
2914 if (!i915_request_started(request))
2917 /* More than one preemptible request may match! */
2918 if (!match_ring(request))
2924 spin_unlock_irqrestore(&engine->timeline.lock, flags);
2930 i915_gem_retire_work_handler(struct work_struct *work)
2932 struct drm_i915_private *dev_priv =
2933 container_of(work, typeof(*dev_priv), gt.retire_work.work);
2934 struct drm_device *dev = &dev_priv->drm;
2936 /* Come back later if the device is busy... */
2937 if (mutex_trylock(&dev->struct_mutex)) {
2938 i915_retire_requests(dev_priv);
2939 mutex_unlock(&dev->struct_mutex);
2943 * Keep the retire handler running until we are finally idle.
2944 * We do not need to do this test under locking as in the worst-case
2945 * we queue the retire worker once too often.
2947 if (READ_ONCE(dev_priv->gt.awake))
2948 queue_delayed_work(dev_priv->wq,
2949 &dev_priv->gt.retire_work,
2950 round_jiffies_up_relative(HZ));
2953 static void shrink_caches(struct drm_i915_private *i915)
2956 * kmem_cache_shrink() discards empty slabs and reorders partially
2957 * filled slabs to prioritise allocating from the mostly full slabs,
2958 * with the aim of reducing fragmentation.
2960 kmem_cache_shrink(i915->priorities);
2961 kmem_cache_shrink(i915->dependencies);
2962 kmem_cache_shrink(i915->requests);
2963 kmem_cache_shrink(i915->luts);
2964 kmem_cache_shrink(i915->vmas);
2965 kmem_cache_shrink(i915->objects);
2968 struct sleep_rcu_work {
2970 struct rcu_head rcu;
2971 struct work_struct work;
2973 struct drm_i915_private *i915;
2978 same_epoch(struct drm_i915_private *i915, unsigned int epoch)
2981 * There is a small chance that the epoch wrapped since we started
2982 * sleeping. If we assume that epoch is at least a u32, then it will
2983 * take at least 2^32 * 100ms for it to wrap, or a little over 13 years.
2985 return epoch == READ_ONCE(i915->gt.epoch);
2988 static void __sleep_work(struct work_struct *work)
2990 struct sleep_rcu_work *s = container_of(work, typeof(*s), work);
2991 struct drm_i915_private *i915 = s->i915;
2992 unsigned int epoch = s->epoch;
2995 if (same_epoch(i915, epoch))
2996 shrink_caches(i915);
2999 static void __sleep_rcu(struct rcu_head *rcu)
3001 struct sleep_rcu_work *s = container_of(rcu, typeof(*s), rcu);
3002 struct drm_i915_private *i915 = s->i915;
3004 destroy_rcu_head(&s->rcu);
3006 if (same_epoch(i915, s->epoch)) {
3007 INIT_WORK(&s->work, __sleep_work);
3008 queue_work(i915->wq, &s->work);
3015 new_requests_since_last_retire(const struct drm_i915_private *i915)
3017 return (READ_ONCE(i915->gt.active_requests) ||
3018 work_pending(&i915->gt.idle_work.work));
3021 static void assert_kernel_context_is_current(struct drm_i915_private *i915)
3023 struct intel_engine_cs *engine;
3024 enum intel_engine_id id;
3026 if (i915_terminally_wedged(&i915->gpu_error))
3029 GEM_BUG_ON(i915->gt.active_requests);
3030 for_each_engine(engine, i915, id) {
3031 GEM_BUG_ON(__i915_active_request_peek(&engine->timeline.last_request));
3032 GEM_BUG_ON(engine->last_retired_context !=
3033 to_intel_context(i915->kernel_context, engine));
3038 i915_gem_idle_work_handler(struct work_struct *work)
3040 struct drm_i915_private *dev_priv =
3041 container_of(work, typeof(*dev_priv), gt.idle_work.work);
3042 unsigned int epoch = I915_EPOCH_INVALID;
3043 bool rearm_hangcheck;
3045 if (!READ_ONCE(dev_priv->gt.awake))
3048 if (READ_ONCE(dev_priv->gt.active_requests))
3052 * Flush out the last user context, leaving only the pinned
3053 * kernel context resident. When we are idling on the kernel_context,
3054 * no more new requests (with a context switch) are emitted and we
3055 * can finally rest. A consequence is that the idle work handler is
3056 * always called at least twice before idling (and if the system is
3057 * idle that implies a round trip through the retire worker).
3059 mutex_lock(&dev_priv->drm.struct_mutex);
3060 i915_gem_switch_to_kernel_context(dev_priv);
3061 mutex_unlock(&dev_priv->drm.struct_mutex);
3063 GEM_TRACE("active_requests=%d (after switch-to-kernel-context)\n",
3064 READ_ONCE(dev_priv->gt.active_requests));
3067 * Wait for last execlists context complete, but bail out in case a
3068 * new request is submitted. As we don't trust the hardware, we
3069 * continue on if the wait times out. This is necessary to allow
3070 * the machine to suspend even if the hardware dies, and we will
3071 * try to recover in resume (after depriving the hardware of power,
3072 * it may be in a better mood).
3074 __wait_for(if (new_requests_since_last_retire(dev_priv)) return,
3075 intel_engines_are_idle(dev_priv),
3076 I915_IDLE_ENGINES_TIMEOUT * 1000,
3080 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
3082 if (!mutex_trylock(&dev_priv->drm.struct_mutex)) {
3083 /* Currently busy, come back later */
3084 mod_delayed_work(dev_priv->wq,
3085 &dev_priv->gt.idle_work,
3086 msecs_to_jiffies(50));
3091 * New request retired after this work handler started, extend active
3092 * period until next instance of the work.
3094 if (new_requests_since_last_retire(dev_priv))
3097 epoch = __i915_gem_park(dev_priv);
3099 assert_kernel_context_is_current(dev_priv);
3101 rearm_hangcheck = false;
3103 mutex_unlock(&dev_priv->drm.struct_mutex);
3106 if (rearm_hangcheck) {
3107 GEM_BUG_ON(!dev_priv->gt.awake);
3108 i915_queue_hangcheck(dev_priv);
3112 * When we are idle, it is an opportune time to reap our caches.
3113 * However, we have many objects that utilise RCU and the ordered
3114 * i915->wq that this work is executing on. To try and flush any
3115 * pending frees now we are idle, we first wait for an RCU grace
3116 * period, and then queue a task (that will run last on the wq) to
3117 * shrink and re-optimize the caches.
3119 if (same_epoch(dev_priv, epoch)) {
3120 struct sleep_rcu_work *s = kmalloc(sizeof(*s), GFP_KERNEL);
3122 init_rcu_head(&s->rcu);
3125 call_rcu(&s->rcu, __sleep_rcu);
3130 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
3132 struct drm_i915_private *i915 = to_i915(gem->dev);
3133 struct drm_i915_gem_object *obj = to_intel_bo(gem);
3134 struct drm_i915_file_private *fpriv = file->driver_priv;
3135 struct i915_lut_handle *lut, *ln;
3137 mutex_lock(&i915->drm.struct_mutex);
3139 list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
3140 struct i915_gem_context *ctx = lut->ctx;
3141 struct i915_vma *vma;
3143 GEM_BUG_ON(ctx->file_priv == ERR_PTR(-EBADF));
3144 if (ctx->file_priv != fpriv)
3147 vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
3148 GEM_BUG_ON(vma->obj != obj);
3150 /* We allow the process to have multiple handles to the same
3151 * vma, in the same fd namespace, by virtue of flink/open.
3153 GEM_BUG_ON(!vma->open_count);
3154 if (!--vma->open_count && !i915_vma_is_ggtt(vma))
3155 i915_vma_close(vma);
3157 list_del(&lut->obj_link);
3158 list_del(&lut->ctx_link);
3160 kmem_cache_free(i915->luts, lut);
3161 __i915_gem_object_release_unless_active(obj);
3164 mutex_unlock(&i915->drm.struct_mutex);
3167 static unsigned long to_wait_timeout(s64 timeout_ns)
3170 return MAX_SCHEDULE_TIMEOUT;
3172 if (timeout_ns == 0)
3175 return nsecs_to_jiffies_timeout(timeout_ns);
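/*
 * Worked example (illustrative) of the mapping above: a negative
 * timeout_ns (e.g. -1) means wait indefinitely and becomes
 * MAX_SCHEDULE_TIMEOUT; 0 means poll without sleeping; a positive value
 * such as 1000000 (1ms) is converted with nsecs_to_jiffies_timeout(),
 * which rounds up so the wait is never shorter than requested.
 */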
3179 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
3180 * @dev: drm device pointer
3181 * @data: ioctl data blob
3182 * @file: drm file pointer
3184 * Returns 0 if successful, else an error is returned with the remaining time in
3185 * the timeout parameter.
3186 * -ETIME: object is still busy after timeout
3187 * -ERESTARTSYS: signal interrupted the wait
3188 * -ENOENT: object doesn't exist
3189 * Also possible, but rare:
3190 * -EAGAIN: incomplete, restart syscall
3192 * -ENODEV: Internal IRQ fail
3193 * -E?: The add request failed
3195 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
3196 * non-zero timeout parameter the wait ioctl will wait for the given number of
3197 * nanoseconds on an object becoming unbusy. Since the wait itself does so
3198 * without holding struct_mutex, the object may become re-busied before this
3199 * function completes. A similar but shorter race condition exists in the busy
3203 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3205 struct drm_i915_gem_wait *args = data;
3206 struct drm_i915_gem_object *obj;
3210 if (args->flags != 0)
3213 obj = i915_gem_object_lookup(file, args->bo_handle);
3217 start = ktime_get();
3219 ret = i915_gem_object_wait(obj,
3220 I915_WAIT_INTERRUPTIBLE |
3221 I915_WAIT_PRIORITY |
3223 to_wait_timeout(args->timeout_ns),
3224 to_rps_client(file));
3226 if (args->timeout_ns > 0) {
3227 args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
3228 if (args->timeout_ns < 0)
3229 args->timeout_ns = 0;
3232 * Apparently ktime isn't accurate enough and occasionally has a
3233 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
3234 * things up to make the test happy. We allow up to 1 jiffy.
3236 * This is a regression from the timespec->ktime conversion.
3238 if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
3239 args->timeout_ns = 0;
3241 /* Asked to wait beyond the jiffie/scheduler precision? */
3242 if (ret == -ETIME && args->timeout_ns)
3246 i915_gem_object_put(obj);
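/*
 * Hedged userspace sketch (not driver code; fd and handle are
 * hypothetical) of the semantics described above, via the libdrm wrapper:
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.flags = 0,
 *		.timeout_ns = 10 * 1000 * 1000,	(a 10ms budget)
 *	};
 *	int ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 *
 * On return, errno == ETIME means the object is still busy and
 * wait.timeout_ns holds the unused remainder of the budget (possibly
 * clamped to zero); a zero timeout_ns turns the call into a busy check.
 */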
3250 static int wait_for_engines(struct drm_i915_private *i915)
3252 if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) {
3253 dev_err(i915->drm.dev,
3254 "Failed to idle engines, declaring wedged!\n");
3256 i915_gem_set_wedged(i915);
3264 wait_for_timelines(struct drm_i915_private *i915,
3265 unsigned int flags, long timeout)
3267 struct i915_gt_timelines *gt = &i915->gt.timelines;
3268 struct i915_timeline *tl;
3270 if (!READ_ONCE(i915->gt.active_requests))
3273 mutex_lock(>->mutex);
3274 list_for_each_entry(tl, >->active_list, link) {
3275 struct i915_request *rq;
3277 rq = i915_active_request_get_unlocked(&tl->last_request);
3281 mutex_unlock(>->mutex);
3286 * Switching to the kernel context is often used as a synchronous
3287 * step prior to idling, e.g. in suspend for flushing all
3288 * current operations to memory before sleeping. These we
3289 * want to complete as quickly as possible to avoid prolonged
3290 * stalls, so allow the gpu to boost to maximum clocks.
3292 if (flags & I915_WAIT_FOR_IDLE_BOOST)
3293 gen6_rps_boost(rq, NULL);
3295 timeout = i915_request_wait(rq, flags, timeout);
3296 i915_request_put(rq);
3300 /* restart after reacquiring the lock */
3301 mutex_lock(>->mutex);
3302 tl = list_entry(>->active_list, typeof(*tl), link);
3304 mutex_unlock(>->mutex);
3309 int i915_gem_wait_for_idle(struct drm_i915_private *i915,
3310 unsigned int flags, long timeout)
3312 GEM_TRACE("flags=%x (%s), timeout=%ld%s\n",
3313 flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked",
3314 timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "");
3316 /* If the device is asleep, we have no requests outstanding */
3317 if (!READ_ONCE(i915->gt.awake))
3320 timeout = wait_for_timelines(i915, flags, timeout);
3324 if (flags & I915_WAIT_LOCKED) {
3327 lockdep_assert_held(&i915->drm.struct_mutex);
3329 if (GEM_SHOW_DEBUG() && !timeout) {
3330 /* Presume that timeout was non-zero to begin with! */
3331 dev_warn(&i915->drm.pdev->dev,
3332 "Missed idle-completion interrupt!\n");
3336 err = wait_for_engines(i915);
3340 i915_retire_requests(i915);
3341 GEM_BUG_ON(i915->gt.active_requests);
3347 static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
3350 * We manually flush the CPU domain so that we can override and
3351 * force the flush for the display, and perform it asynchronously.
3353 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
3354 if (obj->cache_dirty)
3355 i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
3356 obj->write_domain = 0;
3359 void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
3361 if (!READ_ONCE(obj->pin_global))
3364 mutex_lock(&obj->base.dev->struct_mutex);
3365 __i915_gem_object_flush_for_display(obj);
3366 mutex_unlock(&obj->base.dev->struct_mutex);
3370 * Moves a single object to the WC read, and possibly write domain.
3371 * @obj: object to act on
3372 * @write: ask for write access or read only
3374 * This function returns when the move is complete, including waiting on
3378 i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
3382 lockdep_assert_held(&obj->base.dev->struct_mutex);
3384 ret = i915_gem_object_wait(obj,
3385 I915_WAIT_INTERRUPTIBLE |
3387 (write ? I915_WAIT_ALL : 0),
3388 MAX_SCHEDULE_TIMEOUT,
3393 if (obj->write_domain == I915_GEM_DOMAIN_WC)
3396 /* Flush and acquire obj->pages so that we are coherent through
3397 * direct access in memory with previous cached writes through
3398 * shmemfs and that our cache domain tracking remains valid.
3399 * For example, if the obj->filp was moved to swap without us
3400 * being notified and releasing the pages, we would mistakenly
3401 * continue to assume that the obj remained out of the CPU cached domain.
3404 ret = i915_gem_object_pin_pages(obj);
3408 flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);
3410 /* Serialise direct access to this object with the barriers for
3411 * coherent writes from the GPU, by effectively invalidating the
3412 * WC domain upon first access.
3414 if ((obj->read_domains & I915_GEM_DOMAIN_WC) == 0)
3417 /* It should now be out of any other write domains, and we can update
3418 * the domain values for our changes.
3420 GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_WC) != 0);
3421 obj->read_domains |= I915_GEM_DOMAIN_WC;
3423 obj->read_domains = I915_GEM_DOMAIN_WC;
3424 obj->write_domain = I915_GEM_DOMAIN_WC;
3425 obj->mm.dirty = true;
3428 i915_gem_object_unpin_pages(obj);
3433 * Moves a single object to the GTT read, and possibly write domain.
3434 * @obj: object to act on
3435 * @write: ask for write access or read only
3437 * This function returns when the move is complete, including waiting on
3441 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3445 lockdep_assert_held(&obj->base.dev->struct_mutex);
3447 ret = i915_gem_object_wait(obj,
3448 I915_WAIT_INTERRUPTIBLE |
3450 (write ? I915_WAIT_ALL : 0),
3451 MAX_SCHEDULE_TIMEOUT,
3456 if (obj->write_domain == I915_GEM_DOMAIN_GTT)
3459 /* Flush and acquire obj->pages so that we are coherent through
3460 * direct access in memory with previous cached writes through
3461 * shmemfs and that our cache domain tracking remains valid.
3462 * For example, if the obj->filp was moved to swap without us
3463 * being notified and releasing the pages, we would mistakenly
3465 * continue to assume that the obj remained out of the CPU cached domain.
3467 ret = i915_gem_object_pin_pages(obj);
3471 flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);
3473 /* Serialise direct access to this object with the barriers for
3474 * coherent writes from the GPU, by effectively invalidating the
3475 * GTT domain upon first access.
3477 if ((obj->read_domains & I915_GEM_DOMAIN_GTT) == 0)
3480 /* It should now be out of any other write domains, and we can update
3481 * the domain values for our changes.
3483 GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3484 obj->read_domains |= I915_GEM_DOMAIN_GTT;
3486 obj->read_domains = I915_GEM_DOMAIN_GTT;
3487 obj->write_domain = I915_GEM_DOMAIN_GTT;
3488 obj->mm.dirty = true;
3491 i915_gem_object_unpin_pages(obj);
3496 * Changes the cache-level of an object across all VMA.
3497 * @obj: object to act on
3498 * @cache_level: new cache level to set for the object
3500 * After this function returns, the object will be in the new cache-level
3501 * across all GTT and the contents of the backing storage will be coherent,
3502 * with respect to the new cache-level. In order to keep the backing storage
3503 * coherent for all users, we only allow a single cache level to be set
3504 * globally on the object and prevent it from being changed whilst the
3505 * hardware is reading from the object. That is if the object is currently
3506 * on the scanout it will be set to uncached (or equivalent display
3507 * cache coherency) and all non-MOCS GPU access will also be uncached so
3508 * that all direct access to the scanout remains coherent.
3510 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3511 enum i915_cache_level cache_level)
3513 struct i915_vma *vma;
3516 lockdep_assert_held(&obj->base.dev->struct_mutex);
3518 if (obj->cache_level == cache_level)
3521 /* Inspect the list of currently bound VMA and unbind any that would
3522 * be invalid given the new cache-level. This is principally to
3523 * catch the issue of the CS prefetch crossing page boundaries and
3524 * reading an invalid PTE on older architectures.
3527 list_for_each_entry(vma, &obj->vma.list, obj_link) {
3528 if (!drm_mm_node_allocated(&vma->node))
3531 if (i915_vma_is_pinned(vma)) {
3532 DRM_DEBUG("can not change the cache level of pinned objects\n");
3536 if (!i915_vma_is_closed(vma) &&
3537 i915_gem_valid_gtt_space(vma, cache_level))
3540 ret = i915_vma_unbind(vma);
3544 /* As unbinding may affect other elements in the
3545 * obj->vma_list (due to side-effects from retiring
3546 * an active vma), play safe and restart the iterator.
3551 /* We can reuse the existing drm_mm nodes but need to change the
3552 * cache-level on the PTE. We could simply unbind them all and
3553 * rebind with the correct cache-level on next use. However since
3554 * we already have a valid slot, dma mapping, pages etc, we may as well
3555 * rewrite the PTE in the belief that doing so tramples upon less
3556 * state and so involves less work.
3558 if (obj->bind_count) {
3559 /* Before we change the PTE, the GPU must not be accessing it.
3560 * If we wait upon the object, we know that all the bound
3561 * VMA are no longer active.
3563 ret = i915_gem_object_wait(obj,
3564 I915_WAIT_INTERRUPTIBLE |
3567 MAX_SCHEDULE_TIMEOUT,
3572 if (!HAS_LLC(to_i915(obj->base.dev)) &&
3573 cache_level != I915_CACHE_NONE) {
3574 /* Access to snoopable pages through the GTT is
3575 * incoherent and on some machines causes a hard
3576 * lockup. Relinquish the CPU mmapping to force
3577 * userspace to refault in the pages and we can
3578 * then double check if the GTT mapping is still
3579 * valid for that pointer access.
3581 i915_gem_release_mmap(obj);
3583 /* As we no longer need a fence for GTT access,
3584 * we can relinquish it now (and so prevent having
3585 * to steal a fence from someone else on the next
3586 * fence request). Note GPU activity would have
3587 * dropped the fence as all snoopable access is
3588 * supposed to be linear.
3590 for_each_ggtt_vma(vma, obj) {
3591 ret = i915_vma_put_fence(vma);
3596 /* We either have incoherent backing store and
3597 * so no GTT access or the architecture is fully
3598 * coherent. In such cases, existing GTT mmaps
3599 * ignore the cache bit in the PTE and we can
3600 * rewrite it without confusing the GPU or having
3601 * to force userspace to fault back in its mmaps.
3605 list_for_each_entry(vma, &obj->vma.list, obj_link) {
3606 if (!drm_mm_node_allocated(&vma->node))
3609 ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
3615 list_for_each_entry(vma, &obj->vma.list, obj_link)
3616 vma->node.color = cache_level;
3617 i915_gem_object_set_cache_coherency(obj, cache_level);
3618 obj->cache_dirty = true; /* Always invalidate stale cachelines */
3623 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3624 struct drm_file *file)
3626 struct drm_i915_gem_caching *args = data;
3627 struct drm_i915_gem_object *obj;
3631 obj = i915_gem_object_lookup_rcu(file, args->handle);
3637 switch (obj->cache_level) {
3638 case I915_CACHE_LLC:
3639 case I915_CACHE_L3_LLC:
3640 args->caching = I915_CACHING_CACHED;
3644 args->caching = I915_CACHING_DISPLAY;
3648 args->caching = I915_CACHING_NONE;
3656 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3657 struct drm_file *file)
3659 struct drm_i915_private *i915 = to_i915(dev);
3660 struct drm_i915_gem_caching *args = data;
3661 struct drm_i915_gem_object *obj;
3662 enum i915_cache_level level;
3665 switch (args->caching) {
3666 case I915_CACHING_NONE:
3667 level = I915_CACHE_NONE;
3669 case I915_CACHING_CACHED:
3671 * Due to a HW issue on BXT A stepping, GPU stores via a
3672 * snooped mapping may leave stale data in a corresponding CPU
3673 * cacheline, whereas normally such cachelines would get invalidated.
3676 if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
3679 level = I915_CACHE_LLC;
3681 case I915_CACHING_DISPLAY:
3682 level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
3688 obj = i915_gem_object_lookup(file, args->handle);
3693 * The caching mode of proxy object is handled by its generator, and
3694 * not allowed to be changed by userspace.
3696 if (i915_gem_object_is_proxy(obj)) {
3701 if (obj->cache_level == level)
3704 ret = i915_gem_object_wait(obj,
3705 I915_WAIT_INTERRUPTIBLE,
3706 MAX_SCHEDULE_TIMEOUT,
3707 to_rps_client(file));
3711 ret = i915_mutex_lock_interruptible(dev);
3715 ret = i915_gem_object_set_cache_level(obj, level);
3716 mutex_unlock(&dev->struct_mutex);
3719 i915_gem_object_put(obj);
3724 * Prepare buffer for display plane (scanout, cursors, etc). Can be called from
3725 * an uninterruptible phase (modesetting) and allows any flushes to be pipelined
3726 * (for pageflips). We only flush the caches while preparing the buffer for
3727 * display, the callers are responsible for frontbuffer flush.
3730 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3732 const struct i915_ggtt_view *view,
3735 struct i915_vma *vma;
3738 lockdep_assert_held(&obj->base.dev->struct_mutex);
3740 /* Mark the global pin early so that we account for the
3741 * display coherency whilst setting up the cache domains.
3745 /* The display engine is not coherent with the LLC cache on gen6. As
3746 * a result, we make sure that the pinning that is about to occur is
3747 * done with uncached PTEs. This is lowest common denominator for all
3750 * However for gen6+, we could do better by using the GFDT bit instead
3751 * of uncaching, which would allow us to flush all the LLC-cached data
3752 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3754 ret = i915_gem_object_set_cache_level(obj,
3755 HAS_WT(to_i915(obj->base.dev)) ?
3756 I915_CACHE_WT : I915_CACHE_NONE);
3759 goto err_unpin_global;
3762 /* As the user may map the buffer once pinned in the display plane
3763 * (e.g. libkms for the bootup splash), we have to ensure that we
3764 * always use map_and_fenceable for all scanout buffers. However,
3765 * it may simply be too big to fit into mappable, in which case
3766 * put it anyway and hope that userspace can cope (but always first
3767 * try to preserve the existing ABI).
3769 vma = ERR_PTR(-ENOSPC);
3770 if ((flags & PIN_MAPPABLE) == 0 &&
3771 (!view || view->type == I915_GGTT_VIEW_NORMAL))
3772 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
3777 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
3779 goto err_unpin_global;
3781 vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
3783 __i915_gem_object_flush_for_display(obj);
3785 /* It should now be out of any other write domains, and we can update
3786 * the domain values for our changes.
3788 obj->read_domains |= I915_GEM_DOMAIN_GTT;
3798 i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
3800 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
3802 if (WARN_ON(vma->obj->pin_global == 0))
3805 if (--vma->obj->pin_global == 0)
3806 vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
3808 /* Bump the LRU to try and avoid premature eviction whilst flipping */
3809 i915_gem_object_bump_inactive_ggtt(vma->obj);
3811 i915_vma_unpin(vma);
3815 * Moves a single object to the CPU read, and possibly write domain.
3816 * @obj: object to act on
3817 * @write: requesting write or read-only access
3819 * This function returns when the move is complete, including waiting on
3823 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3827 lockdep_assert_held(&obj->base.dev->struct_mutex);
3829 ret = i915_gem_object_wait(obj,
3830 I915_WAIT_INTERRUPTIBLE |
3832 (write ? I915_WAIT_ALL : 0),
3833 MAX_SCHEDULE_TIMEOUT,
3838 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
3840 /* Flush the CPU cache if it's still invalid. */
3841 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3842 i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
3843 obj->read_domains |= I915_GEM_DOMAIN_CPU;
3846 /* It should now be out of any other write domains, and we can update
3847 * the domain values for our changes.
3849 GEM_BUG_ON(obj->write_domain & ~I915_GEM_DOMAIN_CPU);
3851 /* If we're writing through the CPU, then the GPU read domains will
3852 * need to be invalidated at next use.
3855 __start_cpu_write(obj);
3860 /* Throttle our rendering by waiting until the ring has completed our requests
3861 * emitted over 20 msec ago.
3863 * Note that if we were to use the current jiffies each time around the loop,
3864 * we wouldn't escape the function with any frames outstanding if the time to
3865 * render a frame was over 20ms.
3867 * This should get us reasonable parallelism between CPU and GPU but also
3868 * relatively low latency when blocking on a particular request to finish.
3871 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3873 struct drm_i915_private *dev_priv = to_i915(dev);
3874 struct drm_i915_file_private *file_priv = file->driver_priv;
3875 unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
3876 struct i915_request *request, *target = NULL;
3879 /* ABI: return -EIO if already wedged */
3880 if (i915_terminally_wedged(&dev_priv->gpu_error))
3883 spin_lock(&file_priv->mm.lock);
3884 list_for_each_entry(request, &file_priv->mm.request_list, client_link) {
3885 if (time_after_eq(request->emitted_jiffies, recent_enough))
3889 list_del(&target->client_link);
3890 target->file_priv = NULL;
3896 i915_request_get(target);
3897 spin_unlock(&file_priv->mm.lock);
3902 ret = i915_request_wait(target,
3903 I915_WAIT_INTERRUPTIBLE,
3904 MAX_SCHEDULE_TIMEOUT);
3905 i915_request_put(target);
3907 return ret < 0 ? ret : 0;
3911 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
3912 const struct i915_ggtt_view *view,
3917 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3918 struct i915_address_space *vm = &dev_priv->ggtt.vm;
3919 struct i915_vma *vma;
3922 lockdep_assert_held(&obj->base.dev->struct_mutex);
3924 if (flags & PIN_MAPPABLE &&
3925 (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
3926 /* If the required space is larger than the available
3927 * aperture, we will not be able to find a slot for the
3928 * object and unbinding the object now will be in
3929 * vain. Worse, doing so may cause us to ping-pong
3930 * the object in and out of the Global GTT and
3931 * waste a lot of cycles under the mutex.
3933 if (obj->base.size > dev_priv->ggtt.mappable_end)
3934 return ERR_PTR(-E2BIG);
3936 /* If NONBLOCK is set the caller is optimistically
3937 * trying to cache the full object within the mappable
3938 * aperture, and *must* have a fallback in place for
3939 * situations where we cannot bind the object. We
3940 * can be a little more lax here and use the fallback
3941 * more often to avoid costly migrations of ourselves
3942 * and other objects within the aperture.
3944 * Half-the-aperture is used as a simple heuristic.
3945 * More interesting would be to search for a free
3946 * block prior to making the commitment to unbind.
3947 * That caters for the self-harm case, and with a
3948 * little more heuristics (e.g. NOFAULT, NOEVICT)
3949 * we could try to minimise harm to others.
3951 if (flags & PIN_NONBLOCK &&
3952 obj->base.size > dev_priv->ggtt.mappable_end / 2)
3953 return ERR_PTR(-ENOSPC);
3956 vma = i915_vma_instance(obj, vm, view);
3957 if (unlikely(IS_ERR(vma)))
3960 if (i915_vma_misplaced(vma, size, alignment, flags)) {
3961 if (flags & PIN_NONBLOCK) {
3962 if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
3963 return ERR_PTR(-ENOSPC);
3965 if (flags & PIN_MAPPABLE &&
3966 vma->fence_size > dev_priv->ggtt.mappable_end / 2)
3967 return ERR_PTR(-ENOSPC);
3970 WARN(i915_vma_is_pinned(vma),
3971 "bo is already pinned in ggtt with incorrect alignment:"
3972 " offset=%08x, req.alignment=%llx,"
3973 " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
3974 i915_ggtt_offset(vma), alignment,
3975 !!(flags & PIN_MAPPABLE),
3976 i915_vma_is_map_and_fenceable(vma));
3977 ret = i915_vma_unbind(vma);
3979 return ERR_PTR(ret);
3982 ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
3984 return ERR_PTR(ret);
3989 static __always_inline unsigned int __busy_read_flag(unsigned int id)
3991 /* Note that we could alias engines in the execbuf API, but
3992 * that would be very unwise as it prevents userspace from
3993 * fine control over engine selection. Ahem.
3995 * This should be something like EXEC_MAX_ENGINE instead of
3998 BUILD_BUG_ON(I915_NUM_ENGINES > 16);
3999 return 0x10000 << id;
4002 static __always_inline unsigned int __busy_write_id(unsigned int id)
4004 /* The uABI guarantees an active writer is also amongst the read
4005 * engines. This would be true if we accessed the activity tracking
4006 * under the lock, but as we perform the lookup of the object and
4007 * its activity locklessly we can not guarantee that the last_write
4008 * being active implies that we have set the same engine flag from
4009 * last_read - hence we always set both read and write busy for
4012 return id | __busy_read_flag(id);
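/*
 * Worked example (illustrative): for an engine with uabi_id == 2 the read
 * flag is 0x10000 << 2 == 0x40000, i.e. bit (16 + id) in the upper half of
 * the busy-ioctl result, while a writer on that engine is reported as
 * 2 | 0x40000 == 0x40002: the low 16 bits name the single write engine and
 * its read bit is set as well, as the comment above requires.
 */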
4015 static __always_inline unsigned int
4016 __busy_set_if_active(const struct dma_fence *fence,
4017 unsigned int (*flag)(unsigned int id))
4019 struct i915_request *rq;
4021 /* We have to check the current hw status of the fence as the uABI
4022 * guarantees forward progress. We could rely on the idle worker
4023 * to eventually flush us, but to minimise latency just ask the
4026 * Note we only report on the status of native fences.
4028 if (!dma_fence_is_i915(fence))
4031 /* opencode to_request() in order to avoid const warnings */
4032 rq = container_of(fence, struct i915_request, fence);
4033 if (i915_request_completed(rq))
4036 return flag(rq->engine->uabi_id);
4039 static __always_inline unsigned int
4040 busy_check_reader(const struct dma_fence *fence)
4042 return __busy_set_if_active(fence, __busy_read_flag);
4045 static __always_inline unsigned int
4046 busy_check_writer(const struct dma_fence *fence)
4051 return __busy_set_if_active(fence, __busy_write_id);
4055 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4056 struct drm_file *file)
4058 struct drm_i915_gem_busy *args = data;
4059 struct drm_i915_gem_object *obj;
4060 struct reservation_object_list *list;
4066 obj = i915_gem_object_lookup_rcu(file, args->handle);
4070 /* A discrepancy here is that we do not report the status of
4071 * non-i915 fences, i.e. even though we may report the object as idle,
4072 * a call to set-domain may still stall waiting for foreign rendering.
4073 * This also means that wait-ioctl may report an object as busy,
4074 * where busy-ioctl considers it idle.
4076 * We trade the ability to warn of foreign fences to report on which
4077 * i915 engines are active for the object.
4079 * Alternatively, we can trade that extra information on read/write
4082 * !reservation_object_test_signaled_rcu(obj->resv, true);
4083 * to report the overall busyness. This is what the wait-ioctl does.
4087 seq = raw_read_seqcount(&obj->resv->seq);
4089 /* Translate the exclusive fence to the READ *and* WRITE engine */
4090 args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl));
4092 /* Translate shared fences to READ set of engines */
4093 list = rcu_dereference(obj->resv->fence);
4095 unsigned int shared_count = list->shared_count, i;
4097 for (i = 0; i < shared_count; ++i) {
4098 struct dma_fence *fence =
4099 rcu_dereference(list->shared[i]);
4101 args->busy |= busy_check_reader(fence);
4105 if (args->busy && read_seqcount_retry(&obj->resv->seq, seq))
4115 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4116 struct drm_file *file_priv)
4118 return i915_gem_ring_throttle(dev, file_priv);
4122 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4123 struct drm_file *file_priv)
4125 struct drm_i915_private *dev_priv = to_i915(dev);
4126 struct drm_i915_gem_madvise *args = data;
4127 struct drm_i915_gem_object *obj;
4130 switch (args->madv) {
4131 case I915_MADV_DONTNEED:
4132 case I915_MADV_WILLNEED:
4138 obj = i915_gem_object_lookup(file_priv, args->handle);
4142 err = mutex_lock_interruptible(&obj->mm.lock);
4146 if (i915_gem_object_has_pages(obj) &&
4147 i915_gem_object_is_tiled(obj) &&
4148 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
4149 if (obj->mm.madv == I915_MADV_WILLNEED) {
4150 GEM_BUG_ON(!obj->mm.quirked);
4151 __i915_gem_object_unpin_pages(obj);
4152 obj->mm.quirked = false;
4154 if (args->madv == I915_MADV_WILLNEED) {
4155 GEM_BUG_ON(obj->mm.quirked);
4156 __i915_gem_object_pin_pages(obj);
4157 obj->mm.quirked = true;
4161 if (obj->mm.madv != __I915_MADV_PURGED)
4162 obj->mm.madv = args->madv;
4164 /* if the object is no longer attached, discard its backing storage */
4165 if (obj->mm.madv == I915_MADV_DONTNEED &&
4166 !i915_gem_object_has_pages(obj))
4167 i915_gem_object_truncate(obj);
4169 args->retained = obj->mm.madv != __I915_MADV_PURGED;
4170 mutex_unlock(&obj->mm.lock);
4173 i915_gem_object_put(obj);
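/*
 * Hedged userspace sketch (not driver code; fd and handle are
 * hypothetical): marking a cached buffer purgeable and later checking
 * whether its contents survived, per the retained flag set above:
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = handle,
 *		.madv = I915_MADV_DONTNEED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	...
 *	madv.madv = I915_MADV_WILLNEED;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	if (!madv.retained)
 *		the backing store was purged and must be repopulated
 */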
4178 frontbuffer_retire(struct i915_active_request *active,
4179 struct i915_request *request)
4181 struct drm_i915_gem_object *obj =
4182 container_of(active, typeof(*obj), frontbuffer_write);
4184 intel_fb_obj_flush(obj, ORIGIN_CS);
4187 void i915_gem_object_init(struct drm_i915_gem_object *obj,
4188 const struct drm_i915_gem_object_ops *ops)
4190 mutex_init(&obj->mm.lock);
4192 spin_lock_init(&obj->vma.lock);
4193 INIT_LIST_HEAD(&obj->vma.list);
4195 INIT_LIST_HEAD(&obj->lut_list);
4196 INIT_LIST_HEAD(&obj->batch_pool_link);
4198 init_rcu_head(&obj->rcu);
4202 reservation_object_init(&obj->__builtin_resv);
4203 obj->resv = &obj->__builtin_resv;
4205 obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
4206 i915_active_request_init(&obj->frontbuffer_write,
4207 NULL, frontbuffer_retire);
4209 obj->mm.madv = I915_MADV_WILLNEED;
4210 INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
4211 mutex_init(&obj->mm.get_page.lock);
4213 i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
4216 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4217 .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
4218 I915_GEM_OBJECT_IS_SHRINKABLE,
4220 .get_pages = i915_gem_object_get_pages_gtt,
4221 .put_pages = i915_gem_object_put_pages_gtt,
4223 .pwrite = i915_gem_object_pwrite_gtt,
4226 static int i915_gem_object_create_shmem(struct drm_device *dev,
4227 struct drm_gem_object *obj,
4230 struct drm_i915_private *i915 = to_i915(dev);
4231 unsigned long flags = VM_NORESERVE;
4234 drm_gem_private_object_init(dev, obj, size);
4237 filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
4240 filp = shmem_file_setup("i915", size, flags);
4243 return PTR_ERR(filp);
4250 struct drm_i915_gem_object *
4251 i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
4253 struct drm_i915_gem_object *obj;
4254 struct address_space *mapping;
4255 unsigned int cache_level;
4259 /* There is a prevalence of the assumption that we fit the object's
4260 * page count inside a 32bit _signed_ variable. Let's document this and
4261 * catch if we ever need to fix it. In the meantime, if you do spot
4262 * such a local variable, please consider fixing!
4264 if (size >> PAGE_SHIFT > INT_MAX)
4265 return ERR_PTR(-E2BIG);
4267 if (overflows_type(size, obj->base.size))
4268 return ERR_PTR(-E2BIG);
4270 obj = i915_gem_object_alloc(dev_priv);
4272 return ERR_PTR(-ENOMEM);
4274 ret = i915_gem_object_create_shmem(&dev_priv->drm, &obj->base, size);
4278 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4279 if (IS_I965GM(dev_priv) || IS_I965G(dev_priv)) {
4280 /* 965gm cannot relocate objects above 4GiB. */
4281 mask &= ~__GFP_HIGHMEM;
4282 mask |= __GFP_DMA32;
4285 mapping = obj->base.filp->f_mapping;
4286 mapping_set_gfp_mask(mapping, mask);
4287 GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
4289 i915_gem_object_init(obj, &i915_gem_object_ops);
4291 obj->write_domain = I915_GEM_DOMAIN_CPU;
4292 obj->read_domains = I915_GEM_DOMAIN_CPU;
4294 if (HAS_LLC(dev_priv))
4295 /* On some devices, we can have the GPU use the LLC (the CPU
4296 * cache) for about a 10% performance improvement
4297 * compared to uncached. Graphics requests other than
4298 * display scanout are coherent with the CPU in
4299 * accessing this cache. This means in this mode we
4300 * don't need to clflush on the CPU side, and on the
4301 * GPU side we only need to flush internal caches to
4302 * get data visible to the CPU.
4304 * However, we maintain the display planes as UC, and so
4305 * need to rebind when first used as such.
4307 cache_level = I915_CACHE_LLC;
4309 cache_level = I915_CACHE_NONE;
4311 i915_gem_object_set_cache_coherency(obj, cache_level);
4313 trace_i915_gem_object_create(obj);
4318 i915_gem_object_free(obj);
4319 return ERR_PTR(ret);
4322 static bool discard_backing_storage(struct drm_i915_gem_object *obj)
4324 /* If we are the last user of the backing storage (be it shmemfs
4325 * pages or stolen etc), we know that the pages are going to be
4326 * immediately released. In this case, we can then skip copying
4327 * back the contents from the GPU.
4330 if (obj->mm.madv != I915_MADV_WILLNEED)
4333 if (obj->base.filp == NULL)
4336 /* At first glance, this looks racy, but then again so would be
4337 * userspace racing mmap against close. However, the first external
4338 * reference to the filp can only be obtained through the
4339 * i915_gem_mmap_ioctl() which safeguards us against the user
4340 * acquiring such a reference whilst we are in the middle of
4341 * freeing the object.
4343 return atomic_long_read(&obj->base.filp->f_count) == 1;
4346 static void __i915_gem_free_objects(struct drm_i915_private *i915,
4347 struct llist_node *freed)
4349 struct drm_i915_gem_object *obj, *on;
4350 intel_wakeref_t wakeref;
4352 wakeref = intel_runtime_pm_get(i915);
4353 llist_for_each_entry_safe(obj, on, freed, freed) {
4354 struct i915_vma *vma, *vn;
4356 trace_i915_gem_object_destroy(obj);
4358 mutex_lock(&i915->drm.struct_mutex);
4360 GEM_BUG_ON(i915_gem_object_is_active(obj));
4361 list_for_each_entry_safe(vma, vn, &obj->vma.list, obj_link) {
4362 GEM_BUG_ON(i915_vma_is_active(vma));
4363 vma->flags &= ~I915_VMA_PIN_MASK;
4364 i915_vma_destroy(vma);
4366 GEM_BUG_ON(!list_empty(&obj->vma.list));
4367 GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma.tree));
4369 /* This serializes freeing with the shrinker. Since the free
4370 * is delayed, first by RCU then by the workqueue, we want the
4371 * shrinker to be able to free pages of unreferenced objects,
4372 * or else we may oom whilst there are plenty of deferred
4375 if (i915_gem_object_has_pages(obj)) {
4376 spin_lock(&i915->mm.obj_lock);
4377 list_del_init(&obj->mm.link);
4378 spin_unlock(&i915->mm.obj_lock);
4381 mutex_unlock(&i915->drm.struct_mutex);
4383 GEM_BUG_ON(obj->bind_count);
4384 GEM_BUG_ON(obj->userfault_count);
4385 GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
4386 GEM_BUG_ON(!list_empty(&obj->lut_list));
4388 if (obj->ops->release)
4389 obj->ops->release(obj);
4391 if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
4392 atomic_set(&obj->mm.pages_pin_count, 0);
4393 __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
4394 GEM_BUG_ON(i915_gem_object_has_pages(obj));
4396 if (obj->base.import_attach)
4397 drm_prime_gem_destroy(&obj->base, NULL);
4399 reservation_object_fini(&obj->__builtin_resv);
4400 drm_gem_object_release(&obj->base);
4401 i915_gem_info_remove_obj(i915, obj->base.size);
4404 i915_gem_object_free(obj);
4406 GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
4407 atomic_dec(&i915->mm.free_count);
4412 intel_runtime_pm_put(i915, wakeref);
4415 static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
4417 struct llist_node *freed;
4419 /* Free the oldest, most stale object to keep the free_list short */
4421 if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */
4422 /* Only one consumer of llist_del_first() allowed */
4423 spin_lock(&i915->mm.free_lock);
4424 freed = llist_del_first(&i915->mm.free_list);
4425 spin_unlock(&i915->mm.free_lock);
4427 if (unlikely(freed)) {
4429 __i915_gem_free_objects(i915, freed);
4433 static void __i915_gem_free_work(struct work_struct *work)
4435 struct drm_i915_private *i915 =
4436 container_of(work, struct drm_i915_private, mm.free_work);
4437 struct llist_node *freed;
4440 * All file-owned VMA should have been released by this point through
4441 * i915_gem_close_object(), or earlier by i915_gem_context_close().
4442 * However, the object may also be bound into the global GTT (e.g.
4443 * older GPUs without per-process support, or for direct access through
4444 * the GTT either for the user or for scanout). Those VMA still need to
4448 spin_lock(&i915->mm.free_lock);
4449 while ((freed = llist_del_all(&i915->mm.free_list))) {
4450 spin_unlock(&i915->mm.free_lock);
4452 __i915_gem_free_objects(i915, freed);
4456 spin_lock(&i915->mm.free_lock);
4458 spin_unlock(&i915->mm.free_lock);
4461 static void __i915_gem_free_object_rcu(struct rcu_head *head)
4463 struct drm_i915_gem_object *obj =
4464 container_of(head, typeof(*obj), rcu);
4465 struct drm_i915_private *i915 = to_i915(obj->base.dev);
4468 * We reuse obj->rcu for the freed list, so we had better not treat
4469 * it like a rcu_head from this point forwards. And we expect all
4470 * objects to be freed via this path.
4472 destroy_rcu_head(&obj->rcu);
4475 * Since we require blocking on struct_mutex to unbind the freed
4476 * object from the GPU before releasing resources back to the
4477 * system, we can not do that directly from the RCU callback (which may
4478 * be a softirq context), but must instead then defer that work onto a
4479 * kthread. We use the RCU callback rather than move the freed object
4480 * directly onto the work queue so that we can mix between using the
4481 * worker and performing frees directly from subsequent allocations for
4482 * crude but effective memory throttling.
4484 if (llist_add(&obj->freed, &i915->mm.free_list))
4485 queue_work(i915->wq, &i915->mm.free_work);
4488 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4490 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4492 if (obj->mm.quirked)
4493 __i915_gem_object_unpin_pages(obj);
4495 if (discard_backing_storage(obj))
4496 obj->mm.madv = I915_MADV_DONTNEED;
4499 * Before we free the object, make sure any pure RCU-only
4500 * read-side critical sections are complete, e.g.
4501 * i915_gem_busy_ioctl(). For the corresponding synchronized
4502 * lookup see i915_gem_object_lookup_rcu().
4504 atomic_inc(&to_i915(obj->base.dev)->mm.free_count);
4505 call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
4508 void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
4510 lockdep_assert_held(&obj->base.dev->struct_mutex);
4512 if (!i915_gem_object_has_active_reference(obj) &&
4513 i915_gem_object_is_active(obj))
4514 i915_gem_object_set_active_reference(obj);
4516 i915_gem_object_put(obj);
4519 void i915_gem_sanitize(struct drm_i915_private *i915)
4521 intel_wakeref_t wakeref;
4525 wakeref = intel_runtime_pm_get(i915);
4526 intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
4529 * As we have just resumed the machine and woken the device up from
4530 * deep PCI sleep (presumably D3_cold), assume the HW has been reset
4531 * back to defaults, recovering from whatever wedged state we left it
4532 * in and so worth trying to use the device once more.
4534 if (i915_terminally_wedged(&i915->gpu_error))
4535 i915_gem_unset_wedged(i915);
4538 * If we inherit context state from the BIOS or earlier occupants
4539 * of the GPU, the GPU may be in an inconsistent state when we
4540 * try to take over. The only way to remove the earlier state
4541 * is by resetting. However, resetting on earlier gen is tricky as
4542 * it may impact the display and we are uncertain about the stability
4543 * of the reset, so this could be applied to even earlier gen.
4545 intel_engines_sanitize(i915, false);
4547 intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
4548 intel_runtime_pm_put(i915, wakeref);
4550 mutex_lock(&i915->drm.struct_mutex);
4551 i915_gem_contexts_lost(i915);
4552 mutex_unlock(&i915->drm.struct_mutex);
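/*
 * i915_gem_suspend() quiesces the GPU for system suspend/hibernation:
 * switch away from the user contexts onto the kernel context, wait for
 * the GPU to idle, and flush the retire/idle workers so that no GEM
 * activity remains once we power down.
 */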
4555 int i915_gem_suspend(struct drm_i915_private *i915)
4557 intel_wakeref_t wakeref;
4562 wakeref = intel_runtime_pm_get(i915);
4563 intel_suspend_gt_powersave(i915);
4565 flush_workqueue(i915->wq);
4567 mutex_lock(&i915->drm.struct_mutex);
4570 * We have to flush all the executing contexts to main memory so
 * that they can be saved in the hibernation image. To ensure the last
4572 * context image is coherent, we have to switch away from it. That
4573 * leaves the i915->kernel_context still active when
4574 * we actually suspend, and its image in memory may not match the GPU
4575 * state. Fortunately, the kernel_context is disposable and we do
4576 * not rely on its state.
4578 if (!i915_terminally_wedged(&i915->gpu_error)) {
4579 ret = i915_gem_switch_to_kernel_context(i915);
4583 ret = i915_gem_wait_for_idle(i915,
4584 I915_WAIT_INTERRUPTIBLE |
4586 I915_WAIT_FOR_IDLE_BOOST,
4587 MAX_SCHEDULE_TIMEOUT);
4588 if (ret && ret != -EIO)
4591 assert_kernel_context_is_current(i915);
4593 i915_retire_requests(i915); /* ensure we flush after wedging */
4595 mutex_unlock(&i915->drm.struct_mutex);
4596 i915_reset_flush(i915);
4598 drain_delayed_work(&i915->gt.retire_work);
 * As the idle_work rearms itself if it detects a race, play safe and
 * repeat the flush until it is definitely idle.
4604 drain_delayed_work(&i915->gt.idle_work);
4606 intel_uc_suspend(i915);
4609 * Assert that we successfully flushed all the work and
4610 * reset the GPU back to its idle, low power state.
4612 WARN_ON(i915->gt.awake);
4613 if (WARN_ON(!intel_engines_are_idle(i915)))
4614 i915_gem_set_wedged(i915); /* no hope, discard everything */
4616 intel_runtime_pm_put(i915, wakeref);
4620 mutex_unlock(&i915->drm.struct_mutex);
4621 intel_runtime_pm_put(i915, wakeref);
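/*
 * i915_gem_suspend_late() runs once the device is about to be powered
 * down: flush any remaining CPU dirt out to memory for the objects we
 * keep across suspend, then sanitize (reset) the GPU so that it is left
 * in a known default configuration for the next occupant.
 */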
4625 void i915_gem_suspend_late(struct drm_i915_private *i915)
4627 struct drm_i915_gem_object *obj;
4628 struct list_head *phases[] = {
4629 &i915->mm.unbound_list,
4630 &i915->mm.bound_list,
 * Neither the BIOS, ourselves nor any other kernel
4636 * expects the system to be in execlists mode on startup,
4637 * so we need to reset the GPU back to legacy mode. And the only
4638 * known way to disable logical contexts is through a GPU reset.
4640 * So in order to leave the system in a known default configuration,
4641 * always reset the GPU upon unload and suspend. Afterwards we then
4642 * clean up the GEM state tracking, flushing off the requests and
4643 * leaving the system in a known idle state.
 * Note that it is of the utmost importance that the GPU is idle and
4646 * all stray writes are flushed *before* we dismantle the backing
4647 * storage for the pinned objects.
4649 * However, since we are uncertain that resetting the GPU on older
4650 * machines is a good idea, we don't - just in case it leaves the
4651 * machine in an unusable condition.
4654 mutex_lock(&i915->drm.struct_mutex);
4655 for (phase = phases; *phase; phase++) {
4656 list_for_each_entry(obj, *phase, mm.link)
4657 WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
4659 mutex_unlock(&i915->drm.struct_mutex);
4661 intel_uc_sanitize(i915);
4662 i915_gem_sanitize(i915);
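/*
 * i915_gem_resume() re-establishes GEM state after resume: restore the
 * GTT mappings and fence registers, re-initialise the hardware and
 * firmware, and reload the kernel context so the GPU can power down
 * again until it is actually needed.
 */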
4665 void i915_gem_resume(struct drm_i915_private *i915)
4669 WARN_ON(i915->gt.awake);
4671 mutex_lock(&i915->drm.struct_mutex);
4672 intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
4674 i915_gem_restore_gtt_mappings(i915);
4675 i915_gem_restore_fences(i915);
4678 * As we didn't flush the kernel context before suspend, we cannot
4679 * guarantee that the context image is complete. So let's just reset
4680 * it and start again.
4682 i915->gt.resume(i915);
4684 if (i915_gem_init_hw(i915))
4687 intel_uc_resume(i915);
4689 /* Always reload a context for powersaving. */
4690 if (i915_gem_switch_to_kernel_context(i915))
4694 intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
4695 mutex_unlock(&i915->drm.struct_mutex);
4699 if (!i915_terminally_wedged(&i915->gpu_error)) {
4700 DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
4701 i915_gem_set_wedged(i915);
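/*
 * i915_gem_init_swizzling() programs the hardware to match the bit-6
 * swizzling of object pages that was detected for this memory
 * configuration, so that tiled surfaces are addressed consistently by
 * the display and render paths.
 */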
4706 void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
4708 if (INTEL_GEN(dev_priv) < 5 ||
4709 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4712 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4713 DISP_TILE_SURFACE_SWIZZLING);
4715 if (IS_GEN(dev_priv, 5))
4718 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4719 if (IS_GEN(dev_priv, 6))
4720 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4721 else if (IS_GEN(dev_priv, 7))
4722 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4723 else if (IS_GEN(dev_priv, 8))
4724 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4729 static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
4731 I915_WRITE(RING_CTL(base), 0);
4732 I915_WRITE(RING_HEAD(base), 0);
4733 I915_WRITE(RING_TAIL(base), 0);
4734 I915_WRITE(RING_START(base), 0);
4737 static void init_unused_rings(struct drm_i915_private *dev_priv)
4739 if (IS_I830(dev_priv)) {
4740 init_unused_ring(dev_priv, PRB1_BASE);
4741 init_unused_ring(dev_priv, SRB0_BASE);
4742 init_unused_ring(dev_priv, SRB1_BASE);
4743 init_unused_ring(dev_priv, SRB2_BASE);
4744 init_unused_ring(dev_priv, SRB3_BASE);
4745 } else if (IS_GEN(dev_priv, 2)) {
4746 init_unused_ring(dev_priv, SRB0_BASE);
4747 init_unused_ring(dev_priv, SRB1_BASE);
4748 } else if (IS_GEN(dev_priv, 3)) {
4749 init_unused_ring(dev_priv, PRB1_BASE);
4750 init_unused_ring(dev_priv, PRB2_BASE);
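/*
 * __i915_gem_restart_engines() re-runs each engine's init_hw() hook; it is
 * called from i915_gem_init_hw() once the rest of the hardware state has
 * been re-established, so that pending requests can be replayed.
 */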
4754 static int __i915_gem_restart_engines(void *data)
4756 struct drm_i915_private *i915 = data;
4757 struct intel_engine_cs *engine;
4758 enum intel_engine_id id;
4761 for_each_engine(engine, i915, id) {
4762 err = engine->init_hw(engine);
4764 DRM_ERROR("Failed to restart %s (%d)\n",
4773 int i915_gem_init_hw(struct drm_i915_private *dev_priv)
4777 dev_priv->gt.last_init_time = ktime_get();
4779 /* Double layer security blanket, see i915_gem_init() */
4780 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4782 if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
4783 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4785 if (IS_HASWELL(dev_priv))
4786 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
4787 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4789 /* Apply the GT workarounds... */
4790 intel_gt_apply_workarounds(dev_priv);
4791 /* ...and determine whether they are sticking. */
4792 intel_gt_verify_workarounds(dev_priv, "init");
4794 i915_gem_init_swizzling(dev_priv);
 * At least 830 can leave some of the unused rings
 * "active" (i.e. head != tail) after resume, which
 * will prevent C3 entry. Make sure all unused rings
 * are totally idle.
4802 init_unused_rings(dev_priv);
4804 BUG_ON(!dev_priv->kernel_context);
4805 if (i915_terminally_wedged(&dev_priv->gpu_error)) {
4810 ret = i915_ppgtt_init_hw(dev_priv);
4812 DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
4816 ret = intel_wopcm_init_hw(&dev_priv->wopcm);
4818 DRM_ERROR("Enabling WOPCM failed (%d)\n", ret);
4822 /* We can't enable contexts until all firmware is loaded */
4823 ret = intel_uc_init_hw(dev_priv);
4825 DRM_ERROR("Enabling uc failed (%d)\n", ret);
4829 intel_mocs_init_l3cc_table(dev_priv);
/* Only when the HW is re-initialised can we replay the requests */
4832 ret = __i915_gem_restart_engines(dev_priv);
4836 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4841 intel_uc_fini_hw(dev_priv);
4843 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
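/*
 * Record the "default" register state of each engine by letting a pristine
 * context run (see the comment below); the saved image is later used to
 * prime every new context with the same starting HW state.
 */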
4848 static int __intel_engines_record_defaults(struct drm_i915_private *i915)
4850 struct i915_gem_context *ctx;
4851 struct intel_engine_cs *engine;
4852 enum intel_engine_id id;
 * As we reset the GPU during very early sanitisation, the current
 * register state on the GPU should reflect its default values.
4858 * We load a context onto the hw (with restore-inhibit), then switch
4859 * over to a second context to save that default register state. We
4860 * can then prime every new context with that state so they all start
4861 * from the same default HW values.
4864 ctx = i915_gem_context_create_kernel(i915, 0);
4866 return PTR_ERR(ctx);
4868 for_each_engine(engine, i915, id) {
4869 struct i915_request *rq;
4871 rq = i915_request_alloc(engine, ctx);
4878 if (engine->init_context)
4879 err = engine->init_context(rq);
4881 i915_request_add(rq);
4886 err = i915_gem_switch_to_kernel_context(i915);
4890 if (i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED, HZ / 5)) {
4891 i915_gem_set_wedged(i915);
4892 err = -EIO; /* Caller will declare us wedged */
4896 assert_kernel_context_is_current(i915);
4899 * Immediately park the GPU so that we enable powersaving and
4900 * treat it as idle. The next time we issue a request, we will
4901 * unpark and start using the engine->pinned_default_state, otherwise
4902 * it is in limbo and an early reset may fail.
4904 __i915_gem_park(i915);
4906 for_each_engine(engine, i915, id) {
4907 struct i915_vma *state;
4910 GEM_BUG_ON(to_intel_context(ctx, engine)->pin_count);
4912 state = to_intel_context(ctx, engine)->state;
 * As we will hold a reference to the logical state, it will
 * not be torn down with the context, and importantly the
 * object will hold onto its vma (making it possible for a
 * stray GTT write to corrupt our defaults). Unmap the vma
 * from the GTT to prevent such accidents and reclaim the
 * space.
4924 err = i915_vma_unbind(state);
4928 err = i915_gem_object_set_to_cpu_domain(state->obj, false);
4932 engine->default_state = i915_gem_object_get(state->obj);
4934 /* Check we can acquire the image of the context state */
4935 vaddr = i915_gem_object_pin_map(engine->default_state,
4937 if (IS_ERR(vaddr)) {
4938 err = PTR_ERR(vaddr);
4942 i915_gem_object_unpin_map(engine->default_state);
4945 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
4946 unsigned int found = intel_engines_has_context_isolation(i915);
4949 * Make sure that classes with multiple engine instances all
4950 * share the same basic configuration.
4952 for_each_engine(engine, i915, id) {
4953 unsigned int bit = BIT(engine->uabi_class);
4954 unsigned int expected = engine->default_state ? bit : 0;
4956 if ((found & bit) != expected) {
4957 DRM_ERROR("mismatching default context state for class %d on engine %s\n",
4958 engine->uabi_class, engine->name);
4964 i915_gem_context_set_closed(ctx);
4965 i915_gem_context_put(ctx);
 * If we have to abandon now, we expect the engines to be idle
 * and ready to be torn-down. First try to flush any remaining
 * request, ensure we are pointing at the kernel context and
 * then remove it.
4975 if (WARN_ON(i915_gem_switch_to_kernel_context(i915)))
4978 if (WARN_ON(i915_gem_wait_for_idle(i915,
4980 MAX_SCHEDULE_TIMEOUT)))
4983 i915_gem_contexts_lost(i915);
4988 i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
4990 struct drm_i915_gem_object *obj;
4991 struct i915_vma *vma;
4994 obj = i915_gem_object_create_stolen(i915, size);
4996 obj = i915_gem_object_create_internal(i915, size);
4998 DRM_ERROR("Failed to allocate scratch page\n");
4999 return PTR_ERR(obj);
5002 vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
5008 ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
5012 i915->gt.scratch = vma;
5016 i915_gem_object_put(obj);
5020 static void i915_gem_fini_scratch(struct drm_i915_private *i915)
5022 i915_vma_unpin_and_release(&i915->gt.scratch, 0);
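/*
 * i915_gem_init() performs the one-time GEM setup during driver load:
 * pick the submission backend, initialise the GGTT, contexts, engines and
 * firmware, bring up the hardware and record the default context state.
 * On failure it unwinds; for -EIO it wedges the GPU but keeps enough
 * state alive for KMS to continue working.
 */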
5025 int i915_gem_init(struct drm_i915_private *dev_priv)
5029 /* We need to fallback to 4K pages if host doesn't support huge gtt. */
5030 if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
5031 mkwrite_device_info(dev_priv)->page_sizes =
5032 I915_GTT_PAGE_SIZE_4K;
5034 dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
5036 if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
5037 dev_priv->gt.resume = intel_lr_context_resume;
5038 dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
5040 dev_priv->gt.resume = intel_legacy_submission_resume;
5041 dev_priv->gt.cleanup_engine = intel_engine_cleanup;
5044 i915_timelines_init(dev_priv);
5046 ret = i915_gem_init_userptr(dev_priv);
5050 ret = intel_uc_init_misc(dev_priv);
5054 ret = intel_wopcm_init(&dev_priv->wopcm);
5058 /* This is just a security blanket to placate dragons.
5059 * On some systems, we very sporadically observe that the first TLBs
5060 * used by the CS may be stale, despite us poking the TLB reset. If
5061 * we hold the forcewake during initialisation these problems
5062 * just magically go away.
5064 mutex_lock(&dev_priv->drm.struct_mutex);
5065 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5067 ret = i915_gem_init_ggtt(dev_priv);
5069 GEM_BUG_ON(ret == -EIO);
5073 ret = i915_gem_init_scratch(dev_priv,
5074 IS_GEN(dev_priv, 2) ? SZ_256K : PAGE_SIZE);
5076 GEM_BUG_ON(ret == -EIO);
5080 ret = i915_gem_contexts_init(dev_priv);
5082 GEM_BUG_ON(ret == -EIO);
5086 ret = intel_engines_init(dev_priv);
5088 GEM_BUG_ON(ret == -EIO);
5092 intel_init_gt_powersave(dev_priv);
5094 ret = intel_uc_init(dev_priv);
5098 ret = i915_gem_init_hw(dev_priv);
 * Despite its name, intel_init_clock_gating applies display clock
 * gating workarounds, GT mmio workarounds and the occasional
5105 * GT power context workaround. Worse, sometimes it includes a context
5106 * register workaround which we need to apply before we record the
5107 * default HW state for all contexts.
5109 * FIXME: break up the workarounds and apply them at the right time!
5111 intel_init_clock_gating(dev_priv);
5113 ret = __intel_engines_record_defaults(dev_priv);
5117 if (i915_inject_load_failure()) {
5122 if (i915_inject_load_failure()) {
5127 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5128 mutex_unlock(&dev_priv->drm.struct_mutex);
 * Unwinding is complicated by the fact that we want to handle -EIO to mean
 * disable GPU submission but keep KMS alive. We want to mark the
 * HW as irreversibly wedged, but keep enough state around that the
5136 * driver doesn't explode during runtime.
5139 mutex_unlock(&dev_priv->drm.struct_mutex);
5141 WARN_ON(i915_gem_suspend(dev_priv));
5142 i915_gem_suspend_late(dev_priv);
5144 i915_gem_drain_workqueue(dev_priv);
5146 mutex_lock(&dev_priv->drm.struct_mutex);
5147 intel_uc_fini_hw(dev_priv);
5149 intel_uc_fini(dev_priv);
5152 intel_cleanup_gt_powersave(dev_priv);
5153 i915_gem_cleanup_engines(dev_priv);
5157 i915_gem_contexts_fini(dev_priv);
5159 i915_gem_fini_scratch(dev_priv);
5162 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5163 mutex_unlock(&dev_priv->drm.struct_mutex);
5166 intel_uc_fini_misc(dev_priv);
5169 i915_gem_cleanup_userptr(dev_priv);
5170 i915_timelines_fini(dev_priv);
5174 mutex_lock(&dev_priv->drm.struct_mutex);
5177 * Allow engine initialisation to fail by marking the GPU as
 * wedged. But we only want to do this where the GPU is angry;
 * for all other failures, such as an allocation failure, bail.
5181 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
5182 i915_load_error(dev_priv,
5183 "Failed to initialize GPU, declaring it wedged!\n");
5184 i915_gem_set_wedged(dev_priv);
5187 /* Minimal basic recovery for KMS */
5188 ret = i915_ggtt_enable_hw(dev_priv);
5189 i915_gem_restore_gtt_mappings(dev_priv);
5190 i915_gem_restore_fences(dev_priv);
5191 intel_init_clock_gating(dev_priv);
5193 mutex_unlock(&dev_priv->drm.struct_mutex);
5196 i915_gem_drain_freed_objects(dev_priv);
5200 void i915_gem_fini(struct drm_i915_private *dev_priv)
5202 i915_gem_suspend_late(dev_priv);
5203 intel_disable_gt_powersave(dev_priv);
5205 /* Flush any outstanding unpin_work. */
5206 i915_gem_drain_workqueue(dev_priv);
5208 mutex_lock(&dev_priv->drm.struct_mutex);
5209 intel_uc_fini_hw(dev_priv);
5210 intel_uc_fini(dev_priv);
5211 i915_gem_cleanup_engines(dev_priv);
5212 i915_gem_contexts_fini(dev_priv);
5213 i915_gem_fini_scratch(dev_priv);
5214 mutex_unlock(&dev_priv->drm.struct_mutex);
5216 intel_wa_list_free(&dev_priv->gt_wa_list);
5218 intel_cleanup_gt_powersave(dev_priv);
5220 intel_uc_fini_misc(dev_priv);
5221 i915_gem_cleanup_userptr(dev_priv);
5222 i915_timelines_fini(dev_priv);
5224 i915_gem_drain_freed_objects(dev_priv);
5226 WARN_ON(!list_empty(&dev_priv->contexts.list));
5229 void i915_gem_init_mmio(struct drm_i915_private *i915)
5231 i915_gem_sanitize(i915);
5235 i915_gem_cleanup_engines(struct drm_i915_private *dev_priv)
5237 struct intel_engine_cs *engine;
5238 enum intel_engine_id id;
5240 for_each_engine(engine, dev_priv, id)
5241 dev_priv->gt.cleanup_engine(engine);
5245 i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
5249 if (INTEL_GEN(dev_priv) >= 7 && !IS_VALLEYVIEW(dev_priv) &&
5250 !IS_CHERRYVIEW(dev_priv))
5251 dev_priv->num_fence_regs = 32;
5252 else if (INTEL_GEN(dev_priv) >= 4 ||
5253 IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
5254 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
5255 dev_priv->num_fence_regs = 16;
5257 dev_priv->num_fence_regs = 8;
5259 if (intel_vgpu_active(dev_priv))
5260 dev_priv->num_fence_regs =
5261 I915_READ(vgtif_reg(avail_rs.fence_num));
5263 /* Initialize fence registers to zero */
5264 for (i = 0; i < dev_priv->num_fence_regs; i++) {
5265 struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
5267 fence->i915 = dev_priv;
5269 list_add_tail(&fence->link, &dev_priv->mm.fence_list);
5271 i915_gem_restore_fences(dev_priv);
5273 i915_gem_detect_bit_6_swizzle(dev_priv);
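/*
 * i915_gem_init__mm() sets up the GEM memory-management bookkeeping: the
 * stat/object/free locks, the bound/unbound/fence/userfault lists and the
 * deferred-free worker used by __i915_gem_free_work().
 */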
5276 static void i915_gem_init__mm(struct drm_i915_private *i915)
5278 spin_lock_init(&i915->mm.object_stat_lock);
5279 spin_lock_init(&i915->mm.obj_lock);
5280 spin_lock_init(&i915->mm.free_lock);
5282 init_llist_head(&i915->mm.free_list);
5284 INIT_LIST_HEAD(&i915->mm.unbound_list);
5285 INIT_LIST_HEAD(&i915->mm.bound_list);
5286 INIT_LIST_HEAD(&i915->mm.fence_list);
5287 INIT_LIST_HEAD(&i915->mm.userfault_list);
5289 INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
5292 int i915_gem_init_early(struct drm_i915_private *dev_priv)
5296 dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
5297 if (!dev_priv->objects)
5300 dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
5301 if (!dev_priv->vmas)
5304 dev_priv->luts = KMEM_CACHE(i915_lut_handle, 0);
5305 if (!dev_priv->luts)
5308 dev_priv->requests = KMEM_CACHE(i915_request,
5309 SLAB_HWCACHE_ALIGN |
5310 SLAB_RECLAIM_ACCOUNT |
5311 SLAB_TYPESAFE_BY_RCU);
5312 if (!dev_priv->requests)
5315 dev_priv->dependencies = KMEM_CACHE(i915_dependency,
5316 SLAB_HWCACHE_ALIGN |
5317 SLAB_RECLAIM_ACCOUNT);
5318 if (!dev_priv->dependencies)
5321 dev_priv->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
5322 if (!dev_priv->priorities)
5323 goto err_dependencies;
5325 INIT_LIST_HEAD(&dev_priv->gt.active_rings);
5326 INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
5328 i915_gem_init__mm(dev_priv);
5330 INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
5331 i915_gem_retire_work_handler);
5332 INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
5333 i915_gem_idle_work_handler);
5334 init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
5335 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
5336 mutex_init(&dev_priv->gpu_error.wedge_mutex);
5338 atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
5340 spin_lock_init(&dev_priv->fb_tracking.lock);
5342 err = i915_gemfs_init(dev_priv);
DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled (%d).\n", err);
5349 kmem_cache_destroy(dev_priv->dependencies);
5351 kmem_cache_destroy(dev_priv->requests);
5353 kmem_cache_destroy(dev_priv->luts);
5355 kmem_cache_destroy(dev_priv->vmas);
5357 kmem_cache_destroy(dev_priv->objects);
5362 void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
5364 i915_gem_drain_freed_objects(dev_priv);
5365 GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
5366 GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
5367 WARN_ON(dev_priv->mm.object_count);
5369 kmem_cache_destroy(dev_priv->priorities);
5370 kmem_cache_destroy(dev_priv->dependencies);
5371 kmem_cache_destroy(dev_priv->requests);
5372 kmem_cache_destroy(dev_priv->luts);
5373 kmem_cache_destroy(dev_priv->vmas);
5374 kmem_cache_destroy(dev_priv->objects);
5376 /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
5379 i915_gemfs_fini(dev_priv);
5382 int i915_gem_freeze(struct drm_i915_private *dev_priv)
5384 /* Discard all purgeable objects, let userspace recover those as
5385 * required after resuming.
5387 i915_gem_shrink_all(dev_priv);
5392 int i915_gem_freeze_late(struct drm_i915_private *i915)
5394 struct drm_i915_gem_object *obj;
5395 struct list_head *phases[] = {
5396 &i915->mm.unbound_list,
5397 &i915->mm.bound_list,
5402 * Called just before we write the hibernation image.
5404 * We need to update the domain tracking to reflect that the CPU
5405 * will be accessing all the pages to create and restore from the
 * hibernation, and so upon restoration those pages will be in the
 * CPU domain.
5409 * To make sure the hibernation image contains the latest state,
5410 * we update that state just before writing out the image.
5412 * To try and reduce the hibernation image, we manually shrink
5413 * the objects as well, see i915_gem_freeze()
5416 i915_gem_shrink(i915, -1UL, NULL, I915_SHRINK_UNBOUND);
5417 i915_gem_drain_freed_objects(i915);
5419 mutex_lock(&i915->drm.struct_mutex);
5420 for (phase = phases; *phase; phase++) {
5421 list_for_each_entry(obj, *phase, mm.link)
5422 WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true));
5424 mutex_unlock(&i915->drm.struct_mutex);
5429 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
5431 struct drm_i915_file_private *file_priv = file->driver_priv;
5432 struct i915_request *request;
/* Clean up our request list when the client is going away, so that
 * later retire_requests won't dereference our soon-to-be-gone
 * file_priv.
 */
5438 spin_lock(&file_priv->mm.lock);
5439 list_for_each_entry(request, &file_priv->mm.request_list, client_link)
5440 request->file_priv = NULL;
5441 spin_unlock(&file_priv->mm.lock);
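/*
 * i915_gem_open() is called for each new drm_file: allocate the per-client
 * state (request list, default BSD engine selection, hang timestamp) and
 * open the client's GEM context.
 */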
5444 int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
5446 struct drm_i915_file_private *file_priv;
5451 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
5455 file->driver_priv = file_priv;
5456 file_priv->dev_priv = i915;
5457 file_priv->file = file;
5459 spin_lock_init(&file_priv->mm.lock);
5460 INIT_LIST_HEAD(&file_priv->mm.request_list);
5462 file_priv->bsd_engine = -1;
5463 file_priv->hang_timestamp = jiffies;
5465 ret = i915_gem_context_open(i915, file);
5473 * i915_gem_track_fb - update frontbuffer tracking
5474 * @old: current GEM buffer for the frontbuffer slots
5475 * @new: new GEM buffer for the frontbuffer slots
5476 * @frontbuffer_bits: bitmask of frontbuffer slots
5478 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
5479 * from @old and setting them in @new. Both @old and @new can be NULL.
5481 void i915_gem_track_fb(struct drm_i915_gem_object *old,
5482 struct drm_i915_gem_object *new,
5483 unsigned frontbuffer_bits)
/* Control of individual bits within the mask is guarded by
 * the owning plane->mutex, i.e. we can never see concurrent
 * manipulation of individual bits. But since the bitfield as a whole
 * is updated using RMW, we need to use atomics in order to update
 * the bits.
 */
5491 BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
5492 BITS_PER_TYPE(atomic_t));
5495 WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
5496 atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
5500 WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
5501 atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
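/*
 * A minimal usage sketch (illustrative only, not taken from this file): a
 * plane update that swaps the scanout object would hand over its
 * frontbuffer bits with something like
 *
 *	i915_gem_track_fb(old_obj, new_obj, plane->frontbuffer_bit);
 *
 * where old_obj/new_obj are the previous and next framebuffer objects.
 */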
5505 /* Allocate a new GEM object and fill it with the supplied data */
5506 struct drm_i915_gem_object *
5507 i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
5508 const void *data, size_t size)
5510 struct drm_i915_gem_object *obj;
5515 obj = i915_gem_object_create(dev_priv, round_up(size, PAGE_SIZE));
5519 GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
5521 file = obj->base.filp;
5524 unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
5526 void *pgdata, *vaddr;
5528 err = pagecache_write_begin(file, file->f_mapping,
5535 memcpy(vaddr, data, len);
5538 err = pagecache_write_end(file, file->f_mapping,
5552 i915_gem_object_put(obj);
5553 return ERR_PTR(err);
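/*
 * i915_gem_object_get_sg() returns the scatterlist entry that contains the
 * n'th page of the object, along with that page's offset inside the entry.
 * Lookups are cached in a per-object radix tree (see below) so that both
 * sequential and repeated random access stay cheap. The object's pages must
 * be pinned by the caller for the duration; most callers use the wrappers
 * below, e.g. i915_gem_object_get_page() or i915_gem_object_get_dma_address().
 */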
5556 struct scatterlist *
5557 i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
5559 unsigned int *offset)
5561 struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
5562 struct scatterlist *sg;
5563 unsigned int idx, count;
5566 GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
5567 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
5569 /* As we iterate forward through the sg, we record each entry in a
5570 * radixtree for quick repeated (backwards) lookups. If we have seen
5571 * this index previously, we will have an entry for it.
5573 * Initial lookup is O(N), but this is amortized to O(1) for
5574 * sequential page access (where each new request is consecutive
5575 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
5576 * i.e. O(1) with a large constant!
5578 if (n < READ_ONCE(iter->sg_idx))
5581 mutex_lock(&iter->lock);
/* We prefer to reuse the last sg so that repeated lookups of this
 * (or the subsequent) sg are fast - comparing against the last
 * sg is faster than going through the radix tree.
 */
5590 count = __sg_page_count(sg);
5592 while (idx + count <= n) {
/* If we cannot allocate and insert this entry, or the
 * individual pages from this range, cancel updating the
 * sg_idx so that on this lookup we are forced to linearly
 * scan onwards, but on future lookups we will try the
 * insertion again (in which case we need to be careful of
 * the error return reporting that we have already inserted
 * this entry.)
 */
5605 ret = radix_tree_insert(&iter->radix, idx, sg);
5606 if (ret && ret != -EEXIST)
5609 entry = xa_mk_value(idx);
5610 for (i = 1; i < count; i++) {
5611 ret = radix_tree_insert(&iter->radix, idx + i, entry);
5612 if (ret && ret != -EEXIST)
5617 sg = ____sg_next(sg);
5618 count = __sg_page_count(sg);
5625 mutex_unlock(&iter->lock);
5627 if (unlikely(n < idx)) /* insertion completed by another thread */
/* In case we failed to insert the entry into the radix tree, we need
 * to look beyond the current sg.
 */
5633 while (idx + count <= n) {
5635 sg = ____sg_next(sg);
5636 count = __sg_page_count(sg);
5645 sg = radix_tree_lookup(&iter->radix, n);
/* If this index is in the middle of a multi-page sg entry,
 * the radix tree will contain a value entry that points
 * to the start of that range. We will return the pointer to
 * the base page and the offset of this page within the
 * sg entry's range.
 */
5655 if (unlikely(xa_is_value(sg))) {
5656 unsigned long base = xa_to_value(sg);
5658 sg = radix_tree_lookup(&iter->radix, base);
5670 i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
5672 struct scatterlist *sg;
5673 unsigned int offset;
5675 GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
5677 sg = i915_gem_object_get_sg(obj, n, &offset);
5678 return nth_page(sg_page(sg), offset);
5681 /* Like i915_gem_object_get_page(), but mark the returned page dirty */
5683 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
5688 page = i915_gem_object_get_page(obj, n);
5690 set_page_dirty(page);
5696 i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
5699 struct scatterlist *sg;
5700 unsigned int offset;
5702 sg = i915_gem_object_get_sg(obj, n, &offset);
5703 return sg_dma_address(sg) + (offset << PAGE_SHIFT);
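/*
 * i915_gem_object_attach_phys() switches an ordinary shmem-backed object
 * over to the physically contiguous backing store (i915_gem_phys_ops): the
 * existing pages are released and re-acquired through the phys ops, and
 * then kept permanently pinned until the object is released. Only unbound,
 * unpinned, unmapped, WILLNEED objects may be converted.
 */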
5706 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
5708 struct sg_table *pages;
5711 if (align > obj->base.size)
5714 if (obj->ops == &i915_gem_phys_ops)
5717 if (obj->ops != &i915_gem_object_ops)
5720 err = i915_gem_object_unbind(obj);
5724 mutex_lock(&obj->mm.lock);
5726 if (obj->mm.madv != I915_MADV_WILLNEED) {
5731 if (obj->mm.quirked) {
5736 if (obj->mm.mapping) {
5741 pages = __i915_gem_object_unset_pages(obj);
5743 obj->ops = &i915_gem_phys_ops;
5745 err = ____i915_gem_object_get_pages(obj);
5749 /* Perma-pin (until release) the physical set of pages */
5750 __i915_gem_object_pin_pages(obj);
5752 if (!IS_ERR_OR_NULL(pages))
5753 i915_gem_object_ops.put_pages(obj, pages);
5754 mutex_unlock(&obj->mm.lock);
5758 obj->ops = &i915_gem_object_ops;
5759 if (!IS_ERR_OR_NULL(pages)) {
5760 unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);
5762 __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
5765 mutex_unlock(&obj->mm.lock);
5769 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
5770 #include "selftests/scatterlist.c"
5771 #include "selftests/mock_gem_device.c"
5772 #include "selftests/huge_gem_object.c"
5773 #include "selftests/huge_pages.c"
5774 #include "selftests/i915_gem_object.c"
5775 #include "selftests/i915_gem_coherency.c"
5776 #include "selftests/i915_gem.c"