1 /*
2  * Copyright © 2008-2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27
28 #include <drm/drm_vma_manager.h>
29 #include <drm/i915_drm.h>
30 #include <linux/dma-fence-array.h>
31 #include <linux/kthread.h>
32 #include <linux/reservation.h>
33 #include <linux/shmem_fs.h>
34 #include <linux/slab.h>
35 #include <linux/stop_machine.h>
36 #include <linux/swap.h>
37 #include <linux/pci.h>
38 #include <linux/dma-buf.h>
39 #include <linux/mman.h>
40
41 #include "gem/i915_gem_clflush.h"
42 #include "gem/i915_gem_context.h"
43 #include "gem/i915_gem_ioctls.h"
44 #include "gem/i915_gem_pm.h"
45 #include "gem/i915_gemfs.h"
46 #include "gt/intel_engine_pm.h"
47 #include "gt/intel_gt_pm.h"
48 #include "gt/intel_mocs.h"
49 #include "gt/intel_reset.h"
50 #include "gt/intel_workarounds.h"
51
52 #include "i915_drv.h"
53 #include "i915_scatterlist.h"
54 #include "i915_trace.h"
55 #include "i915_vgpu.h"
56
57 #include "intel_display.h"
58 #include "intel_drv.h"
59 #include "intel_frontbuffer.h"
60 #include "intel_pm.h"
61
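/*
 * Reserve a node of the requested size in the CPU-mappable range of the
 * GGTT, preferring the lowest available address. Used by the pread/pwrite
 * slow paths below when an object cannot be pinned into the mappable
 * aperture directly.
 */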
62 static int
63 insert_mappable_node(struct i915_ggtt *ggtt,
64                      struct drm_mm_node *node, u32 size)
65 {
66         memset(node, 0, sizeof(*node));
67         return drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
68                                            size, 0, I915_COLOR_UNEVICTABLE,
69                                            0, ggtt->mappable_end,
70                                            DRM_MM_INSERT_LOW);
71 }
72
73 static void
74 remove_mappable_node(struct drm_mm_node *node)
75 {
76         drm_mm_remove_node(node);
77 }
78
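/*
 * GET_APERTURE ioctl: report the total size of the global GTT and how much
 * of it is still available, i.e. not reserved and not occupied by a pinned
 * vma.
 */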
79 int
80 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
81                             struct drm_file *file)
82 {
83         struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
84         struct drm_i915_gem_get_aperture *args = data;
85         struct i915_vma *vma;
86         u64 pinned;
87
88         mutex_lock(&ggtt->vm.mutex);
89
90         pinned = ggtt->vm.reserved;
91         list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
92                 if (i915_vma_is_pinned(vma))
93                         pinned += vma->node.size;
94
95         mutex_unlock(&ggtt->vm.mutex);
96
97         args->aper_size = ggtt->vm.total;
98         args->aper_available_size = args->aper_size - pinned;
99
100         return 0;
101 }
102
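/*
 * Unbind every vma belonging to the object, dropping the vma list lock
 * around each unbind. Stops at the first error and splices the
 * already-processed vmas back onto the object's list before returning.
 */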
103 int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
104 {
105         struct i915_vma *vma;
106         LIST_HEAD(still_in_list);
107         int ret = 0;
108
109         lockdep_assert_held(&obj->base.dev->struct_mutex);
110
111         spin_lock(&obj->vma.lock);
112         while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
113                                                        struct i915_vma,
114                                                        obj_link))) {
115                 list_move_tail(&vma->obj_link, &still_in_list);
116                 spin_unlock(&obj->vma.lock);
117
118                 ret = i915_vma_unbind(vma);
119
120                 spin_lock(&obj->vma.lock);
121         }
122         list_splice(&still_in_list, &obj->vma.list);
123         spin_unlock(&obj->vma.lock);
124
125         return ret;
126 }
127
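/*
 * pwrite into a physically contiguous (phys_handle) object: copy the user
 * data straight into the CPU mapping, clflush it, and keep frontbuffer
 * tracking informed around the write.
 */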
128 static int
129 i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
130                      struct drm_i915_gem_pwrite *args,
131                      struct drm_file *file)
132 {
133         void *vaddr = obj->phys_handle->vaddr + args->offset;
134         char __user *user_data = u64_to_user_ptr(args->data_ptr);
135
136         /* We manually control the domain here and pretend that it
137          * remains coherent i.e. in the GTT domain, like shmem_pwrite.
138          */
139         intel_fb_obj_invalidate(obj, ORIGIN_CPU);
140         if (copy_from_user(vaddr, user_data, args->size))
141                 return -EFAULT;
142
143         drm_clflush_virt_range(vaddr, args->size);
144         i915_gem_chipset_flush(to_i915(obj->base.dev));
145
146         intel_fb_obj_flush(obj, ORIGIN_CPU);
147         return 0;
148 }
149
150 static int
151 i915_gem_create(struct drm_file *file,
152                 struct drm_i915_private *dev_priv,
153                 u64 *size_p,
154                 u32 *handle_p)
155 {
156         struct drm_i915_gem_object *obj;
157         u32 handle;
158         u64 size;
159         int ret;
160
161         size = round_up(*size_p, PAGE_SIZE);
162         if (size == 0)
163                 return -EINVAL;
164
165         /* Allocate the new object */
166         obj = i915_gem_object_create_shmem(dev_priv, size);
167         if (IS_ERR(obj))
168                 return PTR_ERR(obj);
169
170         ret = drm_gem_handle_create(file, &obj->base, &handle);
171         /* drop reference from allocate - handle holds it now */
172         i915_gem_object_put(obj);
173         if (ret)
174                 return ret;
175
176         *handle_p = handle;
177         *size_p = size;
178         return 0;
179 }
180
181 int
182 i915_gem_dumb_create(struct drm_file *file,
183                      struct drm_device *dev,
184                      struct drm_mode_create_dumb *args)
185 {
186         int cpp = DIV_ROUND_UP(args->bpp, 8);
187         u32 format;
188
189         switch (cpp) {
190         case 1:
191                 format = DRM_FORMAT_C8;
192                 break;
193         case 2:
194                 format = DRM_FORMAT_RGB565;
195                 break;
196         case 4:
197                 format = DRM_FORMAT_XRGB8888;
198                 break;
199         default:
200                 return -EINVAL;
201         }
202
203         /* have to work out size/pitch and return them */
204         args->pitch = ALIGN(args->width * cpp, 64);
205
206         /* align stride to page size so that we can remap */
207         if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
208                                                     DRM_FORMAT_MOD_LINEAR))
209                 args->pitch = ALIGN(args->pitch, 4096);
210
211         args->size = args->pitch * args->height;
212         return i915_gem_create(file, to_i915(dev),
213                                &args->size, &args->handle);
214 }
215
216 /**
217  * Creates a new mm object and returns a handle to it.
218  * @dev: drm device pointer
219  * @data: ioctl data blob
220  * @file: drm file pointer
221  */
222 int
223 i915_gem_create_ioctl(struct drm_device *dev, void *data,
224                       struct drm_file *file)
225 {
226         struct drm_i915_private *dev_priv = to_i915(dev);
227         struct drm_i915_gem_create *args = data;
228
229         i915_gem_flush_free_objects(dev_priv);
230
231         return i915_gem_create(file, dev_priv,
232                                &args->size, &args->handle);
233 }
234
235 void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
236 {
237         intel_wakeref_t wakeref;
238
239         /*
240          * No actual flushing is required for the GTT write domain for reads
241          * from the GTT domain. Writes to it "immediately" go to main memory
242          * as far as we know, so there's no chipset flush. It also doesn't
243          * land in the GPU render cache.
244          *
245          * However, we do have to enforce the order so that all writes through
246          * the GTT land before any writes to the device, such as updates to
247          * the GATT itself.
248          *
249          * We also have to wait a bit for the writes to land from the GTT.
250          * An uncached read (i.e. mmio) seems to be ideal for the round-trip
251          * timing. This issue has only been observed when switching quickly
252          * between GTT writes and CPU reads from inside the kernel on recent hw,
253          * and it appears to only affect discrete GTT blocks (i.e. on LLC
254          * system agents we could not reproduce this behaviour, at least
255          * not until Cannonlake came along!).
256          */
257
258         wmb();
259
260         if (INTEL_INFO(dev_priv)->has_coherent_ggtt)
261                 return;
262
263         i915_gem_chipset_flush(dev_priv);
264
265         with_intel_runtime_pm(dev_priv, wakeref) {
266                 struct intel_uncore *uncore = &dev_priv->uncore;
267
268                 spin_lock_irq(&uncore->lock);
269                 intel_uncore_posting_read_fw(uncore,
270                                              RING_HEAD(RENDER_RING_BASE));
271                 spin_unlock_irq(&uncore->lock);
272         }
273 }
274
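/* Per-page copy function for the shmem pread fastpath.
 * Flushes stale cachelines before the copy if needs_clflush is set.
 */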
275 static int
276 shmem_pread(struct page *page, int offset, int len, char __user *user_data,
277             bool needs_clflush)
278 {
279         char *vaddr;
280         int ret;
281
282         vaddr = kmap(page);
283
284         if (needs_clflush)
285                 drm_clflush_virt_range(vaddr + offset, len);
286
287         ret = __copy_to_user(user_data, vaddr + offset, len);
288
289         kunmap(page);
290
291         return ret ? -EFAULT : 0;
292 }
293
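/*
 * pread through the CPU: walk the object's backing pages and copy each one
 * out to userspace via a kernel mapping, clflushing first when the data may
 * be stale in the CPU caches.
 */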
294 static int
295 i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
296                      struct drm_i915_gem_pread *args)
297 {
298         unsigned int needs_clflush;
299         unsigned int idx, offset;
300         struct dma_fence *fence;
301         char __user *user_data;
302         u64 remain;
303         int ret;
304
305         ret = i915_gem_object_prepare_read(obj, &needs_clflush);
306         if (ret)
307                 return ret;
308
309         fence = i915_gem_object_lock_fence(obj);
310         i915_gem_object_finish_access(obj);
311         if (!fence)
312                 return -ENOMEM;
313
314         remain = args->size;
315         user_data = u64_to_user_ptr(args->data_ptr);
316         offset = offset_in_page(args->offset);
317         for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
318                 struct page *page = i915_gem_object_get_page(obj, idx);
319                 unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
320
321                 ret = shmem_pread(page, offset, length, user_data,
322                                   needs_clflush);
323                 if (ret)
324                         break;
325
326                 remain -= length;
327                 user_data += length;
328                 offset = 0;
329         }
330
331         i915_gem_object_unlock_fence(obj, fence);
332         return ret;
333 }
334
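/*
 * Copy from the GGTT aperture out to userspace. Try a non-faulting copy
 * from an atomic WC mapping first, then fall back to a regular mapping
 * that may fault in the user pages. Returns true if any bytes were left
 * uncopied.
 */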
335 static inline bool
336 gtt_user_read(struct io_mapping *mapping,
337               loff_t base, int offset,
338               char __user *user_data, int length)
339 {
340         void __iomem *vaddr;
341         unsigned long unwritten;
342
343         /* We can use the cpu mem copy function because this is X86. */
344         vaddr = io_mapping_map_atomic_wc(mapping, base);
345         unwritten = __copy_to_user_inatomic(user_data,
346                                             (void __force *)vaddr + offset,
347                                             length);
348         io_mapping_unmap_atomic(vaddr);
349         if (unwritten) {
350                 vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
351                 unwritten = copy_to_user(user_data,
352                                          (void __force *)vaddr + offset,
353                                          length);
354                 io_mapping_unmap(vaddr);
355         }
356         return unwritten;
357 }
358
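/*
 * Slow pread path through the GGTT aperture: pin the object into the
 * mappable aperture if possible, otherwise bind it one page at a time into
 * a scratch GGTT node, and copy out through the uncached iomap.
 */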
359 static int
360 i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
361                    const struct drm_i915_gem_pread *args)
362 {
363         struct drm_i915_private *i915 = to_i915(obj->base.dev);
364         struct i915_ggtt *ggtt = &i915->ggtt;
365         intel_wakeref_t wakeref;
366         struct drm_mm_node node;
367         struct dma_fence *fence;
368         void __user *user_data;
369         struct i915_vma *vma;
370         u64 remain, offset;
371         int ret;
372
373         ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
374         if (ret)
375                 return ret;
376
377         wakeref = intel_runtime_pm_get(i915);
378         vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
379                                        PIN_MAPPABLE |
380                                        PIN_NONFAULT |
381                                        PIN_NONBLOCK);
382         if (!IS_ERR(vma)) {
383                 node.start = i915_ggtt_offset(vma);
384                 node.allocated = false;
385                 ret = i915_vma_put_fence(vma);
386                 if (ret) {
387                         i915_vma_unpin(vma);
388                         vma = ERR_PTR(ret);
389                 }
390         }
391         if (IS_ERR(vma)) {
392                 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
393                 if (ret)
394                         goto out_unlock;
395                 GEM_BUG_ON(!node.allocated);
396         }
397
398         mutex_unlock(&i915->drm.struct_mutex);
399
400         ret = i915_gem_object_lock_interruptible(obj);
401         if (ret)
402                 goto out_unpin;
403
404         ret = i915_gem_object_set_to_gtt_domain(obj, false);
405         if (ret) {
406                 i915_gem_object_unlock(obj);
407                 goto out_unpin;
408         }
409
410         fence = i915_gem_object_lock_fence(obj);
411         i915_gem_object_unlock(obj);
412         if (!fence) {
413                 ret = -ENOMEM;
414                 goto out_unpin;
415         }
416
417         user_data = u64_to_user_ptr(args->data_ptr);
418         remain = args->size;
419         offset = args->offset;
420
421         while (remain > 0) {
422                 /* Operation in this page
423                  *
424                  * page_base = page offset within aperture
425                  * page_offset = offset within page
426                  * page_length = bytes to copy for this page
427                  */
428                 u32 page_base = node.start;
429                 unsigned page_offset = offset_in_page(offset);
430                 unsigned page_length = PAGE_SIZE - page_offset;
431                 page_length = remain < page_length ? remain : page_length;
432                 if (node.allocated) {
433                         wmb();
434                         ggtt->vm.insert_page(&ggtt->vm,
435                                              i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
436                                              node.start, I915_CACHE_NONE, 0);
437                         wmb();
438                 } else {
439                         page_base += offset & PAGE_MASK;
440                 }
441
442                 if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
443                                   user_data, page_length)) {
444                         ret = -EFAULT;
445                         break;
446                 }
447
448                 remain -= page_length;
449                 user_data += page_length;
450                 offset += page_length;
451         }
452
453         i915_gem_object_unlock_fence(obj, fence);
454 out_unpin:
455         mutex_lock(&i915->drm.struct_mutex);
456         if (node.allocated) {
457                 wmb();
458                 ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
459                 remove_mappable_node(&node);
460         } else {
461                 i915_vma_unpin(vma);
462         }
463 out_unlock:
464         intel_runtime_pm_put(i915, wakeref);
465         mutex_unlock(&i915->drm.struct_mutex);
466
467         return ret;
468 }
469
470 /**
471  * Reads data from the object referenced by handle.
472  * @dev: drm device pointer
473  * @data: ioctl data blob
474  * @file: drm file pointer
475  *
476  * On error, the contents of *data are undefined.
477  */
478 int
479 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
480                      struct drm_file *file)
481 {
482         struct drm_i915_gem_pread *args = data;
483         struct drm_i915_gem_object *obj;
484         int ret;
485
486         if (args->size == 0)
487                 return 0;
488
489         if (!access_ok(u64_to_user_ptr(args->data_ptr),
490                        args->size))
491                 return -EFAULT;
492
493         obj = i915_gem_object_lookup(file, args->handle);
494         if (!obj)
495                 return -ENOENT;
496
497         /* Bounds check source.  */
498         if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
499                 ret = -EINVAL;
500                 goto out;
501         }
502
503         trace_i915_gem_object_pread(obj, args->offset, args->size);
504
505         ret = i915_gem_object_wait(obj,
506                                    I915_WAIT_INTERRUPTIBLE,
507                                    MAX_SCHEDULE_TIMEOUT);
508         if (ret)
509                 goto out;
510
511         ret = i915_gem_object_pin_pages(obj);
512         if (ret)
513                 goto out;
514
515         ret = i915_gem_shmem_pread(obj, args);
516         if (ret == -EFAULT || ret == -ENODEV)
517                 ret = i915_gem_gtt_pread(obj, args);
518
519         i915_gem_object_unpin_pages(obj);
520 out:
521         i915_gem_object_put(obj);
522         return ret;
523 }
524
525 /* This is the fast write path which cannot handle
526  * page faults in the source data
527  */
528
529 static inline bool
530 ggtt_write(struct io_mapping *mapping,
531            loff_t base, int offset,
532            char __user *user_data, int length)
533 {
534         void __iomem *vaddr;
535         unsigned long unwritten;
536
537         /* We can use the cpu mem copy function because this is X86. */
538         vaddr = io_mapping_map_atomic_wc(mapping, base);
539         unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
540                                                       user_data, length);
541         io_mapping_unmap_atomic(vaddr);
542         if (unwritten) {
543                 vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
544                 unwritten = copy_from_user((void __force *)vaddr + offset,
545                                            user_data, length);
546                 io_mapping_unmap(vaddr);
547         }
548
549         return unwritten;
550 }
551
552 /**
553  * This is the fast pwrite path, where we copy the data directly from the
554  * user into the GTT, uncached.
555  * @obj: i915 GEM object
556  * @args: pwrite arguments structure
557  */
558 static int
559 i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
560                          const struct drm_i915_gem_pwrite *args)
561 {
562         struct drm_i915_private *i915 = to_i915(obj->base.dev);
563         struct i915_ggtt *ggtt = &i915->ggtt;
564         intel_wakeref_t wakeref;
565         struct drm_mm_node node;
566         struct dma_fence *fence;
567         struct i915_vma *vma;
568         u64 remain, offset;
569         void __user *user_data;
570         int ret;
571
572         ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
573         if (ret)
574                 return ret;
575
576         if (i915_gem_object_has_struct_page(obj)) {
577                 /*
578                  * Avoid waking the device up if we can fall back, as
579                  * waking/resuming is very slow (worst-case 10-100 ms
580                  * depending on PCI sleeps and our own resume time).
581                  * This easily dwarfs any performance advantage from
582                  * using the cache bypass of indirect GGTT access.
583                  */
584                 wakeref = intel_runtime_pm_get_if_in_use(i915);
585                 if (!wakeref) {
586                         ret = -EFAULT;
587                         goto out_unlock;
588                 }
589         } else {
590                 /* No backing pages, no fallback, we must force GGTT access */
591                 wakeref = intel_runtime_pm_get(i915);
592         }
593
594         vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
595                                        PIN_MAPPABLE |
596                                        PIN_NONFAULT |
597                                        PIN_NONBLOCK);
598         if (!IS_ERR(vma)) {
599                 node.start = i915_ggtt_offset(vma);
600                 node.allocated = false;
601                 ret = i915_vma_put_fence(vma);
602                 if (ret) {
603                         i915_vma_unpin(vma);
604                         vma = ERR_PTR(ret);
605                 }
606         }
607         if (IS_ERR(vma)) {
608                 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
609                 if (ret)
610                         goto out_rpm;
611                 GEM_BUG_ON(!node.allocated);
612         }
613
614         mutex_unlock(&i915->drm.struct_mutex);
615
616         ret = i915_gem_object_lock_interruptible(obj);
617         if (ret)
618                 goto out_unpin;
619
620         ret = i915_gem_object_set_to_gtt_domain(obj, true);
621         if (ret) {
622                 i915_gem_object_unlock(obj);
623                 goto out_unpin;
624         }
625
626         fence = i915_gem_object_lock_fence(obj);
627         i915_gem_object_unlock(obj);
628         if (!fence) {
629                 ret = -ENOMEM;
630                 goto out_unpin;
631         }
632
633         intel_fb_obj_invalidate(obj, ORIGIN_CPU);
634
635         user_data = u64_to_user_ptr(args->data_ptr);
636         offset = args->offset;
637         remain = args->size;
638         while (remain) {
639                 /* Operation in this page
640                  *
641                  * page_base = page offset within aperture
642                  * page_offset = offset within page
643                  * page_length = bytes to copy for this page
644                  */
645                 u32 page_base = node.start;
646                 unsigned int page_offset = offset_in_page(offset);
647                 unsigned int page_length = PAGE_SIZE - page_offset;
648                 page_length = remain < page_length ? remain : page_length;
649                 if (node.allocated) {
650                         wmb(); /* flush the write before we modify the GGTT */
651                         ggtt->vm.insert_page(&ggtt->vm,
652                                              i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
653                                              node.start, I915_CACHE_NONE, 0);
654                         wmb(); /* flush modifications to the GGTT (insert_page) */
655                 } else {
656                         page_base += offset & PAGE_MASK;
657                 }
658                 /* If we get a fault while copying data, then (presumably) our
659                  * source page isn't available.  Return the error and we'll
660                  * retry in the slow path.
661                  * If the object is non-shmem backed, we retry with the
662                  * path that handles page faults.
663                  */
664                 if (ggtt_write(&ggtt->iomap, page_base, page_offset,
665                                user_data, page_length)) {
666                         ret = -EFAULT;
667                         break;
668                 }
669
670                 remain -= page_length;
671                 user_data += page_length;
672                 offset += page_length;
673         }
674         intel_fb_obj_flush(obj, ORIGIN_CPU);
675
676         i915_gem_object_unlock_fence(obj, fence);
677 out_unpin:
678         mutex_lock(&i915->drm.struct_mutex);
679         if (node.allocated) {
680                 wmb();
681                 ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
682                 remove_mappable_node(&node);
683         } else {
684                 i915_vma_unpin(vma);
685         }
686 out_rpm:
687         intel_runtime_pm_put(i915, wakeref);
688 out_unlock:
689         mutex_unlock(&i915->drm.struct_mutex);
690         return ret;
691 }
692
693 /* Per-page copy function for the shmem pwrite fastpath.
694  * Flushes invalid cachelines before writing to the target if
695  * needs_clflush_before is set and flushes out any written cachelines after
696  * writing if needs_clflush is set.
697  */
698 static int
699 shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
700              bool needs_clflush_before,
701              bool needs_clflush_after)
702 {
703         char *vaddr;
704         int ret;
705
706         vaddr = kmap(page);
707
708         if (needs_clflush_before)
709                 drm_clflush_virt_range(vaddr + offset, len);
710
711         ret = __copy_from_user(vaddr + offset, user_data, len);
712         if (!ret && needs_clflush_after)
713                 drm_clflush_virt_range(vaddr + offset, len);
714
715         kunmap(page);
716
717         return ret ? -EFAULT : 0;
718 }
719
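/*
 * pwrite through the CPU: walk the object's backing pages and copy the user
 * data in via a kernel mapping, clflushing around partial cacheline writes
 * and after the write when required.
 */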
720 static int
721 i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
722                       const struct drm_i915_gem_pwrite *args)
723 {
724         unsigned int partial_cacheline_write;
725         unsigned int needs_clflush;
726         unsigned int offset, idx;
727         struct dma_fence *fence;
728         void __user *user_data;
729         u64 remain;
730         int ret;
731
732         ret = i915_gem_object_prepare_write(obj, &needs_clflush);
733         if (ret)
734                 return ret;
735
736         fence = i915_gem_object_lock_fence(obj);
737         i915_gem_object_finish_access(obj);
738         if (!fence)
739                 return -ENOMEM;
740
741         /* If we don't overwrite a cacheline completely we need to be
742          * careful to have up-to-date data by first clflushing. Don't
743          * overcomplicate things and flush the entire range being written.
744          */
745         partial_cacheline_write = 0;
746         if (needs_clflush & CLFLUSH_BEFORE)
747                 partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
748
749         user_data = u64_to_user_ptr(args->data_ptr);
750         remain = args->size;
751         offset = offset_in_page(args->offset);
752         for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
753                 struct page *page = i915_gem_object_get_page(obj, idx);
754                 unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
755
756                 ret = shmem_pwrite(page, offset, length, user_data,
757                                    (offset | length) & partial_cacheline_write,
758                                    needs_clflush & CLFLUSH_AFTER);
759                 if (ret)
760                         break;
761
762                 remain -= length;
763                 user_data += length;
764                 offset = 0;
765         }
766
767         intel_fb_obj_flush(obj, ORIGIN_CPU);
768         i915_gem_object_unlock_fence(obj, fence);
769
770         return ret;
771 }
772
773 /**
774  * Writes data to the object referenced by handle.
775  * @dev: drm device
776  * @data: ioctl data blob
777  * @file: drm file
778  *
779  * On error, the contents of the buffer that were to be modified are undefined.
780  */
781 int
782 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
783                       struct drm_file *file)
784 {
785         struct drm_i915_gem_pwrite *args = data;
786         struct drm_i915_gem_object *obj;
787         int ret;
788
789         if (args->size == 0)
790                 return 0;
791
792         if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
793                 return -EFAULT;
794
795         obj = i915_gem_object_lookup(file, args->handle);
796         if (!obj)
797                 return -ENOENT;
798
799         /* Bounds check destination. */
800         if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
801                 ret = -EINVAL;
802                 goto err;
803         }
804
805         /* Writes not allowed into this read-only object */
806         if (i915_gem_object_is_readonly(obj)) {
807                 ret = -EINVAL;
808                 goto err;
809         }
810
811         trace_i915_gem_object_pwrite(obj, args->offset, args->size);
812
813         ret = -ENODEV;
814         if (obj->ops->pwrite)
815                 ret = obj->ops->pwrite(obj, args);
816         if (ret != -ENODEV)
817                 goto err;
818
819         ret = i915_gem_object_wait(obj,
820                                    I915_WAIT_INTERRUPTIBLE |
821                                    I915_WAIT_ALL,
822                                    MAX_SCHEDULE_TIMEOUT);
823         if (ret)
824                 goto err;
825
826         ret = i915_gem_object_pin_pages(obj);
827         if (ret)
828                 goto err;
829
830         ret = -EFAULT;
831         /* We can only do the GTT pwrite on untiled buffers, as otherwise
832          * it would end up going through the fenced access, and we'll get
833          * different detiling behavior between reading and writing.
834          * pread/pwrite currently are reading and writing from the CPU
835          * perspective, requiring manual detiling by the client.
836          */
837         if (!i915_gem_object_has_struct_page(obj) ||
838             cpu_write_needs_clflush(obj))
839                 /* Note that the gtt paths might fail with non-page-backed user
840                  * pointers (e.g. gtt mappings when moving data between
841                  * textures). Fallback to the shmem path in that case.
842                  */
843                 ret = i915_gem_gtt_pwrite_fast(obj, args);
844
845         if (ret == -EFAULT || ret == -ENOSPC) {
846                 if (obj->phys_handle)
847                         ret = i915_gem_phys_pwrite(obj, args, file);
848                 else
849                         ret = i915_gem_shmem_pwrite(obj, args);
850         }
851
852         i915_gem_object_unpin_pages(obj);
853 err:
854         i915_gem_object_put(obj);
855         return ret;
856 }
857
858 /**
859  * Called when user space has done writes to this buffer
860  * @dev: drm device
861  * @data: ioctl data blob
862  * @file: drm file
863  */
864 int
865 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
866                          struct drm_file *file)
867 {
868         struct drm_i915_gem_sw_finish *args = data;
869         struct drm_i915_gem_object *obj;
870
871         obj = i915_gem_object_lookup(file, args->handle);
872         if (!obj)
873                 return -ENOENT;
874
875         /*
876          * Proxy objects are barred from CPU access, so there is no
877          * need to ban sw_finish as it is a nop.
878          */
879
880         /* Pinned buffers may be scanout, so flush the cache */
881         i915_gem_object_flush_if_display(obj);
882         i915_gem_object_put(obj);
883
884         return 0;
885 }
886
887 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
888 {
889         struct drm_i915_gem_object *obj, *on;
890         int i;
891
892         /*
893          * Only called during RPM suspend. All users of the userfault_list
894          * must be holding an RPM wakeref to ensure that this cannot
895          * run concurrently with them (they also use the struct_mutex for
896          * protection amongst themselves).
897          */
898
899         list_for_each_entry_safe(obj, on,
900                                  &dev_priv->mm.userfault_list, userfault_link)
901                 __i915_gem_object_release_mmap(obj);
902
903         /* The fence registers will be lost when the device powers down. If any were
904          * in use by hardware (i.e. they are pinned), we should not be powering
905          * down! All other fences will be reacquired by the user upon waking.
906          */
907         for (i = 0; i < dev_priv->num_fence_regs; i++) {
908                 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
909
910                 /* Ideally we want to assert that the fence register is not
911                  * live at this point (i.e. that no piece of code will be
912                  * trying to write through fence + GTT, as that not only violates
913                  * our tracking of activity and associated locking/barriers,
914                  * but is also illegal given that the hw is powered down).
915                  *
916                  * Previously we used reg->pin_count as a "liveness" indicator.
917                  * That is not sufficient, and we need a more fine-grained
918                  * tool if we want to have a sanity check here.
919                  */
920
921                 if (!reg->vma)
922                         continue;
923
924                 GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
925                 reg->dirty = true;
926         }
927 }
928
929 static int wait_for_engines(struct drm_i915_private *i915)
930 {
931         if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) {
932                 dev_err(i915->drm.dev,
933                         "Failed to idle engines, declaring wedged!\n");
934                 GEM_TRACE_DUMP();
935                 i915_gem_set_wedged(i915);
936                 return -EIO;
937         }
938
939         return 0;
940 }
941
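/*
 * Wait on the last request of every active timeline, dropping the timeline
 * lock around each individual wait. Returns the remaining timeout, or a
 * negative error code from i915_request_wait().
 */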
942 static long
943 wait_for_timelines(struct drm_i915_private *i915,
944                    unsigned int flags, long timeout)
945 {
946         struct i915_gt_timelines *gt = &i915->gt.timelines;
947         struct i915_timeline *tl;
948
949         mutex_lock(&gt->mutex);
950         list_for_each_entry(tl, &gt->active_list, link) {
951                 struct i915_request *rq;
952
953                 rq = i915_active_request_get_unlocked(&tl->last_request);
954                 if (!rq)
955                         continue;
956
957                 mutex_unlock(&gt->mutex);
958
959                 /*
960                  * "Race-to-idle".
961                  *
962                  * Switching to the kernel context is often used as a synchronous
963                  * step prior to idling, e.g. in suspend for flushing all
964                  * current operations to memory before sleeping. These we
965                  * want to complete as quickly as possible to avoid prolonged
966                  * stalls, so allow the gpu to boost to maximum clocks.
967                  */
968                 if (flags & I915_WAIT_FOR_IDLE_BOOST)
969                         gen6_rps_boost(rq);
970
971                 timeout = i915_request_wait(rq, flags, timeout);
972                 i915_request_put(rq);
973                 if (timeout < 0)
974                         return timeout;
975
976                 /* restart after reacquiring the lock */
977                 mutex_lock(&gt->mutex);
978                 tl = list_entry(&gt->active_list, typeof(*tl), link);
979         }
980         mutex_unlock(&gt->mutex);
981
982         return timeout;
983 }
984
985 int i915_gem_wait_for_idle(struct drm_i915_private *i915,
986                            unsigned int flags, long timeout)
987 {
988         GEM_TRACE("flags=%x (%s), timeout=%ld%s, awake?=%s\n",
989                   flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked",
990                   timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "",
991                   yesno(i915->gt.awake));
992
993         /* If the device is asleep, we have no requests outstanding */
994         if (!READ_ONCE(i915->gt.awake))
995                 return 0;
996
997         timeout = wait_for_timelines(i915, flags, timeout);
998         if (timeout < 0)
999                 return timeout;
1000
1001         if (flags & I915_WAIT_LOCKED) {
1002                 int err;
1003
1004                 lockdep_assert_held(&i915->drm.struct_mutex);
1005
1006                 err = wait_for_engines(i915);
1007                 if (err)
1008                         return err;
1009
1010                 i915_retire_requests(i915);
1011         }
1012
1013         return 0;
1014 }
1015
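/*
 * Pin an object into the global GTT for the requested view, unbinding and
 * rebinding the vma if it is currently misplaced. With PIN_MAPPABLE and
 * PIN_NONBLOCK the caller is expected to provide a fallback, so we bail out
 * early rather than thrash the limited mappable aperture.
 */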
1016 struct i915_vma *
1017 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
1018                          const struct i915_ggtt_view *view,
1019                          u64 size,
1020                          u64 alignment,
1021                          u64 flags)
1022 {
1023         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
1024         struct i915_address_space *vm = &dev_priv->ggtt.vm;
1025         struct i915_vma *vma;
1026         int ret;
1027
1028         lockdep_assert_held(&obj->base.dev->struct_mutex);
1029
1030         if (flags & PIN_MAPPABLE &&
1031             (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
1032                 /* If the required space is larger than the available
1033                  * aperture, we will not be able to find a slot for the
1034                  * object and unbinding the object now will be in
1035                  * vain. Worse, doing so may cause us to ping-pong
1036                  * the object in and out of the Global GTT and
1037                  * waste a lot of cycles under the mutex.
1038                  */
1039                 if (obj->base.size > dev_priv->ggtt.mappable_end)
1040                         return ERR_PTR(-E2BIG);
1041
1042                 /* If NONBLOCK is set the caller is optimistically
1043                  * trying to cache the full object within the mappable
1044                  * aperture, and *must* have a fallback in place for
1045                  * situations where we cannot bind the object. We
1046                  * can be a little more lax here and use the fallback
1047                  * more often to avoid costly migrations of ourselves
1048                  * and other objects within the aperture.
1049                  *
1050                  * Half-the-aperture is used as a simple heuristic.
1051                  * More interesting would be to search for a free
1052                  * block prior to making the commitment to unbind.
1053                  * That caters for the self-harm case, and with a
1054                  * little more heuristics (e.g. NOFAULT, NOEVICT)
1055                  * we could try to minimise harm to others.
1056                  */
1057                 if (flags & PIN_NONBLOCK &&
1058                     obj->base.size > dev_priv->ggtt.mappable_end / 2)
1059                         return ERR_PTR(-ENOSPC);
1060         }
1061
1062         vma = i915_vma_instance(obj, vm, view);
1063         if (IS_ERR(vma))
1064                 return vma;
1065
1066         if (i915_vma_misplaced(vma, size, alignment, flags)) {
1067                 if (flags & PIN_NONBLOCK) {
1068                         if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
1069                                 return ERR_PTR(-ENOSPC);
1070
1071                         if (flags & PIN_MAPPABLE &&
1072                             vma->fence_size > dev_priv->ggtt.mappable_end / 2)
1073                                 return ERR_PTR(-ENOSPC);
1074                 }
1075
1076                 WARN(i915_vma_is_pinned(vma),
1077                      "bo is already pinned in ggtt with incorrect alignment:"
1078                      " offset=%08x, req.alignment=%llx,"
1079                      " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
1080                      i915_ggtt_offset(vma), alignment,
1081                      !!(flags & PIN_MAPPABLE),
1082                      i915_vma_is_map_and_fenceable(vma));
1083                 ret = i915_vma_unbind(vma);
1084                 if (ret)
1085                         return ERR_PTR(ret);
1086         }
1087
1088         ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
1089         if (ret)
1090                 return ERR_PTR(ret);
1091
1092         return vma;
1093 }
1094
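/*
 * MADVISE ioctl: mark an object's backing storage as needed or discardable.
 * DONTNEED objects move to the purge list (and are truncated at once if
 * they currently have no pages); WILLNEED objects return to the normal
 * shrink list.
 */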
1095 int
1096 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
1097                        struct drm_file *file_priv)
1098 {
1099         struct drm_i915_private *i915 = to_i915(dev);
1100         struct drm_i915_gem_madvise *args = data;
1101         struct drm_i915_gem_object *obj;
1102         int err;
1103
1104         switch (args->madv) {
1105         case I915_MADV_DONTNEED:
1106         case I915_MADV_WILLNEED:
1107             break;
1108         default:
1109             return -EINVAL;
1110         }
1111
1112         obj = i915_gem_object_lookup(file_priv, args->handle);
1113         if (!obj)
1114                 return -ENOENT;
1115
1116         err = mutex_lock_interruptible(&obj->mm.lock);
1117         if (err)
1118                 goto out;
1119
1120         if (i915_gem_object_has_pages(obj) &&
1121             i915_gem_object_is_tiled(obj) &&
1122             i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
1123                 if (obj->mm.madv == I915_MADV_WILLNEED) {
1124                         GEM_BUG_ON(!obj->mm.quirked);
1125                         __i915_gem_object_unpin_pages(obj);
1126                         obj->mm.quirked = false;
1127                 }
1128                 if (args->madv == I915_MADV_WILLNEED) {
1129                         GEM_BUG_ON(obj->mm.quirked);
1130                         __i915_gem_object_pin_pages(obj);
1131                         obj->mm.quirked = true;
1132                 }
1133         }
1134
1135         if (obj->mm.madv != __I915_MADV_PURGED)
1136                 obj->mm.madv = args->madv;
1137
1138         if (i915_gem_object_has_pages(obj)) {
1139                 struct list_head *list;
1140
1141                 if (i915_gem_object_is_shrinkable(obj)) {
1142                         unsigned long flags;
1143
1144                         spin_lock_irqsave(&i915->mm.obj_lock, flags);
1145
1146                         if (obj->mm.madv != I915_MADV_WILLNEED)
1147                                 list = &i915->mm.purge_list;
1148                         else
1149                                 list = &i915->mm.shrink_list;
1150                         list_move_tail(&obj->mm.link, list);
1151
1152                         spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
1153                 }
1154         }
1155
1156         /* if the object is no longer attached, discard its backing storage */
1157         if (obj->mm.madv == I915_MADV_DONTNEED &&
1158             !i915_gem_object_has_pages(obj))
1159                 i915_gem_object_truncate(obj);
1160
1161         args->retained = obj->mm.madv != __I915_MADV_PURGED;
1162         mutex_unlock(&obj->mm.lock);
1163
1164 out:
1165         i915_gem_object_put(obj);
1166         return err;
1167 }
1168
1169 void i915_gem_sanitize(struct drm_i915_private *i915)
1170 {
1171         intel_wakeref_t wakeref;
1172
1173         GEM_TRACE("\n");
1174
1175         wakeref = intel_runtime_pm_get(i915);
1176         intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
1177
1178         /*
1179          * As we have just resumed the machine and woken the device up from
1180          * deep PCI sleep (presumably D3_cold), assume the HW has been reset
1181          * back to defaults, recovering from whatever wedged state we left it
1182          * in and so worth trying to use the device once more.
1183          */
1184         if (i915_terminally_wedged(i915))
1185                 i915_gem_unset_wedged(i915);
1186
1187         /*
1188          * If we inherit context state from the BIOS or earlier occupants
1189          * of the GPU, the GPU may be in an inconsistent state when we
1190          * try to take over. The only way to remove the earlier state
1191          * is by resetting. However, resetting on earlier gen is tricky as
1192          * it may impact the display and we are uncertain about the stability
1193          * of the reset, so this could be applied to even earlier gen.
1194          */
1195         intel_gt_sanitize(i915, false);
1196
1197         intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
1198         intel_runtime_pm_put(i915, wakeref);
1199
1200         mutex_lock(&i915->drm.struct_mutex);
1201         i915_gem_contexts_lost(i915);
1202         mutex_unlock(&i915->drm.struct_mutex);
1203 }
1204
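/*
 * Program the hardware tiling/swizzle controls to match the bit-6
 * swizzling mode detected for the system (gen5+ only).
 */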
1205 void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
1206 {
1207         if (INTEL_GEN(dev_priv) < 5 ||
1208             dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
1209                 return;
1210
1211         I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
1212                                  DISP_TILE_SURFACE_SWIZZLING);
1213
1214         if (IS_GEN(dev_priv, 5))
1215                 return;
1216
1217         I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
1218         if (IS_GEN(dev_priv, 6))
1219                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
1220         else if (IS_GEN(dev_priv, 7))
1221                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
1222         else if (IS_GEN(dev_priv, 8))
1223                 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
1224         else
1225                 BUG();
1226 }
1227
1228 static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
1229 {
1230         I915_WRITE(RING_CTL(base), 0);
1231         I915_WRITE(RING_HEAD(base), 0);
1232         I915_WRITE(RING_TAIL(base), 0);
1233         I915_WRITE(RING_START(base), 0);
1234 }
1235
1236 static void init_unused_rings(struct drm_i915_private *dev_priv)
1237 {
1238         if (IS_I830(dev_priv)) {
1239                 init_unused_ring(dev_priv, PRB1_BASE);
1240                 init_unused_ring(dev_priv, SRB0_BASE);
1241                 init_unused_ring(dev_priv, SRB1_BASE);
1242                 init_unused_ring(dev_priv, SRB2_BASE);
1243                 init_unused_ring(dev_priv, SRB3_BASE);
1244         } else if (IS_GEN(dev_priv, 2)) {
1245                 init_unused_ring(dev_priv, SRB0_BASE);
1246                 init_unused_ring(dev_priv, SRB1_BASE);
1247         } else if (IS_GEN(dev_priv, 3)) {
1248                 init_unused_ring(dev_priv, PRB1_BASE);
1249                 init_unused_ring(dev_priv, PRB2_BASE);
1250         }
1251 }
1252
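/*
 * Bring the GPU back to a working state: apply GT workarounds, program
 * swizzling, quiesce unused rings, initialise PPGTT, WOPCM and the
 * microcontrollers, then resume the engines so requests can be replayed.
 */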
1253 int i915_gem_init_hw(struct drm_i915_private *dev_priv)
1254 {
1255         int ret;
1256
1257         dev_priv->gt.last_init_time = ktime_get();
1258
1259         /* Double layer security blanket, see i915_gem_init() */
1260         intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
1261
1262         if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
1263                 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
1264
1265         if (IS_HASWELL(dev_priv))
1266                 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
1267                            LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
1268
1269         /* Apply the GT workarounds... */
1270         intel_gt_apply_workarounds(dev_priv);
1271         /* ...and determine whether they are sticking. */
1272         intel_gt_verify_workarounds(dev_priv, "init");
1273
1274         i915_gem_init_swizzling(dev_priv);
1275
1276         /*
1277          * At least 830 can leave some of the unused rings
1278          * "active" (i.e. head != tail) after resume which
1279          * will prevent c3 entry. Make sure all unused rings
1280          * are totally idle.
1281          */
1282         init_unused_rings(dev_priv);
1283
1284         BUG_ON(!dev_priv->kernel_context);
1285         ret = i915_terminally_wedged(dev_priv);
1286         if (ret)
1287                 goto out;
1288
1289         ret = i915_ppgtt_init_hw(dev_priv);
1290         if (ret) {
1291                 DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
1292                 goto out;
1293         }
1294
1295         ret = intel_wopcm_init_hw(&dev_priv->wopcm);
1296         if (ret) {
1297                 DRM_ERROR("Enabling WOPCM failed (%d)\n", ret);
1298                 goto out;
1299         }
1300
1301         /* We can't enable contexts until all firmware is loaded */
1302         ret = intel_uc_init_hw(dev_priv);
1303         if (ret) {
1304                 DRM_ERROR("Enabling uc failed (%d)\n", ret);
1305                 goto out;
1306         }
1307
1308         intel_mocs_init_l3cc_table(dev_priv);
1309
1310         /* Only when the HW is re-initialised, can we replay the requests */
1311         ret = intel_engines_resume(dev_priv);
1312         if (ret)
1313                 goto cleanup_uc;
1314
1315         intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
1316
1317         intel_engines_set_scheduler_caps(dev_priv);
1318         return 0;
1319
1320 cleanup_uc:
1321         intel_uc_fini_hw(dev_priv);
1322 out:
1323         intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
1324
1325         return ret;
1326 }
1327
1328 static int __intel_engines_record_defaults(struct drm_i915_private *i915)
1329 {
1330         struct intel_engine_cs *engine;
1331         struct i915_gem_context *ctx;
1332         struct i915_gem_engines *e;
1333         enum intel_engine_id id;
1334         int err = 0;
1335
1336         /*
1337          * As we reset the gpu during very early sanitisation, the current
1338          * register state on the GPU should reflect its default values.
1339          * We load a context onto the hw (with restore-inhibit), then switch
1340          * over to a second context to save that default register state. We
1341          * can then prime every new context with that state so they all start
1342          * from the same default HW values.
1343          */
1344
1345         ctx = i915_gem_context_create_kernel(i915, 0);
1346         if (IS_ERR(ctx))
1347                 return PTR_ERR(ctx);
1348
1349         e = i915_gem_context_lock_engines(ctx);
1350
1351         for_each_engine(engine, i915, id) {
1352                 struct intel_context *ce = e->engines[id];
1353                 struct i915_request *rq;
1354
1355                 rq = intel_context_create_request(ce);
1356                 if (IS_ERR(rq)) {
1357                         err = PTR_ERR(rq);
1358                         goto err_active;
1359                 }
1360
1361                 err = 0;
1362                 if (rq->engine->init_context)
1363                         err = rq->engine->init_context(rq);
1364
1365                 i915_request_add(rq);
1366                 if (err)
1367                         goto err_active;
1368         }
1369
1370         /* Flush the default context image to memory, and enable powersaving. */
1371         if (!i915_gem_load_power_context(i915)) {
1372                 err = -EIO;
1373                 goto err_active;
1374         }
1375
1376         for_each_engine(engine, i915, id) {
1377                 struct intel_context *ce = e->engines[id];
1378                 struct i915_vma *state = ce->state;
1379                 void *vaddr;
1380
1381                 if (!state)
1382                         continue;
1383
1384                 GEM_BUG_ON(intel_context_is_pinned(ce));
1385
1386                 /*
1387                  * As we will hold a reference to the logical state, it will
1388                  * not be torn down with the context, and importantly the
1389                  * object will hold onto its vma (making it possible for a
1390                  * stray GTT write to corrupt our defaults). Unmap the vma
1391                  * from the GTT to prevent such accidents and reclaim the
1392                  * space.
1393                  */
1394                 err = i915_vma_unbind(state);
1395                 if (err)
1396                         goto err_active;
1397
1398                 i915_gem_object_lock(state->obj);
1399                 err = i915_gem_object_set_to_cpu_domain(state->obj, false);
1400                 i915_gem_object_unlock(state->obj);
1401                 if (err)
1402                         goto err_active;
1403
1404                 engine->default_state = i915_gem_object_get(state->obj);
1405                 i915_gem_object_set_cache_coherency(engine->default_state,
1406                                                     I915_CACHE_LLC);
1407
1408                 /* Check we can acquire the image of the context state */
1409                 vaddr = i915_gem_object_pin_map(engine->default_state,
1410                                                 I915_MAP_FORCE_WB);
1411                 if (IS_ERR(vaddr)) {
1412                         err = PTR_ERR(vaddr);
1413                         goto err_active;
1414                 }
1415
1416                 i915_gem_object_unpin_map(engine->default_state);
1417         }
1418
1419         if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
1420                 unsigned int found = intel_engines_has_context_isolation(i915);
1421
1422                 /*
1423                  * Make sure that classes with multiple engine instances all
1424                  * share the same basic configuration.
1425                  */
1426                 for_each_engine(engine, i915, id) {
1427                         unsigned int bit = BIT(engine->uabi_class);
1428                         unsigned int expected = engine->default_state ? bit : 0;
1429
1430                         if ((found & bit) != expected) {
1431                                 DRM_ERROR("mismatching default context state for class %d on engine %s\n",
1432                                           engine->uabi_class, engine->name);
1433                         }
1434                 }
1435         }
1436
1437 out_ctx:
1438         i915_gem_context_unlock_engines(ctx);
1439         i915_gem_context_set_closed(ctx);
1440         i915_gem_context_put(ctx);
1441         return err;
1442
1443 err_active:
1444         /*
1445          * If we have to abandon now, we expect the engines to be idle
1446          * and ready to be torn-down. The quickest way we can accomplish
1447          * this is by declaring ourselves wedged.
1448          */
1449         i915_gem_set_wedged(i915);
1450         goto out_ctx;
1451 }
1452
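/*
 * Allocate a GT-wide scratch buffer, preferring stolen memory and falling
 * back to internal pages, and pin it high in the global GTT.
 */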
1453 static int
1454 i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
1455 {
1456         struct drm_i915_gem_object *obj;
1457         struct i915_vma *vma;
1458         int ret;
1459
1460         obj = i915_gem_object_create_stolen(i915, size);
1461         if (!obj)
1462                 obj = i915_gem_object_create_internal(i915, size);
1463         if (IS_ERR(obj)) {
1464                 DRM_ERROR("Failed to allocate scratch page\n");
1465                 return PTR_ERR(obj);
1466         }
1467
1468         vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
1469         if (IS_ERR(vma)) {
1470                 ret = PTR_ERR(vma);
1471                 goto err_unref;
1472         }
1473
1474         ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
1475         if (ret)
1476                 goto err_unref;
1477
1478         i915->gt.scratch = vma;
1479         return 0;
1480
1481 err_unref:
1482         i915_gem_object_put(obj);
1483         return ret;
1484 }
1485
1486 static void i915_gem_fini_scratch(struct drm_i915_private *i915)
1487 {
1488         i915_vma_unpin_and_release(&i915->gt.scratch, 0);
1489 }
1490
1491 static int intel_engines_verify_workarounds(struct drm_i915_private *i915)
1492 {
1493         struct intel_engine_cs *engine;
1494         enum intel_engine_id id;
1495         int err = 0;
1496
1497         if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
1498                 return 0;
1499
1500         for_each_engine(engine, i915, id) {
1501                 if (intel_engine_verify_workarounds(engine, "load"))
1502                         err = -EIO;
1503         }
1504
1505         return err;
1506 }
1507
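/*
 * One-shot GEM initialisation for driver load: set up userptr, uC and
 * WOPCM support, the GGTT, the scratch page, engines and contexts, then
 * bring up the hardware and record the default context state. Failures
 * are unwound below; -EIO is special-cased so that the GPU is wedged but
 * KMS remains usable.
 */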
1508 int i915_gem_init(struct drm_i915_private *dev_priv)
1509 {
1510         int ret;
1511
1512         /* We need to fall back to 4K pages if the host doesn't support a huge GTT. */
1513         if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
1514                 mkwrite_device_info(dev_priv)->page_sizes =
1515                         I915_GTT_PAGE_SIZE_4K;
1516
1517         dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
1518
1519         i915_timelines_init(dev_priv);
1520
1521         ret = i915_gem_init_userptr(dev_priv);
1522         if (ret)
1523                 return ret;
1524
1525         ret = intel_uc_init_misc(dev_priv);
1526         if (ret)
1527                 return ret;
1528
1529         ret = intel_wopcm_init(&dev_priv->wopcm);
1530         if (ret)
1531                 goto err_uc_misc;
1532
1533         /* This is just a security blanket to placate dragons.
1534          * On some systems, we very sporadically observe that the first TLBs
1535          * used by the CS may be stale, despite us poking the TLB reset. If
1536          * we hold the forcewake during initialisation, these problems
1537          * just magically go away.
1538          */
1539         mutex_lock(&dev_priv->drm.struct_mutex);
1540         intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
1541
1542         ret = i915_gem_init_ggtt(dev_priv);
1543         if (ret) {
1544                 GEM_BUG_ON(ret == -EIO);
1545                 goto err_unlock;
1546         }
1547
1548         ret = i915_gem_init_scratch(dev_priv,
1549                                     IS_GEN(dev_priv, 2) ? SZ_256K : PAGE_SIZE);
1550         if (ret) {
1551                 GEM_BUG_ON(ret == -EIO);
1552                 goto err_ggtt;
1553         }
1554
1555         ret = intel_engines_setup(dev_priv);
1556         if (ret) {
1557                 GEM_BUG_ON(ret == -EIO);
1558                 goto err_unlock;
1559         }
1560
1561         ret = i915_gem_contexts_init(dev_priv);
1562         if (ret) {
1563                 GEM_BUG_ON(ret == -EIO);
1564                 goto err_scratch;
1565         }
1566
1567         ret = intel_engines_init(dev_priv);
1568         if (ret) {
1569                 GEM_BUG_ON(ret == -EIO);
1570                 goto err_context;
1571         }
1572
1573         intel_init_gt_powersave(dev_priv);
1574
1575         ret = intel_uc_init(dev_priv);
1576         if (ret)
1577                 goto err_pm;
1578
1579         ret = i915_gem_init_hw(dev_priv);
1580         if (ret)
1581                 goto err_uc_init;
1582
1583         /*
1584          * Despite its name, intel_init_clock_gating applies not only display
1585          * clock gating workarounds but also GT mmio workarounds and the
1586          * occasional GT power context workaround. Worse, sometimes it includes
1587          * a context register workaround which we need to apply before we
1588          * record the default HW state for all contexts.
1589          *
1590          * FIXME: break up the workarounds and apply them at the right time!
1591          */
1592         intel_init_clock_gating(dev_priv);
1593
1594         ret = intel_engines_verify_workarounds(dev_priv);
1595         if (ret)
1596                 goto err_init_hw;
1597
1598         ret = __intel_engines_record_defaults(dev_priv);
1599         if (ret)
1600                 goto err_init_hw;
1601
1602         if (i915_inject_load_failure()) {
1603                 ret = -ENODEV;
1604                 goto err_init_hw;
1605         }
1606
1607         if (i915_inject_load_failure()) {
1608                 ret = -EIO;
1609                 goto err_init_hw;
1610         }
1611
1612         intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
1613         mutex_unlock(&dev_priv->drm.struct_mutex);
1614
1615         return 0;
1616
1617         /*
1618          * Unwinding is complicated by the fact that we want to handle -EIO
1619          * to mean disable GPU submission but keep KMS alive. We want to mark
1620          * the HW as irreversibly wedged, but keep enough state around that
1621          * the driver doesn't explode during runtime.
1622          */
1623 err_init_hw:
1624         mutex_unlock(&dev_priv->drm.struct_mutex);
1625
1626         i915_gem_set_wedged(dev_priv);
1627         i915_gem_suspend(dev_priv);
1628         i915_gem_suspend_late(dev_priv);
1629
1630         i915_gem_drain_workqueue(dev_priv);
1631
1632         mutex_lock(&dev_priv->drm.struct_mutex);
1633         intel_uc_fini_hw(dev_priv);
1634 err_uc_init:
1635         intel_uc_fini(dev_priv);
1636 err_pm:
1637         if (ret != -EIO) {
1638                 intel_cleanup_gt_powersave(dev_priv);
1639                 intel_engines_cleanup(dev_priv);
1640         }
1641 err_context:
1642         if (ret != -EIO)
1643                 i915_gem_contexts_fini(dev_priv);
1644 err_scratch:
1645         i915_gem_fini_scratch(dev_priv);
1646 err_ggtt:
1647 err_unlock:
1648         intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
1649         mutex_unlock(&dev_priv->drm.struct_mutex);
1650
1651 err_uc_misc:
1652         intel_uc_fini_misc(dev_priv);
1653
1654         if (ret != -EIO) {
1655                 i915_gem_cleanup_userptr(dev_priv);
1656                 i915_timelines_fini(dev_priv);
1657         }
1658
1659         if (ret == -EIO) {
1660                 mutex_lock(&dev_priv->drm.struct_mutex);
1661
1662                 /*
1663                  * Allow engine initialisation to fail by marking the GPU as
1664                  * wedged. But we only want to do this when the GPU is angry;
1665                  * for all other failures, such as an allocation failure, we bail.
1666                  */
1667                 if (!i915_reset_failed(dev_priv)) {
1668                         i915_load_error(dev_priv,
1669                                         "Failed to initialize GPU, declaring it wedged!\n");
1670                         i915_gem_set_wedged(dev_priv);
1671                 }
1672
1673                 /* Minimal basic recovery for KMS */
1674                 ret = i915_ggtt_enable_hw(dev_priv);
1675                 i915_gem_restore_gtt_mappings(dev_priv);
1676                 i915_gem_restore_fences(dev_priv);
1677                 intel_init_clock_gating(dev_priv);
1678
1679                 mutex_unlock(&dev_priv->drm.struct_mutex);
1680         }
1681
1682         i915_gem_drain_freed_objects(dev_priv);
1683         return ret;
1684 }
1685
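/*
 * Hardware-facing teardown: the GT must already be parked, so suspend the
 * remaining state, disable GT powersaving, flush outstanding work and
 * release the uC (GuC/HuC) hardware state.
 */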
1686 void i915_gem_fini_hw(struct drm_i915_private *dev_priv)
1687 {
1688         GEM_BUG_ON(dev_priv->gt.awake);
1689
1690         intel_wakeref_auto_fini(&dev_priv->mm.userfault_wakeref);
1691
1692         i915_gem_suspend_late(dev_priv);
1693         intel_disable_gt_powersave(dev_priv);
1694
1695         /* Flush any outstanding unpin_work. */
1696         i915_gem_drain_workqueue(dev_priv);
1697
1698         mutex_lock(&dev_priv->drm.struct_mutex);
1699         intel_uc_fini_hw(dev_priv);
1700         intel_uc_fini(dev_priv);
1701         mutex_unlock(&dev_priv->drm.struct_mutex);
1702
1703         i915_gem_drain_freed_objects(dev_priv);
1704 }
1705
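/*
 * Software teardown, mirroring i915_gem_init(): release engines, contexts,
 * the scratch page, workaround lists and timelines, then check that no
 * contexts have been leaked.
 */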
1706 void i915_gem_fini(struct drm_i915_private *dev_priv)
1707 {
1708         mutex_lock(&dev_priv->drm.struct_mutex);
1709         intel_engines_cleanup(dev_priv);
1710         i915_gem_contexts_fini(dev_priv);
1711         i915_gem_fini_scratch(dev_priv);
1712         mutex_unlock(&dev_priv->drm.struct_mutex);
1713
1714         intel_wa_list_free(&dev_priv->gt_wa_list);
1715
1716         intel_cleanup_gt_powersave(dev_priv);
1717
1718         intel_uc_fini_misc(dev_priv);
1719         i915_gem_cleanup_userptr(dev_priv);
1720         i915_timelines_fini(dev_priv);
1721
1722         i915_gem_drain_freed_objects(dev_priv);
1723
1724         WARN_ON(!list_empty(&dev_priv->contexts.list));
1725 }
1726
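/* Called once MMIO access is set up; currently just sanitizes the GPU state. */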
1727 void i915_gem_init_mmio(struct drm_i915_private *i915)
1728 {
1729         i915_gem_sanitize(i915);
1730 }
1731
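/*
 * The number of fence registers is platform dependent: 32 on gen7+
 * (except VLV/CHV), 16 on gen4+ and the later gen3 parts, 8 otherwise.
 * Under a vGPU the count advertised by the host is used instead.
 */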
1732 void
1733 i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
1734 {
1735         int i;
1736
1737         if (INTEL_GEN(dev_priv) >= 7 && !IS_VALLEYVIEW(dev_priv) &&
1738             !IS_CHERRYVIEW(dev_priv))
1739                 dev_priv->num_fence_regs = 32;
1740         else if (INTEL_GEN(dev_priv) >= 4 ||
1741                  IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
1742                  IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
1743                 dev_priv->num_fence_regs = 16;
1744         else
1745                 dev_priv->num_fence_regs = 8;
1746
1747         if (intel_vgpu_active(dev_priv))
1748                 dev_priv->num_fence_regs =
1749                                 I915_READ(vgtif_reg(avail_rs.fence_num));
1750
1751         /* Initialize fence registers to zero */
1752         for (i = 0; i < dev_priv->num_fence_regs; i++) {
1753                 struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
1754
1755                 fence->i915 = dev_priv;
1756                 fence->id = i;
1757                 list_add_tail(&fence->link, &dev_priv->mm.fence_list);
1758         }
1759         i915_gem_restore_fences(dev_priv);
1760
1761         i915_gem_detect_bit_6_swizzle(dev_priv);
1762 }
1763
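/*
 * Initialise the GEM memory-management state: the object and shrinker
 * lists, the deferred-free list and the userfault wakeref.
 */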
1764 static void i915_gem_init__mm(struct drm_i915_private *i915)
1765 {
1766         spin_lock_init(&i915->mm.obj_lock);
1767         spin_lock_init(&i915->mm.free_lock);
1768
1769         init_llist_head(&i915->mm.free_list);
1770
1771         INIT_LIST_HEAD(&i915->mm.purge_list);
1772         INIT_LIST_HEAD(&i915->mm.shrink_list);
1773         INIT_LIST_HEAD(&i915->mm.fence_list);
1774
1775         INIT_LIST_HEAD(&i915->mm.userfault_list);
1776         intel_wakeref_auto_init(&i915->mm.userfault_wakeref, i915);
1777
1778         i915_gem_init__objects(i915);
1779 }
1780
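/*
 * Early setup during driver load: GT lists and locks, reset and
 * error-handling primitives, and the private tmpfs mount. Losing the
 * mount is non-fatal; it only disables transparent hugepage support for
 * GEM objects.
 */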
1781 int i915_gem_init_early(struct drm_i915_private *dev_priv)
1782 {
1783         static struct lock_class_key reset_key;
1784         int err;
1785
1786         intel_gt_pm_init(dev_priv);
1787
1788         INIT_LIST_HEAD(&dev_priv->gt.active_rings);
1789         INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
1790         spin_lock_init(&dev_priv->gt.closed_lock);
1791         lockdep_init_map(&dev_priv->gt.reset_lockmap,
1792                          "i915.reset", &reset_key, 0);
1793
1794         i915_gem_init__mm(dev_priv);
1795         i915_gem_init__pm(dev_priv);
1796
1797         init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
1798         init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
1799         mutex_init(&dev_priv->gpu_error.wedge_mutex);
1800         init_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);
1801
1802         atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
1803
1804         spin_lock_init(&dev_priv->fb_tracking.lock);
1805
1806         err = i915_gemfs_init(dev_priv);
1807         if (err)
1808                 DRM_NOTE("Unable to create a private tmpfs mount; hugepage support will be disabled (%d).\n", err);
1809
1810         return 0;
1811 }
1812
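/*
 * Counterpart of i915_gem_init_early(): drain the last freed objects and
 * assert that nothing is left on the free or shrink lists before unload.
 */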
1813 void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
1814 {
1815         i915_gem_drain_freed_objects(dev_priv);
1816         GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
1817         GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
1818         WARN_ON(dev_priv->mm.shrink_count);
1819
1820         cleanup_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);
1821
1822         i915_gemfs_fini(dev_priv);
1823 }
1824
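/*
 * Hibernation hooks: i915_gem_freeze() drops every purgeable object to
 * shrink the image, while i915_gem_freeze_late() below moves the
 * remaining objects to the CPU domain just before the image is written.
 */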
1825 int i915_gem_freeze(struct drm_i915_private *dev_priv)
1826 {
1827         /* Discard all purgeable objects; let userspace recover those as
1828          * required after resuming.
1829          */
1830         i915_gem_shrink_all(dev_priv);
1831
1832         return 0;
1833 }
1834
1835 int i915_gem_freeze_late(struct drm_i915_private *i915)
1836 {
1837         struct drm_i915_gem_object *obj;
1838         intel_wakeref_t wakeref;
1839
1840         /*
1841          * Called just before we write the hibernation image.
1842          *
1843          * We need to update the domain tracking to reflect that the CPU
1844          * will be accessing all the pages to create and restore from the
1845          * hibernation, and so upon restoration those pages will be in the
1846          * CPU domain.
1847          *
1848          * To make sure the hibernation image contains the latest state,
1849          * we update that state just before writing out the image.
1850          *
1851          * To try to reduce the hibernation image, we manually shrink
1852          * the objects as well; see i915_gem_freeze().
1853          */
1854
1855         wakeref = intel_runtime_pm_get(i915);
1856
1857         i915_gem_shrink(i915, -1UL, NULL, ~0);
1858         i915_gem_drain_freed_objects(i915);
1859
1860         list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
1861                 i915_gem_object_lock(obj);
1862                 WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true));
1863                 i915_gem_object_unlock(obj);
1864         }
1865
1866         intel_runtime_pm_put(i915, wakeref);
1867
1868         return 0;
1869 }
1870
1871 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
1872 {
1873         struct drm_i915_file_private *file_priv = file->driver_priv;
1874         struct i915_request *request;
1875
1876         /* Clean up our request list when the client is going away, so that
1877          * later retire_requests won't dereference our soon-to-be-gone
1878          * file_priv.
1879          */
1880         spin_lock(&file_priv->mm.lock);
1881         list_for_each_entry(request, &file_priv->mm.request_list, client_link)
1882                 request->file_priv = NULL;
1883         spin_unlock(&file_priv->mm.lock);
1884 }
1885
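/*
 * Per-client setup on open: allocate the file-private state, initialise
 * its request list and hang timestamp, and open a default context for
 * the new client.
 */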
1886 int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
1887 {
1888         struct drm_i915_file_private *file_priv;
1889         int ret;
1890
1891         DRM_DEBUG("\n");
1892
1893         file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
1894         if (!file_priv)
1895                 return -ENOMEM;
1896
1897         file->driver_priv = file_priv;
1898         file_priv->dev_priv = i915;
1899         file_priv->file = file;
1900
1901         spin_lock_init(&file_priv->mm.lock);
1902         INIT_LIST_HEAD(&file_priv->mm.request_list);
1903
1904         file_priv->bsd_engine = -1;
1905         file_priv->hang_timestamp = jiffies;
1906
1907         ret = i915_gem_context_open(i915, file);
1908         if (ret)
1909                 kfree(file_priv);
1910
1911         return ret;
1912 }
1913
1914 /**
1915  * i915_gem_track_fb - update frontbuffer tracking
1916  * @old: current GEM buffer for the frontbuffer slots
1917  * @new: new GEM buffer for the frontbuffer slots
1918  * @frontbuffer_bits: bitmask of frontbuffer slots
1919  *
1920  * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
1921  * from @old and setting them in @new. Both @old and @new can be NULL.
1922  */
1923 void i915_gem_track_fb(struct drm_i915_gem_object *old,
1924                        struct drm_i915_gem_object *new,
1925                        unsigned frontbuffer_bits)
1926 {
1927         /* Control of individual bits within the mask is guarded by
1928          * the owning plane->mutex, i.e. we can never see concurrent
1929          * manipulation of individual bits. But since the bitfield as a whole
1930          * is updated using RMW, we need to use atomics in order to update
1931          * the bits.
1932          */
1933         BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
1934                      BITS_PER_TYPE(atomic_t));
1935
1936         if (old) {
1937                 WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
1938                 atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
1939         }
1940
1941         if (new) {
1942                 WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
1943                 atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
1944         }
1945 }
1946
1947 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1948 #include "selftests/mock_gem_device.c"
1949 #include "selftests/i915_gem.c"
1950 #endif