/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_mocs.h"
#include <linux/dma-fence-array.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);

static bool cpu_cache_is_coherent(struct drm_device *dev,
                                  enum i915_cache_level level)
{
        return HAS_LLC(to_i915(dev)) || level != I915_CACHE_NONE;
}

static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
        if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
                return false;

        if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
                return true;

        return obj->pin_display;
}
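
/*
 * Example (illustrative sketch, simplified from the real flow): picking a
 * pwrite strategy from the coherency checks above. On LLC platforms
 * cpu_write_needs_clflush() is false for cacheable objects, so writes can
 * stay in the CPU domain; uncached and scanout (pin_display) objects take
 * the clflush/GTT route instead, which is roughly what
 * i915_gem_pwrite_ioctl() does further down in this file:
 *
 *        if (cpu_write_needs_clflush(obj))
 *                ret = i915_gem_gtt_pwrite_fast(obj, args);
 *        if (ret == -EFAULT || ret == -ENOSPC)
 *                ret = i915_gem_shmem_pwrite(obj, args);
 */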

static int
insert_mappable_node(struct i915_ggtt *ggtt,
                     struct drm_mm_node *node, u32 size)
{
        memset(node, 0, sizeof(*node));
        return drm_mm_insert_node_in_range_generic(&ggtt->base.mm, node,
                                                   size, 0, -1,
                                                   0, ggtt->mappable_end,
                                                   DRM_MM_SEARCH_DEFAULT,
                                                   DRM_MM_CREATE_DEFAULT);
}

static void
remove_mappable_node(struct drm_mm_node *node)
{
        drm_mm_remove_node(node);
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
                                  u64 size)
{
        spin_lock(&dev_priv->mm.object_stat_lock);
        dev_priv->mm.object_count++;
        dev_priv->mm.object_memory += size;
        spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
                                     u64 size)
{
        spin_lock(&dev_priv->mm.object_stat_lock);
        dev_priv->mm.object_count--;
        dev_priv->mm.object_memory -= size;
        spin_unlock(&dev_priv->mm.object_stat_lock);
}

static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
        int ret;

        might_sleep();

        if (!i915_reset_in_progress(error))
                return 0;

        /*
         * Only wait 10 seconds for the gpu reset to complete to avoid hanging
         * userspace. If it takes that long something really bad is going on and
         * we should simply try to bail out and fail as gracefully as possible.
         */
        ret = wait_event_interruptible_timeout(error->reset_queue,
                                               !i915_reset_in_progress(error),
                                               I915_RESET_TIMEOUT);
        if (ret == 0) {
                DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
                return -EIO;
        } else if (ret < 0) {
                return ret;
        } else {
                return 0;
        }
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;

        ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
        if (ret)
                return ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct drm_i915_gem_get_aperture *args = data;
        struct i915_vma *vma;
        size_t pinned;

        pinned = 0;
        mutex_lock(&dev->struct_mutex);
        list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
                if (i915_vma_is_pinned(vma))
                        pinned += vma->node.size;
        list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
                if (i915_vma_is_pinned(vma))
                        pinned += vma->node.size;
        mutex_unlock(&dev->struct_mutex);

        args->aper_size = ggtt->base.total;
        args->aper_available_size = args->aper_size - pinned;

        return 0;
}

static struct sg_table *
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
        struct address_space *mapping = obj->base.filp->f_mapping;
        drm_dma_handle_t *phys;
        struct sg_table *st;
        struct scatterlist *sg;
        char *vaddr;
        int i;

        if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
                return ERR_PTR(-EINVAL);

        /* Always aligning to the object size allows a single allocation
         * to handle all possible callers, and given typical object sizes,
         * the alignment of the buddy allocation will naturally match.
         */
        phys = drm_pci_alloc(obj->base.dev,
                             obj->base.size,
                             roundup_pow_of_two(obj->base.size));
        if (!phys)
                return ERR_PTR(-ENOMEM);

        vaddr = phys->vaddr;
        for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
                struct page *page;
                char *src;

                page = shmem_read_mapping_page(mapping, i);
                if (IS_ERR(page)) {
                        st = ERR_CAST(page);
                        goto err_phys;
                }

                src = kmap_atomic(page);
                memcpy(vaddr, src, PAGE_SIZE);
                drm_clflush_virt_range(vaddr, PAGE_SIZE);
                kunmap_atomic(src);

                put_page(page);
                vaddr += PAGE_SIZE;
        }

        i915_gem_chipset_flush(to_i915(obj->base.dev));

        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (!st) {
                st = ERR_PTR(-ENOMEM);
                goto err_phys;
        }

        if (sg_alloc_table(st, 1, GFP_KERNEL)) {
                kfree(st);
                st = ERR_PTR(-ENOMEM);
                goto err_phys;
        }

        sg = st->sgl;
        sg->offset = 0;
        sg->length = obj->base.size;

        sg_dma_address(sg) = phys->busaddr;
        sg_dma_len(sg) = obj->base.size;

        obj->phys_handle = phys;
        return st;

err_phys:
        drm_pci_free(obj->base.dev, phys);
        return st;
}

static void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
                                struct sg_table *pages,
                                bool needs_clflush)
{
        GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);

        if (obj->mm.madv == I915_MADV_DONTNEED)
                obj->mm.dirty = false;

        if (needs_clflush &&
            (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
            !cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
                drm_clflush_sg(pages);

        obj->base.read_domains = I915_GEM_DOMAIN_CPU;
        obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}

static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
                               struct sg_table *pages)
{
        __i915_gem_object_release_shmem(obj, pages, false);

        if (obj->mm.dirty) {
                struct address_space *mapping = obj->base.filp->f_mapping;
                char *vaddr = obj->phys_handle->vaddr;
                int i;

                for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
                        struct page *page;
                        char *dst;

                        page = shmem_read_mapping_page(mapping, i);
                        if (IS_ERR(page))
                                continue;

                        dst = kmap_atomic(page);
                        drm_clflush_virt_range(vaddr, PAGE_SIZE);
                        memcpy(dst, vaddr, PAGE_SIZE);
                        kunmap_atomic(dst);

                        set_page_dirty(page);
                        if (obj->mm.madv == I915_MADV_WILLNEED)
                                mark_page_accessed(page);
                        put_page(page);
                        vaddr += PAGE_SIZE;
                }
                obj->mm.dirty = false;
        }

        sg_free_table(pages);
        kfree(pages);

        drm_pci_free(obj->base.dev, obj->phys_handle);
}

static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
        i915_gem_object_unpin_pages(obj);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
        .get_pages = i915_gem_object_get_pages_phys,
        .put_pages = i915_gem_object_put_pages_phys,
        .release = i915_gem_object_release_phys,
};

int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
        struct i915_vma *vma;
        LIST_HEAD(still_in_list);
        int ret;

        lockdep_assert_held(&obj->base.dev->struct_mutex);

        /* Closed vma are removed from the obj->vma_list - but they may
         * still have an active binding on the object. To remove those we
         * must wait for all rendering to complete to the object (as unbinding
         * must anyway), and retire the requests.
         */
        ret = i915_gem_object_wait(obj,
                                   I915_WAIT_INTERRUPTIBLE |
                                   I915_WAIT_LOCKED |
                                   I915_WAIT_ALL,
                                   MAX_SCHEDULE_TIMEOUT,
                                   NULL);
        if (ret)
                return ret;

        i915_gem_retire_requests(to_i915(obj->base.dev));

        while ((vma = list_first_entry_or_null(&obj->vma_list,
                                               struct i915_vma,
                                               obj_link))) {
                list_move_tail(&vma->obj_link, &still_in_list);
                ret = i915_vma_unbind(vma);
                if (ret)
                        break;
        }
        list_splice(&still_in_list, &obj->vma_list);

        return ret;
}

static long
i915_gem_object_wait_fence(struct dma_fence *fence,
                           unsigned int flags,
                           long timeout,
                           struct intel_rps_client *rps)
{
        struct drm_i915_gem_request *rq;

        BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);

        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                return timeout;

        if (!dma_fence_is_i915(fence))
                return dma_fence_wait_timeout(fence,
                                              flags & I915_WAIT_INTERRUPTIBLE,
                                              timeout);

        rq = to_request(fence);
        if (i915_gem_request_completed(rq))
                goto out;

        /* This client is about to stall waiting for the GPU. In many cases
         * this is undesirable and limits the throughput of the system, as
         * many clients cannot continue processing user input/output whilst
         * blocked. RPS autotuning may take tens of milliseconds to respond
         * to the GPU load and thus incurs additional latency for the client.
         * We can circumvent that by promoting the GPU frequency to maximum
         * before we wait. This makes the GPU throttle up much more quickly
         * (good for benchmarks and user experience, e.g. window animations),
         * but at a cost of spending more power processing the workload
         * (bad for battery). Not all clients even want their results
         * immediately and for them we should just let the GPU select its own
         * frequency to maximise efficiency. To prevent a single client from
         * forcing the clocks too high for the whole system, we only allow
         * each client to waitboost once in a busy period.
         */
        if (rps) {
                if (INTEL_GEN(rq->i915) >= 6)
                        gen6_rps_boost(rq->i915, rps, rq->emitted_jiffies);
                else
                        rps = NULL;
        }

        timeout = i915_wait_request(rq, flags, timeout);

out:
        if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq))
                i915_gem_request_retire_upto(rq);

        if (rps && rq->global_seqno == intel_engine_last_submit(rq->engine)) {
                /* The GPU is now idle and this client has stalled.
                 * Since no other client has submitted a request in the
                 * meantime, assume that this client is the only one
                 * supplying work to the GPU but is unable to keep that
                 * work supplied because it is waiting. Since the GPU is
                 * then never kept fully busy, RPS autoclocking will
                 * keep the clocks relatively low, causing further delays.
                 * Compensate by giving the synchronous client credit for
                 * a waitboost next time.
                 */
                spin_lock(&rq->i915->rps.client_lock);
                list_del_init(&rps->link);
                spin_unlock(&rq->i915->rps.client_lock);
        }

        return timeout;
}
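
/*
 * Example (sketch): waiting on a single fence on behalf of a client so
 * that the stall can be charged for a waitboost. The rps client would
 * come from the file's private state, as to_rps_client() below derives;
 * a negative return means the wait was interrupted:
 *
 *        timeout = i915_gem_object_wait_fence(fence,
 *                                             I915_WAIT_INTERRUPTIBLE,
 *                                             MAX_SCHEDULE_TIMEOUT,
 *                                             to_rps_client(file));
 *        if (timeout < 0)
 *                return timeout;
 */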

static long
i915_gem_object_wait_reservation(struct reservation_object *resv,
                                 unsigned int flags,
                                 long timeout,
                                 struct intel_rps_client *rps)
{
        struct dma_fence *excl;

        if (flags & I915_WAIT_ALL) {
                struct dma_fence **shared;
                unsigned int count, i;
                int ret;

                ret = reservation_object_get_fences_rcu(resv,
                                                        &excl, &count, &shared);
                if (ret)
                        return ret;

                for (i = 0; i < count; i++) {
                        timeout = i915_gem_object_wait_fence(shared[i],
                                                             flags, timeout,
                                                             rps);
                        if (timeout <= 0)
                                break;

                        dma_fence_put(shared[i]);
                }

                for (; i < count; i++)
                        dma_fence_put(shared[i]);
                kfree(shared);
        } else {
                excl = reservation_object_get_excl_rcu(resv);
        }

        if (excl && timeout > 0)
                timeout = i915_gem_object_wait_fence(excl, flags, timeout, rps);

        dma_fence_put(excl);

        return timeout;
}

static void __fence_set_priority(struct dma_fence *fence, int prio)
{
        struct drm_i915_gem_request *rq;
        struct intel_engine_cs *engine;

        if (!dma_fence_is_i915(fence))
                return;

        rq = to_request(fence);
        engine = rq->engine;
        if (!engine->schedule)
                return;

        engine->schedule(rq, prio);
}

static void fence_set_priority(struct dma_fence *fence, int prio)
{
        /* Recurse once into a fence-array */
        if (dma_fence_is_array(fence)) {
                struct dma_fence_array *array = to_dma_fence_array(fence);
                int i;

                for (i = 0; i < array->num_fences; i++)
                        __fence_set_priority(array->fences[i], prio);
        } else {
                __fence_set_priority(fence, prio);
        }
}

int
i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
                              unsigned int flags,
                              int prio)
{
        struct dma_fence *excl;

        if (flags & I915_WAIT_ALL) {
                struct dma_fence **shared;
                unsigned int count, i;
                int ret;

                ret = reservation_object_get_fences_rcu(obj->resv,
                                                        &excl, &count, &shared);
                if (ret)
                        return ret;

                for (i = 0; i < count; i++) {
                        fence_set_priority(shared[i], prio);
                        dma_fence_put(shared[i]);
                }

                kfree(shared);
        } else {
                excl = reservation_object_get_excl_rcu(obj->resv);
        }

        if (excl) {
                fence_set_priority(excl, prio);
                dma_fence_put(excl);
        }
        return 0;
}
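
/*
 * Example (sketch): the display code can use this to bump everything
 * still rendering to a framebuffer object ahead of the queue before a
 * flip, e.g. i915_gem_object_wait_priority(obj, I915_WAIT_ALL,
 * I915_PRIORITY_DISPLAY). Note that priorities only take effect on
 * engines that implement engine->schedule.
 */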

/**
 * Waits for rendering to the object to be completed
 * @obj: i915 gem object
 * @flags: how to wait (under a lock, for all rendering or just for writes etc)
 * @timeout: how long to wait
 * @rps: client (user process) to charge for any waitboosting
 */
int
i915_gem_object_wait(struct drm_i915_gem_object *obj,
                     unsigned int flags,
                     long timeout,
                     struct intel_rps_client *rps)
{
        might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
        GEM_BUG_ON(debug_locks &&
                   !!lockdep_is_held(&obj->base.dev->struct_mutex) !=
                   !!(flags & I915_WAIT_LOCKED));
#endif
        GEM_BUG_ON(timeout < 0);

        timeout = i915_gem_object_wait_reservation(obj->resv,
                                                   flags, timeout,
                                                   rps);
        return timeout < 0 ? timeout : 0;
}
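
/*
 * Example (sketch): an unlocked, interruptible wait for all outstanding
 * rendering, as the pread/pwrite ioctls below issue before touching the
 * backing pages. With MAX_SCHEDULE_TIMEOUT this collapses to 0 on
 * success or a negative errno if interrupted:
 *
 *        ret = i915_gem_object_wait(obj,
 *                                   I915_WAIT_INTERRUPTIBLE |
 *                                   I915_WAIT_ALL,
 *                                   MAX_SCHEDULE_TIMEOUT,
 *                                   to_rps_client(file));
 */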

static struct intel_rps_client *to_rps_client(struct drm_file *file)
{
        struct drm_i915_file_private *fpriv = file->driver_priv;

        return &fpriv->rps;
}

int
i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
                            int align)
{
        int ret;

        if (align > obj->base.size)
                return -EINVAL;

        if (obj->ops == &i915_gem_phys_ops)
                return 0;

        if (obj->mm.madv != I915_MADV_WILLNEED)
                return -EFAULT;

        if (obj->base.filp == NULL)
                return -EINVAL;

        ret = i915_gem_object_unbind(obj);
        if (ret)
                return ret;

        __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
        if (obj->mm.pages)
                return -EBUSY;

        obj->ops = &i915_gem_phys_ops;

        return i915_gem_object_pin_pages(obj);
}
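
/*
 * Example (sketch): converting a small shmem object into a physically
 * contiguous one, as needed by hardware that requires a contiguous
 * buffer (e.g. legacy cursor support). The alignment must not exceed
 * the object size, and the object must still be shmem backed and
 * WILLNEED:
 *
 *        ret = i915_gem_object_attach_phys(obj, align);
 *        if (ret == 0)
 *                vaddr = obj->phys_handle->vaddr;
 */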

static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
                     struct drm_i915_gem_pwrite *args,
                     struct drm_file *file)
{
        void *vaddr = obj->phys_handle->vaddr + args->offset;
        char __user *user_data = u64_to_user_ptr(args->data_ptr);

        /* We manually control the domain here and pretend that it
         * remains coherent i.e. in the GTT domain, like shmem_pwrite.
         */
        intel_fb_obj_invalidate(obj, ORIGIN_CPU);
        if (copy_from_user(vaddr, user_data, args->size))
                return -EFAULT;

        drm_clflush_virt_range(vaddr, args->size);
        i915_gem_chipset_flush(to_i915(obj->base.dev));

        intel_fb_obj_flush(obj, false, ORIGIN_CPU);
        return 0;
}

void *i915_gem_object_alloc(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        kmem_cache_free(dev_priv->objects, obj);
}

static int
i915_gem_create(struct drm_file *file,
                struct drm_device *dev,
                uint64_t size,
                uint32_t *handle_p)
{
        struct drm_i915_gem_object *obj;
        int ret;
        u32 handle;

        size = roundup(size, PAGE_SIZE);
        if (size == 0)
                return -EINVAL;

        /* Allocate the new object */
        obj = i915_gem_object_create(dev, size);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        ret = drm_gem_handle_create(file, &obj->base, &handle);
        /* drop reference from allocate - handle holds it now */
        i915_gem_object_put(obj);
        if (ret)
                return ret;

        *handle_p = handle;
        return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
                     struct drm_device *dev,
                     struct drm_mode_create_dumb *args)
{
        /* have to work out size/pitch and return them */
        args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
        args->size = args->pitch * args->height;
        return i915_gem_create(file, dev,
                               args->size, &args->handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file)
{
        struct drm_i915_gem_create *args = data;

        i915_gem_flush_free_objects(to_i915(dev));

        return i915_gem_create(file, dev,
                               args->size, &args->handle);
}

static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
                        const char *gpu_vaddr, int gpu_offset,
                        int length)
{
        int ret, cpu_offset = 0;

        while (length > 0) {
                int cacheline_end = ALIGN(gpu_offset + 1, 64);
                int this_length = min(cacheline_end - gpu_offset, length);
                int swizzled_gpu_offset = gpu_offset ^ 64;

                ret = __copy_to_user(cpu_vaddr + cpu_offset,
                                     gpu_vaddr + swizzled_gpu_offset,
                                     this_length);
                if (ret)
                        return ret + length;

                cpu_offset += this_length;
                gpu_offset += this_length;
                length -= this_length;
        }

        return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
                          const char __user *cpu_vaddr,
                          int length)
{
        int ret, cpu_offset = 0;

        while (length > 0) {
                int cacheline_end = ALIGN(gpu_offset + 1, 64);
                int this_length = min(cacheline_end - gpu_offset, length);
                int swizzled_gpu_offset = gpu_offset ^ 64;

                ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
                                       cpu_vaddr + cpu_offset,
                                       this_length);
                if (ret)
                        return ret + length;

                cpu_offset += this_length;
                gpu_offset += this_length;
                length -= this_length;
        }

        return 0;
}
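
/*
 * Worked example for the swizzled copies above: with bit17 swizzling
 * active, the two 64-byte halves of every 128-byte span are exchanged,
 * so logical offset 0 is stored at 64, 64 at 0, 128 at 192, and so on --
 * hence "gpu_offset ^ 64". Each copy is bounded to a single 64-byte
 * cacheline so that no chunk ever straddles a swizzle boundary.
 */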

/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
                                    unsigned int *needs_clflush)
{
        int ret;

        lockdep_assert_held(&obj->base.dev->struct_mutex);

        *needs_clflush = 0;
        if (!i915_gem_object_has_struct_page(obj))
                return -ENODEV;

        ret = i915_gem_object_wait(obj,
                                   I915_WAIT_INTERRUPTIBLE |
                                   I915_WAIT_LOCKED,
                                   MAX_SCHEDULE_TIMEOUT,
                                   NULL);
        if (ret)
                return ret;

        ret = i915_gem_object_pin_pages(obj);
        if (ret)
                return ret;

        i915_gem_object_flush_gtt_write_domain(obj);

        /* If we're not in the cpu read domain, set ourselves into the gtt
         * read domain and manually flush cachelines (if required). This
         * optimizes for the case when the gpu will dirty the data
         * anyway again before the next pread happens.
         */
        if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
                *needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
                                                        obj->cache_level);

        if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
                ret = i915_gem_object_set_to_cpu_domain(obj, false);
                if (ret)
                        goto err_unpin;

                *needs_clflush = 0;
        }

        /* return with the pages pinned */
        return 0;

err_unpin:
        i915_gem_object_unpin_pages(obj);
        return ret;
}
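
/*
 * Example (sketch): the expected calling sequence, as used by
 * i915_gem_shmem_pread() below. The pages remain pinned until the
 * matching i915_gem_obj_finish_shmem_access():
 *
 *        ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
 *        if (ret)
 *                return ret;
 *        ... copy out via i915_gem_object_get_page() and kmap() ...
 *        i915_gem_obj_finish_shmem_access(obj);
 */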

int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
                                     unsigned int *needs_clflush)
{
        int ret;

        lockdep_assert_held(&obj->base.dev->struct_mutex);

        *needs_clflush = 0;
        if (!i915_gem_object_has_struct_page(obj))
                return -ENODEV;

        ret = i915_gem_object_wait(obj,
                                   I915_WAIT_INTERRUPTIBLE |
                                   I915_WAIT_LOCKED |
                                   I915_WAIT_ALL,
                                   MAX_SCHEDULE_TIMEOUT,
                                   NULL);
        if (ret)
                return ret;

        ret = i915_gem_object_pin_pages(obj);
        if (ret)
                return ret;

        i915_gem_object_flush_gtt_write_domain(obj);

        /* If we're not in the cpu write domain, set ourselves into the
         * gtt write domain and manually flush cachelines (as required).
         * This optimizes for the case when the gpu will use the data
         * right away and we therefore have to clflush anyway.
         */
        if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
                *needs_clflush |= cpu_write_needs_clflush(obj) << 1;

        /* Same trick applies to invalidate partially written cachelines read
         * before writing.
         */
        if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
                *needs_clflush |= !cpu_cache_is_coherent(obj->base.dev,
                                                         obj->cache_level);

        if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
                ret = i915_gem_object_set_to_cpu_domain(obj, true);
                if (ret)
                        goto err_unpin;

                *needs_clflush = 0;
        }

        if ((*needs_clflush & CLFLUSH_AFTER) == 0)
                obj->cache_dirty = true;

        intel_fb_obj_invalidate(obj, ORIGIN_CPU);
        obj->mm.dirty = true;
        /* return with the pages pinned */
        return 0;

err_unpin:
        i915_gem_object_unpin_pages(obj);
        return ret;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
                             bool swizzled)
{
        if (unlikely(swizzled)) {
                unsigned long start = (unsigned long) addr;
                unsigned long end = (unsigned long) addr + length;

                /* For swizzling simply ensure that we always flush both
                 * channels. Lame, but simple and it works. Swizzled
                 * pwrite/pread is far from a hotpath - current userspace
                 * doesn't use it at all. */
                start = round_down(start, 128);
                end = round_up(end, 128);

                drm_clflush_virt_range((void *)start, end - start);
        } else {
                drm_clflush_virt_range(addr, length);
        }
}

/* The only difference from the fast-path function is that this one can
 * handle bit17 swizzling and uses non-atomic copy and kmap functions.
 */
static int
shmem_pread_slow(struct page *page, int offset, int length,
                 char __user *user_data,
                 bool page_do_bit17_swizzling, bool needs_clflush)
{
        char *vaddr;
        int ret;

        vaddr = kmap(page);
        if (needs_clflush)
                shmem_clflush_swizzled_range(vaddr + offset, length,
                                             page_do_bit17_swizzling);

        if (page_do_bit17_swizzling)
                ret = __copy_to_user_swizzled(user_data, vaddr, offset, length);
        else
                ret = __copy_to_user(user_data, vaddr + offset, length);
        kunmap(page);

        return ret ? -EFAULT : 0;
}

static int
shmem_pread(struct page *page, int offset, int length, char __user *user_data,
            bool page_do_bit17_swizzling, bool needs_clflush)
{
        int ret;

        ret = -ENODEV;
        if (!page_do_bit17_swizzling) {
                char *vaddr = kmap_atomic(page);

                if (needs_clflush)
                        drm_clflush_virt_range(vaddr + offset, length);
                ret = __copy_to_user_inatomic(user_data, vaddr + offset, length);
                kunmap_atomic(vaddr);
        }
        if (ret == 0)
                return 0;

        return shmem_pread_slow(page, offset, length, user_data,
                                page_do_bit17_swizzling, needs_clflush);
}

static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
                     struct drm_i915_gem_pread *args)
{
        char __user *user_data;
        u64 remain;
        unsigned int obj_do_bit17_swizzling;
        unsigned int needs_clflush;
        unsigned int idx, offset;
        int ret;

        obj_do_bit17_swizzling = 0;
        if (i915_gem_object_needs_bit17_swizzle(obj))
                obj_do_bit17_swizzling = BIT(17);

        ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
        if (ret)
                return ret;

        ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
        mutex_unlock(&obj->base.dev->struct_mutex);
        if (ret)
                return ret;

        remain = args->size;
        user_data = u64_to_user_ptr(args->data_ptr);
        offset = offset_in_page(args->offset);
        for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
                struct page *page = i915_gem_object_get_page(obj, idx);
                int length;

                length = remain;
                if (offset + length > PAGE_SIZE)
                        length = PAGE_SIZE - offset;

                ret = shmem_pread(page, offset, length, user_data,
                                  page_to_phys(page) & obj_do_bit17_swizzling,
                                  needs_clflush);
                if (ret)
                        break;

                remain -= length;
                user_data += length;
                offset = 0;
        }

        i915_gem_obj_finish_shmem_access(obj);
        return ret;
}

static inline bool
gtt_user_read(struct io_mapping *mapping,
              loff_t base, int offset,
              char __user *user_data, int length)
{
        void *vaddr;
        unsigned long unwritten;

        /* We can use the cpu mem copy function because this is X86. */
        vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base);
        unwritten = __copy_to_user_inatomic(user_data, vaddr + offset, length);
        io_mapping_unmap_atomic(vaddr);
        if (unwritten) {
                vaddr = (void __force *)
                        io_mapping_map_wc(mapping, base, PAGE_SIZE);
                unwritten = copy_to_user(user_data, vaddr + offset, length);
                io_mapping_unmap(vaddr);
        }
        return unwritten;
}
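
/*
 * Design note: the atomic WC mapping above cannot service faults, so a
 * short copy (non-zero return from __copy_to_user_inatomic()) retries
 * through a regular sleeping io_mapping_map_wc() mapping, where
 * copy_to_user() may fault in the destination pages. ggtt_write()
 * further down mirrors this pattern for the write direction.
 */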

static int
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
                   const struct drm_i915_gem_pread *args)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_ggtt *ggtt = &i915->ggtt;
        struct drm_mm_node node;
        struct i915_vma *vma;
        void __user *user_data;
        u64 remain, offset;
        int ret;

        ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
        if (ret)
                return ret;

        intel_runtime_pm_get(i915);
        vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
                                       PIN_MAPPABLE | PIN_NONBLOCK);
        if (!IS_ERR(vma)) {
                node.start = i915_ggtt_offset(vma);
                node.allocated = false;
                ret = i915_vma_put_fence(vma);
                if (ret) {
                        i915_vma_unpin(vma);
                        vma = ERR_PTR(ret);
                }
        }
        if (IS_ERR(vma)) {
                ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
                if (ret)
                        goto out_unlock;
                GEM_BUG_ON(!node.allocated);
        }

        ret = i915_gem_object_set_to_gtt_domain(obj, false);
        if (ret)
                goto out_unpin;

        mutex_unlock(&i915->drm.struct_mutex);

        user_data = u64_to_user_ptr(args->data_ptr);
        remain = args->size;
        offset = args->offset;

        while (remain > 0) {
                /* Operation in this page
                 *
                 * page_base = page offset within aperture
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
                u32 page_base = node.start;
                unsigned page_offset = offset_in_page(offset);
                unsigned page_length = PAGE_SIZE - page_offset;
                page_length = remain < page_length ? remain : page_length;
                if (node.allocated) {
                        wmb();
                        ggtt->base.insert_page(&ggtt->base,
                                               i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
                                               node.start, I915_CACHE_NONE, 0);
                        wmb();
                } else {
                        page_base += offset & PAGE_MASK;
                }

                if (gtt_user_read(&ggtt->mappable, page_base, page_offset,
                                  user_data, page_length)) {
                        ret = -EFAULT;
                        break;
                }

                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

        mutex_lock(&i915->drm.struct_mutex);
out_unpin:
        if (node.allocated) {
                wmb();
                ggtt->base.clear_range(&ggtt->base,
                                       node.start, node.size);
                remove_mappable_node(&node);
        } else {
                i915_vma_unpin(vma);
        }
out_unlock:
        intel_runtime_pm_put(i915);
        mutex_unlock(&i915->drm.struct_mutex);

        return ret;
}

/**
 * Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *file)
{
        struct drm_i915_gem_pread *args = data;
        struct drm_i915_gem_object *obj;
        int ret;

        if (args->size == 0)
                return 0;

        if (!access_ok(VERIFY_WRITE,
                       u64_to_user_ptr(args->data_ptr),
                       args->size))
                return -EFAULT;

        obj = i915_gem_object_lookup(file, args->handle);
        if (!obj)
                return -ENOENT;

        /* Bounds check source.  */
        if (args->offset > obj->base.size ||
            args->size > obj->base.size - args->offset) {
                ret = -EINVAL;
                goto out;
        }

        trace_i915_gem_object_pread(obj, args->offset, args->size);

        ret = i915_gem_object_wait(obj,
                                   I915_WAIT_INTERRUPTIBLE,
                                   MAX_SCHEDULE_TIMEOUT,
                                   to_rps_client(file));
        if (ret)
                goto out;

        ret = i915_gem_object_pin_pages(obj);
        if (ret)
                goto out;

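        /* Try the CPU path first; objects without backing struct pages
         * (-ENODEV) and copies that fault (-EFAULT) fall back to reading
         * through a GTT mapping of the object.
         */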
        ret = i915_gem_shmem_pread(obj, args);
        if (ret == -EFAULT || ret == -ENODEV)
                ret = i915_gem_gtt_pread(obj, args);

        i915_gem_object_unpin_pages(obj);
out:
        i915_gem_object_put(obj);
        return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data.
 */

static inline bool
ggtt_write(struct io_mapping *mapping,
           loff_t base, int offset,
           char __user *user_data, int length)
{
        void *vaddr;
        unsigned long unwritten;

        /* We can use the cpu mem copy function because this is X86. */
        vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base);
        unwritten = __copy_from_user_inatomic_nocache(vaddr + offset,
                                                      user_data, length);
        io_mapping_unmap_atomic(vaddr);
        if (unwritten) {
                vaddr = (void __force *)
                        io_mapping_map_wc(mapping, base, PAGE_SIZE);
                unwritten = copy_from_user(vaddr + offset, user_data, length);
                io_mapping_unmap(vaddr);
        }

        return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 * @obj: i915 GEM object
 * @args: pwrite arguments structure
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
                         const struct drm_i915_gem_pwrite *args)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_ggtt *ggtt = &i915->ggtt;
        struct drm_mm_node node;
        struct i915_vma *vma;
        u64 remain, offset;
        void __user *user_data;
        int ret;

        ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
        if (ret)
                return ret;

        intel_runtime_pm_get(i915);
        vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
                                       PIN_MAPPABLE | PIN_NONBLOCK);
        if (!IS_ERR(vma)) {
                node.start = i915_ggtt_offset(vma);
                node.allocated = false;
                ret = i915_vma_put_fence(vma);
                if (ret) {
                        i915_vma_unpin(vma);
                        vma = ERR_PTR(ret);
                }
        }
        if (IS_ERR(vma)) {
                ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
                if (ret)
                        goto out_unlock;
                GEM_BUG_ON(!node.allocated);
        }

        ret = i915_gem_object_set_to_gtt_domain(obj, true);
        if (ret)
                goto out_unpin;

        mutex_unlock(&i915->drm.struct_mutex);

        intel_fb_obj_invalidate(obj, ORIGIN_CPU);

        user_data = u64_to_user_ptr(args->data_ptr);
        offset = args->offset;
        remain = args->size;
        while (remain) {
                /* Operation in this page
                 *
                 * page_base = page offset within aperture
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
                u32 page_base = node.start;
                unsigned int page_offset = offset_in_page(offset);
                unsigned int page_length = PAGE_SIZE - page_offset;
                page_length = remain < page_length ? remain : page_length;
                if (node.allocated) {
                        wmb(); /* flush the write before we modify the GGTT */
                        ggtt->base.insert_page(&ggtt->base,
                                               i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
                                               node.start, I915_CACHE_NONE, 0);
                        wmb(); /* flush modifications to the GGTT (insert_page) */
                } else {
                        page_base += offset & PAGE_MASK;
                }
                /* If we get a fault while copying data, then (presumably) our
                 * source page isn't available.  Return the error and we'll
                 * retry in the slow path.
                 * If the object is non-shmem backed, we retry again with the
                 * path that handles page fault.
                 */
                if (ggtt_write(&ggtt->mappable, page_base, page_offset,
                               user_data, page_length)) {
                        ret = -EFAULT;
                        break;
                }

                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }
        intel_fb_obj_flush(obj, false, ORIGIN_CPU);

        mutex_lock(&i915->drm.struct_mutex);
out_unpin:
        if (node.allocated) {
                wmb();
                ggtt->base.clear_range(&ggtt->base,
                                       node.start, node.size);
                remove_mappable_node(&node);
        } else {
                i915_vma_unpin(vma);
        }
out_unlock:
        intel_runtime_pm_put(i915);
        mutex_unlock(&i915->drm.struct_mutex);
        return ret;
}

static int
shmem_pwrite_slow(struct page *page, int offset, int length,
                  char __user *user_data,
                  bool page_do_bit17_swizzling,
                  bool needs_clflush_before,
                  bool needs_clflush_after)
{
        char *vaddr;
        int ret;

        vaddr = kmap(page);
        if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
                shmem_clflush_swizzled_range(vaddr + offset, length,
                                             page_do_bit17_swizzling);
        if (page_do_bit17_swizzling)
                ret = __copy_from_user_swizzled(vaddr, offset, user_data,
                                                length);
        else
                ret = __copy_from_user(vaddr + offset, user_data, length);
        if (needs_clflush_after)
                shmem_clflush_swizzled_range(vaddr + offset, length,
                                             page_do_bit17_swizzling);
        kunmap(page);

        return ret ? -EFAULT : 0;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set.
 */
static int
shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
             bool page_do_bit17_swizzling,
             bool needs_clflush_before,
             bool needs_clflush_after)
{
        int ret;

        ret = -ENODEV;
        if (!page_do_bit17_swizzling) {
                char *vaddr = kmap_atomic(page);

                if (needs_clflush_before)
                        drm_clflush_virt_range(vaddr + offset, len);
                ret = __copy_from_user_inatomic(vaddr + offset, user_data, len);
                if (needs_clflush_after)
                        drm_clflush_virt_range(vaddr + offset, len);

                kunmap_atomic(vaddr);
        }
        if (ret == 0)
                return ret;

        return shmem_pwrite_slow(page, offset, len, user_data,
                                 page_do_bit17_swizzling,
                                 needs_clflush_before,
                                 needs_clflush_after);
}

static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
                      const struct drm_i915_gem_pwrite *args)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        void __user *user_data;
        u64 remain;
        unsigned int obj_do_bit17_swizzling;
        unsigned int partial_cacheline_write;
        unsigned int needs_clflush;
        unsigned int offset, idx;
        int ret;

        ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
        if (ret)
                return ret;

        ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
        mutex_unlock(&i915->drm.struct_mutex);
        if (ret)
                return ret;

        obj_do_bit17_swizzling = 0;
        if (i915_gem_object_needs_bit17_swizzle(obj))
                obj_do_bit17_swizzling = BIT(17);

        /* If we don't overwrite a cacheline completely we need to be
         * careful to have up-to-date data by first clflushing. Don't
         * overcomplicate things and flush the entire page.
         */
        partial_cacheline_write = 0;
        if (needs_clflush & CLFLUSH_BEFORE)
                partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
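        /* For example, with 64-byte cachelines this mask is 0x3f, and
         * "(offset | length) & partial_cacheline_write" below is non-zero
         * whenever a copy does not both start and end on a cacheline
         * boundary.
         */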

        user_data = u64_to_user_ptr(args->data_ptr);
        remain = args->size;
        offset = offset_in_page(args->offset);
        for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
                struct page *page = i915_gem_object_get_page(obj, idx);
                int length;

                length = remain;
                if (offset + length > PAGE_SIZE)
                        length = PAGE_SIZE - offset;

                ret = shmem_pwrite(page, offset, length, user_data,
                                   page_to_phys(page) & obj_do_bit17_swizzling,
                                   (offset | length) & partial_cacheline_write,
                                   needs_clflush & CLFLUSH_AFTER);
                if (ret)
                        break;

                remain -= length;
                user_data += length;
                offset = 0;
        }

        intel_fb_obj_flush(obj, false, ORIGIN_CPU);
        i915_gem_obj_finish_shmem_access(obj);
        return ret;
}

/**
 * Writes data to the object referenced by handle.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file)
{
        struct drm_i915_gem_pwrite *args = data;
        struct drm_i915_gem_object *obj;
        int ret;

        if (args->size == 0)
                return 0;

        if (!access_ok(VERIFY_READ,
                       u64_to_user_ptr(args->data_ptr),
                       args->size))
                return -EFAULT;

        obj = i915_gem_object_lookup(file, args->handle);
        if (!obj)
                return -ENOENT;

        /* Bounds check destination. */
        if (args->offset > obj->base.size ||
            args->size > obj->base.size - args->offset) {
                ret = -EINVAL;
                goto err;
        }

        trace_i915_gem_object_pwrite(obj, args->offset, args->size);

        ret = i915_gem_object_wait(obj,
                                   I915_WAIT_INTERRUPTIBLE |
                                   I915_WAIT_ALL,
                                   MAX_SCHEDULE_TIMEOUT,
                                   to_rps_client(file));
        if (ret)
                goto err;

        ret = i915_gem_object_pin_pages(obj);
        if (ret)
                goto err;

        ret = -EFAULT;
        /* We can only do the GTT pwrite on untiled buffers, as otherwise
         * it would end up going through the fenced access, and we'll get
         * different detiling behavior between reading and writing.
         * pread/pwrite currently are reading and writing from the CPU
         * perspective, requiring manual detiling by the client.
         */
        if (!i915_gem_object_has_struct_page(obj) ||
            cpu_write_needs_clflush(obj))
                /* Note that the gtt paths might fail with non-page-backed user
                 * pointers (e.g. gtt mappings when moving data between
                 * textures). Fallback to the shmem path in that case.
                 */
                ret = i915_gem_gtt_pwrite_fast(obj, args);

        if (ret == -EFAULT || ret == -ENOSPC) {
                if (obj->phys_handle)
                        ret = i915_gem_phys_pwrite(obj, args, file);
                else
                        ret = i915_gem_shmem_pwrite(obj, args);
        }

        i915_gem_object_unpin_pages(obj);
err:
        i915_gem_object_put(obj);
        return ret;
}
1478
1479 static inline enum fb_op_origin
1480 write_origin(struct drm_i915_gem_object *obj, unsigned domain)
1481 {
1482         return (domain == I915_GEM_DOMAIN_GTT ?
1483                 obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
1484 }
1485
1486 static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
1487 {
1488         struct drm_i915_private *i915;
1489         struct list_head *list;
1490         struct i915_vma *vma;
1491
1492         list_for_each_entry(vma, &obj->vma_list, obj_link) {
1493                 if (!i915_vma_is_ggtt(vma))
1494                         continue;
1495
1496                 if (i915_vma_is_active(vma))
1497                         continue;
1498
1499                 if (!drm_mm_node_allocated(&vma->node))
1500                         continue;
1501
1502                 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
1503         }
1504
1505         i915 = to_i915(obj->base.dev);
1506         list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
1507         list_move_tail(&obj->global_link, list);
1508 }
1509
1510 /**
1511  * i915_gem_set_domain_ioctl - called when user space prepares to use an
1512  * object with the CPU, through the mmap ioctl's mapping or a GTT mapping.
1513  * @dev: drm device
1514  * @data: ioctl data blob
1515  * @file: drm file
1516  */
1517 int
1518 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1519                           struct drm_file *file)
1520 {
1521         struct drm_i915_gem_set_domain *args = data;
1522         struct drm_i915_gem_object *obj;
1523         uint32_t read_domains = args->read_domains;
1524         uint32_t write_domain = args->write_domain;
1525         int err;
1526
1527         /* Only handle setting domains to types used by the CPU. */
1528         if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
1529                 return -EINVAL;
1530
1531         /* Having something in the write domain implies it's in the read
1532          * domain, and only that read domain.  Enforce that in the request.
1533          */
1534         if (write_domain != 0 && read_domains != write_domain)
1535                 return -EINVAL;
1536
1537         obj = i915_gem_object_lookup(file, args->handle);
1538         if (!obj)
1539                 return -ENOENT;
1540
1541         /* Try to flush the object off the GPU without holding the lock.
1542          * We will repeat the flush holding the lock in the normal manner
1543          * to catch cases where we are gazumped.
1544          */
1545         err = i915_gem_object_wait(obj,
1546                                    I915_WAIT_INTERRUPTIBLE |
1547                                    (write_domain ? I915_WAIT_ALL : 0),
1548                                    MAX_SCHEDULE_TIMEOUT,
1549                                    to_rps_client(file));
1550         if (err)
1551                 goto out;
1552
1553         /* Flush and acquire obj->pages so that we are coherent through
1554          * direct access in memory with previous cached writes through
1555          * shmemfs and that our cache domain tracking remains valid.
1556          * For example, if the obj->filp was moved to swap without us
1557          * being notified and releasing the pages, we would mistakenly
1558          * continue to assume that the obj remained out of the CPU cached
1559          * domain.
1560          */
1561         err = i915_gem_object_pin_pages(obj);
1562         if (err)
1563                 goto out;
1564
1565         err = i915_mutex_lock_interruptible(dev);
1566         if (err)
1567                 goto out_unpin;
1568
1569         if (read_domains & I915_GEM_DOMAIN_GTT)
1570                 err = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1571         else
1572                 err = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1573
1574         /* And bump the LRU for this access */
1575         i915_gem_object_bump_inactive_ggtt(obj);
1576
1577         mutex_unlock(&dev->struct_mutex);
1578
1579         if (write_domain != 0)
1580                 intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));
1581
1582 out_unpin:
1583         i915_gem_object_unpin_pages(obj);
1584 out:
1585         i915_gem_object_put(obj);
1586         return err;
1587 }
1588
1589 /**
1590  * i915_gem_sw_finish_ioctl - called when user space has done writes to this buffer
1591  * @dev: drm device
1592  * @data: ioctl data blob
1593  * @file: drm file
1594  */
1595 int
1596 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1597                          struct drm_file *file)
1598 {
1599         struct drm_i915_gem_sw_finish *args = data;
1600         struct drm_i915_gem_object *obj;
1601         int err = 0;
1602
1603         obj = i915_gem_object_lookup(file, args->handle);
1604         if (!obj)
1605                 return -ENOENT;
1606
1607         /* Pinned buffers may be scanout, so flush the cache */
1608         if (READ_ONCE(obj->pin_display)) {
1609                 err = i915_mutex_lock_interruptible(dev);
1610                 if (!err) {
1611                         i915_gem_object_flush_cpu_write_domain(obj);
1612                         mutex_unlock(&dev->struct_mutex);
1613                 }
1614         }
1615
1616         i915_gem_object_put(obj);
1617         return err;
1618 }
1619
1620 /**
1621  * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
1622  *                       it is mapped to.
1623  * @dev: drm device
1624  * @data: ioctl data blob
1625  * @file: drm file
1626  *
1627  * While the mapping holds a reference on the contents of the object, it doesn't
1628  * imply a ref on the object itself.
1629  *
1630  * IMPORTANT:
1631  *
1632  * DRM driver writers who look at this function as an example for how to do GEM
1633  * mmap support, please don't implement mmap support like here. The modern way
1634  * to implement DRM mmap support is with an mmap offset ioctl (like
1635  * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
1636  * That way debug tooling like valgrind will understand what's going on; hiding
1637  * the mmap call in a driver-private ioctl will break that. The i915 driver only
1638  * does cpu mmaps this way because we didn't know better.
1639  */
1640 int
1641 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1642                     struct drm_file *file)
1643 {
1644         struct drm_i915_gem_mmap *args = data;
1645         struct drm_i915_gem_object *obj;
1646         unsigned long addr;
1647
1648         if (args->flags & ~(I915_MMAP_WC))
1649                 return -EINVAL;
1650
1651         if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
1652                 return -ENODEV;
1653
1654         obj = i915_gem_object_lookup(file, args->handle);
1655         if (!obj)
1656                 return -ENOENT;
1657
1658         /* prime objects have no backing filp to GEM mmap
1659          * pages from.
1660          */
1661         if (!obj->base.filp) {
1662                 i915_gem_object_put(obj);
1663                 return -EINVAL;
1664         }
1665
1666         addr = vm_mmap(obj->base.filp, 0, args->size,
1667                        PROT_READ | PROT_WRITE, MAP_SHARED,
1668                        args->offset);
1669         if (args->flags & I915_MMAP_WC) {
1670                 struct mm_struct *mm = current->mm;
1671                 struct vm_area_struct *vma;
1672
1673                 if (down_write_killable(&mm->mmap_sem)) {
1674                         i915_gem_object_put(obj);
1675                         return -EINTR;
1676                 }
1677                 vma = find_vma(mm, addr);
1678                 if (vma)
1679                         vma->vm_page_prot =
1680                                 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1681                 else
1682                         addr = -ENOMEM;
1683                 up_write(&mm->mmap_sem);
1684
1685                 /* This may race, but that's ok, it only gets set */
1686                 WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
1687         }
1688         i915_gem_object_put(obj);
1689         if (IS_ERR((void *)addr))
1690                 return addr;
1691
1692         args->addr_ptr = (uint64_t) addr;
1693
1694         return 0;
1695 }
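
/*
 * A minimal userspace sketch of the ioctl above (not part of the driver,
 * and the legacy path at that). The drm fd, GEM handle and object size
 * are assumed to exist already; error handling is trimmed.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static void *cpu_mmap_bo(int drm_fd, uint32_t handle, uint64_t size)
{
        struct drm_i915_gem_mmap arg;

        memset(&arg, 0, sizeof(arg));
        arg.handle = handle;    /* GEM handle, e.g. from GEM_CREATE */
        arg.offset = 0;         /* map from the start of the object */
        arg.size = size;        /* must lie within the object */
        arg.flags = 0;          /* or I915_MMAP_WC where PAT is available */

        if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP, &arg))
                return NULL;

        return (void *)(uintptr_t)arg.addr_ptr;
}
#endif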
1696
1697 static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
1698 {
1699         u64 size;
1700
1701         size = i915_gem_object_get_stride(obj);
1702         size *= i915_gem_object_get_tiling(obj) == I915_TILING_Y ? 32 : 8;
1703
1704         return size >> PAGE_SHIFT;
1705 }
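
/*
 * Worked example for the helper above: a Y-tiled object with a 4096 byte
 * stride has tile rows of 4096 * 32 = 128KiB, i.e. 32 pages; the same
 * stride X-tiled gives 4096 * 8 = 32KiB, i.e. 8 pages per tile row.
 */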
1706
1707 /**
1708  * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
1709  *
1710  * A history of the GTT mmap interface:
1711  *
1712  * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
1713  *     be aligned and suitable for fencing, and still fit into the available
1714  *     mappable space left by the pinned display objects. A classic problem
1715  *     we called the page-fault-of-doom where we would ping-pong between
1716  *     two objects that could not fit inside the GTT and so the memcpy
1717  *     would page one object in at the expense of the other between every
1718  *     single byte.
1719  *
1720  * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
1721  *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
1722  *     object is too large for the available space (or simply too large
1723  *     for the mappable aperture!), a view is created instead and faulted
1724  *     into userspace. (This view is aligned and sized appropriately for
1725  *     fenced access.)
1726  *
1727  * Restrictions:
1728  *
1729  *  * snoopable objects cannot be accessed via the GTT. Doing so can cause
1730  *    machine hangs on some architectures, corruption on others. An attempt
1731  *    to service a GTT page fault from a snoopable object will generate a SIGBUS.
1732  *
1733  *  * the object must be able to fit into RAM (physical memory, though not
1734  *    limited to the mappable aperture).
1735  *
1737  * Caveats:
1738  *
1739  *  * a new GTT page fault will synchronize rendering from the GPU and flush
1740  *    all data to system memory. Subsequent access will not be synchronized.
1741  *
1742  *  * all mappings are revoked on runtime device suspend.
1743  *
1744  *  * there are only 8, 16 or 32 fence registers to share between all users
1745  *    (older machines require a fence register for display and blitter access
1746  *    as well). Contention of the fence registers will cause the previous users
1747  *    to be unmapped and any new access will generate new page faults.
1748  *
1749  *  * running out of memory while servicing a fault may generate a SIGBUS,
1750  *    rather than the expected SIGSEGV.
1751  */
1752 int i915_gem_mmap_gtt_version(void)
1753 {
1754         return 1;
1755 }
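
/*
 * A hedged userspace sketch (not part of the driver): the feature level
 * above is advertised through I915_PARAM_MMAP_GTT_VERSION, which a client
 * might probe as follows. The drm fd is assumed to be open already.
 */
#if 0
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int mmap_gtt_version(int drm_fd)
{
        int value = 0;
        struct drm_i915_getparam gp = {
                .param = I915_PARAM_MMAP_GTT_VERSION,
                .value = &value,
        };

        if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
                return 0; /* ancient kernel: assume version 0 semantics */

        return value;
}
#endif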
1756
1757 /**
1758  * i915_gem_fault - fault a page into the GTT
1759  * @area: CPU VMA in question
1760  * @vmf: fault info
1761  *
1762  * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1763  * from userspace.  The fault handler takes care of binding the object to
1764  * the GTT (if needed), allocating and programming a fence register (again,
1765  * only if needed based on whether the old reg is still valid or the object
1766  * is tiled) and inserting a new PTE into the faulting process.
1767  *
1768  * Note that the faulting process may involve evicting existing objects
1769  * from the GTT and/or fence registers to make room.  So performance may
1770  * suffer if the GTT working set is large or there are few fence registers
1771  * left.
1772  *
1773  * The current feature set supported by i915_gem_fault() and thus GTT mmaps
1774  * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
1775  */
1776 int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
1777 {
1778 #define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
1779         struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
1780         struct drm_device *dev = obj->base.dev;
1781         struct drm_i915_private *dev_priv = to_i915(dev);
1782         struct i915_ggtt *ggtt = &dev_priv->ggtt;
1783         bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1784         struct i915_vma *vma;
1785         pgoff_t page_offset;
1786         unsigned int flags;
1787         int ret;
1788
1789         /* We don't use vmf->pgoff since that has the fake offset */
1790         page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
1791
1792         trace_i915_gem_object_fault(obj, page_offset, true, write);
1793
1794         /* Try to flush the object off the GPU first without holding the lock.
1795          * Upon acquiring the lock, we will perform our sanity checks and then
1796          * repeat the flush holding the lock in the normal manner to catch cases
1797          * where we are gazumped.
1798          */
1799         ret = i915_gem_object_wait(obj,
1800                                    I915_WAIT_INTERRUPTIBLE,
1801                                    MAX_SCHEDULE_TIMEOUT,
1802                                    NULL);
1803         if (ret)
1804                 goto err;
1805
1806         ret = i915_gem_object_pin_pages(obj);
1807         if (ret)
1808                 goto err;
1809
1810         intel_runtime_pm_get(dev_priv);
1811
1812         ret = i915_mutex_lock_interruptible(dev);
1813         if (ret)
1814                 goto err_rpm;
1815
1816         /* Access to snoopable pages through the GTT is incoherent. */
1817         if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
1818                 ret = -EFAULT;
1819                 goto err_unlock;
1820         }
1821
1822         /* If the object is smaller than a couple of partial vmas, it is
1823          * not worth only creating a single partial vma - we may as well
1824          * clear enough space for the full object.
1825          */
1826         flags = PIN_MAPPABLE;
1827         if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
1828                 flags |= PIN_NONBLOCK | PIN_NONFAULT;
1829
1830         /* Now pin it into the GTT as needed */
1831         vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
1832         if (IS_ERR(vma)) {
1833                 struct i915_ggtt_view view;
1834                 unsigned int chunk_size;
1835
1836                 /* Use a partial view if it is bigger than available space */
1837                 chunk_size = MIN_CHUNK_PAGES;
1838                 if (i915_gem_object_is_tiled(obj))
1839                         chunk_size = roundup(chunk_size, tile_row_pages(obj));
1840
1841                 memset(&view, 0, sizeof(view));
1842                 view.type = I915_GGTT_VIEW_PARTIAL;
1843                 view.params.partial.offset = rounddown(page_offset, chunk_size);
1844                 view.params.partial.size =
1845                         min_t(unsigned int, chunk_size,
1846                               vma_pages(area) - view.params.partial.offset);
1847
1848                 /* If the partial covers the entire object, just create a
1849                  * normal VMA.
1850                  */
1851                 if (chunk_size >= obj->base.size >> PAGE_SHIFT)
1852                         view.type = I915_GGTT_VIEW_NORMAL;
1853
1854                 /* Userspace is now writing through an untracked VMA, abandon
1855                  * all hope that the hardware is able to track future writes.
1856                  */
1857                 obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
1858
1859                 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
1860         }
1861         if (IS_ERR(vma)) {
1862                 ret = PTR_ERR(vma);
1863                 goto err_unlock;
1864         }
1865
1866         ret = i915_gem_object_set_to_gtt_domain(obj, write);
1867         if (ret)
1868                 goto err_unpin;
1869
1870         ret = i915_vma_get_fence(vma);
1871         if (ret)
1872                 goto err_unpin;
1873
1874         /* Mark as being mmapped into userspace for later revocation */
1875         assert_rpm_wakelock_held(dev_priv);
1876         if (list_empty(&obj->userfault_link))
1877                 list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
1878
1879         /* Finally, remap it using the new GTT offset */
1880         ret = remap_io_mapping(area,
1881                                area->vm_start + (vma->ggtt_view.params.partial.offset << PAGE_SHIFT),
1882                                (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
1883                                min_t(u64, vma->size, area->vm_end - area->vm_start),
1884                                &ggtt->mappable);
1885
1886 err_unpin:
1887         __i915_vma_unpin(vma);
1888 err_unlock:
1889         mutex_unlock(&dev->struct_mutex);
1890 err_rpm:
1891         intel_runtime_pm_put(dev_priv);
1892         i915_gem_object_unpin_pages(obj);
1893 err:
1894         switch (ret) {
1895         case -EIO:
1896                 /*
1897                  * We eat errors when the gpu is terminally wedged to avoid
1898                  * userspace unduly crashing (gl has no provisions for mmaps to
1899                  * fail). But any other -EIO isn't ours (e.g. swap in failure)
1900                  * and so needs to be reported.
1901                  */
1902                 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
1903                         ret = VM_FAULT_SIGBUS;
1904                         break;
1905                 }
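                /* fall through */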
1906         case -EAGAIN:
1907                 /*
1908                  * EAGAIN means the gpu is hung and we'll wait for the error
1909                  * handler to reset everything when re-faulting in
1910                  * i915_mutex_lock_interruptible.
1911                  */
1912         case 0:
1913         case -ERESTARTSYS:
1914         case -EINTR:
1915         case -EBUSY:
1916                 /*
1917                  * EBUSY is ok: this just means that another thread
1918                  * already did the job.
1919                  */
1920                 ret = VM_FAULT_NOPAGE;
1921                 break;
1922         case -ENOMEM:
1923                 ret = VM_FAULT_OOM;
1924                 break;
1925         case -ENOSPC:
1926         case -EFAULT:
1927                 ret = VM_FAULT_SIGBUS;
1928                 break;
1929         default:
1930                 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
1931                 ret = VM_FAULT_SIGBUS;
1932                 break;
1933         }
1934         return ret;
1935 }
1936
1937 /**
1938  * i915_gem_release_mmap - remove physical page mappings
1939  * @obj: obj in question
1940  *
1941  * Preserve the reservation of the mmapping with the DRM core code, but
1942  * relinquish ownership of the pages back to the system.
1943  *
1944  * It is vital that we remove the page mapping if we have mapped a tiled
1945  * object through the GTT and then lose the fence register due to
1946  * resource pressure. Similarly if the object has been moved out of the
1947  * aperture, then pages mapped into userspace must be revoked. Removing the
1948  * mapping will then trigger a page fault on the next user access, allowing
1949  * fixup by i915_gem_fault().
1950  */
1951 void
1952 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1953 {
1954         struct drm_i915_private *i915 = to_i915(obj->base.dev);
1955
1956         /* Serialisation between user GTT access and our code depends upon
1957          * revoking the CPU's PTE whilst the mutex is held. The next user
1958          * pagefault then has to wait until we release the mutex.
1959          *
1960          * Note that RPM complicates matters somewhat by adding an additional
1961          * requirement that operations to the GGTT be made holding the RPM
1962          * wakeref.
1963          */
1964         lockdep_assert_held(&i915->drm.struct_mutex);
1965         intel_runtime_pm_get(i915);
1966
1967         if (list_empty(&obj->userfault_link))
1968                 goto out;
1969
1970         list_del_init(&obj->userfault_link);
1971         drm_vma_node_unmap(&obj->base.vma_node,
1972                            obj->base.dev->anon_inode->i_mapping);
1973
1974         /* Ensure that the CPU's PTEs are revoked and there are no outstanding
1975          * memory transactions from userspace before we return. The TLB
1976          * flushing implied by changing the PTEs above *should* be
1977          * sufficient, an extra barrier here just provides us with a bit
1978          * of paranoid documentation about our requirement to serialise
1979          * memory writes before touching registers / GSM.
1980          */
1981         wmb();
1982
1983 out:
1984         intel_runtime_pm_put(i915);
1985 }
1986
1987 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
1988 {
1989         struct drm_i915_gem_object *obj, *on;
1990         int i;
1991
1992         /*
1993          * Only called during RPM suspend. All users of the userfault_list
1994          * must be holding an RPM wakeref to ensure that this cannot run
1995          * concurrently with them (they rely on struct_mutex for mutual
1996          * exclusion amongst themselves).
1997          */
1998
1999         list_for_each_entry_safe(obj, on,
2000                                  &dev_priv->mm.userfault_list, userfault_link) {
2001                 list_del_init(&obj->userfault_link);
2002                 drm_vma_node_unmap(&obj->base.vma_node,
2003                                    obj->base.dev->anon_inode->i_mapping);
2004         }
2005
2006         /* The fences will be lost when the device powers down. If any were
2007          * in use by hardware (i.e. they are pinned), we should not be powering
2008          * down! All other fences will be reacquired by the user upon waking.
2009          */
2010         for (i = 0; i < dev_priv->num_fence_regs; i++) {
2011                 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2012
2013                 if (WARN_ON(reg->pin_count))
2014                         continue;
2015
2016                 if (!reg->vma)
2017                         continue;
2018
2019                 GEM_BUG_ON(!list_empty(&reg->vma->obj->userfault_link));
2020                 reg->dirty = true;
2021         }
2022 }
2023
2024 /**
2025  * i915_gem_get_ggtt_size - return required global GTT size for an object
2026  * @dev_priv: i915 device
2027  * @size: object size
2028  * @tiling_mode: tiling mode
2029  *
2030  * Return the required global GTT size for an object, taking into account
2031  * potential fence register mapping.
2032  */
2033 u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv,
2034                            u64 size, int tiling_mode)
2035 {
2036         u64 ggtt_size;
2037
2038         GEM_BUG_ON(size == 0);
2039
2040         if (INTEL_GEN(dev_priv) >= 4 ||
2041             tiling_mode == I915_TILING_NONE)
2042                 return size;
2043
2044         /* Previous chips need a power-of-two fence region when tiling */
2045         if (IS_GEN3(dev_priv))
2046                 ggtt_size = 1024*1024;
2047         else
2048                 ggtt_size = 512*1024;
2049
2050         while (ggtt_size < size)
2051                 ggtt_size <<= 1;
2052
2053         return ggtt_size;
2054 }
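
/*
 * Worked example for the rounding above: a 700KiB tiled object on gen3
 * starts from the 1MiB minimum and already fits, so it occupies a 1MiB
 * fence region; on gen2 it starts at 512KiB and doubles once to 1MiB.
 * From gen4 onwards the object size is used unchanged.
 */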
2055
2056 /**
2057  * i915_gem_get_ggtt_alignment - return required global GTT alignment
2058  * @dev_priv: i915 device
2059  * @size: object size
2060  * @tiling_mode: tiling mode
2061  * @fenced: is fenced alignment required or not
2062  *
2063  * Return the required global GTT alignment for an object, taking into account
2064  * potential fence register mapping.
2065  */
2066 u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
2067                                 int tiling_mode, bool fenced)
2068 {
2069         GEM_BUG_ON(size == 0);
2070
2071         /*
2072          * Minimum alignment is 4k (GTT page size), but might be greater
2073          * if a fence register is needed for the object.
2074          */
2075         if (INTEL_GEN(dev_priv) >= 4 || (!fenced && IS_G33(dev_priv)) ||
2076             tiling_mode == I915_TILING_NONE)
2077                 return 4096;
2078
2079         /*
2080          * Previous chips need to be aligned to the size of the smallest
2081          * fence register that can contain the object.
2082          */
2083         return i915_gem_get_ggtt_size(dev_priv, size, tiling_mode);
2084 }
2085
2086 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
2087 {
2088         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2089         int err;
2090
2091         err = drm_gem_create_mmap_offset(&obj->base);
2092         if (!err)
2093                 return 0;
2094
2095         /* We can idle the GPU locklessly to flush stale objects, but in order
2096          * to claim that space for ourselves, we need to take the big
2097          * struct_mutex to free the requests+objects and allocate our slot.
2098          */
2099         err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
2100         if (err)
2101                 return err;
2102
2103         err = i915_mutex_lock_interruptible(&dev_priv->drm);
2104         if (!err) {
2105                 i915_gem_retire_requests(dev_priv);
2106                 err = drm_gem_create_mmap_offset(&obj->base);
2107                 mutex_unlock(&dev_priv->drm.struct_mutex);
2108         }
2109
2110         return err;
2111 }
2112
2113 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
2114 {
2115         drm_gem_free_mmap_offset(&obj->base);
2116 }
2117
2118 int
2119 i915_gem_mmap_gtt(struct drm_file *file,
2120                   struct drm_device *dev,
2121                   uint32_t handle,
2122                   uint64_t *offset)
2123 {
2124         struct drm_i915_gem_object *obj;
2125         int ret;
2126
2127         obj = i915_gem_object_lookup(file, handle);
2128         if (!obj)
2129                 return -ENOENT;
2130
2131         ret = i915_gem_object_create_mmap_offset(obj);
2132         if (ret == 0)
2133                 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
2134
2135         i915_gem_object_put(obj);
2136         return ret;
2137 }
2138
2139 /**
2140  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
2141  * @dev: DRM device
2142  * @data: GTT mapping ioctl data
2143  * @file: GEM object info
2144  *
2145  * Simply returns the fake offset to userspace so it can mmap it.
2146  * The mmap call will end up in drm_gem_mmap(), which will set things
2147  * up so we can get faults in the handler above.
2148  *
2149  * The fault handler will take care of binding the object into the GTT
2150  * (since it may have been evicted to make room for something), allocating
2151  * a fence register, and mapping the appropriate aperture address into
2152  * userspace.
2153  */
2154 int
2155 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
2156                         struct drm_file *file)
2157 {
2158         struct drm_i915_gem_mmap_gtt *args = data;
2159
2160         return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
2161 }
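
/*
 * A minimal userspace sketch of the two-step dance above (not part of the
 * driver): fetch the fake offset via the ioctl, then mmap the drm fd at
 * that offset so that i915_gem_fault() populates the mapping. The fd,
 * handle and size are assumed; error handling is trimmed.
 */
#if 0
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>

static void *gtt_mmap_bo(int drm_fd, uint32_t handle, uint64_t size)
{
        struct drm_i915_gem_mmap_gtt arg = { .handle = handle };

        if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
                return MAP_FAILED;

        /* arg.offset is a lookup token, not a byte offset into the object */
        return mmap(NULL, size, PROT_READ | PROT_WRITE,
                    MAP_SHARED, drm_fd, arg.offset);
}
#endif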
2162
2163 /* Immediately discard the backing storage */
2164 static void
2165 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
2166 {
2167         i915_gem_object_free_mmap_offset(obj);
2168
2169         if (obj->base.filp == NULL)
2170                 return;
2171
2172         /* Our goal here is to return as much of the memory as
2173          * possible back to the system as we are called from OOM.
2174          * To do this we must instruct the shmfs to drop all of its
2175          * backing pages, *now*.
2176          */
2177         shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
2178         obj->mm.madv = __I915_MADV_PURGED;
2179 }
2180
2181 /* Try to discard unwanted pages */
2182 void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
2183 {
2184         struct address_space *mapping;
2185
2186         lockdep_assert_held(&obj->mm.lock);
2187         GEM_BUG_ON(obj->mm.pages);
2188
2189         switch (obj->mm.madv) {
2190         case I915_MADV_DONTNEED:
2191                 i915_gem_object_truncate(obj);
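                /* fall through */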
2192         case __I915_MADV_PURGED:
2193                 return;
2194         }
2195
2196         if (obj->base.filp == NULL)
2197                 return;
2198
2199         mapping = obj->base.filp->f_mapping;
2200         invalidate_mapping_pages(mapping, 0, (loff_t)-1);
2201 }
2202
2203 static void
2204 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
2205                               struct sg_table *pages)
2206 {
2207         struct sgt_iter sgt_iter;
2208         struct page *page;
2209
2210         __i915_gem_object_release_shmem(obj, pages, true);
2211
2212         i915_gem_gtt_finish_pages(obj, pages);
2213
2214         if (i915_gem_object_needs_bit17_swizzle(obj))
2215                 i915_gem_object_save_bit_17_swizzle(obj, pages);
2216
2217         for_each_sgt_page(page, sgt_iter, pages) {
2218                 if (obj->mm.dirty)
2219                         set_page_dirty(page);
2220
2221                 if (obj->mm.madv == I915_MADV_WILLNEED)
2222                         mark_page_accessed(page);
2223
2224                 put_page(page);
2225         }
2226         obj->mm.dirty = false;
2227
2228         sg_free_table(pages);
2229         kfree(pages);
2230 }
2231
2232 static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
2233 {
2234         struct radix_tree_iter iter;
2235         void **slot;
2236
2237         radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
2238                 radix_tree_delete(&obj->mm.get_page.radix, iter.index);
2239 }
2240
2241 void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
2242                                  enum i915_mm_subclass subclass)
2243 {
2244         struct sg_table *pages;
2245
2246         if (i915_gem_object_has_pinned_pages(obj))
2247                 return;
2248
2249         GEM_BUG_ON(obj->bind_count);
2250         if (!READ_ONCE(obj->mm.pages))
2251                 return;
2252
2253         /* May be called by shrinker from within get_pages() (on another bo) */
2254         mutex_lock_nested(&obj->mm.lock, subclass);
2255         if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
2256                 goto unlock;
2257
2258         /* ->put_pages might need to allocate memory for the bit17 swizzle
2259          * array, hence protect them from being reaped by removing them from gtt
2260          * lists early. */
2261         pages = fetch_and_zero(&obj->mm.pages);
2262         GEM_BUG_ON(!pages);
2263
2264         if (obj->mm.mapping) {
2265                 void *ptr;
2266
2267                 ptr = ptr_mask_bits(obj->mm.mapping);
2268                 if (is_vmalloc_addr(ptr))
2269                         vunmap(ptr);
2270                 else
2271                         kunmap(kmap_to_page(ptr));
2272
2273                 obj->mm.mapping = NULL;
2274         }
2275
2276         __i915_gem_object_reset_page_iter(obj);
2277
2278         obj->ops->put_pages(obj, pages);
2279 unlock:
2280         mutex_unlock(&obj->mm.lock);
2281 }
2282
2283 static void i915_sg_trim(struct sg_table *orig_st)
2284 {
2285         struct sg_table new_st;
2286         struct scatterlist *sg, *new_sg;
2287         unsigned int i;
2288
2289         if (orig_st->nents == orig_st->orig_nents)
2290                 return;
2291
2292         if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
2293                 return;
2294
2295         new_sg = new_st.sgl;
2296         for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
2297                 sg_set_page(new_sg, sg_page(sg), sg->length, 0);
2298                 /* called before being DMA mapped, no need to copy sg->dma_* */
2299                 new_sg = sg_next(new_sg);
2300         }
2301
2302         sg_free_table(orig_st);
2303
2304         *orig_st = new_st;
2305 }
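
/*
 * Worked example: if all 256 pages of a 1MiB object happened to be
 * physically contiguous, the allocation loop coalesces them into a single
 * sg entry, and the trim above swaps the 256-slot table for a 1-slot one.
 */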
2306
2307 static struct sg_table *
2308 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2309 {
2310         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2311         const unsigned long page_count = obj->base.size / PAGE_SIZE;
2312         unsigned long i;
2313         struct address_space *mapping;
2314         struct sg_table *st;
2315         struct scatterlist *sg;
2316         struct sgt_iter sgt_iter;
2317         struct page *page;
2318         unsigned long last_pfn = 0;     /* suppress gcc warning */
2319         unsigned int max_segment;
2320         int ret;
2321         gfp_t gfp;
2322
2323         /* Assert that the object is not currently in any GPU domain. As it
2324          * wasn't in the GTT, there shouldn't be any way it could have been in
2325          * a GPU cache
2326          */
2327         GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2328         GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2329
2330         max_segment = swiotlb_max_segment();
2331         if (!max_segment)
2332                 max_segment = rounddown(UINT_MAX, PAGE_SIZE);
2333
2334         st = kmalloc(sizeof(*st), GFP_KERNEL);
2335         if (st == NULL)
2336                 return ERR_PTR(-ENOMEM);
2337
2338 rebuild_st:
2339         if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
2340                 kfree(st);
2341                 return ERR_PTR(-ENOMEM);
2342         }
2343
2344         /* Get the list of pages out of our struct file.  They'll be pinned
2345          * at this point until we release them.
2346          *
2347          * Fail silently without starting the shrinker
2348          */
2349         mapping = obj->base.filp->f_mapping;
2350         gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
2351         gfp |= __GFP_NORETRY | __GFP_NOWARN;
2352         sg = st->sgl;
2353         st->nents = 0;
2354         for (i = 0; i < page_count; i++) {
2355                 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2356                 if (IS_ERR(page)) {
2357                         i915_gem_shrink(dev_priv,
2358                                         page_count,
2359                                         I915_SHRINK_BOUND |
2360                                         I915_SHRINK_UNBOUND |
2361                                         I915_SHRINK_PURGEABLE);
2362                         page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2363                 }
2364                 if (IS_ERR(page)) {
2365                         /* We've tried hard to allocate the memory by reaping
2366                          * our own buffer, now let the real VM do its job and
2367                          * go down in flames if truly OOM.
2368                          */
2369                         page = shmem_read_mapping_page(mapping, i);
2370                         if (IS_ERR(page)) {
2371                                 ret = PTR_ERR(page);
2372                                 goto err_sg;
2373                         }
2374                 }
2375                 if (!i ||
2376                     sg->length >= max_segment ||
2377                     page_to_pfn(page) != last_pfn + 1) {
2378                         if (i)
2379                                 sg = sg_next(sg);
2380                         st->nents++;
2381                         sg_set_page(sg, page, PAGE_SIZE, 0);
2382                 } else {
2383                         sg->length += PAGE_SIZE;
2384                 }
2385                 last_pfn = page_to_pfn(page);
2386
2387                 /* Check that the i965g/gm workaround works. */
2388                 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
2389         }
2390         if (sg) /* loop terminated early; short sg table */
2391                 sg_mark_end(sg);
2392
2393         /* Trim unused sg entries to avoid wasting memory. */
2394         i915_sg_trim(st);
2395
2396         ret = i915_gem_gtt_prepare_pages(obj, st);
2397         if (ret) {
2398                 /* DMA remapping failed? One possible cause is that
2399                  * it could not reserve enough large entries, asking
2400                  * for PAGE_SIZE chunks instead may be helpful.
2401                  */
2402                 if (max_segment > PAGE_SIZE) {
2403                         for_each_sgt_page(page, sgt_iter, st)
2404                                 put_page(page);
2405                         sg_free_table(st);
2406
2407                         max_segment = PAGE_SIZE;
2408                         goto rebuild_st;
2409                 } else {
2410                         dev_warn(&dev_priv->drm.pdev->dev,
2411                                  "Failed to DMA remap %lu pages\n",
2412                                  page_count);
2413                         goto err_pages;
2414                 }
2415         }
2416
2417         if (i915_gem_object_needs_bit17_swizzle(obj))
2418                 i915_gem_object_do_bit_17_swizzle(obj, st);
2419
2420         return st;
2421
2422 err_sg:
2423         sg_mark_end(sg);
2424 err_pages:
2425         for_each_sgt_page(page, sgt_iter, st)
2426                 put_page(page);
2427         sg_free_table(st);
2428         kfree(st);
2429
2430         /* shmemfs first checks if there is enough memory to allocate the page
2431          * and reports ENOSPC should there be insufficient memory, along with
2432          * the usual ENOMEM for a genuine allocation failure.
2433          *
2434          * We use ENOSPC in our driver to mean that we have run out of aperture
2435          * space and so want to translate the error from shmemfs back to our
2436          * usual understanding of ENOMEM.
2437          */
2438         if (ret == -ENOSPC)
2439                 ret = -ENOMEM;
2440
2441         return ERR_PTR(ret);
2442 }
2443
2444 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
2445                                  struct sg_table *pages)
2446 {
2447         lockdep_assert_held(&obj->mm.lock);
2448
2449         obj->mm.get_page.sg_pos = pages->sgl;
2450         obj->mm.get_page.sg_idx = 0;
2451
2452         obj->mm.pages = pages;
2453
2454         if (i915_gem_object_is_tiled(obj) &&
2455             to_i915(obj->base.dev)->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
2456                 GEM_BUG_ON(obj->mm.quirked);
2457                 __i915_gem_object_pin_pages(obj);
2458                 obj->mm.quirked = true;
2459         }
2460 }
2461
2462 static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2463 {
2464         struct sg_table *pages;
2465
2466         GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
2467
2468         if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
2469                 DRM_DEBUG("Attempting to obtain a purgeable object\n");
2470                 return -EFAULT;
2471         }
2472
2473         pages = obj->ops->get_pages(obj);
2474         if (unlikely(IS_ERR(pages)))
2475                 return PTR_ERR(pages);
2476
2477         __i915_gem_object_set_pages(obj, pages);
2478         return 0;
2479 }
2480
2481 /* Ensure that the associated pages are gathered from the backing storage
2482  * and pinned into our object. i915_gem_object_pin_pages() may be called
2483  * multiple times; each call must be balanced by a matching
2484  * i915_gem_object_unpin_pages(). The pages are only released once they
2485  * are no longer referenced, either as a result of memory pressure
2486  * (reaping pages under the shrinker) or as the object itself is released.
2487  */
2488 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2489 {
2490         int err;
2491
2492         err = mutex_lock_interruptible(&obj->mm.lock);
2493         if (err)
2494                 return err;
2495
2496         if (unlikely(!obj->mm.pages)) {
2497                 err = ____i915_gem_object_get_pages(obj);
2498                 if (err)
2499                         goto unlock;
2500
2501                 smp_mb__before_atomic();
2502         }
2503         atomic_inc(&obj->mm.pages_pin_count);
2504
2505 unlock:
2506         mutex_unlock(&obj->mm.lock);
2507         return err;
2508 }
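
/*
 * A sketch of the expected pairing for the API above: every successful
 * pin must be balanced by an unpin before the shrinker may reap the
 * backing store. The helper below is hypothetical; it only illustrates
 * the calling convention using accessors from i915_drv.h.
 */
#if 0
static int touch_first_page(struct drm_i915_gem_object *obj)
{
        struct page *page;
        int err;

        err = i915_gem_object_pin_pages(obj);
        if (err)
                return err;

        page = i915_gem_object_get_page(obj, 0);
        /* ... use the page while the pin is held ... */

        i915_gem_object_unpin_pages(obj);
        return 0;
}
#endif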
2509
2510 /* The 'mapping' part of i915_gem_object_pin_map() below */
2511 static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
2512                                  enum i915_map_type type)
2513 {
2514         unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
2515         struct sg_table *sgt = obj->mm.pages;
2516         struct sgt_iter sgt_iter;
2517         struct page *page;
2518         struct page *stack_pages[32];
2519         struct page **pages = stack_pages;
2520         unsigned long i = 0;
2521         pgprot_t pgprot;
2522         void *addr;
2523
2524         /* A single page can always be kmapped */
2525         if (n_pages == 1 && type == I915_MAP_WB)
2526                 return kmap(sg_page(sgt->sgl));
2527
2528         if (n_pages > ARRAY_SIZE(stack_pages)) {
2529                 /* Too big for stack -- allocate temporary array instead */
2530                 pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
2531                 if (!pages)
2532                         return NULL;
2533         }
2534
2535         for_each_sgt_page(page, sgt_iter, sgt)
2536                 pages[i++] = page;
2537
2538         /* Check that we have the expected number of pages */
2539         GEM_BUG_ON(i != n_pages);
2540
2541         switch (type) {
2542         case I915_MAP_WB:
2543                 pgprot = PAGE_KERNEL;
2544                 break;
2545         case I915_MAP_WC:
2546                 pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
2547                 break;
2548         }
2549         addr = vmap(pages, n_pages, 0, pgprot);
2550
2551         if (pages != stack_pages)
2552                 drm_free_large(pages);
2553
2554         return addr;
2555 }
2556
2557 /* get, pin, and map the pages of the object into kernel space */
2558 void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
2559                               enum i915_map_type type)
2560 {
2561         enum i915_map_type has_type;
2562         bool pinned;
2563         void *ptr;
2564         int ret;
2565
2566         GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
2567
2568         ret = mutex_lock_interruptible(&obj->mm.lock);
2569         if (ret)
2570                 return ERR_PTR(ret);
2571
2572         pinned = true;
2573         if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
2574                 if (unlikely(!obj->mm.pages)) {
2575                         ret = ____i915_gem_object_get_pages(obj);
2576                         if (ret)
2577                                 goto err_unlock;
2578
2579                         smp_mb__before_atomic();
2580                 }
2581                 atomic_inc(&obj->mm.pages_pin_count);
2582                 pinned = false;
2583         }
2584         GEM_BUG_ON(!obj->mm.pages);
2585
2586         ptr = ptr_unpack_bits(obj->mm.mapping, has_type);
2587         if (ptr && has_type != type) {
2588                 if (pinned) {
2589                         ret = -EBUSY;
2590                         goto err_unpin;
2591                 }
2592
2593                 if (is_vmalloc_addr(ptr))
2594                         vunmap(ptr);
2595                 else
2596                         kunmap(kmap_to_page(ptr));
2597
2598                 ptr = obj->mm.mapping = NULL;
2599         }
2600
2601         if (!ptr) {
2602                 ptr = i915_gem_object_map(obj, type);
2603                 if (!ptr) {
2604                         ret = -ENOMEM;
2605                         goto err_unpin;
2606                 }
2607
2608                 obj->mm.mapping = ptr_pack_bits(ptr, type);
2609         }
2610
2611 out_unlock:
2612         mutex_unlock(&obj->mm.lock);
2613         return ptr;
2614
2615 err_unpin:
2616         atomic_dec(&obj->mm.pages_pin_count);
2617 err_unlock:
2618         ptr = ERR_PTR(ret);
2619         goto out_unlock;
2620 }
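
/*
 * A sketch of typical pin_map usage (the helper is illustrative, not a
 * real caller): map the whole object into the kernel address space,
 * write through the mapping, then drop the pin again.
 */
#if 0
static int clear_bo(struct drm_i915_gem_object *obj)
{
        void *vaddr;

        vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);

        memset(vaddr, 0, obj->base.size);

        i915_gem_object_unpin_map(obj);
        return 0;
}
#endif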
2621
2622 static bool i915_context_is_banned(const struct i915_gem_context *ctx)
2623 {
2624         unsigned long elapsed;
2625
2626         if (ctx->hang_stats.banned)
2627                 return true;
2628
2629         elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
2630         if (ctx->hang_stats.ban_period_seconds &&
2631             elapsed <= ctx->hang_stats.ban_period_seconds) {
2632                 DRM_DEBUG("context hanging too fast, banning!\n");
2633                 return true;
2634         }
2635
2636         return false;
2637 }
2638
2639 static void i915_set_reset_status(struct i915_gem_context *ctx,
2640                                   const bool guilty)
2641 {
2642         struct i915_ctx_hang_stats *hs = &ctx->hang_stats;
2643
2644         if (guilty) {
2645                 hs->banned = i915_context_is_banned(ctx);
2646                 hs->batch_active++;
2647                 hs->guilty_ts = get_seconds();
2648         } else {
2649                 hs->batch_pending++;
2650         }
2651 }
2652
2653 struct drm_i915_gem_request *
2654 i915_gem_find_active_request(struct intel_engine_cs *engine)
2655 {
2656         struct drm_i915_gem_request *request;
2657
2658         /* We are called by the error capture and reset at a random
2659          * point in time. In particular, note that neither is crucially
2660          * ordered with an interrupt. After a hang, the GPU is dead and we
2661          * assume that no more writes can happen (we waited long enough for
2662          * all writes that were in flight to be flushed) - adding an
2663          * extra delay for a recent interrupt is pointless. Hence, we do
2664          * not need an engine->irq_seqno_barrier() before the seqno reads.
2665          */
2666         list_for_each_entry(request, &engine->timeline->requests, link) {
2667                 if (__i915_gem_request_completed(request))
2668                         continue;
2669
2670                 return request;
2671         }
2672
2673         return NULL;
2674 }
2675
2676 static void reset_request(struct drm_i915_gem_request *request)
2677 {
2678         void *vaddr = request->ring->vaddr;
2679         u32 head;
2680
2681         /* As this request likely depends on state from the lost
2682          * context, clear out all the user operations leaving the
2683          * breadcrumb at the end (so we get the fence notifications).
2684          */
2685         head = request->head;
2686         if (request->postfix < head) {
2687                 memset(vaddr + head, 0, request->ring->size - head);
2688                 head = 0;
2689         }
2690         memset(vaddr + head, 0, request->postfix - head);
2691 }
2692
2693 static void i915_gem_reset_engine(struct intel_engine_cs *engine)
2694 {
2695         struct drm_i915_gem_request *request;
2696         struct i915_gem_context *incomplete_ctx;
2697         struct intel_timeline *timeline;
2698         unsigned long flags;
2699         bool ring_hung;
2700
2701         if (engine->irq_seqno_barrier)
2702                 engine->irq_seqno_barrier(engine);
2703
2704         request = i915_gem_find_active_request(engine);
2705         if (!request)
2706                 return;
2707
2708         ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
2709         if (engine->hangcheck.seqno != intel_engine_get_seqno(engine))
2710                 ring_hung = false;
2711
2712         i915_set_reset_status(request->ctx, ring_hung);
2713         if (!ring_hung)
2714                 return;
2715
2716         DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
2717                          engine->name, request->global_seqno);
2718
2719         /* Setup the CS to resume from the breadcrumb of the hung request */
2720         engine->reset_hw(engine, request);
2721
2722         /* Users of the default context do not rely on logical state
2723          * preserved between batches. They have to emit full state on
2724          * every batch and so it is safe to execute queued requests following
2725          * the hang.
2726          *
2727          * Other contexts preserve their state, which is now corrupt. We want
2728          * to skip all queued requests that reference the corrupt context.
2729          */
2730         incomplete_ctx = request->ctx;
2731         if (i915_gem_context_is_default(incomplete_ctx))
2732                 return;
2733
2734         timeline = i915_gem_context_lookup_timeline(incomplete_ctx, engine);
2735
2736         spin_lock_irqsave(&engine->timeline->lock, flags);
2737         spin_lock(&timeline->lock);
2738
2739         list_for_each_entry_continue(request, &engine->timeline->requests, link)
2740                 if (request->ctx == incomplete_ctx)
2741                         reset_request(request);
2742
2743         list_for_each_entry(request, &timeline->requests, link)
2744                 reset_request(request);
2745
2746         spin_unlock(&timeline->lock);
2747         spin_unlock_irqrestore(&engine->timeline->lock, flags);
2748 }
2749
2750 void i915_gem_reset(struct drm_i915_private *dev_priv)
2751 {
2752         struct intel_engine_cs *engine;
2753         enum intel_engine_id id;
2754
2755         lockdep_assert_held(&dev_priv->drm.struct_mutex);
2756
2757         i915_gem_retire_requests(dev_priv);
2758
2759         for_each_engine(engine, dev_priv, id)
2760                 i915_gem_reset_engine(engine);
2761
2762         i915_gem_restore_fences(dev_priv);
2763
2764         if (dev_priv->gt.awake) {
2765                 intel_sanitize_gt_powersave(dev_priv);
2766                 intel_enable_gt_powersave(dev_priv);
2767                 if (INTEL_GEN(dev_priv) >= 6)
2768                         gen6_rps_busy(dev_priv);
2769         }
2770 }
2771
2772 static void nop_submit_request(struct drm_i915_gem_request *request)
2773 {
2774         i915_gem_request_submit(request);
2775         intel_engine_init_global_seqno(request->engine, request->global_seqno);
2776 }
2777
2778 static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
2779 {
2780         engine->submit_request = nop_submit_request;
2781
2782         /* Mark all pending requests as complete so that any concurrent
2783          * (lockless) lookup doesn't try and wait upon the request as we
2784          * reset it.
2785          */
2786         intel_engine_init_global_seqno(engine,
2787                                        intel_engine_last_submit(engine));
2788
2789         /*
2790          * Clear the execlists queue up before freeing the requests, as those
2791          * are the ones that keep the context and ringbuffer backing objects
2792          * pinned in place.
2793          */
2794
2795         if (i915.enable_execlists) {
2796                 unsigned long flags;
2797
2798                 spin_lock_irqsave(&engine->timeline->lock, flags);
2799
2800                 i915_gem_request_put(engine->execlist_port[0].request);
2801                 i915_gem_request_put(engine->execlist_port[1].request);
2802                 memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
2803                 engine->execlist_queue = RB_ROOT;
2804                 engine->execlist_first = NULL;
2805
2806                 spin_unlock_irqrestore(&engine->timeline->lock, flags);
2807         }
2808 }
2809
2810 void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
2811 {
2812         struct intel_engine_cs *engine;
2813         enum intel_engine_id id;
2814
2815         lockdep_assert_held(&dev_priv->drm.struct_mutex);
2816         set_bit(I915_WEDGED, &dev_priv->gpu_error.flags);
2817
2818         i915_gem_context_lost(dev_priv);
2819         for_each_engine(engine, dev_priv, id)
2820                 i915_gem_cleanup_engine(engine);
2821         mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
2822
2823         i915_gem_retire_requests(dev_priv);
2824 }
2825
2826 static void
2827 i915_gem_retire_work_handler(struct work_struct *work)
2828 {
2829         struct drm_i915_private *dev_priv =
2830                 container_of(work, typeof(*dev_priv), gt.retire_work.work);
2831         struct drm_device *dev = &dev_priv->drm;
2832
2833         /* Come back later if the device is busy... */
2834         if (mutex_trylock(&dev->struct_mutex)) {
2835                 i915_gem_retire_requests(dev_priv);
2836                 mutex_unlock(&dev->struct_mutex);
2837         }
2838
2839         /* Keep the retire handler running until we are finally idle.
2840          * We do not need to do this test under locking as in the worst-case
2841          * we queue the retire worker once too often.
2842          */
2843         if (READ_ONCE(dev_priv->gt.awake)) {
2844                 i915_queue_hangcheck(dev_priv);
2845                 queue_delayed_work(dev_priv->wq,
2846                                    &dev_priv->gt.retire_work,
2847                                    round_jiffies_up_relative(HZ));
2848         }
2849 }
2850
2851 static void
2852 i915_gem_idle_work_handler(struct work_struct *work)
2853 {
2854         struct drm_i915_private *dev_priv =
2855                 container_of(work, typeof(*dev_priv), gt.idle_work.work);
2856         struct drm_device *dev = &dev_priv->drm;
2857         struct intel_engine_cs *engine;
2858         enum intel_engine_id id;
2859         bool rearm_hangcheck;
2860
2861         if (!READ_ONCE(dev_priv->gt.awake))
2862                 return;
2863
2864         /*
2865          * Wait for last execlists context complete, but bail out in case a
2866          * new request is submitted.
2867          */
2868         wait_for(READ_ONCE(dev_priv->gt.active_requests) ||
2869                  intel_execlists_idle(dev_priv), 10);
2870
2871         if (READ_ONCE(dev_priv->gt.active_requests))
2872                 return;
2873
2874         rearm_hangcheck =
2875                 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
2876
2877         if (!mutex_trylock(&dev->struct_mutex)) {
2878                 /* Currently busy, come back later */
2879                 mod_delayed_work(dev_priv->wq,
2880                                  &dev_priv->gt.idle_work,
2881                                  msecs_to_jiffies(50));
2882                 goto out_rearm;
2883         }
2884
2885         /*
2886          * New request retired after this work handler started, extend active
2887          * period until next instance of the work.
2888          */
2889         if (work_pending(work))
2890                 goto out_unlock;
2891
2892         if (dev_priv->gt.active_requests)
2893                 goto out_unlock;
2894
2895         if (wait_for(intel_execlists_idle(dev_priv), 10))
2896                 DRM_ERROR("Timeout waiting for engines to idle\n");
2897
2898         for_each_engine(engine, dev_priv, id)
2899                 i915_gem_batch_pool_fini(&engine->batch_pool);
2900
2901         GEM_BUG_ON(!dev_priv->gt.awake);
2902         dev_priv->gt.awake = false;
2903         rearm_hangcheck = false;
2904
2905         if (INTEL_GEN(dev_priv) >= 6)
2906                 gen6_rps_idle(dev_priv);
2907         intel_runtime_pm_put(dev_priv);
2908 out_unlock:
2909         mutex_unlock(&dev->struct_mutex);
2910
2911 out_rearm:
2912         if (rearm_hangcheck) {
2913                 GEM_BUG_ON(!dev_priv->gt.awake);
2914                 i915_queue_hangcheck(dev_priv);
2915         }
2916 }
2917
2918 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
2919 {
2920         struct drm_i915_gem_object *obj = to_intel_bo(gem);
2921         struct drm_i915_file_private *fpriv = file->driver_priv;
2922         struct i915_vma *vma, *vn;
2923
2924         mutex_lock(&obj->base.dev->struct_mutex);
2925         list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
2926                 if (vma->vm->file == fpriv)
2927                         i915_vma_close(vma);
2928
2929         if (i915_gem_object_is_active(obj) &&
2930             !i915_gem_object_has_active_reference(obj)) {
2931                 i915_gem_object_set_active_reference(obj);
2932                 i915_gem_object_get(obj);
2933         }
2934         mutex_unlock(&obj->base.dev->struct_mutex);
2935 }
2936
2937 static unsigned long to_wait_timeout(s64 timeout_ns)
2938 {
2939         if (timeout_ns < 0)
2940                 return MAX_SCHEDULE_TIMEOUT;
2941
2942         if (timeout_ns == 0)
2943                 return 0;
2944
2945         return nsecs_to_jiffies_timeout(timeout_ns);
2946 }
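
/*
 * Worked example for the conversion above: a negative timeout selects an
 * indefinite wait, zero degenerates into a non-blocking busy query, and a
 * positive value such as 16000000ns becomes roughly 16ms of jiffies,
 * rounded up so the wait lasts at least as long as requested.
 */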
2947
2948 /**
2949  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2950  * @dev: drm device pointer
2951  * @data: ioctl data blob
2952  * @file: drm file pointer
2953  *
2954  * Returns 0 if successful, else an error is returned with the remaining time in
2955  * the timeout parameter.
2956  *  -ETIME: object is still busy after timeout
2957  *  -ERESTARTSYS: signal interrupted the wait
2958  *  -ENOENT: object doesn't exist
2959  * Also possible, but rare:
2960  *  -EAGAIN: GPU wedged
2961  *  -ENOMEM: damn
2962  *  -ENODEV: Internal IRQ fail
2963  *  -E?: The add request failed
2964  *
2965  * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2966  * non-zero timeout parameter the wait ioctl will wait for the given number of
2967  * nanoseconds on an object becoming unbusy. Since the wait itself does so
2968  * without holding struct_mutex the object may become re-busied before this
2969  * function completes. A similar but shorter race condition exists in the busy
2970  * ioctl.
2971  */
2972 int
2973 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2974 {
2975         struct drm_i915_gem_wait *args = data;
2976         struct drm_i915_gem_object *obj;
2977         ktime_t start;
2978         long ret;
2979
2980         if (args->flags != 0)
2981                 return -EINVAL;
2982
2983         obj = i915_gem_object_lookup(file, args->bo_handle);
2984         if (!obj)
2985                 return -ENOENT;
2986
2987         start = ktime_get();
2988
2989         ret = i915_gem_object_wait(obj,
2990                                    I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL,
2991                                    to_wait_timeout(args->timeout_ns),
2992                                    to_rps_client(file));
2993
2994         if (args->timeout_ns > 0) {
2995                 args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
2996                 if (args->timeout_ns < 0)
2997                         args->timeout_ns = 0;
2998         }
2999
3000         i915_gem_object_put(obj);
3001         return ret;
3002 }
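
/*
 * Illustrative sketch, not part of the original file: how userspace might
 * drive DRM_IOCTL_I915_GEM_WAIT through libdrm. The fd and handle values
 * are assumed to come from an open DRM device and an existing BO.
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.flags = 0,
 *		.timeout_ns = 10 * 1000 * 1000,	// wait up to 10ms
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) == 0)
 *		;	// idle; wait.timeout_ns holds the unused budget
 *
 * A timeout_ns of 0 polls for busyness and a negative value waits
 * indefinitely, matching to_wait_timeout() above.
 */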
3003
3004 static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
3005 {
3006         int ret, i;
3007
3008         for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
3009                 ret = i915_gem_active_wait(&tl->engine[i].last_request, flags);
3010                 if (ret)
3011                         return ret;
3012         }
3013
3014         return 0;
3015 }
3016
3017 int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
3018 {
3019         int ret;
3020
3021         if (flags & I915_WAIT_LOCKED) {
3022                 struct i915_gem_timeline *tl;
3023
3024                 lockdep_assert_held(&i915->drm.struct_mutex);
3025
3026                 list_for_each_entry(tl, &i915->gt.timelines, link) {
3027                         ret = wait_for_timeline(tl, flags);
3028                         if (ret)
3029                                 return ret;
3030                 }
3031         } else {
3032                 ret = wait_for_timeline(&i915->gt.global_timeline, flags);
3033                 if (ret)
3034                         return ret;
3035         }
3036
3037         return 0;
3038 }
3039
3040 void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3041                              bool force)
3042 {
3043         /* If we don't have a page list set up, then we're not pinned
3044          * to the GPU, and we can ignore the cache flush because it'll happen
3045          * again at bind time.
3046          */
3047         if (!obj->mm.pages)
3048                 return;
3049
3050         /*
3051          * Stolen memory is always coherent with the GPU as it is explicitly
3052          * marked as wc by the system, or the system is cache-coherent.
3053          */
3054         if (obj->stolen || obj->phys_handle)
3055                 return;
3056
3057         /* If the GPU is snooping the contents of the CPU cache,
3058          * we do not need to manually clear the CPU cache lines.  However,
3059          * the caches are only snooped when the render cache is
3060          * flushed/invalidated.  As we always have to emit invalidations
3061          * and flushes when moving into and out of the RENDER domain, correct
3062          * snooping behaviour occurs naturally as the result of our domain
3063          * tracking.
3064          */
3065         if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
3066                 obj->cache_dirty = true;
3067                 return;
3068         }
3069
3070         trace_i915_gem_object_clflush(obj);
3071         drm_clflush_sg(obj->mm.pages);
3072         obj->cache_dirty = false;
3073 }
3074
3075 /** Flushes the GTT write domain for the object if it's dirty. */
3076 static void
3077 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3078 {
3079         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3080
3081         if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3082                 return;
3083
3084         /* No actual flushing is required for the GTT write domain.  Writes
3085          * to it "immediately" go to main memory as far as we know, so there's
3086          * no chipset flush.  It also doesn't land in render cache.
3087          *
3088          * However, we do have to enforce the order so that all writes through
3089          * the GTT land before any writes to the device, such as updates to
3090          * the GATT itself.
3091          *
3092          * We also have to wait a bit for the writes to land from the GTT.
3093          * An uncached read (i.e. mmio) seems to be ideal for the round-trip
3094          * timing. This issue has only been observed when switching quickly
3095          * between GTT writes and CPU reads from inside the kernel on recent hw,
3096          * and it appears to only affect discrete GTT blocks (i.e. on LLC
3097          * system agents we cannot reproduce this behaviour).
3098          */
3099         wmb();
3100         if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv))
3101                 POSTING_READ(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
3102
3103         intel_fb_obj_flush(obj, false, write_origin(obj, I915_GEM_DOMAIN_GTT));
3104
3105         obj->base.write_domain = 0;
3106         trace_i915_gem_object_change_domain(obj,
3107                                             obj->base.read_domains,
3108                                             I915_GEM_DOMAIN_GTT);
3109 }
3110
3111 /** Flushes the CPU write domain for the object if it's dirty. */
3112 static void
3113 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
3114 {
3115         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3116                 return;
3117
3118         i915_gem_clflush_object(obj, obj->pin_display);
3119         intel_fb_obj_flush(obj, false, ORIGIN_CPU);
3120
3121         obj->base.write_domain = 0;
3122         trace_i915_gem_object_change_domain(obj,
3123                                             obj->base.read_domains,
3124                                             I915_GEM_DOMAIN_CPU);
3125 }
3126
3127 /**
3128  * Moves a single object to the GTT read, and possibly write domain.
3129  * @obj: object to act on
3130  * @write: ask for write access or read only
3131  *
3132  * This function returns when the move is complete, including waiting on
3133  * flushes to occur.
3134  */
3135 int
3136 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3137 {
3138         uint32_t old_write_domain, old_read_domains;
3139         int ret;
3140
3141         lockdep_assert_held(&obj->base.dev->struct_mutex);
3142
3143         ret = i915_gem_object_wait(obj,
3144                                    I915_WAIT_INTERRUPTIBLE |
3145                                    I915_WAIT_LOCKED |
3146                                    (write ? I915_WAIT_ALL : 0),
3147                                    MAX_SCHEDULE_TIMEOUT,
3148                                    NULL);
3149         if (ret)
3150                 return ret;
3151
3152         if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3153                 return 0;
3154
3155         /* Flush and acquire obj->pages so that we are coherent through
3156          * direct access in memory with previous cached writes through
3157          * shmemfs and that our cache domain tracking remains valid.
3158          * For example, if the obj->filp was moved to swap without us
3159          * being notified and releasing the pages, we would mistakenly
3160          * continue to assume that the obj remained out of the CPU cached
3161          * domain.
3162          */
3163         ret = i915_gem_object_pin_pages(obj);
3164         if (ret)
3165                 return ret;
3166
3167         i915_gem_object_flush_cpu_write_domain(obj);
3168
3169         /* Serialise direct access to this object with the barriers for
3170          * coherent writes from the GPU, by effectively invalidating the
3171          * GTT domain upon first access.
3172          */
3173         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3174                 mb();
3175
3176         old_write_domain = obj->base.write_domain;
3177         old_read_domains = obj->base.read_domains;
3178
3179         /* It should now be out of any other write domains, and we can update
3180          * the domain values for our changes.
3181          */
3182         GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3183         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3184         if (write) {
3185                 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3186                 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3187                 obj->mm.dirty = true;
3188         }
3189
3190         trace_i915_gem_object_change_domain(obj,
3191                                             old_read_domains,
3192                                             old_write_domain);
3193
3194         i915_gem_object_unpin_pages(obj);
3195         return 0;
3196 }
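
/*
 * Typical caller pattern, sketched here for illustration: domain changes
 * require struct_mutex, and a write move marks the pages dirty so they are
 * written back if the object is later swapped out.
 *
 *	ret = i915_mutex_lock_interruptible(dev);
 *	if (ret)
 *		return ret;
 *	ret = i915_gem_object_set_to_gtt_domain(obj, true);
 *	mutex_unlock(&dev->struct_mutex);
 */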
3197
3198 /**
3199  * Changes the cache-level of an object across all VMA.
3200  * @obj: object to act on
3201  * @cache_level: new cache level to set for the object
3202  *
3203  * After this function returns, the object will be in the new cache-level
3204  * across all GTT and the contents of the backing storage will be coherent,
3205  * with respect to the new cache-level. In order to keep the backing storage
3206  * coherent for all users, we only allow a single cache level to be set
3207  * globally on the object and prevent it from being changed whilst the
3208  * hardware is reading from the object. That is, if the object is currently
3209  * on the scanout it will be set to uncached (or equivalent display
3210  * cache coherency) and all non-MOCS GPU access will also be uncached so
3211  * that all direct access to the scanout remains coherent.
3212  */
3213 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3214                                     enum i915_cache_level cache_level)
3215 {
3216         struct i915_vma *vma;
3217         int ret;
3218
3219         lockdep_assert_held(&obj->base.dev->struct_mutex);
3220
3221         if (obj->cache_level == cache_level)
3222                 return 0;
3223
3224         /* Inspect the list of currently bound VMA and unbind any that would
3225          * be invalid given the new cache-level. This is principally to
3226          * catch the issue of the CS prefetch crossing page boundaries and
3227          * reading an invalid PTE on older architectures.
3228          */
3229 restart:
3230         list_for_each_entry(vma, &obj->vma_list, obj_link) {
3231                 if (!drm_mm_node_allocated(&vma->node))
3232                         continue;
3233
3234                 if (i915_vma_is_pinned(vma)) {
3235                         DRM_DEBUG("can not change the cache level of pinned objects\n");
3236                         return -EBUSY;
3237                 }
3238
3239                 if (i915_gem_valid_gtt_space(vma, cache_level))
3240                         continue;
3241
3242                 ret = i915_vma_unbind(vma);
3243                 if (ret)
3244                         return ret;
3245
3246                 /* As unbinding may affect other elements in the
3247                  * obj->vma_list (due to side-effects from retiring
3248                  * an active vma), play safe and restart the iterator.
3249                  */
3250                 goto restart;
3251         }
3252
3253         /* We can reuse the existing drm_mm nodes but need to change the
3254          * cache-level on the PTE. We could simply unbind them all and
3255          * rebind with the correct cache-level on next use. However since
3256          * we already have a valid slot, dma mapping, pages etc, we may as well
3257          * rewrite the PTE in the belief that doing so tramples upon less
3258          * state and so involves less work.
3259          */
3260         if (obj->bind_count) {
3261                 /* Before we change the PTE, the GPU must not be accessing it.
3262                  * If we wait upon the object, we know that all the bound
3263                  * VMA are no longer active.
3264                  */
3265                 ret = i915_gem_object_wait(obj,
3266                                            I915_WAIT_INTERRUPTIBLE |
3267                                            I915_WAIT_LOCKED |
3268                                            I915_WAIT_ALL,
3269                                            MAX_SCHEDULE_TIMEOUT,
3270                                            NULL);
3271                 if (ret)
3272                         return ret;
3273
3274                 if (!HAS_LLC(to_i915(obj->base.dev)) &&
3275                     cache_level != I915_CACHE_NONE) {
3276                         /* Access to snoopable pages through the GTT is
3277                          * incoherent and on some machines causes a hard
3278                          * lockup. Relinquish the CPU mmapping to force
3279                          * userspace to refault in the pages and we can
3280                          * then double check if the GTT mapping is still
3281                          * valid for that pointer access.
3282                          */
3283                         i915_gem_release_mmap(obj);
3284
3285                         /* As we no longer need a fence for GTT access,
3286                          * we can relinquish it now (and so prevent having
3287                          * to steal a fence from someone else on the next
3288                          * fence request). Note GPU activity would have
3289                          * dropped the fence as all snoopable access is
3290                          * supposed to be linear.
3291                          */
3292                         list_for_each_entry(vma, &obj->vma_list, obj_link) {
3293                                 ret = i915_vma_put_fence(vma);
3294                                 if (ret)
3295                                         return ret;
3296                         }
3297                 } else {
3298                         /* We either have incoherent backing store and
3299                          * so no GTT access or the architecture is fully
3300                          * coherent. In such cases, existing GTT mmaps
3301                          * ignore the cache bit in the PTE and we can
3302                          * rewrite it without confusing the GPU or having
3303                          * to force userspace to fault back in its mmaps.
3304                          */
3305                 }
3306
3307                 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3308                         if (!drm_mm_node_allocated(&vma->node))
3309                                 continue;
3310
3311                         ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
3312                         if (ret)
3313                                 return ret;
3314                 }
3315         }
3316
3317         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU &&
3318             cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
3319                 obj->cache_dirty = true;
3320
3321         list_for_each_entry(vma, &obj->vma_list, obj_link)
3322                 vma->node.color = cache_level;
3323         obj->cache_level = cache_level;
3324
3325         return 0;
3326 }
3327
3328 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3329                                struct drm_file *file)
3330 {
3331         struct drm_i915_gem_caching *args = data;
3332         struct drm_i915_gem_object *obj;
3333         int err = 0;
3334
3335         rcu_read_lock();
3336         obj = i915_gem_object_lookup_rcu(file, args->handle);
3337         if (!obj) {
3338                 err = -ENOENT;
3339                 goto out;
3340         }
3341
3342         switch (obj->cache_level) {
3343         case I915_CACHE_LLC:
3344         case I915_CACHE_L3_LLC:
3345                 args->caching = I915_CACHING_CACHED;
3346                 break;
3347
3348         case I915_CACHE_WT:
3349                 args->caching = I915_CACHING_DISPLAY;
3350                 break;
3351
3352         default:
3353                 args->caching = I915_CACHING_NONE;
3354                 break;
3355         }
3356 out:
3357         rcu_read_unlock();
3358         return err;
3359 }
3360
3361 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3362                                struct drm_file *file)
3363 {
3364         struct drm_i915_private *i915 = to_i915(dev);
3365         struct drm_i915_gem_caching *args = data;
3366         struct drm_i915_gem_object *obj;
3367         enum i915_cache_level level;
3368         int ret;
3369
3370         switch (args->caching) {
3371         case I915_CACHING_NONE:
3372                 level = I915_CACHE_NONE;
3373                 break;
3374         case I915_CACHING_CACHED:
3375                 /*
3376                  * Due to a HW issue on BXT A stepping, GPU stores via a
3377                  * snooped mapping may leave stale data in a corresponding CPU
3378                  * cacheline, whereas normally such cachelines would get
3379                  * invalidated.
3380                  */
3381                 if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
3382                         return -ENODEV;
3383
3384                 level = I915_CACHE_LLC;
3385                 break;
3386         case I915_CACHING_DISPLAY:
3387                 level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
3388                 break;
3389         default:
3390                 return -EINVAL;
3391         }
3392
3393         ret = i915_mutex_lock_interruptible(dev);
3394         if (ret)
3395                 return ret;
3396
3397         obj = i915_gem_object_lookup(file, args->handle);
3398         if (!obj) {
3399                 ret = -ENOENT;
3400                 goto unlock;
3401         }
3402
3403         ret = i915_gem_object_set_cache_level(obj, level);
3404         i915_gem_object_put(obj);
3405 unlock:
3406         mutex_unlock(&dev->struct_mutex);
3407         return ret;
3408 }
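
/*
 * Illustrative sketch, assuming standard libdrm userspace: both caching
 * ioctls operate on the same struct drm_i915_gem_caching.
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle = handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_CACHING, &arg);
 *	// arg.caching now reports the level actually applied
 */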
3409
3410 /*
3411  * Prepare buffer for display plane (scanout, cursors, etc).
3412  * Can be called from an uninterruptible phase (modesetting) and allows
3413  * any flushes to be pipelined (for pageflips).
3414  */
3415 struct i915_vma *
3416 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3417                                      u32 alignment,
3418                                      const struct i915_ggtt_view *view)
3419 {
3420         struct i915_vma *vma;
3421         u32 old_read_domains, old_write_domain;
3422         int ret;
3423
3424         lockdep_assert_held(&obj->base.dev->struct_mutex);
3425
3426         /* Mark the pin_display early so that we account for the
3427          * display coherency whilst setting up the cache domains.
3428          */
3429         obj->pin_display++;
3430
3431         /* The display engine is not coherent with the LLC cache on gen6.  As
3432          * a result, we make sure that the pinning that is about to occur is
3433          * done with uncached PTEs. This is the lowest common denominator for all
3434          * chipsets.
3435          *
3436          * However for gen6+, we could do better by using the GFDT bit instead
3437          * of uncaching, which would allow us to flush all the LLC-cached data
3438          * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3439          */
3440         ret = i915_gem_object_set_cache_level(obj,
3441                                               HAS_WT(to_i915(obj->base.dev)) ?
3442                                               I915_CACHE_WT : I915_CACHE_NONE);
3443         if (ret) {
3444                 vma = ERR_PTR(ret);
3445                 goto err_unpin_display;
3446         }
3447
3448         /* As the user may map the buffer once pinned in the display plane
3449          * (e.g. libkms for the bootup splash), we have to ensure that we
3450          * always use map_and_fenceable for all scanout buffers. However,
3451          * it may simply be too big to fit into mappable, in which case
3452          * put it anyway and hope that userspace can cope (but always first
3453          * try to preserve the existing ABI).
3454          */
3455         vma = ERR_PTR(-ENOSPC);
3456         if (view->type == I915_GGTT_VIEW_NORMAL)
3457                 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
3458                                                PIN_MAPPABLE | PIN_NONBLOCK);
3459         if (IS_ERR(vma)) {
3460                 struct drm_i915_private *i915 = to_i915(obj->base.dev);
3461                 unsigned int flags;
3462
3463                 /* Valleyview is definitely limited to scanning out the first
3464                  * 512MiB. Let's presume this behaviour was inherited from the
3465                  * g4x display engine and that all earlier gen are similarly
3466                  * limited. Testing suggests that it is a little more
3467                  * complicated than this. For example, Cherryview appears quite
3468                  * happy to scanout from anywhere within its global aperture.
3469                  */
3470                 flags = 0;
3471                 if (HAS_GMCH_DISPLAY(i915))
3472                         flags = PIN_MAPPABLE;
3473                 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
3474         }
3475         if (IS_ERR(vma))
3476                 goto err_unpin_display;
3477
3478         vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
3479
3480         /* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */
3481         if (obj->cache_dirty) {
3482                 i915_gem_clflush_object(obj, true);
3483                 intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
3484         }
3485
3486         old_write_domain = obj->base.write_domain;
3487         old_read_domains = obj->base.read_domains;
3488
3489         /* It should now be out of any other write domains, and we can update
3490          * the domain values for our changes.
3491          */
3492         obj->base.write_domain = 0;
3493         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3494
3495         trace_i915_gem_object_change_domain(obj,
3496                                             old_read_domains,
3497                                             old_write_domain);
3498
3499         return vma;
3500
3501 err_unpin_display:
3502         obj->pin_display--;
3503         return vma;
3504 }
3505
3506 void
3507 i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
3508 {
3509         lockdep_assert_held(&vma->vm->dev->struct_mutex);
3510
3511         if (WARN_ON(vma->obj->pin_display == 0))
3512                 return;
3513
3514         if (--vma->obj->pin_display == 0)
3515                 vma->display_alignment = 0;
3516
3517         /* Bump the LRU to try and avoid premature eviction whilst flipping  */
3518         if (!i915_vma_is_active(vma))
3519                 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
3520
3521         i915_vma_unpin(vma);
3522 }
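
/*
 * Sketch of the expected pairing, for illustration only: every successful
 * i915_gem_object_pin_to_display_plane() is balanced by one call to
 * i915_gem_object_unpin_from_display_plane() once the scanout no longer
 * references the buffer.
 *
 *	vma = i915_gem_object_pin_to_display_plane(obj, alignment, &view);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	// ... scanout from vma ...
 *	i915_gem_object_unpin_from_display_plane(vma);
 */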
3523
3524 /**
3525  * Moves a single object to the CPU read, and possibly write domain.
3526  * @obj: object to act on
3527  * @write: requesting write or read-only access
3528  *
3529  * This function returns when the move is complete, including waiting on
3530  * flushes to occur.
3531  */
3532 int
3533 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3534 {
3535         uint32_t old_write_domain, old_read_domains;
3536         int ret;
3537
3538         lockdep_assert_held(&obj->base.dev->struct_mutex);
3539
3540         ret = i915_gem_object_wait(obj,
3541                                    I915_WAIT_INTERRUPTIBLE |
3542                                    I915_WAIT_LOCKED |
3543                                    (write ? I915_WAIT_ALL : 0),
3544                                    MAX_SCHEDULE_TIMEOUT,
3545                                    NULL);
3546         if (ret)
3547                 return ret;
3548
3549         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3550                 return 0;
3551
3552         i915_gem_object_flush_gtt_write_domain(obj);
3553
3554         old_write_domain = obj->base.write_domain;
3555         old_read_domains = obj->base.read_domains;
3556
3557         /* Flush the CPU cache if it's still invalid. */
3558         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3559                 i915_gem_clflush_object(obj, false);
3560
3561                 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3562         }
3563
3564         /* It should now be out of any other write domains, and we can update
3565          * the domain values for our changes.
3566          */
3567         GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3568
3569         /* If we're writing through the CPU, then the GPU read domains will
3570          * need to be invalidated at next use.
3571          */
3572         if (write) {
3573                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3574                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3575         }
3576
3577         trace_i915_gem_object_change_domain(obj,
3578                                             old_read_domains,
3579                                             old_write_domain);
3580
3581         return 0;
3582 }
3583
3584 /* Throttle our rendering by waiting until the ring has completed our requests
3585  * emitted over 20 msec ago.
3586  *
3587  * Note that if we were to use the current jiffies each time around the loop,
3588  * we wouldn't escape the function with any frames outstanding if the time to
3589  * render a frame was over 20ms.
3590  *
3591  * This should get us reasonable parallelism between CPU and GPU but also
3592  * relatively low latency when blocking on a particular request to finish.
3593  */
3594 static int
3595 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3596 {
3597         struct drm_i915_private *dev_priv = to_i915(dev);
3598         struct drm_i915_file_private *file_priv = file->driver_priv;
3599         unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
3600         struct drm_i915_gem_request *request, *target = NULL;
3601         long ret;
3602
3603         /* ABI: return -EIO if already wedged */
3604         if (i915_terminally_wedged(&dev_priv->gpu_error))
3605                 return -EIO;
3606
3607         spin_lock(&file_priv->mm.lock);
3608         list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3609                 if (time_after_eq(request->emitted_jiffies, recent_enough))
3610                         break;
3611
3612                 /*
3613                  * Note that the request might not have been submitted yet,
3614                  * in which case emitted_jiffies will be zero.
3615                  */
3616                 if (!request->emitted_jiffies)
3617                         continue;
3618
3619                 target = request;
3620         }
3621         if (target)
3622                 i915_gem_request_get(target);
3623         spin_unlock(&file_priv->mm.lock);
3624
3625         if (target == NULL)
3626                 return 0;
3627
3628         ret = i915_wait_request(target,
3629                                 I915_WAIT_INTERRUPTIBLE,
3630                                 MAX_SCHEDULE_TIMEOUT);
3631         i915_gem_request_put(target);
3632
3633         return ret < 0 ? ret : 0;
3634 }
3635
3636 struct i915_vma *
3637 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
3638                          const struct i915_ggtt_view *view,
3639                          u64 size,
3640                          u64 alignment,
3641                          u64 flags)
3642 {
3643         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3644         struct i915_address_space *vm = &dev_priv->ggtt.base;
3645         struct i915_vma *vma;
3646         int ret;
3647
3648         lockdep_assert_held(&obj->base.dev->struct_mutex);
3649
3650         vma = i915_gem_obj_lookup_or_create_vma(obj, vm, view);
3651         if (IS_ERR(vma))
3652                 return vma;
3653
3654         if (i915_vma_misplaced(vma, size, alignment, flags)) {
3655                 if (flags & PIN_NONBLOCK &&
3656                     (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)))
3657                         return ERR_PTR(-ENOSPC);
3658
3659                 if (flags & PIN_MAPPABLE) {
3660                         u32 fence_size;
3661
3662                         fence_size = i915_gem_get_ggtt_size(dev_priv, vma->size,
3663                                                             i915_gem_object_get_tiling(obj));
3664                         /* If the required space is larger than the available
3665                          * aperture, we will not be able to find a slot for the
3666                          * object and unbinding the object now will be in
3667                          * vain. Worse, doing so may cause us to ping-pong
3668                          * the object in and out of the Global GTT and
3669                          * waste a lot of cycles under the mutex.
3670                          */
3671                         if (fence_size > dev_priv->ggtt.mappable_end)
3672                                 return ERR_PTR(-E2BIG);
3673
3674                         /* If NONBLOCK is set the caller is optimistically
3675                          * trying to cache the full object within the mappable
3676                          * aperture, and *must* have a fallback in place for
3677                          * situations where we cannot bind the object. We
3678                          * can be a little more lax here and use the fallback
3679                          * more often to avoid costly migrations of ourselves
3680                          * and other objects within the aperture.
3681                          *
3682                          * Half-the-aperture is used as a simple heuristic.
3683                          * More interesting would be to search for a free
3684                          * block prior to making the commitment to unbind.
3685                          * That caters for the self-harm case, and with a
3686                          * little more heuristics (e.g. NOFAULT, NOEVICT)
3687                          * we could try to minimise harm to others.
3688                          */
3689                         if (flags & PIN_NONBLOCK &&
3690                             fence_size > dev_priv->ggtt.mappable_end / 2)
3691                                 return ERR_PTR(-ENOSPC);
3692                 }
3693
3694                 WARN(i915_vma_is_pinned(vma),
3695                      "bo is already pinned in ggtt with incorrect alignment:"
3696                      " offset=%08x, req.alignment=%llx,"
3697                      " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
3698                      i915_ggtt_offset(vma), alignment,
3699                      !!(flags & PIN_MAPPABLE),
3700                      i915_vma_is_map_and_fenceable(vma));
3701                 ret = i915_vma_unbind(vma);
3702                 if (ret)
3703                         return ERR_PTR(ret);
3704         }
3705
3706         ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
3707         if (ret)
3708                 return ERR_PTR(ret);
3709
3710         return vma;
3711 }
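
/*
 * Typical caller pattern (sketch, under struct_mutex): pin an object into
 * the global GTT, consume its GTT offset, then drop the pin when done.
 *
 *	vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	offset = i915_ggtt_offset(vma);
 *	// ... use the mapping ...
 *	i915_vma_unpin(vma);
 */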
3712
3713 static __always_inline unsigned int __busy_read_flag(unsigned int id)
3714 {
3715         /* Note that we could alias engines in the execbuf API, but
3716          * that would be very unwise as it would prevent userspace from
3717          * exercising fine control over engine selection. Ahem.
3718          *
3719          * This should be something like EXEC_MAX_ENGINE instead of
3720          * I915_NUM_ENGINES.
3721          */
3722         BUILD_BUG_ON(I915_NUM_ENGINES > 16);
3723         return 0x10000 << id;
3724 }
3725
3726 static __always_inline unsigned int __busy_write_id(unsigned int id)
3727 {
3728         /* The uABI guarantees an active writer is also amongst the read
3729          * engines. This would be true if we accessed the activity tracking
3730          * under the lock, but as we perform the lookup of the object and
3731          * its activity locklessly we cannot guarantee that the last_write
3732          * being active implies that we have set the same engine flag from
3733          * last_read - hence we always set both read and write busy for
3734          * last_write.
3735          */
3736         return id | __busy_read_flag(id);
3737 }
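
/*
 * Worked example of the encoding above: for an engine whose exec_id is 1,
 * __busy_read_flag(1) == 0x10000 << 1 == 0x20000 and __busy_write_id(1)
 * == 1 | 0x20000 == 0x20001. The low 16 bits of the busy word thus name
 * the single write engine while the high 16 bits form a bitmask of
 * reading engines.
 */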
3738
3739 static __always_inline unsigned int
3740 __busy_set_if_active(const struct dma_fence *fence,
3741                      unsigned int (*flag)(unsigned int id))
3742 {
3743         struct drm_i915_gem_request *rq;
3744
3745         /* We have to check the current hw status of the fence as the uABI
3746          * guarantees forward progress. We could rely on the idle worker
3747          * to eventually flush us, but to minimise latency just ask the
3748          * hardware.
3749          *
3750          * Note we only report on the status of native fences.
3751          */
3752         if (!dma_fence_is_i915(fence))
3753                 return 0;
3754
3755         /* opencode to_request() in order to avoid const warnings */
3756         rq = container_of(fence, struct drm_i915_gem_request, fence);
3757         if (i915_gem_request_completed(rq))
3758                 return 0;
3759
3760         return flag(rq->engine->exec_id);
3761 }
3762
3763 static __always_inline unsigned int
3764 busy_check_reader(const struct dma_fence *fence)
3765 {
3766         return __busy_set_if_active(fence, __busy_read_flag);
3767 }
3768
3769 static __always_inline unsigned int
3770 busy_check_writer(const struct dma_fence *fence)
3771 {
3772         if (!fence)
3773                 return 0;
3774
3775         return __busy_set_if_active(fence, __busy_write_id);
3776 }
3777
3778 int
3779 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3780                     struct drm_file *file)
3781 {
3782         struct drm_i915_gem_busy *args = data;
3783         struct drm_i915_gem_object *obj;
3784         struct reservation_object_list *list;
3785         unsigned int seq;
3786         int err;
3787
3788         err = -ENOENT;
3789         rcu_read_lock();
3790         obj = i915_gem_object_lookup_rcu(file, args->handle);
3791         if (!obj)
3792                 goto out;
3793
3794         /* A discrepancy here is that we do not report the status of
3795          * non-i915 fences, i.e. even though we may report the object as idle,
3796          * a call to set-domain may still stall waiting for foreign rendering.
3797          * This also means that wait-ioctl may report an object as busy,
3798          * where busy-ioctl considers it idle.
3799          *
3800          * We trade the ability to warn of foreign fences to report on which
3801          * i915 engines are active for the object.
3802          *
3803          * Alternatively, we can trade that extra information on read/write
3804          * activity with
3805          *      args->busy =
3806          *              !reservation_object_test_signaled_rcu(obj->resv, true);
3807          * to report the overall busyness. This is what the wait-ioctl does.
3808          *
3809          */
3810 retry:
3811         seq = raw_read_seqcount(&obj->resv->seq);
3812
3813         /* Translate the exclusive fence to the READ *and* WRITE engine */
3814         args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl));
3815
3816         /* Translate shared fences to READ set of engines */
3817         list = rcu_dereference(obj->resv->fence);
3818         if (list) {
3819                 unsigned int shared_count = list->shared_count, i;
3820
3821                 for (i = 0; i < shared_count; ++i) {
3822                         struct dma_fence *fence =
3823                                 rcu_dereference(list->shared[i]);
3824
3825                         args->busy |= busy_check_reader(fence);
3826                 }
3827         }
3828
3829         if (args->busy && read_seqcount_retry(&obj->resv->seq, seq))
3830                 goto retry;
3831
3832         err = 0;
3833 out:
3834         rcu_read_unlock();
3835         return err;
3836 }
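
/*
 * Illustrative sketch, assuming libdrm userspace, of consuming the
 * encoding produced above:
 *
 *	struct drm_i915_gem_busy busy = { .handle = handle };
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy) == 0) {
 *		unsigned int writer = busy.busy & 0xffff;  // 0 if idle
 *		unsigned int readers = busy.busy >> 16;    // engine bitmask
 *	}
 */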
3837
3838 int
3839 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3840                         struct drm_file *file_priv)
3841 {
3842         return i915_gem_ring_throttle(dev, file_priv);
3843 }
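
/*
 * Illustrative sketch, assuming libdrm userspace: the throttle ioctl takes
 * no argument structure, it simply blocks until the client's requests
 * emitted more than 20ms ago have completed.
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL);
 */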
3844
3845 int
3846 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3847                        struct drm_file *file_priv)
3848 {
3849         struct drm_i915_private *dev_priv = to_i915(dev);
3850         struct drm_i915_gem_madvise *args = data;
3851         struct drm_i915_gem_object *obj;
3852         int err;
3853
3854         switch (args->madv) {
3855         case I915_MADV_DONTNEED:
3856         case I915_MADV_WILLNEED:
3857             break;
3858         default:
3859             return -EINVAL;
3860         }
3861
3862         obj = i915_gem_object_lookup(file_priv, args->handle);
3863         if (!obj)
3864                 return -ENOENT;
3865
3866         err = mutex_lock_interruptible(&obj->mm.lock);
3867         if (err)
3868                 goto out;
3869
3870         if (obj->mm.pages &&
3871             i915_gem_object_is_tiled(obj) &&
3872             dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
3873                 if (obj->mm.madv == I915_MADV_WILLNEED) {
3874                         GEM_BUG_ON(!obj->mm.quirked);
3875                         __i915_gem_object_unpin_pages(obj);
3876                         obj->mm.quirked = false;
3877                 }
3878                 if (args->madv == I915_MADV_WILLNEED) {
3879                         GEM_BUG_ON(obj->mm.quirked);
3880                         __i915_gem_object_pin_pages(obj);
3881                         obj->mm.quirked = true;
3882                 }
3883         }
3884
3885         if (obj->mm.madv != __I915_MADV_PURGED)
3886                 obj->mm.madv = args->madv;
3887
3888         /* if the object is no longer attached, discard its backing storage */
3889         if (obj->mm.madv == I915_MADV_DONTNEED && !obj->mm.pages)
3890                 i915_gem_object_truncate(obj);
3891
3892         args->retained = obj->mm.madv != __I915_MADV_PURGED;
3893         mutex_unlock(&obj->mm.lock);
3894
3895 out:
3896         i915_gem_object_put(obj);
3897         return err;
3898 }
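
/*
 * Illustrative sketch, assuming libdrm userspace: marking a buffer as
 * purgeable and later checking whether its contents survived.
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = handle,
 *		.madv = I915_MADV_DONTNEED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	// ... memory pressure may purge the pages here ...
 *	madv.madv = I915_MADV_WILLNEED;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	if (!madv.retained)
 *		;	// backing store was purged, regenerate contents
 */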
3899
3900 static void
3901 frontbuffer_retire(struct i915_gem_active *active,
3902                    struct drm_i915_gem_request *request)
3903 {
3904         struct drm_i915_gem_object *obj =
3905                 container_of(active, typeof(*obj), frontbuffer_write);
3906
3907         intel_fb_obj_flush(obj, true, ORIGIN_CS);
3908 }
3909
3910 void i915_gem_object_init(struct drm_i915_gem_object *obj,
3911                           const struct drm_i915_gem_object_ops *ops)
3912 {
3913         mutex_init(&obj->mm.lock);
3914
3915         INIT_LIST_HEAD(&obj->global_link);
3916         INIT_LIST_HEAD(&obj->userfault_link);
3917         INIT_LIST_HEAD(&obj->obj_exec_link);
3918         INIT_LIST_HEAD(&obj->vma_list);
3919         INIT_LIST_HEAD(&obj->batch_pool_link);
3920
3921         obj->ops = ops;
3922
3923         reservation_object_init(&obj->__builtin_resv);
3924         obj->resv = &obj->__builtin_resv;
3925
3926         obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
3927         init_request_active(&obj->frontbuffer_write, frontbuffer_retire);
3928
3929         obj->mm.madv = I915_MADV_WILLNEED;
3930         INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
3931         mutex_init(&obj->mm.get_page.lock);
3932
3933         i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
3934 }
3935
3936 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
3937         .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
3938                  I915_GEM_OBJECT_IS_SHRINKABLE,
3939         .get_pages = i915_gem_object_get_pages_gtt,
3940         .put_pages = i915_gem_object_put_pages_gtt,
3941 };
3942
3943 /* Note we don't consider signbits :| */
3944 #define overflows_type(x, T) \
3945         (sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE))
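
/*
 * Example: overflows_type((u64)1 << 32, u32) is true, since a u64 is wider
 * than a u32 and the value has bits set above bit 31. On 64-bit builds
 * overflows_type(size, obj->base.size) compiles away to 0 as both types
 * are 64 bits wide.
 */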
3946
3947 struct drm_i915_gem_object *
3948 i915_gem_object_create(struct drm_device *dev, u64 size)
3949 {
3950         struct drm_i915_private *dev_priv = to_i915(dev);
3951         struct drm_i915_gem_object *obj;
3952         struct address_space *mapping;
3953         gfp_t mask;
3954         int ret;
3955
3956         /* There is a prevalence of the assumption that we fit the object's
3957          * page count inside a 32bit _signed_ variable. Let's document this and
3958          * catch if we ever need to fix it. In the meantime, if you do spot
3959          * such a local variable, please consider fixing!
3960          */
3961         if (WARN_ON(size >> PAGE_SHIFT > INT_MAX))
3962                 return ERR_PTR(-E2BIG);
3963
3964         if (overflows_type(size, obj->base.size))
3965                 return ERR_PTR(-E2BIG);
3966
3967         obj = i915_gem_object_alloc(dev);
3968         if (obj == NULL)
3969                 return ERR_PTR(-ENOMEM);
3970
3971         ret = drm_gem_object_init(dev, &obj->base, size);
3972         if (ret)
3973                 goto fail;
3974
3975         mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
3976         if (IS_CRESTLINE(dev_priv) || IS_BROADWATER(dev_priv)) {
3977                 /* 965gm cannot relocate objects above 4GiB. */
3978                 mask &= ~__GFP_HIGHMEM;
3979                 mask |= __GFP_DMA32;
3980         }
3981
3982         mapping = obj->base.filp->f_mapping;
3983         mapping_set_gfp_mask(mapping, mask);
3984
3985         i915_gem_object_init(obj, &i915_gem_object_ops);
3986
3987         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3988         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3989
3990         if (HAS_LLC(dev_priv)) {
3991                 /* On some devices, we can have the GPU use the LLC (the CPU
3992                  * cache) for about a 10% performance improvement
3993                  * compared to uncached.  Graphics requests other than
3994                  * display scanout are coherent with the CPU in
3995                  * accessing this cache.  This means in this mode we
3996                  * don't need to clflush on the CPU side, and on the
3997                  * GPU side we only need to flush internal caches to
3998                  * get data visible to the CPU.
3999                  *
4000                  * However, we maintain the display planes as UC, and so
4001                  * need to rebind when first used as such.
4002                  */
4003                 obj->cache_level = I915_CACHE_LLC;
4004         } else
4005                 obj->cache_level = I915_CACHE_NONE;
4006
4007         trace_i915_gem_object_create(obj);
4008
4009         return obj;
4010
4011 fail:
4012         i915_gem_object_free(obj);
4013         return ERR_PTR(ret);
4014 }
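
/*
 * Typical caller pattern (sketch): the return value is an ERR_PTR on
 * failure, never NULL, so callers check with IS_ERR().
 *
 *	obj = i915_gem_object_create(dev, PAGE_SIZE);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 */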
4015
4016 static bool discard_backing_storage(struct drm_i915_gem_object *obj)
4017 {
4018         /* If we are the last user of the backing storage (be it shmemfs
4019          * pages or stolen etc), we know that the pages are going to be
4020          * immediately released. In this case, we can then skip copying
4021          * back the contents from the GPU.
4022          */
4023
4024         if (obj->mm.madv != I915_MADV_WILLNEED)
4025                 return false;
4026
4027         if (obj->base.filp == NULL)
4028                 return true;
4029
4030         /* At first glance, this looks racy, but then again so would be
4031          * userspace racing mmap against close. However, the first external
4032          * reference to the filp can only be obtained through the
4033          * i915_gem_mmap_ioctl() which safeguards us against the user
4034          * acquiring such a reference whilst we are in the middle of
4035          * freeing the object.
4036          */
4037         return atomic_long_read(&obj->base.filp->f_count) == 1;
4038 }
4039
4040 static void __i915_gem_free_objects(struct drm_i915_private *i915,
4041                                     struct llist_node *freed)
4042 {
4043         struct drm_i915_gem_object *obj, *on;
4044
4045         mutex_lock(&i915->drm.struct_mutex);
4046         intel_runtime_pm_get(i915);
4047         llist_for_each_entry(obj, freed, freed) {
4048                 struct i915_vma *vma, *vn;
4049
4050                 trace_i915_gem_object_destroy(obj);
4051
4052                 GEM_BUG_ON(i915_gem_object_is_active(obj));
4053                 list_for_each_entry_safe(vma, vn,
4054                                          &obj->vma_list, obj_link) {
4055                         GEM_BUG_ON(!i915_vma_is_ggtt(vma));
4056                         GEM_BUG_ON(i915_vma_is_active(vma));
4057                         vma->flags &= ~I915_VMA_PIN_MASK;
4058                         i915_vma_close(vma);
4059                 }
4060                 GEM_BUG_ON(!list_empty(&obj->vma_list));
4061                 GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));
4062
4063                 list_del(&obj->global_link);
4064         }
4065         intel_runtime_pm_put(i915);
4066         mutex_unlock(&i915->drm.struct_mutex);
4067
4068         llist_for_each_entry_safe(obj, on, freed, freed) {
4069                 GEM_BUG_ON(obj->bind_count);
4070                 GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
4071
4072                 if (obj->ops->release)
4073                         obj->ops->release(obj);
4074
4075                 if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
4076                         atomic_set(&obj->mm.pages_pin_count, 0);
4077                 __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
4078                 GEM_BUG_ON(obj->mm.pages);
4079
4080                 if (obj->base.import_attach)
4081                         drm_prime_gem_destroy(&obj->base, NULL);
4082
4083                 reservation_object_fini(&obj->__builtin_resv);
4084                 drm_gem_object_release(&obj->base);
4085                 i915_gem_info_remove_obj(i915, obj->base.size);
4086
4087                 kfree(obj->bit_17);
4088                 i915_gem_object_free(obj);
4089         }
4090 }
4091
4092 static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
4093 {
4094         struct llist_node *freed;
4095
4096         freed = llist_del_all(&i915->mm.free_list);
4097         if (unlikely(freed))
4098                 __i915_gem_free_objects(i915, freed);
4099 }
4100
4101 static void __i915_gem_free_work(struct work_struct *work)
4102 {
4103         struct drm_i915_private *i915 =
4104                 container_of(work, struct drm_i915_private, mm.free_work);
4105         struct llist_node *freed;
4106
4107         /* All file-owned VMA should have been released by this point through
4108          * i915_gem_close_object(), or earlier by i915_gem_context_close().
4109          * However, the object may also be bound into the global GTT (e.g.
4110          * older GPUs without per-process support, or for direct access through
4111          * the GTT either for the user or for scanout). Those VMA still need to
4112          * be unbound now.
4113          */
4114
4115         while ((freed = llist_del_all(&i915->mm.free_list)))
4116                 __i915_gem_free_objects(i915, freed);
4117 }
4118
4119 static void __i915_gem_free_object_rcu(struct rcu_head *head)
4120 {
4121         struct drm_i915_gem_object *obj =
4122                 container_of(head, typeof(*obj), rcu);
4123         struct drm_i915_private *i915 = to_i915(obj->base.dev);
4124
4125         /* We can't simply use call_rcu() from i915_gem_free_object()
4126          * as we need to block whilst unbinding, and the call_rcu
4127          * task may be called from softirq context. So we take a
4128          * detour through a worker.
4129          */
4130         if (llist_add(&obj->freed, &i915->mm.free_list))
4131                 schedule_work(&i915->mm.free_work);
4132 }
4133
4134 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4135 {
4136         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4137
4138         if (obj->mm.quirked)
4139                 __i915_gem_object_unpin_pages(obj);
4140
4141         if (discard_backing_storage(obj))
4142                 obj->mm.madv = I915_MADV_DONTNEED;
4143
4144         /* Before we free the object, make sure any pure RCU-only
4145          * read-side critical sections are complete, e.g.
4146          * i915_gem_busy_ioctl(). For the corresponding synchronized
4147          * lookup see i915_gem_object_lookup_rcu().
4148          */
4149         call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
4150 }
4151
4152 void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
4153 {
4154         lockdep_assert_held(&obj->base.dev->struct_mutex);
4155
4156         GEM_BUG_ON(i915_gem_object_has_active_reference(obj));
4157         if (i915_gem_object_is_active(obj))
4158                 i915_gem_object_set_active_reference(obj);
4159         else
4160                 i915_gem_object_put(obj);
4161 }
4162
4163 static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv)
4164 {
4165         struct intel_engine_cs *engine;
4166         enum intel_engine_id id;
4167
4168         for_each_engine(engine, dev_priv, id)
4169                 GEM_BUG_ON(engine->last_context != dev_priv->kernel_context);
4170 }
4171
4172 int i915_gem_suspend(struct drm_device *dev)
4173 {
4174         struct drm_i915_private *dev_priv = to_i915(dev);
4175         int ret;
4176
4177         intel_suspend_gt_powersave(dev_priv);
4178
4179         mutex_lock(&dev->struct_mutex);
4180
4181         /* We have to flush all the executing contexts to main memory so
4182          * that they can be saved in the hibernation image. To ensure the last
4183          * context image is coherent, we have to switch away from it. That
4184          * leaves the dev_priv->kernel_context still active when
4185          * we actually suspend, and its image in memory may not match the GPU
4186          * state. Fortunately, the kernel_context is disposable and we do
4187          * not rely on its state.
4188          */
4189         ret = i915_gem_switch_to_kernel_context(dev_priv);
4190         if (ret)
4191                 goto err;
4192
4193         ret = i915_gem_wait_for_idle(dev_priv,
4194                                      I915_WAIT_INTERRUPTIBLE |
4195                                      I915_WAIT_LOCKED);
4196         if (ret)
4197                 goto err;
4198
4199         i915_gem_retire_requests(dev_priv);
4200         GEM_BUG_ON(dev_priv->gt.active_requests);
4201
4202         assert_kernel_context_is_current(dev_priv);
4203         i915_gem_context_lost(dev_priv);
4204         mutex_unlock(&dev->struct_mutex);
4205
4206         cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
4207         cancel_delayed_work_sync(&dev_priv->gt.retire_work);
4208         flush_delayed_work(&dev_priv->gt.idle_work);
4209         flush_work(&dev_priv->mm.free_work);
4210
4211         /* Assert that we successfully flushed all the work and
4212          * reset the GPU back to its idle, low power state.
4213          */
4214         WARN_ON(dev_priv->gt.awake);
4215         WARN_ON(!intel_execlists_idle(dev_priv));
4216
4217         /*
4218          * Neither the BIOS, ourselves nor any other kernel
4219          * expects the system to be in execlists mode on startup,
4220          * so we need to reset the GPU back to legacy mode. And the only
4221          * known way to disable logical contexts is through a GPU reset.
4222          *
4223          * So in order to leave the system in a known default configuration,
4224          * always reset the GPU upon unload and suspend. Afterwards we then
4225          * clean up the GEM state tracking, flushing off the requests and
4226          * leaving the system in a known idle state.
4227          *
4228          * Note that it is of the utmost importance that the GPU is idle and
4229          * all stray writes are flushed *before* we dismantle the backing
4230          * storage for the pinned objects.
4231          *
4232          * However, since we are uncertain that resetting the GPU on older
4233          * machines is a good idea, we don't - just in case it leaves the
4234          * machine in an unusable condition.
4235          */
4236         if (HAS_HW_CONTEXTS(dev_priv)) {
4237                 int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
4238                 WARN_ON(reset && reset != -ENODEV);
4239         }
4240
4241         return 0;
4242
4243 err:
4244         mutex_unlock(&dev->struct_mutex);
4245         return ret;
4246 }
4247
4248 void i915_gem_resume(struct drm_device *dev)
4249 {
4250         struct drm_i915_private *dev_priv = to_i915(dev);
4251
4252         WARN_ON(dev_priv->gt.awake);
4253
4254         mutex_lock(&dev->struct_mutex);
4255         i915_gem_restore_gtt_mappings(dev_priv);
4256
4257         /* As we didn't flush the kernel context before suspend, we cannot
4258          * guarantee that the context image is complete. So let's just reset
4259          * it and start again.
4260          */
4261         dev_priv->gt.resume(dev_priv);
4262
4263         mutex_unlock(&dev->struct_mutex);
4264 }
4265
4266 void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
4267 {
4268         if (INTEL_GEN(dev_priv) < 5 ||
4269             dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4270                 return;
4271
4272         I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4273                                  DISP_TILE_SURFACE_SWIZZLING);
4274
4275         if (IS_GEN5(dev_priv))
4276                 return;
4277
4278         I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4279         if (IS_GEN6(dev_priv))
4280                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4281         else if (IS_GEN7(dev_priv))
4282                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4283         else if (IS_GEN8(dev_priv))
4284                 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4285         else
4286                 BUG();
4287 }
4288
4289 static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
4290 {
4291         I915_WRITE(RING_CTL(base), 0);
4292         I915_WRITE(RING_HEAD(base), 0);
4293         I915_WRITE(RING_TAIL(base), 0);
4294         I915_WRITE(RING_START(base), 0);
4295 }
4296
4297 static void init_unused_rings(struct drm_i915_private *dev_priv)
4298 {
4299         if (IS_I830(dev_priv)) {
4300                 init_unused_ring(dev_priv, PRB1_BASE);
4301                 init_unused_ring(dev_priv, SRB0_BASE);
4302                 init_unused_ring(dev_priv, SRB1_BASE);
4303                 init_unused_ring(dev_priv, SRB2_BASE);
4304                 init_unused_ring(dev_priv, SRB3_BASE);
4305         } else if (IS_GEN2(dev_priv)) {
4306                 init_unused_ring(dev_priv, SRB0_BASE);
4307                 init_unused_ring(dev_priv, SRB1_BASE);
4308         } else if (IS_GEN3(dev_priv)) {
4309                 init_unused_ring(dev_priv, PRB1_BASE);
4310                 init_unused_ring(dev_priv, PRB2_BASE);
4311         }
4312 }
4313
4314 int
4315 i915_gem_init_hw(struct drm_device *dev)
4316 {
4317         struct drm_i915_private *dev_priv = to_i915(dev);
4318         struct intel_engine_cs *engine;
4319         enum intel_engine_id id;
4320         int ret;
4321
4322         dev_priv->gt.last_init_time = ktime_get();
4323
4324         /* Double layer security blanket, see i915_gem_init() */
4325         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4326
4327         if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
4328                 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4329
4330         if (IS_HASWELL(dev_priv))
4331                 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
4332                            LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4333
4334         if (HAS_PCH_NOP(dev_priv)) {
4335                 if (IS_IVYBRIDGE(dev_priv)) {
4336                         u32 temp = I915_READ(GEN7_MSG_CTL);
4337                         temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4338                         I915_WRITE(GEN7_MSG_CTL, temp);
4339                 } else if (INTEL_GEN(dev_priv) >= 7) {
4340                         u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4341                         temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4342                         I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4343                 }
4344         }
4345
4346         i915_gem_init_swizzling(dev_priv);
4347
4348         /*
4349          * At least 830 can leave some of the unused rings
4350          * "active" (i.e. head != tail) after resume which
4351          * will prevent C3 entry. Make sure all unused rings
4352          * are totally idle.
4353          */
4354         init_unused_rings(dev_priv);
4355
4356         BUG_ON(!dev_priv->kernel_context);
4357
4358         ret = i915_ppgtt_init_hw(dev_priv);
4359         if (ret) {
4360                 DRM_ERROR("PPGTT enable HW failed %d\n", ret);
4361                 goto out;
4362         }
4363
4364         /* Need to do basic initialisation of all rings first: */
4365         for_each_engine(engine, dev_priv, id) {
4366                 ret = engine->init_hw(engine);
4367                 if (ret)
4368                         goto out;
4369         }
4370
4371         intel_mocs_init_l3cc_table(dev);
4372
4373         /* We can't enable contexts until all firmware is loaded */
4374         ret = intel_guc_setup(dev);
4375         if (ret)
4376                 goto out;
4377
4378 out:
4379         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4380         return ret;
4381 }
4382
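     /*
      * Decide whether inter-engine semaphores should be used: they require
      * GEN6+ and are not used together with execlists, an explicit modparam
      * value wins, and with the default (auto) value semaphores are also
      * avoided on SNB when the IOMMU is remapping graphics addresses.
      */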
4383 bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
4384 {
4385         if (INTEL_INFO(dev_priv)->gen < 6)
4386                 return false;
4387
4388         /* TODO: make semaphores and Execlists play nicely together */
4389         if (i915.enable_execlists)
4390                 return false;
4391
4392         if (value >= 0)
4393                 return value;
4394
4395 #ifdef CONFIG_INTEL_IOMMU
4396         /* Avoid semaphores on SNB when IO remapping (VT-d) is active */
4397         if (INTEL_INFO(dev_priv)->gen == 6 && intel_iommu_gfx_mapped)
4398                 return false;
4399 #endif
4400
4401         return true;
4402 }
4403
4404 int i915_gem_init(struct drm_device *dev)
4405 {
4406         struct drm_i915_private *dev_priv = to_i915(dev);
4407         int ret;
4408
4409         mutex_lock(&dev->struct_mutex);
4410
4411         if (!i915.enable_execlists) {
4412                 dev_priv->gt.resume = intel_legacy_submission_resume;
4413                 dev_priv->gt.cleanup_engine = intel_engine_cleanup;
4414         } else {
4415                 dev_priv->gt.resume = intel_lr_context_resume;
4416                 dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
4417         }
4418
4419         /* This is just a security blanket to placate dragons.
4420          * On some systems, we very sporadically observe that the first TLBs
4421          * used by the CS may be stale, despite us poking the TLB reset. If
4422          * we hold forcewake during initialisation, these problems
4423          * just magically go away.
4424          */
4425         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4426
4427         i915_gem_init_userptr(dev_priv);
4428
4429         ret = i915_gem_init_ggtt(dev_priv);
4430         if (ret)
4431                 goto out_unlock;
4432
4433         ret = i915_gem_context_init(dev);
4434         if (ret)
4435                 goto out_unlock;
4436
4437         ret = intel_engines_init(dev);
4438         if (ret)
4439                 goto out_unlock;
4440
4441         ret = i915_gem_init_hw(dev);
4442         if (ret == -EIO) {
4443                 /* Allow engine initialisation to fail by marking the GPU as
4444                  * wedged. But we only want to do this when the GPU is angry;
4445                  * for any other failure, such as an allocation failure, bail.
4446                  */
4447                 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
4448                 i915_gem_set_wedged(dev_priv);
4449                 ret = 0;
4450         }
4451
4452 out_unlock:
4453         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4454         mutex_unlock(&dev->struct_mutex);
4455
4456         return ret;
4457 }
4458
4459 void
4460 i915_gem_cleanup_engines(struct drm_device *dev)
4461 {
4462         struct drm_i915_private *dev_priv = to_i915(dev);
4463         struct intel_engine_cs *engine;
4464         enum intel_engine_id id;
4465
4466         for_each_engine(engine, dev_priv, id)
4467                 dev_priv->gt.cleanup_engine(engine);
4468 }
4469
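     /*
      * Determine how many fence registers the platform provides (32 on
      * GEN7+ other than VLV/CHV, 16 on GEN4+/945/G33, 8 otherwise), query
      * the number allotted to us when running as a vGPU guest, and place
      * every register on the free list before detecting bit-6 swizzling.
      */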
4470 void
4471 i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
4472 {
4473         int i;
4474
4475         if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
4476             !IS_CHERRYVIEW(dev_priv))
4477                 dev_priv->num_fence_regs = 32;
4478         else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) ||
4479                  IS_I945GM(dev_priv) || IS_G33(dev_priv))
4480                 dev_priv->num_fence_regs = 16;
4481         else
4482                 dev_priv->num_fence_regs = 8;
4483
4484         if (intel_vgpu_active(dev_priv))
4485                 dev_priv->num_fence_regs =
4486                                 I915_READ(vgtif_reg(avail_rs.fence_num));
4487
4488         /* Initialize fence registers to zero */
4489         for (i = 0; i < dev_priv->num_fence_regs; i++) {
4490                 struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
4491
4492                 fence->i915 = dev_priv;
4493                 fence->id = i;
4494                 list_add_tail(&fence->link, &dev_priv->mm.fence_list);
4495         }
4496         i915_gem_restore_fences(dev_priv);
4497
4498         i915_gem_detect_bit_6_swizzle(dev_priv);
4499 }
4500
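     /*
      * One-time driver-load initialisation for GEM: create the slab caches
      * for objects, vmas, requests and dependencies, set up the global
      * timeline, and initialise the bookkeeping lists, workers and wait
      * queues used by the rest of the GEM code.
      */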
4501 int
4502 i915_gem_load_init(struct drm_device *dev)
4503 {
4504         struct drm_i915_private *dev_priv = to_i915(dev);
4505         int err = -ENOMEM;
4506
4507         dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
4508         if (!dev_priv->objects)
4509                 goto err_out;
4510
4511         dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
4512         if (!dev_priv->vmas)
4513                 goto err_objects;
4514
4515         dev_priv->requests = KMEM_CACHE(drm_i915_gem_request,
4516                                         SLAB_HWCACHE_ALIGN |
4517                                         SLAB_RECLAIM_ACCOUNT |
4518                                         SLAB_DESTROY_BY_RCU);
4519         if (!dev_priv->requests)
4520                 goto err_vmas;
4521
4522         dev_priv->dependencies = KMEM_CACHE(i915_dependency,
4523                                             SLAB_HWCACHE_ALIGN |
4524                                             SLAB_RECLAIM_ACCOUNT);
4525         if (!dev_priv->dependencies)
4526                 goto err_requests;
4527
4528         mutex_lock(&dev_priv->drm.struct_mutex);
4529         INIT_LIST_HEAD(&dev_priv->gt.timelines);
4530         err = i915_gem_timeline_init__global(dev_priv);
4531         mutex_unlock(&dev_priv->drm.struct_mutex);
4532         if (err)
4533                 goto err_dependencies;
4534
4535         INIT_LIST_HEAD(&dev_priv->context_list);
4536         INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
4537         init_llist_head(&dev_priv->mm.free_list);
4538         INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4539         INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4540         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4541         INIT_LIST_HEAD(&dev_priv->mm.userfault_list);
4542         INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
4543                           i915_gem_retire_work_handler);
4544         INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
4545                           i915_gem_idle_work_handler);
4546         init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
4547         init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4548
4549         dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4550
4551         init_waitqueue_head(&dev_priv->pending_flip_queue);
4552
4553         dev_priv->mm.interruptible = true;
4554
4555         atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
4556
4557         spin_lock_init(&dev_priv->fb_tracking.lock);
4558
4559         return 0;
4560
4561 err_dependencies:
4562         kmem_cache_destroy(dev_priv->dependencies);
4563 err_requests:
4564         kmem_cache_destroy(dev_priv->requests);
4565 err_vmas:
4566         kmem_cache_destroy(dev_priv->vmas);
4567 err_objects:
4568         kmem_cache_destroy(dev_priv->objects);
4569 err_out:
4570         return err;
4571 }
4572
4573 void i915_gem_load_cleanup(struct drm_device *dev)
4574 {
4575         struct drm_i915_private *dev_priv = to_i915(dev);
4576
4577         WARN_ON(!llist_empty(&dev_priv->mm.free_list));
4578
4579         mutex_lock(&dev_priv->drm.struct_mutex);
4580         i915_gem_timeline_fini(&dev_priv->gt.global_timeline);
4581         WARN_ON(!list_empty(&dev_priv->gt.timelines));
4582         mutex_unlock(&dev_priv->drm.struct_mutex);
4583
4584         kmem_cache_destroy(dev_priv->dependencies);
4585         kmem_cache_destroy(dev_priv->requests);
4586         kmem_cache_destroy(dev_priv->vmas);
4587         kmem_cache_destroy(dev_priv->objects);
4588
4589         /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
4590         rcu_barrier();
4591 }
4592
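     /*
      * Hibernation freeze callback: drop as many GEM pages as possible so
      * that they need not be written into the hibernation image;
      * i915_gem_freeze_late() below then fixes up the domain tracking on
      * whatever remains.
      */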
4593 int i915_gem_freeze(struct drm_i915_private *dev_priv)
4594 {
4595         intel_runtime_pm_get(dev_priv);
4596
4597         mutex_lock(&dev_priv->drm.struct_mutex);
4598         i915_gem_shrink_all(dev_priv);
4599         mutex_unlock(&dev_priv->drm.struct_mutex);
4600
4601         intel_runtime_pm_put(dev_priv);
4602
4603         return 0;
4604 }
4605
4606 int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
4607 {
4608         struct drm_i915_gem_object *obj;
4609         struct list_head *phases[] = {
4610                 &dev_priv->mm.unbound_list,
4611                 &dev_priv->mm.bound_list,
4612                 NULL
4613         }, **p;
4614
4615         /* Called just before we write the hibernation image.
4616          *
4617          * We need to update the domain tracking to reflect that the CPU
4618          * will be accessing all the pages to create and restore the
4619          * hibernation image, and so upon restoration those pages will be
4620          * in the CPU domain.
4621          *
4622          * To make sure the hibernation image contains the latest state,
4623          * we update that state just before writing out the image.
4624          *
4625          * To try to reduce the size of the hibernation image, we manually
4626          * shrink the objects as well.
4627          */
4628
4629         mutex_lock(&dev_priv->drm.struct_mutex);
4630         i915_gem_shrink(dev_priv, -1UL, I915_SHRINK_UNBOUND);
4631
4632         for (p = phases; *p; p++) {
4633                 list_for_each_entry(obj, *p, global_link) {
4634                         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4635                         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4636                 }
4637         }
4638         mutex_unlock(&dev_priv->drm.struct_mutex);
4639
4640         return 0;
4641 }
4642
4643 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4644 {
4645         struct drm_i915_file_private *file_priv = file->driver_priv;
4646         struct drm_i915_gem_request *request;
4647
4648         /* Clean up our request list when the client is going away, so that
4649          * later retire_requests won't dereference our soon-to-be-gone
4650          * file_priv.
4651          */
4652         spin_lock(&file_priv->mm.lock);
4653         list_for_each_entry(request, &file_priv->mm.request_list, client_list)
4654                 request->file_priv = NULL;
4655         spin_unlock(&file_priv->mm.lock);
4656
4657         if (!list_empty(&file_priv->rps.link)) {
4658                 spin_lock(&to_i915(dev)->rps.client_lock);
4659                 list_del(&file_priv->rps.link);
4660                 spin_unlock(&to_i915(dev)->rps.client_lock);
4661         }
4662 }
4663
4664 int i915_gem_open(struct drm_device *dev, struct drm_file *file)
4665 {
4666         struct drm_i915_file_private *file_priv;
4667         int ret;
4668
4669         DRM_DEBUG("\n");
4670
4671         file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
4672         if (!file_priv)
4673                 return -ENOMEM;
4674
4675         file->driver_priv = file_priv;
4676         file_priv->dev_priv = to_i915(dev);
4677         file_priv->file = file;
4678         INIT_LIST_HEAD(&file_priv->rps.link);
4679
4680         spin_lock_init(&file_priv->mm.lock);
4681         INIT_LIST_HEAD(&file_priv->mm.request_list);
4682
4683         file_priv->bsd_engine = -1;
4684
4685         ret = i915_gem_context_open(dev, file);
4686         if (ret)
4687                 kfree(file_priv);
4688
4689         return ret;
4690 }
4691
4692 /**
4693  * i915_gem_track_fb - update frontbuffer tracking
4694  * @old: current GEM buffer for the frontbuffer slots
4695  * @new: new GEM buffer for the frontbuffer slots
4696  * @frontbuffer_bits: bitmask of frontbuffer slots
4697  *
4698  * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
4699  * from @old and setting them in @new. Both @old and @new can be NULL.
4700  */
4701 void i915_gem_track_fb(struct drm_i915_gem_object *old,
4702                        struct drm_i915_gem_object *new,
4703                        unsigned frontbuffer_bits)
4704 {
4705         /* Control of individual bits within the mask is guarded by
4706          * the owning plane->mutex, i.e. we can never see concurrent
4707          * manipulation of individual bits. But since the bitfield as a whole
4708          * is updated using RMW, we need to use atomics in order to update
4709          * the bits.
4710          */
4711         BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
4712                      sizeof(atomic_t) * BITS_PER_BYTE);
4713
4714         if (old) {
4715                 WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
4716                 atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
4717         }
4718
4719         if (new) {
4720                 WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
4721                 atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
4722         }
4723 }
4724
4725 /* Allocate a new GEM object and fill it with the supplied data */
4726 struct drm_i915_gem_object *
4727 i915_gem_object_create_from_data(struct drm_device *dev,
4728                                  const void *data, size_t size)
4729 {
4730         struct drm_i915_gem_object *obj;
4731         struct sg_table *sg;
4732         size_t bytes;
4733         int ret;
4734
4735         obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
4736         if (IS_ERR(obj))
4737                 return obj;
4738
4739         ret = i915_gem_object_set_to_cpu_domain(obj, true);
4740         if (ret)
4741                 goto fail;
4742
4743         ret = i915_gem_object_pin_pages(obj);
4744         if (ret)
4745                 goto fail;
4746
4747         sg = obj->mm.pages;
4748         bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
4749         obj->mm.dirty = true; /* Backing store is now out of date */
4750         i915_gem_object_unpin_pages(obj);
4751
4752         if (WARN_ON(bytes != size)) {
4753                 DRM_ERROR("Incomplete copy, wrote %zu of %zu\n", bytes, size);
4754                 ret = -EFAULT;
4755                 goto fail;
4756         }
4757
4758         return obj;
4759
4760 fail:
4761         i915_gem_object_put(obj);
4762         return ERR_PTR(ret);
4763 }
4764
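     /*
      * Look up the scatterlist entry covering page @n of the object,
      * returning that page's offset into the entry via @offset. The pages
      * must already be pinned. A rough usage sketch:
      *
      *	sg = i915_gem_object_get_sg(obj, n, &offset);
      *	page = nth_page(sg_page(sg), offset);
      *
      * which is exactly what i915_gem_object_get_page() below does.
      */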
4765 struct scatterlist *
4766 i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
4767                        unsigned int n,
4768                        unsigned int *offset)
4769 {
4770         struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
4771         struct scatterlist *sg;
4772         unsigned int idx, count;
4773
4774         might_sleep();
4775         GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
4776         GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
4777
4778         /* As we iterate forward through the sg, we record each entry in a
4779          * radixtree for quick repeated (backwards) lookups. If we have seen
4780          * this index previously, we will have an entry for it.
4781          *
4782          * Initial lookup is O(N), but this is amortized to O(1) for
4783          * sequential page access (where each new request is consecutive
4784          * to the previous one). Repeated lookups are O(lg(obj->base.size)),
4785          * i.e. O(1) with a large constant!
4786          */
4787         if (n < READ_ONCE(iter->sg_idx))
4788                 goto lookup;
4789
4790         mutex_lock(&iter->lock);
4791
4792         /* We prefer to reuse the last sg so that repeated lookups of this
4793          * (or the subsequent) sg are fast - comparing against the last
4794          * sg is faster than going through the radixtree.
4795          */
4796
4797         sg = iter->sg_pos;
4798         idx = iter->sg_idx;
4799         count = __sg_page_count(sg);
4800
4801         while (idx + count <= n) {
4802                 unsigned long exception, i;
4803                 int ret;
4804
4805                 /* If we cannot allocate and insert this entry, or the
4806                  * individual pages from this range, cancel updating the
4807                  * sg_idx so that on this lookup we are forced to linearly
4808                  * scan onwards, but on future lookups we will try the
4809                  * insertion again (in which case we need to be careful of
4810                  * the error return reporting that we have already inserted
4811                  * this index).
4812                  */
4813                 ret = radix_tree_insert(&iter->radix, idx, sg);
4814                 if (ret && ret != -EEXIST)
4815                         goto scan;
4816
4817                 exception =
4818                         RADIX_TREE_EXCEPTIONAL_ENTRY |
4819                         idx << RADIX_TREE_EXCEPTIONAL_SHIFT;
4820                 for (i = 1; i < count; i++) {
4821                         ret = radix_tree_insert(&iter->radix, idx + i,
4822                                                 (void *)exception);
4823                         if (ret && ret != -EEXIST)
4824                                 goto scan;
4825                 }
4826
4827                 idx += count;
4828                 sg = ____sg_next(sg);
4829                 count = __sg_page_count(sg);
4830         }
4831
4832 scan:
4833         iter->sg_pos = sg;
4834         iter->sg_idx = idx;
4835
4836         mutex_unlock(&iter->lock);
4837
4838         if (unlikely(n < idx)) /* insertion completed by another thread */
4839                 goto lookup;
4840
4841         /* In case we failed to insert the entry into the radixtree, we need
4842          * to look beyond the current sg.
4843          */
4844         while (idx + count <= n) {
4845                 idx += count;
4846                 sg = ____sg_next(sg);
4847                 count = __sg_page_count(sg);
4848         }
4849
4850         *offset = n - idx;
4851         return sg;
4852
4853 lookup:
4854         rcu_read_lock();
4855
4856         sg = radix_tree_lookup(&iter->radix, n);
4857         GEM_BUG_ON(!sg);
4858
4859         /* If this index is in the middle of a multi-page sg entry,
4860          * the radixtree will contain an exceptional entry that points
4861          * to the start of that range. We will return the pointer to
4862          * the base page and the offset of this page within the
4863          * sg entry's range.
4864          */
4865         *offset = 0;
4866         if (unlikely(radix_tree_exception(sg))) {
4867                 unsigned long base =
4868                         (unsigned long)sg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
4869
4870                 sg = radix_tree_lookup(&iter->radix, base);
4871                 GEM_BUG_ON(!sg);
4872
4873                 *offset = n - base;
4874         }
4875
4876         rcu_read_unlock();
4877
4878         return sg;
4879 }
4880
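     /* Return the struct page backing page index @n of the object; only
      * valid for shmem-backed objects whose pages are currently pinned.
      */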
4881 struct page *
4882 i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
4883 {
4884         struct scatterlist *sg;
4885         unsigned int offset;
4886
4887         GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
4888
4889         sg = i915_gem_object_get_sg(obj, n, &offset);
4890         return nth_page(sg_page(sg), offset);
4891 }
4892
4893 /* Like i915_gem_object_get_page(), but mark the returned page dirty */
4894 struct page *
4895 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
4896                                unsigned int n)
4897 {
4898         struct page *page;
4899
4900         page = i915_gem_object_get_page(obj, n);
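             /* When obj->mm.dirty is set, every page is marked dirty as the
              * backing storage is released, so only flag this page
              * individually when the object is not already tracked dirty.
              */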
4901         if (!obj->mm.dirty)
4902                 set_page_dirty(page);
4903
4904         return page;
4905 }
4906
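     /* Return the DMA (bus) address of page index @n, for callers that
      * access the object through the scatterlist rather than via struct
      * page pointers.
      */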
4907 dma_addr_t
4908 i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
4909                                 unsigned long n)
4910 {
4911         struct scatterlist *sg;
4912         unsigned int offset;
4913
4914         sg = i915_gem_object_get_sg(obj, n, &offset);
4915         return sg_dma_address(sg) + (offset << PAGE_SHIFT);
4916 }