drm/ttm: add number of bytes moved to the operation context

/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/reservation.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);

static struct attribute ttm_bo_count = {
        .name = "bo_count",
        .mode = S_IRUGO
};

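/*
 * Decode the memory type index (TTM_PL_*) from the memory-type bits of a
 * placement's flags; returns -EINVAL if no memory-type bit is set.
 */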
static inline int ttm_mem_type_from_place(const struct ttm_place *place,
                                          uint32_t *mem_type)
{
        int pos;

        pos = ffs(place->flags & TTM_PL_MASK_MEM);
        if (unlikely(!pos))
                return -EINVAL;

        *mem_type = pos - 1;
        return 0;
}

static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
        struct drm_printer p = drm_debug_printer(TTM_PFX);

        pr_err("    has_type: %d\n", man->has_type);
        pr_err("    use_type: %d\n", man->use_type);
        pr_err("    flags: 0x%08X\n", man->flags);
        pr_err("    gpu_offset: 0x%08llX\n", man->gpu_offset);
        pr_err("    size: %llu\n", man->size);
        pr_err("    available_caching: 0x%08X\n", man->available_caching);
        pr_err("    default_caching: 0x%08X\n", man->default_caching);
        if (mem_type != TTM_PL_SYSTEM)
                (*man->func->debug)(man, &p);
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
                                        struct ttm_placement *placement)
{
        int i, ret;
        uint32_t mem_type;

        pr_err("No space for %p (%lu pages, %luK, %luM)\n",
               bo, bo->mem.num_pages, bo->mem.size >> 10,
               bo->mem.size >> 20);
        for (i = 0; i < placement->num_placement; i++) {
                ret = ttm_mem_type_from_place(&placement->placement[i],
                                                &mem_type);
                if (ret)
                        return;
                pr_err("  placement[%d]=0x%08X (%d)\n",
                       i, placement->placement[i].flags, mem_type);
                ttm_mem_type_debug(bo->bdev, mem_type);
        }
}

static ssize_t ttm_bo_global_show(struct kobject *kobj,
                                  struct attribute *attr,
                                  char *buffer)
{
        struct ttm_bo_global *glob =
                container_of(kobj, struct ttm_bo_global, kobj);

        return snprintf(buffer, PAGE_SIZE, "%d\n",
                                atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
        &ttm_bo_count,
        NULL
};

static const struct sysfs_ops ttm_bo_global_ops = {
        .show = &ttm_bo_global_show
};

static struct kobj_type ttm_bo_glob_kobj_type = {
        .release = &ttm_bo_global_kobj_release,
        .sysfs_ops = &ttm_bo_global_ops,
        .default_attrs = ttm_bo_global_attrs
};

static inline uint32_t ttm_bo_type_flags(unsigned type)
{
        return 1 << type;
}

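/*
 * Final release of the BO's list reference: the object must already be off
 * all lists and idle. Tear down the TTM, drop the move fence and the locks,
 * then free the object together with its accounted size.
 */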
static void ttm_bo_release_list(struct kref *list_kref)
{
        struct ttm_buffer_object *bo =
            container_of(list_kref, struct ttm_buffer_object, list_kref);
        struct ttm_bo_device *bdev = bo->bdev;
        size_t acc_size = bo->acc_size;

        BUG_ON(kref_read(&bo->list_kref));
        BUG_ON(kref_read(&bo->kref));
        BUG_ON(atomic_read(&bo->cpu_writers));
        BUG_ON(bo->mem.mm_node != NULL);
        BUG_ON(!list_empty(&bo->lru));
        BUG_ON(!list_empty(&bo->ddestroy));
        ttm_tt_destroy(bo->ttm);
        atomic_dec(&bo->glob->bo_count);
        dma_fence_put(bo->moving);
        reservation_object_fini(&bo->ttm_resv);
        mutex_destroy(&bo->wu_mutex);
        if (bo->destroy)
                bo->destroy(bo);
        else
                kfree(bo);
        ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}

void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man;

        lockdep_assert_held(&bo->resv->lock.base);

        if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

                BUG_ON(!list_empty(&bo->lru));

                man = &bdev->man[bo->mem.mem_type];
                list_add_tail(&bo->lru, &man->lru[bo->priority]);
                kref_get(&bo->list_kref);

                if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
                        list_add_tail(&bo->swap,
                                      &bo->glob->swap_lru[bo->priority]);
                        kref_get(&bo->list_kref);
                }
        }
}
EXPORT_SYMBOL(ttm_bo_add_to_lru);

static void ttm_bo_ref_bug(struct kref *list_kref)
{
        BUG();
}

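/*
 * Take the BO off the LRU and swap lists, dropping the list reference held
 * for each. Callers must hold the global lru_lock.
 */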
void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
        if (!list_empty(&bo->swap)) {
                list_del_init(&bo->swap);
                kref_put(&bo->list_kref, ttm_bo_ref_bug);
        }
        if (!list_empty(&bo->lru)) {
                list_del_init(&bo->lru);
                kref_put(&bo->list_kref, ttm_bo_ref_bug);
        }

        /*
         * TODO: Add a driver hook to delete from
         * driver-specific LRU's here.
         */
}

void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
{
        spin_lock(&bo->glob->lru_lock);
        ttm_bo_del_from_lru(bo);
        spin_unlock(&bo->glob->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);

void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
{
        lockdep_assert_held(&bo->resv->lock.base);

        ttm_bo_del_from_lru(bo);
        ttm_bo_add_to_lru(bo);
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);

/*
 * Call bo->mutex locked.
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
        int ret = 0;
        uint32_t page_flags = 0;

        TTM_ASSERT_LOCKED(&bo->mutex);
        bo->ttm = NULL;

        if (bdev->need_dma32)
                page_flags |= TTM_PAGE_FLAG_DMA32;

        switch (bo->type) {
        case ttm_bo_type_device:
                if (zero_alloc)
                        page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
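                /* fall through */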
        case ttm_bo_type_kernel:
                bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
                                                      page_flags, glob->dummy_read_page);
                if (unlikely(bo->ttm == NULL))
                        ret = -ENOMEM;
                break;
        case ttm_bo_type_sg:
                bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
                                                      page_flags | TTM_PAGE_FLAG_SG,
                                                      glob->dummy_read_page);
                if (unlikely(bo->ttm == NULL)) {
                        ret = -ENOMEM;
                        break;
                }
                bo->ttm->sg = bo->sg;
                break;
        default:
                pr_err("Illegal buffer object type\n");
                ret = -EINVAL;
                break;
        }

        return ret;
}

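/*
 * Move the BO's backing storage to @mem: unmap CPU mappings when needed,
 * create and bind a TTM for non-fixed destinations, then move the data via
 * ttm_bo_move_ttm, the driver's move callback or the memcpy fallback, and
 * finally account the bytes moved in @ctx.
 */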
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
                                  struct ttm_mem_reg *mem, bool evict,
                                  struct ttm_operation_ctx *ctx)
{
        struct ttm_bo_device *bdev = bo->bdev;
        bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
        bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
        struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
        struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
        int ret = 0;

        if (old_is_pci || new_is_pci ||
            ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
                ret = ttm_mem_io_lock(old_man, true);
                if (unlikely(ret != 0))
                        goto out_err;
                ttm_bo_unmap_virtual_locked(bo);
                ttm_mem_io_unlock(old_man);
        }

        /*
         * Create and bind a ttm if required.
         */

        if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
                if (bo->ttm == NULL) {
                        bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
                        ret = ttm_bo_add_ttm(bo, zero);
                        if (ret)
                                goto out_err;
                }

                ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
                if (ret)
                        goto out_err;

                if (mem->mem_type != TTM_PL_SYSTEM) {
                        ret = ttm_tt_bind(bo->ttm, mem);
                        if (ret)
                                goto out_err;
                }

                if (bo->mem.mem_type == TTM_PL_SYSTEM) {
                        if (bdev->driver->move_notify)
                                bdev->driver->move_notify(bo, evict, mem);
                        bo->mem = *mem;
                        mem->mm_node = NULL;
                        goto moved;
                }
        }

        if (bdev->driver->move_notify)
                bdev->driver->move_notify(bo, evict, mem);

        if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
            !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
                ret = ttm_bo_move_ttm(bo, ctx->interruptible,
                                      ctx->no_wait_gpu, mem);
        else if (bdev->driver->move)
                ret = bdev->driver->move(bo, evict, ctx, mem);
        else
                ret = ttm_bo_move_memcpy(bo, ctx->interruptible,
                                         ctx->no_wait_gpu, mem);

        if (ret) {
                if (bdev->driver->move_notify) {
                        struct ttm_mem_reg tmp_mem = *mem;
                        *mem = bo->mem;
                        bo->mem = tmp_mem;
                        bdev->driver->move_notify(bo, false, mem);
                        bo->mem = *mem;
                        *mem = tmp_mem;
                }

                goto out_err;
        }

moved:
        if (bo->evicted) {
                if (bdev->driver->invalidate_caches) {
                        ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
                        if (ret)
                                pr_err("Cannot flush read caches\n");
                }
                bo->evicted = false;
        }

        if (bo->mem.mm_node)
                bo->offset = (bo->mem.start << PAGE_SHIFT) +
                    bdev->man[bo->mem.mem_type].gpu_offset;
        else
                bo->offset = 0;

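        /*
         * Account the size of this move in the operation context so that
         * callers can see how much data a request actually moved.
         */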
        ctx->bytes_moved += bo->num_pages << PAGE_SHIFT;
        return 0;

out_err:
        new_man = &bdev->man[bo->mem.mem_type];
        if (new_man->flags & TTM_MEMTYPE_FLAG_FIXED) {
                ttm_tt_destroy(bo->ttm);
                bo->ttm = NULL;
        }

        return ret;
}

/**
 * Must be called with the bo reserved (bo::resv held).
 * Releases the GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
        if (bo->bdev->driver->move_notify)
                bo->bdev->driver->move_notify(bo, false, NULL);

        ttm_tt_destroy(bo->ttm);
        bo->ttm = NULL;
        ttm_bo_mem_put(bo, &bo->mem);
}

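/*
 * Give a BO with a shared reservation object its own one by copying the
 * current fences into bo->ttm_resv, so destruction no longer depends on the
 * shared object. Returns with bo->ttm_resv locked on success.
 */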
static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
{
        int r;

        if (bo->resv == &bo->ttm_resv)
                return 0;

        BUG_ON(!reservation_object_trylock(&bo->ttm_resv));

        r = reservation_object_copy_fences(&bo->ttm_resv, bo->resv);
        if (r)
                reservation_object_unlock(&bo->ttm_resv);

        return r;
}

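/*
 * Enable software signaling on the exclusive and all shared fences of
 * bo->ttm_resv (skipping fence implementations without a signaled
 * callback), so the delayed-destroy path sees them complete.
 */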
static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
        struct reservation_object_list *fobj;
        struct dma_fence *fence;
        int i;

        fobj = reservation_object_get_list(&bo->ttm_resv);
        fence = reservation_object_get_excl(&bo->ttm_resv);
        if (fence && !fence->ops->signaled)
                dma_fence_enable_sw_signaling(fence);

        for (i = 0; fobj && i < fobj->shared_count; ++i) {
                fence = rcu_dereference_protected(fobj->shared[i],
                                        reservation_object_held(bo->resv));

                if (!fence->ops->signaled)
                        dma_fence_enable_sw_signaling(fence);
        }
}

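/*
 * Tear the BO down immediately if all its fences have signaled; otherwise
 * put it on the device's delayed-destroy list and schedule the cleanup
 * worker.
 */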
static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
        int ret;

        ret = ttm_bo_individualize_resv(bo);
        if (ret) {
                /* Last resort: if we fail to allocate memory for the
                 * fences, block for the BO to become idle.
                 */
                reservation_object_wait_timeout_rcu(bo->resv, true, false,
                                                    30 * HZ);
                spin_lock(&glob->lru_lock);
                goto error;
        }

        spin_lock(&glob->lru_lock);
        ret = reservation_object_trylock(bo->resv) ? 0 : -EBUSY;
        if (!ret) {
                if (reservation_object_test_signaled_rcu(&bo->ttm_resv, true)) {
                        ttm_bo_del_from_lru(bo);
                        spin_unlock(&glob->lru_lock);
                        if (bo->resv != &bo->ttm_resv)
                                reservation_object_unlock(&bo->ttm_resv);

                        ttm_bo_cleanup_memtype_use(bo);
                        reservation_object_unlock(bo->resv);
                        return;
                }

                ttm_bo_flush_all_fences(bo);

                /*
                 * Make NO_EVICT bos immediately available to
                 * shrinkers, now that they are queued for
                 * destruction.
                 */
                if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
                        bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
                        ttm_bo_add_to_lru(bo);
                }

                reservation_object_unlock(bo->resv);
        }
        if (bo->resv != &bo->ttm_resv)
                reservation_object_unlock(&bo->ttm_resv);

error:
        kref_get(&bo->list_kref);
        list_add_tail(&bo->ddestroy, &bdev->ddestroy);
        spin_unlock(&glob->lru_lock);

        schedule_delayed_work(&bdev->wq,
                              ((HZ / 100) < 1) ? 1 : HZ / 100);
}

/**
 * ttm_bo_cleanup_refs - if the bo is idle, remove it from the delayed and
 * lru lists and unreference it; if it is not idle, do nothing.
 *
 * Must be called with lru_lock and reservation held; this function will
 * drop the lru lock and optionally the reservation lock before returning.
 *
 * @bo:                    The buffer object to clean up.
 * @interruptible:         Any sleeps should occur interruptibly.
 * @no_wait_gpu:           Never wait for gpu. Return -EBUSY instead.
 * @unlock_resv:           Unlock the reservation lock as well.
 */
static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
                               bool interruptible, bool no_wait_gpu,
                               bool unlock_resv)
{
        struct ttm_bo_global *glob = bo->glob;
        struct reservation_object *resv;
        int ret;

        if (unlikely(list_empty(&bo->ddestroy)))
                resv = bo->resv;
        else
                resv = &bo->ttm_resv;

        if (reservation_object_test_signaled_rcu(resv, true))
                ret = 0;
        else
                ret = -EBUSY;

        if (ret && !no_wait_gpu) {
                long lret;

                if (unlock_resv)
                        reservation_object_unlock(bo->resv);
                spin_unlock(&glob->lru_lock);

                lret = reservation_object_wait_timeout_rcu(resv, true,
                                                           interruptible,
                                                           30 * HZ);

                if (lret < 0)
                        return lret;
                else if (lret == 0)
                        return -EBUSY;

                spin_lock(&glob->lru_lock);
                if (unlock_resv && !reservation_object_trylock(bo->resv)) {
                        /*
                         * We raced, and lost: someone else holds the
                         * reservation now, and is probably busy in
                         * ttm_bo_cleanup_memtype_use.
                         *
                         * Even if that's not the case, any delayed
                         * destruction would succeed now that we've finished
                         * waiting, so just return success here.
                         */
                        spin_unlock(&glob->lru_lock);
                        return 0;
                }
                ret = 0;
        }

        if (ret || unlikely(list_empty(&bo->ddestroy))) {
                if (unlock_resv)
                        reservation_object_unlock(bo->resv);
                spin_unlock(&glob->lru_lock);
                return ret;
        }

        ttm_bo_del_from_lru(bo);
        list_del_init(&bo->ddestroy);
        kref_put(&bo->list_kref, ttm_bo_ref_bug);

        spin_unlock(&glob->lru_lock);
        ttm_bo_cleanup_memtype_use(bo);

        if (unlock_resv)
                reservation_object_unlock(bo->resv);

        return 0;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */
static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
        struct ttm_bo_global *glob = bdev->glob;
        struct list_head removed;
        bool empty;

        INIT_LIST_HEAD(&removed);

        spin_lock(&glob->lru_lock);
        while (!list_empty(&bdev->ddestroy)) {
                struct ttm_buffer_object *bo;

                bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
                                      ddestroy);
                kref_get(&bo->list_kref);
                list_move_tail(&bo->ddestroy, &removed);
                spin_unlock(&glob->lru_lock);

                reservation_object_lock(bo->resv, NULL);

                spin_lock(&glob->lru_lock);
                ttm_bo_cleanup_refs(bo, false, !remove_all, true);

                kref_put(&bo->list_kref, ttm_bo_release_list);
                spin_lock(&glob->lru_lock);
        }
        list_splice_tail(&removed, &bdev->ddestroy);
        empty = list_empty(&bdev->ddestroy);
        spin_unlock(&glob->lru_lock);

        return empty;
}

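/*
 * Delayed-destroy worker: retry the ddestroy list and reschedule itself
 * while buffers remain on it.
 */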
static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
        struct ttm_bo_device *bdev =
            container_of(work, struct ttm_bo_device, wq.work);

        if (!ttm_bo_delayed_delete(bdev, false)) {
                schedule_delayed_work(&bdev->wq,
                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
        }
}

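/*
 * The last reference to the BO is gone: remove its mmap offset, release
 * any I/O mappings, and destroy it now or queue delayed destruction.
 */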
static void ttm_bo_release(struct kref *kref)
{
        struct ttm_buffer_object *bo =
            container_of(kref, struct ttm_buffer_object, kref);
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

        drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
        ttm_mem_io_lock(man, false);
        ttm_mem_io_free_vm(bo);
        ttm_mem_io_unlock(man);
        ttm_bo_cleanup_refs_or_queue(bo);
        kref_put(&bo->list_kref, ttm_bo_release_list);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
        struct ttm_buffer_object *bo = *p_bo;

        *p_bo = NULL;
        kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_unref);

int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
        return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);

void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
{
        if (resched)
                schedule_delayed_work(&bdev->wq,
                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);

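/*
 * Move the BO out of its current placement into whatever the driver's
 * evict_flags callback allows.
 */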
static int ttm_bo_evict(struct ttm_buffer_object *bo,
                        struct ttm_operation_ctx *ctx)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_reg evict_mem;
        struct ttm_placement placement;
        int ret = 0;

        lockdep_assert_held(&bo->resv->lock.base);

        evict_mem = bo->mem;
        evict_mem.mm_node = NULL;
        evict_mem.bus.io_reserved_vm = false;
        evict_mem.bus.io_reserved_count = 0;

        placement.num_placement = 0;
        placement.num_busy_placement = 0;
        bdev->driver->evict_flags(bo, &placement);
        ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
        if (ret) {
                if (ret != -ERESTARTSYS) {
                        pr_err("Failed to find memory space for buffer 0x%p eviction\n",
                               bo);
                        ttm_bo_mem_space_debug(bo, &placement);
                }
                goto out;
        }

        ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, ctx);
        if (unlikely(ret)) {
                if (ret != -ERESTARTSYS)
                        pr_err("Buffer eviction failed\n");
                ttm_bo_mem_put(bo, &evict_mem);
                goto out;
        }
        bo->evicted = true;
out:
        return ret;
}

bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
                              const struct ttm_place *place)
{
        /* Don't evict this BO if it's outside of the
         * requested placement range
         */
        if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
            (place->lpfn && place->lpfn <= bo->mem.start))
                return false;

        return true;
}
EXPORT_SYMBOL(ttm_bo_eviction_valuable);

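/*
 * Walk the LRU lists of @mem_type in priority order and evict the first BO
 * that can be reserved and that the driver considers valuable to evict;
 * BOs already queued for delayed destruction are cleaned up instead.
 */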
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
                               struct reservation_object *resv,
                               uint32_t mem_type,
                               const struct ttm_place *place,
                               struct ttm_operation_ctx *ctx)
{
        struct ttm_bo_global *glob = bdev->glob;
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
        struct ttm_buffer_object *bo = NULL;
        bool locked = false;
        unsigned i;
        int ret;

        spin_lock(&glob->lru_lock);
        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
                list_for_each_entry(bo, &man->lru[i], lru) {
                        if (bo->resv == resv) {
                                if (list_empty(&bo->ddestroy))
                                        continue;
                        } else {
                                locked = reservation_object_trylock(bo->resv);
                                if (!locked)
                                        continue;
                        }

                        if (place && !bdev->driver->eviction_valuable(bo,
                                                                      place)) {
                                if (locked)
                                        reservation_object_unlock(bo->resv);
                                continue;
                        }
                        break;
                }

                /* If the inner loop terminated early, we have our candidate */
                if (&bo->lru != &man->lru[i])
                        break;

                bo = NULL;
        }

        if (!bo) {
                spin_unlock(&glob->lru_lock);
                return -EBUSY;
        }

        kref_get(&bo->list_kref);

        if (!list_empty(&bo->ddestroy)) {
                ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
                                          ctx->no_wait_gpu, locked);
                kref_put(&bo->list_kref, ttm_bo_release_list);
                return ret;
        }

        ttm_bo_del_from_lru(bo);
        spin_unlock(&glob->lru_lock);

        ret = ttm_bo_evict(bo, ctx);
        if (locked) {
                ttm_bo_unreserve(bo);
        } else {
                spin_lock(&glob->lru_lock);
                ttm_bo_add_to_lru(bo);
                spin_unlock(&glob->lru_lock);
        }

        kref_put(&bo->list_kref, ttm_bo_release_list);
        return ret;
}

void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];

        if (mem->mm_node)
                (*man->func->put_node)(man, mem);
}
EXPORT_SYMBOL(ttm_bo_mem_put);

/**
 * Add the last move fence to the BO and reserve a new shared slot.
 */
static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
                                 struct ttm_mem_type_manager *man,
                                 struct ttm_mem_reg *mem)
{
        struct dma_fence *fence;
        int ret;

        spin_lock(&man->move_lock);
        fence = dma_fence_get(man->move);
        spin_unlock(&man->move_lock);

        if (fence) {
                reservation_object_add_shared_fence(bo->resv, fence);

                ret = reservation_object_reserve_shared(bo->resv);
                if (unlikely(ret))
                        return ret;

                dma_fence_put(bo->moving);
                bo->moving = fence;
        }

        return 0;
}

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
                                        uint32_t mem_type,
                                        const struct ttm_place *place,
                                        struct ttm_mem_reg *mem,
                                        struct ttm_operation_ctx *ctx)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
        int ret;

        do {
                ret = (*man->func->get_node)(man, bo, place, mem);
                if (unlikely(ret != 0))
                        return ret;
                if (mem->mm_node)
                        break;
                ret = ttm_mem_evict_first(bdev, bo->resv, mem_type, place, ctx);
                if (unlikely(ret != 0))
                        return ret;
        } while (1);
        mem->mem_type = mem_type;
        return ttm_bo_add_move_fence(bo, man, mem);
}

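/*
 * Choose caching flags for the proposed placement: prefer the BO's current
 * caching, then the manager's default, then cached, write-combined and
 * finally uncached.
 */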
static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
                                      uint32_t cur_placement,
                                      uint32_t proposed_placement)
{
        uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
        uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

        /*
         * Keep current caching if possible.
         */

        if ((cur_placement & caching) != 0)
                result |= (cur_placement & caching);
        else if ((man->default_caching & caching) != 0)
                result |= man->default_caching;
        else if ((TTM_PL_FLAG_CACHED & caching) != 0)
                result |= TTM_PL_FLAG_CACHED;
        else if ((TTM_PL_FLAG_WC & caching) != 0)
                result |= TTM_PL_FLAG_WC;
        else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
                result |= TTM_PL_FLAG_UNCACHED;

        return result;
}

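/*
 * Check whether a memory type manager can satisfy @place, and if so compute
 * the resulting placement flags (the memory-type bit plus the caching bits
 * the manager supports).
 */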
static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
                                 uint32_t mem_type,
                                 const struct ttm_place *place,
                                 uint32_t *masked_placement)
{
        uint32_t cur_flags = ttm_bo_type_flags(mem_type);

        if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
                return false;

        if ((place->flags & man->available_caching) == 0)
                return false;

        cur_flags |= (place->flags & man->available_caching);

        *masked_placement = cur_flags;
        return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver.  If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
                        struct ttm_mem_reg *mem,
                        struct ttm_operation_ctx *ctx)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man;
        uint32_t mem_type = TTM_PL_SYSTEM;
        uint32_t cur_flags = 0;
        bool type_found = false;
        bool type_ok = false;
        bool has_erestartsys = false;
        int i, ret;

        ret = reservation_object_reserve_shared(bo->resv);
        if (unlikely(ret))
                return ret;

        mem->mm_node = NULL;
        for (i = 0; i < placement->num_placement; ++i) {
                const struct ttm_place *place = &placement->placement[i];

                ret = ttm_mem_type_from_place(place, &mem_type);
                if (ret)
                        return ret;
                man = &bdev->man[mem_type];
                if (!man->has_type || !man->use_type)
                        continue;

                type_ok = ttm_bo_mt_compatible(man, mem_type, place,
                                                &cur_flags);

                if (!type_ok)
                        continue;

                type_found = true;
                cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
                                                  cur_flags);
                /*
                 * Copy the access and other non-mapping-related flag bits
                 * from the memory placement flags into the current flags.
                 */
                ttm_flag_masked(&cur_flags, place->flags,
                                ~TTM_PL_MASK_MEMTYPE);

                if (mem_type == TTM_PL_SYSTEM)
                        break;

                ret = (*man->func->get_node)(man, bo, place, mem);
                if (unlikely(ret))
                        return ret;

                if (mem->mm_node) {
                        ret = ttm_bo_add_move_fence(bo, man, mem);
                        if (unlikely(ret)) {
                                (*man->func->put_node)(man, mem);
                                return ret;
                        }
                        break;
                }
        }

        if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
                mem->mem_type = mem_type;
                mem->placement = cur_flags;
                return 0;
        }

        for (i = 0; i < placement->num_busy_placement; ++i) {
                const struct ttm_place *place = &placement->busy_placement[i];

                ret = ttm_mem_type_from_place(place, &mem_type);
                if (ret)
                        return ret;
                man = &bdev->man[mem_type];
                if (!man->has_type || !man->use_type)
                        continue;
                if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
                        continue;

                type_found = true;
                cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
                                                  cur_flags);
                /*
                 * Copy the access and other non-mapping-related flag bits
                 * from the memory placement flags into the current flags.
                 */
                ttm_flag_masked(&cur_flags, place->flags,
                                ~TTM_PL_MASK_MEMTYPE);

                if (mem_type == TTM_PL_SYSTEM) {
                        mem->mem_type = mem_type;
                        mem->placement = cur_flags;
                        mem->mm_node = NULL;
                        return 0;
                }

                ret = ttm_bo_mem_force_space(bo, mem_type, place, mem, ctx);
                if (ret == 0 && mem->mm_node) {
                        mem->placement = cur_flags;
                        return 0;
                }
                if (ret == -ERESTARTSYS)
                        has_erestartsys = true;
        }

        if (!type_found) {
                pr_err("No compatible memory type found\n");
                return -EINVAL;
        }

        return (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
}
EXPORT_SYMBOL(ttm_bo_mem_space);

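/*
 * Allocate a new memory region that satisfies @placement and move the BO
 * into it, releasing the region again if the move fails.
 */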
static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
                              struct ttm_placement *placement,
                              struct ttm_operation_ctx *ctx)
{
        int ret = 0;
        struct ttm_mem_reg mem;

        lockdep_assert_held(&bo->resv->lock.base);

        mem.num_pages = bo->num_pages;
        mem.size = mem.num_pages << PAGE_SHIFT;
        mem.page_alignment = bo->mem.page_alignment;
        mem.bus.io_reserved_vm = false;
        mem.bus.io_reserved_count = 0;
        /*
         * Determine where to move the buffer.
         */
        ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
        if (ret)
                goto out_unlock;
        ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx);
out_unlock:
        if (ret && mem.mm_node)
                ttm_bo_mem_put(bo, &mem);
        return ret;
}

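/*
 * Check whether an existing memory region already satisfies one of the
 * given places, comparing the page range as well as the caching, memory
 * type and contiguity flags.
 */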
static bool ttm_bo_places_compat(const struct ttm_place *places,
                                 unsigned num_placement,
                                 struct ttm_mem_reg *mem,
                                 uint32_t *new_flags)
{
        unsigned i;

        for (i = 0; i < num_placement; i++) {
                const struct ttm_place *heap = &places[i];

                if (mem->mm_node && (mem->start < heap->fpfn ||
                     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
                        continue;

                *new_flags = heap->flags;
                if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
                    (*new_flags & mem->placement & TTM_PL_MASK_MEM) &&
                    (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) ||
                     (mem->placement & TTM_PL_FLAG_CONTIGUOUS)))
                        return true;
        }
        return false;
}

bool ttm_bo_mem_compat(struct ttm_placement *placement,
                       struct ttm_mem_reg *mem,
                       uint32_t *new_flags)
{
        if (ttm_bo_places_compat(placement->placement, placement->num_placement,
                                 mem, new_flags))
                return true;

        if ((placement->busy_placement != placement->placement ||
             placement->num_busy_placement > placement->num_placement) &&
            ttm_bo_places_compat(placement->busy_placement,
                                 placement->num_busy_placement,
                                 mem, new_flags))
                return true;

        return false;
}
EXPORT_SYMBOL(ttm_bo_mem_compat);

int ttm_bo_validate(struct ttm_buffer_object *bo,
                    struct ttm_placement *placement,
                    struct ttm_operation_ctx *ctx)
{
        int ret;
        uint32_t new_flags;

        lockdep_assert_held(&bo->resv->lock.base);
        /*
         * Check whether we need to move the buffer.
         */
        if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
                ret = ttm_bo_move_buffer(bo, placement, ctx);
                if (ret)
                        return ret;
        } else {
                /*
                 * Copy the access and other non-mapping-related flag bits
                 * from the compatible memory placement flags into the
                 * active flags.
                 */
                ttm_flag_masked(&bo->mem.placement, new_flags,
                                ~TTM_PL_MASK_MEMTYPE);
        }
        /*
         * We might need to add a TTM.
         */
        if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
                ret = ttm_bo_add_ttm(bo, true);
                if (ret)
                        return ret;
        }
        return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);

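/*
 * Initialize a (possibly embedded) buffer object and validate it into
 * @placement. On success the BO is returned reserved, unless @resv was
 * given, in which case the caller's own lock is kept held.
 */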
int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
                         struct ttm_buffer_object *bo,
                         unsigned long size,
                         enum ttm_bo_type type,
                         struct ttm_placement *placement,
                         uint32_t page_alignment,
                         struct ttm_operation_ctx *ctx,
                         struct file *persistent_swap_storage,
                         size_t acc_size,
                         struct sg_table *sg,
                         struct reservation_object *resv,
                         void (*destroy) (struct ttm_buffer_object *))
{
        int ret = 0;
        unsigned long num_pages;
        struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
        bool locked;

        ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
        if (ret) {
                pr_err("Out of kernel memory\n");
                if (destroy)
                        (*destroy)(bo);
                else
                        kfree(bo);
                return -ENOMEM;
        }

        num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (num_pages == 0) {
                pr_err("Illegal buffer object size\n");
                if (destroy)
                        (*destroy)(bo);
                else
                        kfree(bo);
                ttm_mem_global_free(mem_glob, acc_size);
                return -EINVAL;
        }
        bo->destroy = destroy;

        kref_init(&bo->kref);
        kref_init(&bo->list_kref);
        atomic_set(&bo->cpu_writers, 0);
        INIT_LIST_HEAD(&bo->lru);
        INIT_LIST_HEAD(&bo->ddestroy);
        INIT_LIST_HEAD(&bo->swap);
        INIT_LIST_HEAD(&bo->io_reserve_lru);
        mutex_init(&bo->wu_mutex);
        bo->bdev = bdev;
        bo->glob = bdev->glob;
        bo->type = type;
        bo->num_pages = num_pages;
        bo->mem.size = num_pages << PAGE_SHIFT;
        bo->mem.mem_type = TTM_PL_SYSTEM;
        bo->mem.num_pages = bo->num_pages;
        bo->mem.mm_node = NULL;
        bo->mem.page_alignment = page_alignment;
        bo->mem.bus.io_reserved_vm = false;
        bo->mem.bus.io_reserved_count = 0;
        bo->moving = NULL;
        bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
        bo->persistent_swap_storage = persistent_swap_storage;
        bo->acc_size = acc_size;
        bo->sg = sg;
        if (resv) {
                bo->resv = resv;
                lockdep_assert_held(&bo->resv->lock.base);
        } else {
                bo->resv = &bo->ttm_resv;
        }
        reservation_object_init(&bo->ttm_resv);
        atomic_inc(&bo->glob->bo_count);
        drm_vma_node_reset(&bo->vma_node);
        bo->priority = 0;

        /*
         * For ttm_bo_type_device buffers, allocate
         * address space from the device.
         */
        if (bo->type == ttm_bo_type_device ||
            bo->type == ttm_bo_type_sg)
                ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
                                         bo->mem.num_pages);

        /* Passed reservation objects should already be locked,
         * since otherwise lockdep will be angered in radeon.
         */
        if (!resv) {
                locked = ww_mutex_trylock(&bo->resv->lock);
                WARN_ON(!locked);
        }

        if (likely(!ret))
                ret = ttm_bo_validate(bo, placement, ctx);

        if (unlikely(ret)) {
                if (!resv)
                        ttm_bo_unreserve(bo);

                ttm_bo_unref(&bo);
                return ret;
        }

        if (resv && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
                spin_lock(&bo->glob->lru_lock);
                ttm_bo_add_to_lru(bo);
                spin_unlock(&bo->glob->lru_lock);
        }

        return ret;
}
EXPORT_SYMBOL(ttm_bo_init_reserved);

int ttm_bo_init(struct ttm_bo_device *bdev,
                struct ttm_buffer_object *bo,
                unsigned long size,
                enum ttm_bo_type type,
                struct ttm_placement *placement,
                uint32_t page_alignment,
                bool interruptible,
                struct file *persistent_swap_storage,
                size_t acc_size,
                struct sg_table *sg,
                struct reservation_object *resv,
                void (*destroy) (struct ttm_buffer_object *))
{
        struct ttm_operation_ctx ctx = { interruptible, false };
        int ret;

        ret = ttm_bo_init_reserved(bdev, bo, size, type, placement,
                                   page_alignment, &ctx,
                                   persistent_swap_storage, acc_size,
                                   sg, resv, destroy);
        if (ret)
                return ret;

        if (!resv)
                ttm_bo_unreserve(bo);

        return 0;
}
EXPORT_SYMBOL(ttm_bo_init);

size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
                       unsigned long bo_size,
                       unsigned struct_size)
{
        unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
        size_t size = 0;

        size += ttm_round_pot(struct_size);
        size += ttm_round_pot(npages * sizeof(void *));
        size += ttm_round_pot(sizeof(struct ttm_tt));
        return size;
}
EXPORT_SYMBOL(ttm_bo_acc_size);

size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
                           unsigned long bo_size,
                           unsigned struct_size)
{
        unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
        size_t size = 0;

        size += ttm_round_pot(struct_size);
        size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t)));
        size += ttm_round_pot(sizeof(struct ttm_dma_tt));
        return size;
}
EXPORT_SYMBOL(ttm_bo_dma_acc_size);

int ttm_bo_create(struct ttm_bo_device *bdev,
                        unsigned long size,
                        enum ttm_bo_type type,
                        struct ttm_placement *placement,
                        uint32_t page_alignment,
                        bool interruptible,
                        struct file *persistent_swap_storage,
                        struct ttm_buffer_object **p_bo)
{
        struct ttm_buffer_object *bo;
        size_t acc_size;
        int ret;

        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
        if (unlikely(bo == NULL))
                return -ENOMEM;

        acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
        ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
                          interruptible, persistent_swap_storage, acc_size,
                          NULL, NULL, NULL);
        if (likely(ret == 0))
                *p_bo = bo;

        return ret;
}
EXPORT_SYMBOL(ttm_bo_create);

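/*
 * Evict everything on @mem_type's LRU lists and then wait for the memory
 * type manager's last move fence, so the memory is idle afterwards.
 */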
1318 static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1319                                    unsigned mem_type)
1320 {
1321         struct ttm_operation_ctx ctx = { false, false };
1322         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1323         struct ttm_bo_global *glob = bdev->glob;
1324         struct dma_fence *fence;
1325         int ret;
1326         unsigned i;
1327
1328         /*
1329          * Can't use standard list traversal since we're unlocking.
1330          */
1331
1332         spin_lock(&glob->lru_lock);
1333         for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
1334                 while (!list_empty(&man->lru[i])) {
1335                         spin_unlock(&glob->lru_lock);
1336                         ret = ttm_mem_evict_first(bdev, NULL, mem_type,
1337                                                   NULL, &ctx);
1338                         if (ret)
1339                                 return ret;
1340                         spin_lock(&glob->lru_lock);
1341                 }
1342         }
1343         spin_unlock(&glob->lru_lock);
1344
1345         spin_lock(&man->move_lock);
1346         fence = dma_fence_get(man->move);
1347         spin_unlock(&man->move_lock);
1348
1349         if (fence) {
1350                 ret = dma_fence_wait(fence, false);
1351                 dma_fence_put(fence);
1352                 if (ret)
1353                         return ret;
1354         }
1355
1356         return 0;
1357 }
1358
1359 int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1360 {
1361         struct ttm_mem_type_manager *man;
1362         int ret = -EINVAL;
1363
1364         if (mem_type >= TTM_NUM_MEM_TYPES) {
1365                 pr_err("Illegal memory type %d\n", mem_type);
1366                 return ret;
1367         }
1368         man = &bdev->man[mem_type];
1369
1370         if (!man->has_type) {
1371                 pr_err("Trying to take down uninitialized memory manager type %u\n",
1372                        mem_type);
1373                 return ret;
1374         }
1375
1376         man->use_type = false;
1377         man->has_type = false;
1378
1379         ret = 0;
1380         if (mem_type > 0) {
1381                 ret = ttm_bo_force_list_clean(bdev, mem_type);
1382                 if (ret) {
1383                         pr_err("Cleanup eviction failed\n");
1384                         return ret;
1385                 }
1386
1387                 ret = (*man->func->takedown)(man);
1388         }
1389
1390         dma_fence_put(man->move);
1391         man->move = NULL;
1392
1393         return ret;
1394 }
1395 EXPORT_SYMBOL(ttm_bo_clean_mm);
1396
1397 int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1398 {
1399         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1400
1401         if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
1402                 pr_err("Illegal memory manager memory type %u\n", mem_type);
1403                 return -EINVAL;
1404         }
1405
1406         if (!man->has_type) {
1407                 pr_err("Memory type %u has not been initialized\n", mem_type);
1408                 return 0;
1409         }
1410
1411         return ttm_bo_force_list_clean(bdev, mem_type);
1412 }
1413 EXPORT_SYMBOL(ttm_bo_evict_mm);
1414
1415 int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1416                         unsigned long p_size)
1417 {
1418         int ret;
1419         struct ttm_mem_type_manager *man;
1420         unsigned i;
1421
1422         BUG_ON(type >= TTM_NUM_MEM_TYPES);
1423         man = &bdev->man[type];
1424         BUG_ON(man->has_type);
1425         man->io_reserve_fastpath = true;
1426         man->use_io_reserve_lru = false;
1427         mutex_init(&man->io_reserve_mutex);
1428         spin_lock_init(&man->move_lock);
1429         INIT_LIST_HEAD(&man->io_reserve_lru);
1430
1431         ret = bdev->driver->init_mem_type(bdev, type, man);
1432         if (ret)
1433                 return ret;
1434         man->bdev = bdev;
1435
1436         if (type != TTM_PL_SYSTEM) {
1437                 ret = (*man->func->init)(man, p_size);
1438                 if (ret)
1439                         return ret;
1440         }
1441         man->has_type = true;
1442         man->use_type = true;
1443         man->size = p_size;
1444
1445         for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
1446                 INIT_LIST_HEAD(&man->lru[i]);
1447         man->move = NULL;
1448
1449         return 0;
1450 }
1451 EXPORT_SYMBOL(ttm_bo_init_mm);
1452
1453 static void ttm_bo_global_kobj_release(struct kobject *kobj)
1454 {
1455         struct ttm_bo_global *glob =
1456                 container_of(kobj, struct ttm_bo_global, kobj);
1457
1458         ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
1459         __free_page(glob->dummy_read_page);
1460         kfree(glob);
1461 }
1462
1463 void ttm_bo_global_release(struct drm_global_reference *ref)
1464 {
1465         struct ttm_bo_global *glob = ref->object;
1466
1467         kobject_del(&glob->kobj);
1468         kobject_put(&glob->kobj);
1469 }
1470 EXPORT_SYMBOL(ttm_bo_global_release);
1471
1472 int ttm_bo_global_init(struct drm_global_reference *ref)
1473 {
1474         struct ttm_bo_global_ref *bo_ref =
1475                 container_of(ref, struct ttm_bo_global_ref, ref);
1476         struct ttm_bo_global *glob = ref->object;
1477         int ret;
1478         unsigned i;
1479
1480         mutex_init(&glob->device_list_mutex);
1481         spin_lock_init(&glob->lru_lock);
1482         glob->mem_glob = bo_ref->mem_glob;
1483         glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
1484
1485         if (unlikely(glob->dummy_read_page == NULL)) {
1486                 ret = -ENOMEM;
1487                 goto out_no_drp;
1488         }
1489
1490         for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
1491                 INIT_LIST_HEAD(&glob->swap_lru[i]);
1492         INIT_LIST_HEAD(&glob->device_list);
1493
1494         ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
1495         ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
1496         if (unlikely(ret != 0)) {
1497                 pr_err("Could not register buffer object swapout\n");
1498                 goto out_no_shrink;
1499         }
1500
1501         atomic_set(&glob->bo_count, 0);
1502
1503         ret = kobject_init_and_add(
1504                 &glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
1505         if (unlikely(ret != 0))
1506                 kobject_put(&glob->kobj);
1507         return ret;
1508 out_no_shrink:
1509         __free_page(glob->dummy_read_page);
1510 out_no_drp:
1511         kfree(glob);
1512         return ret;
1513 }
1514 EXPORT_SYMBOL(ttm_bo_global_init);
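
/*
 * Editorial example (sketch): drivers reach these init/release
 * routines through the drm_global machinery rather than calling them
 * directly, roughly as below; "my_dev" is hypothetical.
 *
 *	struct ttm_bo_global_ref *ref = &my_dev->bo_global_ref;
 *
 *	ref->mem_glob = my_dev->mem_global_ref.object;
 *	ref->ref.global_type = DRM_GLOBAL_TTM_BO;
 *	ref->ref.size = sizeof(struct ttm_bo_global);
 *	ref->ref.init = &ttm_bo_global_init;
 *	ref->ref.release = &ttm_bo_global_release;
 *	ret = drm_global_item_ref(&ref->ref);
 */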
1515
1517 int ttm_bo_device_release(struct ttm_bo_device *bdev)
1518 {
1519         int ret = 0;
1520         unsigned i = TTM_NUM_MEM_TYPES;
1521         struct ttm_mem_type_manager *man;
1522         struct ttm_bo_global *glob = bdev->glob;
1523
1524         while (i--) {
1525                 man = &bdev->man[i];
1526                 if (man->has_type) {
1527                         man->use_type = false;
1528                         if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
1529                                 ret = -EBUSY;
1530                                 pr_err("DRM memory manager type %d is not clean\n",
1531                                        i);
1532                         }
1533                         man->has_type = false;
1534                 }
1535         }
1536
1537         mutex_lock(&glob->device_list_mutex);
1538         list_del(&bdev->device_list);
1539         mutex_unlock(&glob->device_list_mutex);
1540
1541         cancel_delayed_work_sync(&bdev->wq);
1542
1543         if (ttm_bo_delayed_delete(bdev, true))
1544                 TTM_DEBUG("Delayed destroy list was clean\n");
1545
1546         spin_lock(&glob->lru_lock);
1547         for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
1548                 if (list_empty(&glob->swap_lru[i]))
1549                         TTM_DEBUG("Swap list %d was clean\n", i);
1550         spin_unlock(&glob->lru_lock);
1551
1552         drm_vma_offset_manager_destroy(&bdev->vma_manager);
1553
1554         return ret;
1555 }
1556 EXPORT_SYMBOL(ttm_bo_device_release);
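
/*
 * Editorial example (sketch): teardown is the mirror image of
 * bring-up; "my_dev" is a hypothetical driver object.
 *
 *	ttm_bo_device_release(&my_dev->bdev);
 *	drm_global_item_unref(&my_dev->bo_global_ref.ref);
 */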
1557
1558 int ttm_bo_device_init(struct ttm_bo_device *bdev,
1559                        struct ttm_bo_global *glob,
1560                        struct ttm_bo_driver *driver,
1561                        struct address_space *mapping,
1562                        uint64_t file_page_offset,
1563                        bool need_dma32)
1564 {
1565         int ret = -EINVAL;
1566
1567         bdev->driver = driver;
1568
1569         memset(bdev->man, 0, sizeof(bdev->man));
1570
1571         /*
1572          * Initialize the system memory buffer type.
1573          * Other types need to be driver / IOCTL initialized.
1574          */
1575         ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
1576         if (unlikely(ret != 0))
1577                 goto out_no_sys;
1578
1579         drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
1580                                     0x10000000);
1581         INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
1582         INIT_LIST_HEAD(&bdev->ddestroy);
1583         bdev->dev_mapping = mapping;
1584         bdev->glob = glob;
1585         bdev->need_dma32 = need_dma32;
1586         mutex_lock(&glob->device_list_mutex);
1587         list_add_tail(&bdev->device_list, &glob->device_list);
1588         mutex_unlock(&glob->device_list_mutex);
1589
1590         return 0;
1591 out_no_sys:
1592         return ret;
1593 }
1594 EXPORT_SYMBOL(ttm_bo_device_init);
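
/*
 * Editorial example (sketch): typical bring-up once the global state
 * has been referenced; "my_dev", "ddev", "my_bo_driver" and
 * DRM_FILE_PAGE_OFFSET are driver-defined and hypothetical here.
 *
 *	ret = ttm_bo_device_init(&my_dev->bdev,
 *				 my_dev->bo_global_ref.ref.object,
 *				 &my_bo_driver,
 *				 ddev->anon_inode->i_mapping,
 *				 DRM_FILE_PAGE_OFFSET,
 *				 my_dev->need_dma32);
 */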
1595
1596 /*
1597  * buffer object vm functions.
1598  */
1599
1600 bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1601 {
1602         struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1603
1604         if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
1605                 if (mem->mem_type == TTM_PL_SYSTEM)
1606                         return false;
1607
1608                 if (man->flags & TTM_MEMTYPE_FLAG_CMA)
1609                         return false;
1610
1611                 if (mem->placement & TTM_PL_FLAG_CACHED)
1612                         return false;
1613         }
1614         return true;
1615 }
1616
1617 void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
1618 {
1619         struct ttm_bo_device *bdev = bo->bdev;
1620
1621         drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
1622         ttm_mem_io_free_vm(bo);
1623 }
1624
1625 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1626 {
1627         struct ttm_bo_device *bdev = bo->bdev;
1628         struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
1629
1630         ttm_mem_io_lock(man, false);
1631         ttm_bo_unmap_virtual_locked(bo);
1632         ttm_mem_io_unlock(man);
1633 }
1634 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1637
1638 int ttm_bo_wait(struct ttm_buffer_object *bo,
1639                 bool interruptible, bool no_wait)
1640 {
1641         long timeout = 15 * HZ;
1642
1643         if (no_wait) {
1644                 if (reservation_object_test_signaled_rcu(bo->resv, true))
1645                         return 0;
1646                 else
1647                         return -EBUSY;
1648         }
1649
1650         timeout = reservation_object_wait_timeout_rcu(bo->resv, true,
1651                                                       interruptible, timeout);
1652         if (timeout < 0)
1653                 return timeout;
1654
1655         if (timeout == 0)
1656                 return -EBUSY;
1657
1658         reservation_object_add_excl_fence(bo->resv, NULL);
1659         return 0;
1660 }
1661 EXPORT_SYMBOL(ttm_bo_wait);
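
/*
 * Editorial example (sketch): an interruptible, blocking idle wait,
 * done while holding the reservation, mirroring the pattern used by
 * ttm_bo_synccpu_write_grab() below:
 *
 *	ret = ttm_bo_reserve(bo, true, false, NULL);
 *	if (ret)
 *		return ret;
 *	ret = ttm_bo_wait(bo, true, false);
 *	ttm_bo_unreserve(bo);
 */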
1662
1663 int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
1664 {
1665         int ret = 0;
1666
1667         /*
1668          * Using ttm_bo_reserve makes sure the lru lists are updated.
1669          */
1670
1671         ret = ttm_bo_reserve(bo, true, no_wait, NULL);
1672         if (unlikely(ret != 0))
1673                 return ret;
1674         ret = ttm_bo_wait(bo, true, no_wait);
1675         if (likely(ret == 0))
1676                 atomic_inc(&bo->cpu_writers);
1677         ttm_bo_unreserve(bo);
1678         return ret;
1679 }
1680 EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
1681
1682 void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
1683 {
1684         atomic_dec(&bo->cpu_writers);
1685 }
1686 EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
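
/*
 * Editorial example (sketch): bracketing a CPU write; while the
 * bo->cpu_writers count is elevated, drivers that check it can refuse
 * to evict or move the buffer:
 *
 *	ret = ttm_bo_synccpu_write_grab(bo, false);
 *	if (ret)
 *		return ret;
 *	... write through a CPU mapping of the buffer ...
 *	ttm_bo_synccpu_write_release(bo);
 */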
1687
1688 /*
1689  * A buffer object shrink method that tries to swap out the first
1690  * reservable buffer object on the ttm_bo_global::swap_lru lists.
1691  */
1692
1693 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1694 {
1695         struct ttm_bo_global *glob =
1696             container_of(shrink, struct ttm_bo_global, shrink);
1697         struct ttm_buffer_object *bo;
1698         int ret = -EBUSY;
1699         unsigned i;
1700
1701         spin_lock(&glob->lru_lock);
1702         for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
1703                 list_for_each_entry(bo, &glob->swap_lru[i], swap) {
1704                         ret = reservation_object_trylock(bo->resv) ? 0 : -EBUSY;
1705                         if (!ret)
1706                                 break;
1707                 }
1708                 if (!ret)
1709                         break;
1710         }
1711
1712         if (ret) {
1713                 spin_unlock(&glob->lru_lock);
1714                 return ret;
1715         }
1716
1717         kref_get(&bo->list_kref);
1718
1719         if (!list_empty(&bo->ddestroy)) {
1720                 ret = ttm_bo_cleanup_refs(bo, false, false, true);
1721                 kref_put(&bo->list_kref, ttm_bo_release_list);
1722                 return ret;
1723         }
1724
1725         ttm_bo_del_from_lru(bo);
1726         spin_unlock(&glob->lru_lock);
1727
1728         /*
1729          * Move the buffer to cached system memory.
1730          */
1731
1732         if (bo->mem.mem_type != TTM_PL_SYSTEM ||
1733             bo->ttm->caching_state != tt_cached) {
1734                 struct ttm_operation_ctx ctx = { false, false };
1735                 struct ttm_mem_reg evict_mem;
1736
1737                 evict_mem = bo->mem;
1738                 evict_mem.mm_node = NULL;
1739                 evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
1740                 evict_mem.mem_type = TTM_PL_SYSTEM;
1741
1742                 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx);
1743                 if (unlikely(ret != 0))
1744                         goto out;
1745         }
1746
1747         /*
1748          * Make sure BO is idle.
1749          */
1750
1751         ret = ttm_bo_wait(bo, false, false);
1752         if (unlikely(ret != 0))
1753                 goto out;
1754
1755         ttm_bo_unmap_virtual(bo);
1756
1757         /*
1758          * Swap out. Buffer will be swapped in again as soon as
1759          * anyone tries to access a ttm page.
1760          */
1761
1762         if (bo->bdev->driver->swap_notify)
1763                 bo->bdev->driver->swap_notify(bo);
1764
1765         ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
1766 out:
1767
1768         /*
1769          * Unreserve without putting on LRU to avoid swapping out an
1770          * already swapped buffer.
1771          */
1773
1774         reservation_object_unlock(bo->resv);
1775         kref_put(&bo->list_kref, ttm_bo_release_list);
1776         return ret;
1777 }
1778
1779 void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
1780 {
1781         while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
1782                 ;
1783 }
1784 EXPORT_SYMBOL(ttm_bo_swapout_all);
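
/*
 * Editorial example (sketch): suspend/hibernate paths can push all
 * buffer objects out to swap in a single call; "my_dev" is
 * hypothetical.
 *
 *	ttm_bo_swapout_all(&my_dev->bdev);
 */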
1785
1786 /**
1787  * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
1788  * unreserved
1789  *
1790  * @bo: Pointer to buffer
1791  */
1792 int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
1793 {
1794         int ret;
1795
1796         /*
1797          * In the absence of a wait_unlocked API, use the bo::wu_mutex
1798          * to avoid triggering livelocks due to concurrent use of this
1799          * function. Note that this use of bo::wu_mutex can go away if
1800          * we change the locking order to mmap_sem -> bo::reserve.
1801          */
1803         ret = mutex_lock_interruptible(&bo->wu_mutex);
1804         if (unlikely(ret != 0))
1805                 return -ERESTARTSYS;
1806         if (!ww_mutex_is_locked(&bo->resv->lock))
1807                 goto out_unlock;
1808         ret = reservation_object_lock_interruptible(bo->resv, NULL);
1809         if (ret == -EINTR)
1810                 ret = -ERESTARTSYS;
1811         if (unlikely(ret != 0))
1812                 goto out_unlock;
1813         reservation_object_unlock(bo->resv);
1814
1815 out_unlock:
1816         mutex_unlock(&bo->wu_mutex);
1817         return ret;
1818 }
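
/*
 * Editorial example (sketch): the expected caller is a CPU fault
 * handler that failed to trylock the reservation; it waits here with
 * mmap_sem dropped and asks for the fault to be retried, roughly the
 * pattern of ttm_bo_vm_fault():
 *
 *	if (!reservation_object_trylock(bo->resv)) {
 *		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
 *			ttm_bo_reference(bo);
 *			up_read(&vmf->vma->vm_mm->mmap_sem);
 *			(void)ttm_bo_wait_unreserved(bo);
 *			ttm_bo_unref(&bo);
 *			return VM_FAULT_RETRY;
 *		}
 *		return VM_FAULT_NOPAGE;
 *	}
 */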