1 /**************************************************************************
2  *
3  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 /*
28  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29  */
30
31 #define pr_fmt(fmt) "[TTM] " fmt
32
33 #include <drm/ttm/ttm_module.h>
34 #include <drm/ttm/ttm_bo_driver.h>
35 #include <drm/ttm/ttm_placement.h>
36 #include <linux/jiffies.h>
37 #include <linux/slab.h>
38 #include <linux/sched.h>
39 #include <linux/mm.h>
40 #include <linux/file.h>
41 #include <linux/module.h>
42 #include <linux/atomic.h>
43 #include <linux/reservation.h>
44
45 #define TTM_ASSERT_LOCKED(param)
46 #define TTM_DEBUG(fmt, arg...)
47 #define TTM_BO_HASH_ORDER 13
48
49 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
50 static void ttm_bo_global_kobj_release(struct kobject *kobj);
51
52 static struct attribute ttm_bo_count = {
53         .name = "bo_count",
54         .mode = S_IRUGO
55 };
56
57 static inline int ttm_mem_type_from_place(const struct ttm_place *place,
58                                           uint32_t *mem_type)
59 {
60         int pos;
61
62         pos = ffs(place->flags & TTM_PL_MASK_MEM);
63         if (unlikely(!pos))
64                 return -EINVAL;
65
66         *mem_type = pos - 1;
67         return 0;
68 }
69
70 static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
71 {
72         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
73
74         pr_err("    has_type: %d\n", man->has_type);
75         pr_err("    use_type: %d\n", man->use_type);
76         pr_err("    flags: 0x%08X\n", man->flags);
77         pr_err("    gpu_offset: 0x%08llX\n", man->gpu_offset);
78         pr_err("    size: %llu\n", man->size);
79         pr_err("    available_caching: 0x%08X\n", man->available_caching);
80         pr_err("    default_caching: 0x%08X\n", man->default_caching);
81         if (mem_type != TTM_PL_SYSTEM)
82                 (*man->func->debug)(man, TTM_PFX);
83 }
84
85 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
86                                         struct ttm_placement *placement)
87 {
88         int i, ret, mem_type;
89
90         pr_err("No space for %p (%lu pages, %luK, %luM)\n",
91                bo, bo->mem.num_pages, bo->mem.size >> 10,
92                bo->mem.size >> 20);
93         for (i = 0; i < placement->num_placement; i++) {
94                 ret = ttm_mem_type_from_place(&placement->placement[i],
95                                                 &mem_type);
96                 if (ret)
97                         return;
98                 pr_err("  placement[%d]=0x%08X (%d)\n",
99                        i, placement->placement[i].flags, mem_type);
100                 ttm_mem_type_debug(bo->bdev, mem_type);
101         }
102 }
103
104 static ssize_t ttm_bo_global_show(struct kobject *kobj,
105                                   struct attribute *attr,
106                                   char *buffer)
107 {
108         struct ttm_bo_global *glob =
109                 container_of(kobj, struct ttm_bo_global, kobj);
110
111         return snprintf(buffer, PAGE_SIZE, "%lu\n",
112                         (unsigned long) atomic_read(&glob->bo_count));
113 }
114
115 static struct attribute *ttm_bo_global_attrs[] = {
116         &ttm_bo_count,
117         NULL
118 };
119
120 static const struct sysfs_ops ttm_bo_global_ops = {
121         .show = &ttm_bo_global_show
122 };
123
124 static struct kobj_type ttm_bo_glob_kobj_type  = {
125         .release = &ttm_bo_global_kobj_release,
126         .sysfs_ops = &ttm_bo_global_ops,
127         .default_attrs = ttm_bo_global_attrs
128 };
129
130
131 static inline uint32_t ttm_bo_type_flags(unsigned type)
132 {
133         return 1 << (type);
134 }
135
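/*
 * Final release of a buffer object, called when the last list_kref
 * reference is dropped: destroy the ttm, free the object itself and
 * return its accounted size to the memory global.
 */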
136 static void ttm_bo_release_list(struct kref *list_kref)
137 {
138         struct ttm_buffer_object *bo =
139             container_of(list_kref, struct ttm_buffer_object, list_kref);
140         struct ttm_bo_device *bdev = bo->bdev;
141         size_t acc_size = bo->acc_size;
142
143         BUG_ON(kref_read(&bo->list_kref));
144         BUG_ON(kref_read(&bo->kref));
145         BUG_ON(atomic_read(&bo->cpu_writers));
146         BUG_ON(bo->mem.mm_node != NULL);
147         BUG_ON(!list_empty(&bo->lru));
148         BUG_ON(!list_empty(&bo->ddestroy));
149         ttm_tt_destroy(bo->ttm);
150         atomic_dec(&bo->glob->bo_count);
151         dma_fence_put(bo->moving);
152         if (bo->resv == &bo->ttm_resv)
153                 reservation_object_fini(&bo->ttm_resv);
154         mutex_destroy(&bo->wu_mutex);
155         if (bo->destroy)
156                 bo->destroy(bo);
157         else {
158                 kfree(bo);
159         }
160         ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
161 }
162
163 void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
164 {
165         struct ttm_bo_device *bdev = bo->bdev;
166
167         lockdep_assert_held(&bo->resv->lock.base);
168
169         if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
170
171                 BUG_ON(!list_empty(&bo->lru));
172
173                 list_add(&bo->lru, bdev->driver->lru_tail(bo));
174                 kref_get(&bo->list_kref);
175
176                 if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
177                         list_add(&bo->swap, bdev->driver->swap_lru_tail(bo));
178                         kref_get(&bo->list_kref);
179                 }
180         }
181 }
182 EXPORT_SYMBOL(ttm_bo_add_to_lru);
183
184 static void ttm_bo_ref_bug(struct kref *list_kref)
185 {
186         BUG();
187 }
188
189 void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
190 {
191         struct ttm_bo_device *bdev = bo->bdev;
192
193         if (bdev->driver->lru_removal)
194                 bdev->driver->lru_removal(bo);
195
196         if (!list_empty(&bo->swap)) {
197                 list_del_init(&bo->swap);
198                 kref_put(&bo->list_kref, ttm_bo_ref_bug);
199         }
200         if (!list_empty(&bo->lru)) {
201                 list_del_init(&bo->lru);
202                 kref_put(&bo->list_kref, ttm_bo_ref_bug);
203         }
204 }
205
206 void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
207 {
208         spin_lock(&bo->glob->lru_lock);
209         ttm_bo_del_from_lru(bo);
210         spin_unlock(&bo->glob->lru_lock);
211 }
212 EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
213
214 void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
215 {
216         struct ttm_bo_device *bdev = bo->bdev;
217
218         lockdep_assert_held(&bo->resv->lock.base);
219
220         if (bdev->driver->lru_removal)
221                 bdev->driver->lru_removal(bo);
222
223         ttm_bo_del_from_lru(bo);
224         ttm_bo_add_to_lru(bo);
225 }
226 EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
227
228 struct list_head *ttm_bo_default_lru_tail(struct ttm_buffer_object *bo)
229 {
230         return bo->bdev->man[bo->mem.mem_type].lru.prev;
231 }
232 EXPORT_SYMBOL(ttm_bo_default_lru_tail);
233
234 struct list_head *ttm_bo_default_swap_lru_tail(struct ttm_buffer_object *bo)
235 {
236         return bo->glob->swap_lru.prev;
237 }
238 EXPORT_SYMBOL(ttm_bo_default_swap_lru_tail);
239
240 /*
241  * Must be called with bo->mutex held.
242  */
243 static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
244 {
245         struct ttm_bo_device *bdev = bo->bdev;
246         struct ttm_bo_global *glob = bo->glob;
247         int ret = 0;
248         uint32_t page_flags = 0;
249
250         TTM_ASSERT_LOCKED(&bo->mutex);
251         bo->ttm = NULL;
252
253         if (bdev->need_dma32)
254                 page_flags |= TTM_PAGE_FLAG_DMA32;
255
256         switch (bo->type) {
257         case ttm_bo_type_device:
258                 if (zero_alloc)
259                         page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
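                /* fall through */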
260         case ttm_bo_type_kernel:
261                 bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
262                                                       page_flags, glob->dummy_read_page);
263                 if (unlikely(bo->ttm == NULL))
264                         ret = -ENOMEM;
265                 break;
266         case ttm_bo_type_sg:
267                 bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
268                                                       page_flags | TTM_PAGE_FLAG_SG,
269                                                       glob->dummy_read_page);
270                 if (unlikely(bo->ttm == NULL)) {
271                         ret = -ENOMEM;
272                         break;
273                 }
274                 bo->ttm->sg = bo->sg;
275                 break;
276         default:
277                 pr_err("Illegal buffer object type\n");
278                 ret = -EINVAL;
279                 break;
280         }
281
282         return ret;
283 }
284
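/*
 * Move the buffer to the memory region described by @mem: tear down CPU
 * mappings when needed, create and bind a ttm for non-fixed destinations,
 * and dispatch the actual copy to ttm_bo_move_ttm(), the driver's move()
 * hook or ttm_bo_move_memcpy().
 */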
285 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
286                                   struct ttm_mem_reg *mem,
287                                   bool evict, bool interruptible,
288                                   bool no_wait_gpu)
289 {
290         struct ttm_bo_device *bdev = bo->bdev;
291         bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
292         bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
293         struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
294         struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
295         int ret = 0;
296
297         if (old_is_pci || new_is_pci ||
298             ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
299                 ret = ttm_mem_io_lock(old_man, true);
300                 if (unlikely(ret != 0))
301                         goto out_err;
302                 ttm_bo_unmap_virtual_locked(bo);
303                 ttm_mem_io_unlock(old_man);
304         }
305
306         /*
307          * Create and bind a ttm if required.
308          */
309
310         if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
311                 if (bo->ttm == NULL) {
312                         bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
313                         ret = ttm_bo_add_ttm(bo, zero);
314                         if (ret)
315                                 goto out_err;
316                 }
317
318                 ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
319                 if (ret)
320                         goto out_err;
321
322                 if (mem->mem_type != TTM_PL_SYSTEM) {
323                         ret = ttm_tt_bind(bo->ttm, mem);
324                         if (ret)
325                                 goto out_err;
326                 }
327
328                 if (bo->mem.mem_type == TTM_PL_SYSTEM) {
329                         if (bdev->driver->move_notify)
330                                 bdev->driver->move_notify(bo, mem);
331                         bo->mem = *mem;
332                         mem->mm_node = NULL;
333                         goto moved;
334                 }
335         }
336
337         if (bdev->driver->move_notify)
338                 bdev->driver->move_notify(bo, mem);
339
340         if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
341             !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
342                 ret = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, mem);
343         else if (bdev->driver->move)
344                 ret = bdev->driver->move(bo, evict, interruptible,
345                                          no_wait_gpu, mem);
346         else
347                 ret = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, mem);
348
349         if (ret) {
350                 if (bdev->driver->move_notify) {
351                         struct ttm_mem_reg tmp_mem = *mem;
352                         *mem = bo->mem;
353                         bo->mem = tmp_mem;
354                         bdev->driver->move_notify(bo, mem);
355                         bo->mem = *mem;
356                         *mem = tmp_mem;
357                 }
358
359                 goto out_err;
360         }
361
362 moved:
363         if (bo->evicted) {
364                 if (bdev->driver->invalidate_caches) {
365                         ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
366                         if (ret)
367                                 pr_err("Can not flush read caches\n");
368                 }
369                 bo->evicted = false;
370         }
371
372         if (bo->mem.mm_node) {
373                 bo->offset = (bo->mem.start << PAGE_SHIFT) +
374                     bdev->man[bo->mem.mem_type].gpu_offset;
375                 bo->cur_placement = bo->mem.placement;
376         } else
377                 bo->offset = 0;
378
379         return 0;
380
381 out_err:
382         new_man = &bdev->man[bo->mem.mem_type];
383         if (new_man->flags & TTM_MEMTYPE_FLAG_FIXED) {
384                 ttm_tt_destroy(bo->ttm);
385                 bo->ttm = NULL;
386         }
387
388         return ret;
389 }
390
391 /**
392  * Must be called with the bo reserved (bo->resv held).
393  * Will release GPU memory type usage on destruction.
394  * This is the place to put in driver specific hooks to release
395  * driver private resources.
396  * Will release the bo->resv reservation lock.
397  */
398
399 static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
400 {
401         if (bo->bdev->driver->move_notify)
402                 bo->bdev->driver->move_notify(bo, NULL);
403
404         ttm_tt_destroy(bo->ttm);
405         bo->ttm = NULL;
406         ttm_bo_mem_put(bo, &bo->mem);
407
408         ww_mutex_unlock(&bo->resv->lock);
409 }
410
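/*
 * Enable software signaling on the exclusive and all shared fences of
 * the bo's reservation object, so that the delayed-destroy path is
 * notified once the bo becomes idle.
 */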
411 static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
412 {
413         struct reservation_object_list *fobj;
414         struct dma_fence *fence;
415         int i;
416
417         fobj = reservation_object_get_list(bo->resv);
418         fence = reservation_object_get_excl(bo->resv);
419         if (fence && !fence->ops->signaled)
420                 dma_fence_enable_sw_signaling(fence);
421
422         for (i = 0; fobj && i < fobj->shared_count; ++i) {
423                 fence = rcu_dereference_protected(fobj->shared[i],
424                                         reservation_object_held(bo->resv));
425
426                 if (!fence->ops->signaled)
427                         dma_fence_enable_sw_signaling(fence);
428         }
429 }
430
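/*
 * Either clean up the bo immediately (if it can be reserved without
 * blocking and is already idle), or queue it on the device's
 * delayed-destroy list and schedule the delayed-delete workqueue.
 */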
431 static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
432 {
433         struct ttm_bo_device *bdev = bo->bdev;
434         struct ttm_bo_global *glob = bo->glob;
435         int ret;
436
437         spin_lock(&glob->lru_lock);
438         ret = __ttm_bo_reserve(bo, false, true, NULL);
439
440         if (!ret) {
441                 if (!ttm_bo_wait(bo, false, true)) {
442                         ttm_bo_del_from_lru(bo);
443                         spin_unlock(&glob->lru_lock);
444                         ttm_bo_cleanup_memtype_use(bo);
445
446                         return;
447                 } else
448                         ttm_bo_flush_all_fences(bo);
449
450                 /*
451                  * Make NO_EVICT bos immediately available to
452                  * shrinkers, now that they are queued for
453                  * destruction.
454                  */
455                 if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
456                         bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
457                         ttm_bo_add_to_lru(bo);
458                 }
459
460                 __ttm_bo_unreserve(bo);
461         }
462
463         kref_get(&bo->list_kref);
464         list_add_tail(&bo->ddestroy, &bdev->ddestroy);
465         spin_unlock(&glob->lru_lock);
466
467         schedule_delayed_work(&bdev->wq,
468                               ((HZ / 100) < 1) ? 1 : HZ / 100);
469 }
470
471 /**
472  * ttm_bo_cleanup_refs_and_unlock - clean up a buffer object pending destruction
473  * @bo: The buffer object to clean up.
474  * @interruptible: Any sleeps should occur interruptibly.
475  * @no_wait_gpu: Never wait for the GPU; return -EBUSY instead.
476  *
477  * If the bo is idle, remove it from the delayed-destroy and LRU lists
478  * and drop the corresponding list references; if it is not idle, do nothing.
479  * Must be called with lru_lock and the reservation held; this function
480  * will drop both before returning.
481  */
482
483 static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
484                                           bool interruptible,
485                                           bool no_wait_gpu)
486 {
487         struct ttm_bo_global *glob = bo->glob;
488         int ret;
489
490         ret = ttm_bo_wait(bo, false, true);
491
492         if (ret && !no_wait_gpu) {
493                 long lret;
494                 ww_mutex_unlock(&bo->resv->lock);
495                 spin_unlock(&glob->lru_lock);
496
497                 lret = reservation_object_wait_timeout_rcu(bo->resv,
498                                                            true,
499                                                            interruptible,
500                                                            30 * HZ);
501
502                 if (lret < 0)
503                         return lret;
504                 else if (lret == 0)
505                         return -EBUSY;
506
507                 spin_lock(&glob->lru_lock);
508                 ret = __ttm_bo_reserve(bo, false, true, NULL);
509
510                 /*
511                  * We raced, and lost, someone else holds the reservation now,
512                  * and is probably busy in ttm_bo_cleanup_memtype_use.
513                  *
514                  * Even if it's not the case, because we finished waiting any
515                  * delayed destruction would succeed, so just return success
516                  * here.
517                  */
518                 if (ret) {
519                         spin_unlock(&glob->lru_lock);
520                         return 0;
521                 }
522
523                 /*
524                  * remove sync_obj with ttm_bo_wait, the wait should be
525                  * finished, and no new wait object should have been added.
526                  */
527                 ret = ttm_bo_wait(bo, false, true);
528                 WARN_ON(ret);
529         }
530
531         if (ret || unlikely(list_empty(&bo->ddestroy))) {
532                 __ttm_bo_unreserve(bo);
533                 spin_unlock(&glob->lru_lock);
534                 return ret;
535         }
536
537         ttm_bo_del_from_lru(bo);
538         list_del_init(&bo->ddestroy);
539         kref_put(&bo->list_kref, ttm_bo_ref_bug);
540
541         spin_unlock(&glob->lru_lock);
542         ttm_bo_cleanup_memtype_use(bo);
543
544         return 0;
545 }
546
547 /**
548  * Traverse the delayed-destroy list and call ttm_bo_cleanup_refs_and_unlock
549  * on all encountered buffers.
550  */
551
552 static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
553 {
554         struct ttm_bo_global *glob = bdev->glob;
555         struct ttm_buffer_object *entry = NULL;
556         int ret = 0;
557
558         spin_lock(&glob->lru_lock);
559         if (list_empty(&bdev->ddestroy))
560                 goto out_unlock;
561
562         entry = list_first_entry(&bdev->ddestroy,
563                 struct ttm_buffer_object, ddestroy);
564         kref_get(&entry->list_kref);
565
566         for (;;) {
567                 struct ttm_buffer_object *nentry = NULL;
568
569                 if (entry->ddestroy.next != &bdev->ddestroy) {
570                         nentry = list_first_entry(&entry->ddestroy,
571                                 struct ttm_buffer_object, ddestroy);
572                         kref_get(&nentry->list_kref);
573                 }
574
575                 ret = __ttm_bo_reserve(entry, false, true, NULL);
576                 if (remove_all && ret) {
577                         spin_unlock(&glob->lru_lock);
578                         ret = __ttm_bo_reserve(entry, false, false, NULL);
579                         spin_lock(&glob->lru_lock);
580                 }
581
582                 if (!ret)
583                         ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
584                                                              !remove_all);
585                 else
586                         spin_unlock(&glob->lru_lock);
587
588                 kref_put(&entry->list_kref, ttm_bo_release_list);
589                 entry = nentry;
590
591                 if (ret || !entry)
592                         goto out;
593
594                 spin_lock(&glob->lru_lock);
595                 if (list_empty(&entry->ddestroy))
596                         break;
597         }
598
599 out_unlock:
600         spin_unlock(&glob->lru_lock);
601 out:
602         if (entry)
603                 kref_put(&entry->list_kref, ttm_bo_release_list);
604         return ret;
605 }
606
607 static void ttm_bo_delayed_workqueue(struct work_struct *work)
608 {
609         struct ttm_bo_device *bdev =
610             container_of(work, struct ttm_bo_device, wq.work);
611
612         if (ttm_bo_delayed_delete(bdev, false)) {
613                 schedule_delayed_work(&bdev->wq,
614                                       ((HZ / 100) < 1) ? 1 : HZ / 100);
615         }
616 }
617
618 static void ttm_bo_release(struct kref *kref)
619 {
620         struct ttm_buffer_object *bo =
621             container_of(kref, struct ttm_buffer_object, kref);
622         struct ttm_bo_device *bdev = bo->bdev;
623         struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
624
625         drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
626         ttm_mem_io_lock(man, false);
627         ttm_mem_io_free_vm(bo);
628         ttm_mem_io_unlock(man);
629         ttm_bo_cleanup_refs_or_queue(bo);
630         kref_put(&bo->list_kref, ttm_bo_release_list);
631 }
632
633 void ttm_bo_unref(struct ttm_buffer_object **p_bo)
634 {
635         struct ttm_buffer_object *bo = *p_bo;
636
637         *p_bo = NULL;
638         kref_put(&bo->kref, ttm_bo_release);
639 }
640 EXPORT_SYMBOL(ttm_bo_unref);
641
642 int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
643 {
644         return cancel_delayed_work_sync(&bdev->wq);
645 }
646 EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
647
648 void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
649 {
650         if (resched)
651                 schedule_delayed_work(&bdev->wq,
652                                       ((HZ / 100) < 1) ? 1 : HZ / 100);
653 }
654 EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
655
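/*
 * Evict a single buffer object: ask the driver for its eviction
 * placements, find space there and move the bo, marking it as evicted.
 */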
656 static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
657                         bool no_wait_gpu)
658 {
659         struct ttm_bo_device *bdev = bo->bdev;
660         struct ttm_mem_reg evict_mem;
661         struct ttm_placement placement;
662         int ret = 0;
663
664         lockdep_assert_held(&bo->resv->lock.base);
665
666         evict_mem = bo->mem;
667         evict_mem.mm_node = NULL;
668         evict_mem.bus.io_reserved_vm = false;
669         evict_mem.bus.io_reserved_count = 0;
670
671         placement.num_placement = 0;
672         placement.num_busy_placement = 0;
673         bdev->driver->evict_flags(bo, &placement);
674         ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
675                                 no_wait_gpu);
676         if (ret) {
677                 if (ret != -ERESTARTSYS) {
678                         pr_err("Failed to find memory space for buffer 0x%p eviction\n",
679                                bo);
680                         ttm_bo_mem_space_debug(bo, &placement);
681                 }
682                 goto out;
683         }
684
685         ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
686                                      no_wait_gpu);
687         if (unlikely(ret)) {
688                 if (ret != -ERESTARTSYS)
689                         pr_err("Buffer eviction failed\n");
690                 ttm_bo_mem_put(bo, &evict_mem);
691                 goto out;
692         }
693         bo->evicted = true;
694 out:
695         return ret;
696 }
697
698 bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
699                               const struct ttm_place *place)
700 {
701         /* Don't evict this BO if it's outside of the
702          * requested placement range (all values are in pages).
703          */
704         if (place->fpfn >= (bo->mem.start + bo->mem.num_pages) ||
705             (place->lpfn && place->lpfn <= bo->mem.start))
706                 return false;
707
708         return true;
709 }
710 EXPORT_SYMBOL(ttm_bo_eviction_valuable);
711
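/*
 * Evict the first buffer object on the LRU list of @mem_type that can be
 * reserved without blocking and, when @place is given, that the driver
 * considers worth evicting for it.
 */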
712 static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
713                                 uint32_t mem_type,
714                                 const struct ttm_place *place,
715                                 bool interruptible,
716                                 bool no_wait_gpu)
717 {
718         struct ttm_bo_global *glob = bdev->glob;
719         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
720         struct ttm_buffer_object *bo;
721         int ret = -EBUSY;
722
723         spin_lock(&glob->lru_lock);
724         list_for_each_entry(bo, &man->lru, lru) {
725                 ret = __ttm_bo_reserve(bo, false, true, NULL);
726                 if (ret)
727                         continue;
728
729                 if (place && !bdev->driver->eviction_valuable(bo, place)) {
730                         __ttm_bo_unreserve(bo);
731                         ret = -EBUSY;
732                         continue;
733                 }
734
735                 break;
736         }
737
738         if (ret) {
739                 spin_unlock(&glob->lru_lock);
740                 return ret;
741         }
742
743         kref_get(&bo->list_kref);
744
745         if (!list_empty(&bo->ddestroy)) {
746                 ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
747                                                      no_wait_gpu);
748                 kref_put(&bo->list_kref, ttm_bo_release_list);
749                 return ret;
750         }
751
752         ttm_bo_del_from_lru(bo);
753         spin_unlock(&glob->lru_lock);
754
755         BUG_ON(ret != 0);
756
757         ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
758         ttm_bo_unreserve(bo);
759
760         kref_put(&bo->list_kref, ttm_bo_release_list);
761         return ret;
762 }
763
764 void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
765 {
766         struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
767
768         if (mem->mm_node)
769                 (*man->func->put_node)(man, mem);
770 }
771 EXPORT_SYMBOL(ttm_bo_mem_put);
772
773 /**
774  * Add the last move fence to the BO and reserve a new shared slot.
775  */
776 static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
777                                  struct ttm_mem_type_manager *man,
778                                  struct ttm_mem_reg *mem)
779 {
780         struct dma_fence *fence;
781         int ret;
782
783         spin_lock(&man->move_lock);
784         fence = dma_fence_get(man->move);
785         spin_unlock(&man->move_lock);
786
787         if (fence) {
788                 reservation_object_add_shared_fence(bo->resv, fence);
789
790                 ret = reservation_object_reserve_shared(bo->resv);
791                 if (unlikely(ret))
792                         return ret;
793
794                 dma_fence_put(bo->moving);
795                 bo->moving = fence;
796         }
797
798         return 0;
799 }
800
801 /**
802  * Repeatedly evict memory from the LRU for @mem_type until we create enough
803  * space, or we've evicted everything and there isn't enough space.
804  */
805 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
806                                         uint32_t mem_type,
807                                         const struct ttm_place *place,
808                                         struct ttm_mem_reg *mem,
809                                         bool interruptible,
810                                         bool no_wait_gpu)
811 {
812         struct ttm_bo_device *bdev = bo->bdev;
813         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
814         int ret;
815
816         do {
817                 ret = (*man->func->get_node)(man, bo, place, mem);
818                 if (unlikely(ret != 0))
819                         return ret;
820                 if (mem->mm_node)
821                         break;
822                 ret = ttm_mem_evict_first(bdev, mem_type, place,
823                                           interruptible, no_wait_gpu);
824                 if (unlikely(ret != 0))
825                         return ret;
826         } while (1);
827         mem->mem_type = mem_type;
828         return ttm_bo_add_move_fence(bo, man, mem);
829 }
830
831 static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
832                                       uint32_t cur_placement,
833                                       uint32_t proposed_placement)
834 {
835         uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
836         uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
837
838         /*
839          * Keep current caching if possible.
840          */
841
842         if ((cur_placement & caching) != 0)
843                 result |= (cur_placement & caching);
844         else if ((man->default_caching & caching) != 0)
845                 result |= man->default_caching;
846         else if ((TTM_PL_FLAG_CACHED & caching) != 0)
847                 result |= TTM_PL_FLAG_CACHED;
848         else if ((TTM_PL_FLAG_WC & caching) != 0)
849                 result |= TTM_PL_FLAG_WC;
850         else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
851                 result |= TTM_PL_FLAG_UNCACHED;
852
853         return result;
854 }
855
856 static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
857                                  uint32_t mem_type,
858                                  const struct ttm_place *place,
859                                  uint32_t *masked_placement)
860 {
861         uint32_t cur_flags = ttm_bo_type_flags(mem_type);
862
863         if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
864                 return false;
865
866         if ((place->flags & man->available_caching) == 0)
867                 return false;
868
869         cur_flags |= (place->flags & man->available_caching);
870
871         *masked_placement = cur_flags;
872         return true;
873 }
874
875 /**
876  * Creates space for memory region @mem according to its type.
877  *
878  * This function first searches for free space in compatible memory types in
879  * the priority order defined by the driver.  If free space isn't found, then
880  * ttm_bo_mem_force_space is attempted in priority order to evict and find
881  * space.
882  */
883 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
884                         struct ttm_placement *placement,
885                         struct ttm_mem_reg *mem,
886                         bool interruptible,
887                         bool no_wait_gpu)
888 {
889         struct ttm_bo_device *bdev = bo->bdev;
890         struct ttm_mem_type_manager *man;
891         uint32_t mem_type = TTM_PL_SYSTEM;
892         uint32_t cur_flags = 0;
893         bool type_found = false;
894         bool type_ok = false;
895         bool has_erestartsys = false;
896         int i, ret;
897
898         ret = reservation_object_reserve_shared(bo->resv);
899         if (unlikely(ret))
900                 return ret;
901
902         mem->mm_node = NULL;
903         for (i = 0; i < placement->num_placement; ++i) {
904                 const struct ttm_place *place = &placement->placement[i];
905
906                 ret = ttm_mem_type_from_place(place, &mem_type);
907                 if (ret)
908                         return ret;
909                 man = &bdev->man[mem_type];
910                 if (!man->has_type || !man->use_type)
911                         continue;
912
913                 type_ok = ttm_bo_mt_compatible(man, mem_type, place,
914                                                 &cur_flags);
915
916                 if (!type_ok)
917                         continue;
918
919                 type_found = true;
920                 cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
921                                                   cur_flags);
922                 /*
923                  * Copy the access and other non-mapping-related flag bits from
924                  * the memory placement flags into the current flags.
925                  */
926                 ttm_flag_masked(&cur_flags, place->flags,
927                                 ~TTM_PL_MASK_MEMTYPE);
928
929                 if (mem_type == TTM_PL_SYSTEM)
930                         break;
931
932                 ret = (*man->func->get_node)(man, bo, place, mem);
933                 if (unlikely(ret))
934                         return ret;
935
936                 if (mem->mm_node) {
937                         ret = ttm_bo_add_move_fence(bo, man, mem);
938                         if (unlikely(ret)) {
939                                 (*man->func->put_node)(man, mem);
940                                 return ret;
941                         }
942                         break;
943                 }
944         }
945
946         if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
947                 mem->mem_type = mem_type;
948                 mem->placement = cur_flags;
949                 return 0;
950         }
951
952         for (i = 0; i < placement->num_busy_placement; ++i) {
953                 const struct ttm_place *place = &placement->busy_placement[i];
954
955                 ret = ttm_mem_type_from_place(place, &mem_type);
956                 if (ret)
957                         return ret;
958                 man = &bdev->man[mem_type];
959                 if (!man->has_type || !man->use_type)
960                         continue;
961                 if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
962                         continue;
963
964                 type_found = true;
965                 cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
966                                                   cur_flags);
967                 /*
968                  * Copy the access and other non-mapping-related flag bits from
969                  * the memory placement flags into the current flags.
970                  */
971                 ttm_flag_masked(&cur_flags, place->flags,
972                                 ~TTM_PL_MASK_MEMTYPE);
973
974                 if (mem_type == TTM_PL_SYSTEM) {
975                         mem->mem_type = mem_type;
976                         mem->placement = cur_flags;
977                         mem->mm_node = NULL;
978                         return 0;
979                 }
980
981                 ret = ttm_bo_mem_force_space(bo, mem_type, place, mem,
982                                                 interruptible, no_wait_gpu);
983                 if (ret == 0 && mem->mm_node) {
984                         mem->placement = cur_flags;
985                         return 0;
986                 }
987                 if (ret == -ERESTARTSYS)
988                         has_erestartsys = true;
989         }
990
991         if (!type_found) {
992                 pr_err("No compatible memory type found.\n");
993                 return -EINVAL;
994         }
995
996         return (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
997 }
998 EXPORT_SYMBOL(ttm_bo_mem_space);
999
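/*
 * Find a memory region that satisfies @placement and move the buffer
 * there.
 */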
1000 static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
1001                         struct ttm_placement *placement,
1002                         bool interruptible,
1003                         bool no_wait_gpu)
1004 {
1005         int ret = 0;
1006         struct ttm_mem_reg mem;
1007
1008         lockdep_assert_held(&bo->resv->lock.base);
1009
1010         mem.num_pages = bo->num_pages;
1011         mem.size = mem.num_pages << PAGE_SHIFT;
1012         mem.page_alignment = bo->mem.page_alignment;
1013         mem.bus.io_reserved_vm = false;
1014         mem.bus.io_reserved_count = 0;
1015         /*
1016          * Determine where to move the buffer.
1017          */
1018         ret = ttm_bo_mem_space(bo, placement, &mem,
1019                                interruptible, no_wait_gpu);
1020         if (ret)
1021                 goto out_unlock;
1022         ret = ttm_bo_handle_move_mem(bo, &mem, false,
1023                                      interruptible, no_wait_gpu);
1024 out_unlock:
1025         if (ret && mem.mm_node)
1026                 ttm_bo_mem_put(bo, &mem);
1027         return ret;
1028 }
1029
1030 bool ttm_bo_mem_compat(struct ttm_placement *placement,
1031                        struct ttm_mem_reg *mem,
1032                        uint32_t *new_flags)
1033 {
1034         int i;
1035
1036         for (i = 0; i < placement->num_placement; i++) {
1037                 const struct ttm_place *heap = &placement->placement[i];
1038                 if (mem->mm_node &&
1039                     (mem->start < heap->fpfn ||
1040                      (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
1041                         continue;
1042
1043                 *new_flags = heap->flags;
1044                 if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
1045                     (*new_flags & mem->placement & TTM_PL_MASK_MEM))
1046                         return true;
1047         }
1048
1049         for (i = 0; i < placement->num_busy_placement; i++) {
1050                 const struct ttm_place *heap = &placement->busy_placement[i];
1051                 if (mem->mm_node &&
1052                     (mem->start < heap->fpfn ||
1053                      (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
1054                         continue;
1055
1056                 *new_flags = heap->flags;
1057                 if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
1058                     (*new_flags & mem->placement & TTM_PL_MASK_MEM))
1059                         return true;
1060         }
1061
1062         return false;
1063 }
1064 EXPORT_SYMBOL(ttm_bo_mem_compat);
1065
1066 int ttm_bo_validate(struct ttm_buffer_object *bo,
1067                         struct ttm_placement *placement,
1068                         bool interruptible,
1069                         bool no_wait_gpu)
1070 {
1071         int ret;
1072         uint32_t new_flags;
1073
1074         lockdep_assert_held(&bo->resv->lock.base);
1075         /*
1076          * Check whether we need to move buffer.
1077          */
1078         if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
1079                 ret = ttm_bo_move_buffer(bo, placement, interruptible,
1080                                          no_wait_gpu);
1081                 if (ret)
1082                         return ret;
1083         } else {
1084                 /*
1085                  * Copy the access and other non-mapping-related flag bits from
1086                  * the compatible memory placement flags into the active flags.
1087                  */
1088                 ttm_flag_masked(&bo->mem.placement, new_flags,
1089                                 ~TTM_PL_MASK_MEMTYPE);
1090         }
1091         /*
1092          * We might need to add a TTM.
1093          */
1094         if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
1095                 ret = ttm_bo_add_ttm(bo, true);
1096                 if (ret)
1097                         return ret;
1098         }
1099         return 0;
1100 }
1101 EXPORT_SYMBOL(ttm_bo_validate);
1102
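/*
 * Initialize a driver-allocated buffer object: account its size, set up
 * the list heads and reservation object, optionally allocate a mmap
 * offset, then reserve and validate it into its initial placement.
 */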
1103 int ttm_bo_init(struct ttm_bo_device *bdev,
1104                 struct ttm_buffer_object *bo,
1105                 unsigned long size,
1106                 enum ttm_bo_type type,
1107                 struct ttm_placement *placement,
1108                 uint32_t page_alignment,
1109                 bool interruptible,
1110                 struct file *persistent_swap_storage,
1111                 size_t acc_size,
1112                 struct sg_table *sg,
1113                 struct reservation_object *resv,
1114                 void (*destroy) (struct ttm_buffer_object *))
1115 {
1116         int ret = 0;
1117         unsigned long num_pages;
1118         struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
1119         bool locked;
1120
1121         ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
1122         if (ret) {
1123                 pr_err("Out of kernel memory\n");
1124                 if (destroy)
1125                         (*destroy)(bo);
1126                 else
1127                         kfree(bo);
1128                 return -ENOMEM;
1129         }
1130
1131         num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1132         if (num_pages == 0) {
1133                 pr_err("Illegal buffer object size\n");
1134                 if (destroy)
1135                         (*destroy)(bo);
1136                 else
1137                         kfree(bo);
1138                 ttm_mem_global_free(mem_glob, acc_size);
1139                 return -EINVAL;
1140         }
1141         bo->destroy = destroy;
1142
1143         kref_init(&bo->kref);
1144         kref_init(&bo->list_kref);
1145         atomic_set(&bo->cpu_writers, 0);
1146         INIT_LIST_HEAD(&bo->lru);
1147         INIT_LIST_HEAD(&bo->ddestroy);
1148         INIT_LIST_HEAD(&bo->swap);
1149         INIT_LIST_HEAD(&bo->io_reserve_lru);
1150         mutex_init(&bo->wu_mutex);
1151         bo->bdev = bdev;
1152         bo->glob = bdev->glob;
1153         bo->type = type;
1154         bo->num_pages = num_pages;
1155         bo->mem.size = num_pages << PAGE_SHIFT;
1156         bo->mem.mem_type = TTM_PL_SYSTEM;
1157         bo->mem.num_pages = bo->num_pages;
1158         bo->mem.mm_node = NULL;
1159         bo->mem.page_alignment = page_alignment;
1160         bo->mem.bus.io_reserved_vm = false;
1161         bo->mem.bus.io_reserved_count = 0;
1162         bo->moving = NULL;
1163         bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
1164         bo->persistent_swap_storage = persistent_swap_storage;
1165         bo->acc_size = acc_size;
1166         bo->sg = sg;
1167         if (resv) {
1168                 bo->resv = resv;
1169                 lockdep_assert_held(&bo->resv->lock.base);
1170         } else {
1171                 bo->resv = &bo->ttm_resv;
1172                 reservation_object_init(&bo->ttm_resv);
1173         }
1174         atomic_inc(&bo->glob->bo_count);
1175         drm_vma_node_reset(&bo->vma_node);
1176
1177         /*
1178          * For ttm_bo_type_device buffers, allocate
1179          * address space from the device.
1180          */
1181         if (bo->type == ttm_bo_type_device ||
1182             bo->type == ttm_bo_type_sg)
1183                 ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
1184                                          bo->mem.num_pages);
1185
1186         /* passed reservation objects should already be locked,
1187          * since otherwise lockdep will be angered in radeon.
1188          */
1189         if (!resv) {
1190                 locked = ww_mutex_trylock(&bo->resv->lock);
1191                 WARN_ON(!locked);
1192         }
1193
1194         if (likely(!ret))
1195                 ret = ttm_bo_validate(bo, placement, interruptible, false);
1196
1197         if (!resv) {
1198                 ttm_bo_unreserve(bo);
1199
1200         } else if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
1201                 spin_lock(&bo->glob->lru_lock);
1202                 ttm_bo_add_to_lru(bo);
1203                 spin_unlock(&bo->glob->lru_lock);
1204         }
1205
1206         if (unlikely(ret))
1207                 ttm_bo_unref(&bo);
1208
1209         return ret;
1210 }
1211 EXPORT_SYMBOL(ttm_bo_init);
1212
1213 size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
1214                        unsigned long bo_size,
1215                        unsigned struct_size)
1216 {
1217         unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
1218         size_t size = 0;
1219
1220         size += ttm_round_pot(struct_size);
1221         size += ttm_round_pot(npages * sizeof(void *));
1222         size += ttm_round_pot(sizeof(struct ttm_tt));
1223         return size;
1224 }
1225 EXPORT_SYMBOL(ttm_bo_acc_size);
1226
1227 size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
1228                            unsigned long bo_size,
1229                            unsigned struct_size)
1230 {
1231         unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
1232         size_t size = 0;
1233
1234         size += ttm_round_pot(struct_size);
1235         size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t)));
1236         size += ttm_round_pot(sizeof(struct ttm_dma_tt));
1237         return size;
1238 }
1239 EXPORT_SYMBOL(ttm_bo_dma_acc_size);
1240
1241 int ttm_bo_create(struct ttm_bo_device *bdev,
1242                         unsigned long size,
1243                         enum ttm_bo_type type,
1244                         struct ttm_placement *placement,
1245                         uint32_t page_alignment,
1246                         bool interruptible,
1247                         struct file *persistent_swap_storage,
1248                         struct ttm_buffer_object **p_bo)
1249 {
1250         struct ttm_buffer_object *bo;
1251         size_t acc_size;
1252         int ret;
1253
1254         bo = kzalloc(sizeof(*bo), GFP_KERNEL);
1255         if (unlikely(bo == NULL))
1256                 return -ENOMEM;
1257
1258         acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
1259         ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
1260                           interruptible, persistent_swap_storage, acc_size,
1261                           NULL, NULL, NULL);
1262         if (likely(ret == 0))
1263                 *p_bo = bo;
1264
1265         return ret;
1266 }
1267 EXPORT_SYMBOL(ttm_bo_create);
1268
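/*
 * Evict everything on the LRU list of @mem_type and wait for the
 * manager's pending move fence, so that the memory type can be taken
 * down or fully evicted.
 */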
1269 static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1270                                         unsigned mem_type, bool allow_errors)
1271 {
1272         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1273         struct ttm_bo_global *glob = bdev->glob;
1274         struct dma_fence *fence;
1275         int ret;
1276
1277         /*
1278          * Can't use standard list traversal since we're unlocking.
1279          */
1280
1281         spin_lock(&glob->lru_lock);
1282         while (!list_empty(&man->lru)) {
1283                 spin_unlock(&glob->lru_lock);
1284                 ret = ttm_mem_evict_first(bdev, mem_type, NULL, false, false);
1285                 if (ret) {
1286                         if (allow_errors) {
1287                                 return ret;
1288                         } else {
1289                                 pr_err("Cleanup eviction failed\n");
1290                         }
1291                 }
1292                 spin_lock(&glob->lru_lock);
1293         }
1294         spin_unlock(&glob->lru_lock);
1295
1296         spin_lock(&man->move_lock);
1297         fence = dma_fence_get(man->move);
1298         spin_unlock(&man->move_lock);
1299
1300         if (fence) {
1301                 ret = dma_fence_wait(fence, false);
1302                 dma_fence_put(fence);
1303                 if (ret) {
1304                         if (allow_errors) {
1305                                 return ret;
1306                         } else {
1307                                 pr_err("Cleanup eviction failed\n");
1308                         }
1309                 }
1310         }
1311
1312         return 0;
1313 }
1314
1315 int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1316 {
1317         struct ttm_mem_type_manager *man;
1318         int ret = -EINVAL;
1319
1320         if (mem_type >= TTM_NUM_MEM_TYPES) {
1321                 pr_err("Illegal memory type %d\n", mem_type);
1322                 return ret;
1323         }
1324         man = &bdev->man[mem_type];
1325
1326         if (!man->has_type) {
1327                 pr_err("Trying to take down uninitialized memory manager type %u\n",
1328                        mem_type);
1329                 return ret;
1330         }
1331         dma_fence_put(man->move);
1332
1333         man->use_type = false;
1334         man->has_type = false;
1335
1336         ret = 0;
1337         if (mem_type > 0) {
1338                 ttm_bo_force_list_clean(bdev, mem_type, false);
1339
1340                 ret = (*man->func->takedown)(man);
1341         }
1342
1343         return ret;
1344 }
1345 EXPORT_SYMBOL(ttm_bo_clean_mm);
1346
1347 int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1348 {
1349         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1350
1351         if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
1352                 pr_err("Illegal memory manager memory type %u\n", mem_type);
1353                 return -EINVAL;
1354         }
1355
1356         if (!man->has_type) {
1357                 pr_err("Memory type %u has not been initialized\n", mem_type);
1358                 return 0;
1359         }
1360
1361         return ttm_bo_force_list_clean(bdev, mem_type, true);
1362 }
1363 EXPORT_SYMBOL(ttm_bo_evict_mm);
1364
1365 int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1366                         unsigned long p_size)
1367 {
1368         int ret = -EINVAL;
1369         struct ttm_mem_type_manager *man;
1370
1371         BUG_ON(type >= TTM_NUM_MEM_TYPES);
1372         man = &bdev->man[type];
1373         BUG_ON(man->has_type);
1374         man->io_reserve_fastpath = true;
1375         man->use_io_reserve_lru = false;
1376         mutex_init(&man->io_reserve_mutex);
1377         spin_lock_init(&man->move_lock);
1378         INIT_LIST_HEAD(&man->io_reserve_lru);
1379
1380         ret = bdev->driver->init_mem_type(bdev, type, man);
1381         if (ret)
1382                 return ret;
1383         man->bdev = bdev;
1384
1385         ret = 0;
1386         if (type != TTM_PL_SYSTEM) {
1387                 ret = (*man->func->init)(man, p_size);
1388                 if (ret)
1389                         return ret;
1390         }
1391         man->has_type = true;
1392         man->use_type = true;
1393         man->size = p_size;
1394
1395         INIT_LIST_HEAD(&man->lru);
1396         man->move = NULL;
1397
1398         return 0;
1399 }
1400 EXPORT_SYMBOL(ttm_bo_init_mm);
1401
1402 static void ttm_bo_global_kobj_release(struct kobject *kobj)
1403 {
1404         struct ttm_bo_global *glob =
1405                 container_of(kobj, struct ttm_bo_global, kobj);
1406
1407         ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
1408         __free_page(glob->dummy_read_page);
1409         kfree(glob);
1410 }
1411
1412 void ttm_bo_global_release(struct drm_global_reference *ref)
1413 {
1414         struct ttm_bo_global *glob = ref->object;
1415
1416         kobject_del(&glob->kobj);
1417         kobject_put(&glob->kobj);
1418 }
1419 EXPORT_SYMBOL(ttm_bo_global_release);
1420
1421 int ttm_bo_global_init(struct drm_global_reference *ref)
1422 {
1423         struct ttm_bo_global_ref *bo_ref =
1424                 container_of(ref, struct ttm_bo_global_ref, ref);
1425         struct ttm_bo_global *glob = ref->object;
1426         int ret;
1427
1428         mutex_init(&glob->device_list_mutex);
1429         spin_lock_init(&glob->lru_lock);
1430         glob->mem_glob = bo_ref->mem_glob;
1431         glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
1432
1433         if (unlikely(glob->dummy_read_page == NULL)) {
1434                 ret = -ENOMEM;
1435                 goto out_no_drp;
1436         }
1437
1438         INIT_LIST_HEAD(&glob->swap_lru);
1439         INIT_LIST_HEAD(&glob->device_list);
1440
1441         ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
1442         ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
1443         if (unlikely(ret != 0)) {
1444                 pr_err("Could not register buffer object swapout\n");
1445                 goto out_no_shrink;
1446         }
1447
1448         atomic_set(&glob->bo_count, 0);
1449
1450         ret = kobject_init_and_add(
1451                 &glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
1452         if (unlikely(ret != 0))
1453                 kobject_put(&glob->kobj);
1454         return ret;
1455 out_no_shrink:
1456         __free_page(glob->dummy_read_page);
1457 out_no_drp:
1458         kfree(glob);
1459         return ret;
1460 }
1461 EXPORT_SYMBOL(ttm_bo_global_init);
1462
1463
1464 int ttm_bo_device_release(struct ttm_bo_device *bdev)
1465 {
1466         int ret = 0;
1467         unsigned i = TTM_NUM_MEM_TYPES;
1468         struct ttm_mem_type_manager *man;
1469         struct ttm_bo_global *glob = bdev->glob;
1470
1471         while (i--) {
1472                 man = &bdev->man[i];
1473                 if (man->has_type) {
1474                         man->use_type = false;
1475                         if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
1476                                 ret = -EBUSY;
1477                                 pr_err("DRM memory manager type %d is not clean\n",
1478                                        i);
1479                         }
1480                         man->has_type = false;
1481                 }
1482         }
1483
1484         mutex_lock(&glob->device_list_mutex);
1485         list_del(&bdev->device_list);
1486         mutex_unlock(&glob->device_list_mutex);
1487
1488         cancel_delayed_work_sync(&bdev->wq);
1489
1490         while (ttm_bo_delayed_delete(bdev, true))
1491                 ;
1492
1493         spin_lock(&glob->lru_lock);
1494         if (list_empty(&bdev->ddestroy))
1495                 TTM_DEBUG("Delayed destroy list was clean\n");
1496
1497         if (list_empty(&bdev->man[0].lru))
1498                 TTM_DEBUG("Swap list was clean\n");
1499         spin_unlock(&glob->lru_lock);
1500
1501         drm_vma_offset_manager_destroy(&bdev->vma_manager);
1502
1503         return ret;
1504 }
1505 EXPORT_SYMBOL(ttm_bo_device_release);
1506
1507 int ttm_bo_device_init(struct ttm_bo_device *bdev,
1508                        struct ttm_bo_global *glob,
1509                        struct ttm_bo_driver *driver,
1510                        struct address_space *mapping,
1511                        uint64_t file_page_offset,
1512                        bool need_dma32)
1513 {
1514         int ret = -EINVAL;
1515
1516         bdev->driver = driver;
1517
1518         memset(bdev->man, 0, sizeof(bdev->man));
1519
1520         /*
1521          * Initialize the system memory buffer type.
1522          * Other types need to be driver / IOCTL initialized.
1523          */
1524         ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
1525         if (unlikely(ret != 0))
1526                 goto out_no_sys;
1527
1528         drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
1529                                     0x10000000);
1530         INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
1531         INIT_LIST_HEAD(&bdev->ddestroy);
1532         bdev->dev_mapping = mapping;
1533         bdev->glob = glob;
1534         bdev->need_dma32 = need_dma32;
1535         mutex_lock(&glob->device_list_mutex);
1536         list_add_tail(&bdev->device_list, &glob->device_list);
1537         mutex_unlock(&glob->device_list_mutex);
1538
1539         return 0;
1540 out_no_sys:
1541         return ret;
1542 }
1543 EXPORT_SYMBOL(ttm_bo_device_init);
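/*
 * Editor's example (illustrative, not from the original file): how a driver
 * typically brings up its ttm_bo_device and then adds the memory types that,
 * per the comment above, are driver initialized.  The driver struct, the
 * placeholder bo_driver, the page-offset base and the VRAM size are all
 * hypothetical; drivers usually pass dev->anon_inode->i_mapping as the
 * mapping argument.
 */
#define MY_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)	/* hypothetical */

static struct ttm_bo_driver my_bo_driver;	/* hypothetical; hooks omitted */

struct my_driver {				/* hypothetical per-device state */
	struct ttm_bo_device bdev;
	struct ttm_bo_global_ref bo_global_ref;
	u64 vram_size;				/* bytes */
	bool need_dma32;
};

static int my_ttm_init(struct my_driver *drv, struct address_space *mapping)
{
	int ret;

	ret = ttm_bo_device_init(&drv->bdev,
				 drv->bo_global_ref.ref.object,
				 &my_bo_driver,
				 mapping,
				 MY_FILE_PAGE_OFFSET,
				 drv->need_dma32);
	if (ret) {
		pr_err("failed initializing buffer object driver (%d)\n", ret);
		return ret;
	}

	/* TTM_PL_SYSTEM was set up by ttm_bo_device_init(); aperture-backed
	 * types such as VRAM are added by the driver, sized in pages. */
	return ttm_bo_init_mm(&drv->bdev, TTM_PL_VRAM,
			      drv->vram_size >> PAGE_SHIFT);
}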
1544
1545 /*
1546  * buffer object vm functions.
1547  */
1548
1549 bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1550 {
1551         struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1552
1553         if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
1554                 if (mem->mem_type == TTM_PL_SYSTEM)
1555                         return false;
1556
1557                 if (man->flags & TTM_MEMTYPE_FLAG_CMA)
1558                         return false;
1559
1560                 if (mem->placement & TTM_PL_FLAG_CACHED)
1561                         return false;
1562         }
1563         return true;
1564 }
1565
1566 void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
1567 {
1568         struct ttm_bo_device *bdev = bo->bdev;
1569
1570         drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
1571         ttm_mem_io_free_vm(bo);
1572 }
1573
1574 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1575 {
1576         struct ttm_bo_device *bdev = bo->bdev;
1577         struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
1578
1579         ttm_mem_io_lock(man, false);
1580         ttm_bo_unmap_virtual_locked(bo);
1581         ttm_mem_io_unlock(man);
1582 }
1583
1584
1585 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1586
1587 int ttm_bo_wait(struct ttm_buffer_object *bo,
1588                 bool interruptible, bool no_wait)
1589 {
1590         long timeout = 15 * HZ;
1591
1592         if (no_wait) {
1593                 if (reservation_object_test_signaled_rcu(bo->resv, true))
1594                         return 0;
1595                 else
1596                         return -EBUSY;
1597         }
1598
1599         timeout = reservation_object_wait_timeout_rcu(bo->resv, true,
1600                                                       interruptible, timeout);
1601         if (timeout < 0)
1602                 return timeout;
1603
1604         if (timeout == 0)
1605                 return -EBUSY;
1606
1607         reservation_object_add_excl_fence(bo->resv, NULL);
1608         return 0;
1609 }
1610 EXPORT_SYMBOL(ttm_bo_wait);
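/*
 * Editor's example (illustrative): ttm_bo_wait() expects the buffer to be
 * reserved by the caller, and with no_wait it degenerates to a poll that
 * returns -EBUSY while fences are outstanding.  A minimal driver-side
 * "wait for idle" helper; the name is hypothetical.
 */
static int my_bo_wait_idle(struct ttm_buffer_object *bo,
			   bool interruptible, bool no_wait)
{
	int ret;

	ret = ttm_bo_reserve(bo, interruptible, no_wait, NULL);
	if (unlikely(ret != 0))
		return ret;
	ret = ttm_bo_wait(bo, interruptible, no_wait);
	ttm_bo_unreserve(bo);
	return ret;
}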
1611
1612 int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
1613 {
1614         int ret = 0;
1615
1616         /*
1617          * Using ttm_bo_reserve makes sure the lru lists are updated.
1618          */
1619
1620         ret = ttm_bo_reserve(bo, true, no_wait, NULL);
1621         if (unlikely(ret != 0))
1622                 return ret;
1623         ret = ttm_bo_wait(bo, true, no_wait);
1624         if (likely(ret == 0))
1625                 atomic_inc(&bo->cpu_writers);
1626         ttm_bo_unreserve(bo);
1627         return ret;
1628 }
1629 EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
1630
1631 void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
1632 {
1633         atomic_dec(&bo->cpu_writers);
1634 }
1635 EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
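/*
 * Editor's example (illustrative): the grab/release pair brackets direct CPU
 * access to a buffer.  grab waits for the buffer to become idle and bumps
 * bo->cpu_writers, which drivers can check before handing the buffer back to
 * the GPU; release drops the count.  The helper name is hypothetical.
 */
static int my_cpu_write(struct ttm_buffer_object *bo)
{
	int ret;

	ret = ttm_bo_synccpu_write_grab(bo, false /* block until idle */);
	if (unlikely(ret != 0))
		return ret;

	/* ... CPU writes through a kmap or mmap of the buffer go here ... */

	ttm_bo_synccpu_write_release(bo);
	return 0;
}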
1636
1637 /**
1638  * A buffer object shrink method that tries to swap out the first
1639  * buffer object on the bo_global::swap_lru list.
1640  */
1641
1642 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1643 {
1644         struct ttm_bo_global *glob =
1645             container_of(shrink, struct ttm_bo_global, shrink);
1646         struct ttm_buffer_object *bo;
1647         int ret = -EBUSY;
1648         uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
1649
1650         spin_lock(&glob->lru_lock);
1651         list_for_each_entry(bo, &glob->swap_lru, swap) {
1652                 ret = __ttm_bo_reserve(bo, false, true, NULL);
1653                 if (!ret)
1654                         break;
1655         }
1656
1657         if (ret) {
1658                 spin_unlock(&glob->lru_lock);
1659                 return ret;
1660         }
1661
1662         kref_get(&bo->list_kref);
1663
1664         if (!list_empty(&bo->ddestroy)) {
1665                 ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
1666                 kref_put(&bo->list_kref, ttm_bo_release_list);
1667                 return ret;
1668         }
1669
1670         ttm_bo_del_from_lru(bo);
1671         spin_unlock(&glob->lru_lock);
1672
1673         /*
1674          * Move to system cached
1675          */
1676
1677         if ((bo->mem.placement & swap_placement) != swap_placement) {
1678                 struct ttm_mem_reg evict_mem;
1679
1680                 evict_mem = bo->mem;
1681                 evict_mem.mm_node = NULL;
1682                 evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
1683                 evict_mem.mem_type = TTM_PL_SYSTEM;
1684
1685                 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
1686                                              false, false);
1687                 if (unlikely(ret != 0))
1688                         goto out;
1689         }
1690
1691         /*
1692          * Make sure BO is idle.
1693          */
1694
1695         ret = ttm_bo_wait(bo, false, false);
1696         if (unlikely(ret != 0))
1697                 goto out;
1698
1699         ttm_bo_unmap_virtual(bo);
1700
1701         /*
1702          * Swap out. Buffer will be swapped in again as soon as
1703          * anyone tries to access a ttm page.
1704          */
1705
1706         if (bo->bdev->driver->swap_notify)
1707                 bo->bdev->driver->swap_notify(bo);
1708
1709         ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
1710 out:
1711
1712         /*
1713          * Unreserve without putting the buffer back on the LRU lists,
1714          * so that a following swapout pass does not pick a buffer that
1715          * has already been swapped out.
1716          */
1717
1718         __ttm_bo_unreserve(bo);
1719         kref_put(&bo->list_kref, ttm_bo_release_list);
1720         return ret;
1721 }
1722
1723 void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
1724 {
1725         while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
1726                 ;
1727 }
1728 EXPORT_SYMBOL(ttm_bo_swapout_all);
1729
1730 /**
1731  * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
1732  * unreserved
1733  *
1734  * @bo: Pointer to buffer
1735  */
1736 int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
1737 {
1738         int ret;
1739
1740         /*
1741          * In the absence of a wait_unlocked API,
1742          * use the bo::wu_mutex to avoid triggering livelocks due to
1743          * concurrent use of this function. Note that this use of
1744          * bo::wu_mutex can go away if we change locking order to
1745          * mmap_sem -> bo::reserve.
1746          */
1747         ret = mutex_lock_interruptible(&bo->wu_mutex);
1748         if (unlikely(ret != 0))
1749                 return -ERESTARTSYS;
1750         if (!ww_mutex_is_locked(&bo->resv->lock))
1751                 goto out_unlock;
1752         ret = __ttm_bo_reserve(bo, true, false, NULL);
1753         if (unlikely(ret != 0))
1754                 goto out_unlock;
1755         __ttm_bo_unreserve(bo);
1756
1757 out_unlock:
1758         mutex_unlock(&bo->wu_mutex);
1759         return ret;
1760 }
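
/*
 * Editor's example (illustrative): per the locking comment above, the
 * intended caller is a page-fault handler that already holds mmap_sem and
 * therefore must not block on bo::reserve.  When a no_wait reserve returns
 * -EBUSY and the fault allows retry, the handler drops mmap_sem, sleeps
 * here, and lets the MM core retry the fault.  The helper name is
 * hypothetical.
 */
static int my_fault_wait_retry(struct vm_area_struct *vma,
			       struct ttm_buffer_object *bo)
{
	ttm_bo_reference(bo);			/* keep bo alive across the wait */
	up_read(&vma->vm_mm->mmap_sem);		/* release mmap_sem first */
	(void) ttm_bo_wait_unreserved(bo);	/* sleep until the holder unreserves */
	ttm_bo_unref(&bo);
	return VM_FAULT_RETRY;			/* the MM core will retry the fault */
}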