/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/ttm/ttm_module.h>
#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/qxl_drm.h>
#include "qxl_drv.h"
#include "qxl_object.h"

#include <linux/delay.h>

static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev)
{
	struct qxl_mman *mman;
	struct qxl_device *qdev;

	mman = container_of(bdev, struct qxl_mman, bdev);
	qdev = container_of(mman, struct qxl_device, mman);
	return qdev;
}

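/*
 * mmap fault interposition: on the first mmap() the TTM-provided vm_ops are
 * saved in ttm_vm_ops and copied into qxl_ttm_vm_ops with only .fault
 * overridden, so qxl can bail out on a VMA whose private data is missing
 * before delegating to TTM's own fault handler.
 */
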
static struct vm_operations_struct qxl_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops;

static vm_fault_t qxl_ttm_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo;
	vm_fault_t ret;

	bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
	if (bo == NULL)
		return VM_FAULT_NOPAGE;
	ret = ttm_vm_ops->fault(vmf);
	return ret;
}

int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int r;
	struct drm_file *file_priv = filp->private_data;
	struct qxl_device *qdev = file_priv->minor->dev->dev_private;

	if (qdev == NULL) {
		DRM_ERROR("filp->private_data->minor->dev->dev_private == NULL\n");
		return -EINVAL;
	}
	DRM_DEBUG_DRIVER("filp->private_data = 0x%p, vma->vm_pgoff = %lx\n",
			 filp->private_data, vma->vm_pgoff);

	r = ttm_bo_mmap(filp, vma, &qdev->mman.bdev);
	if (unlikely(r != 0))
		return r;
	if (unlikely(ttm_vm_ops == NULL)) {
		ttm_vm_ops = vma->vm_ops;
		qxl_ttm_vm_ops = *ttm_vm_ops;
		qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
	}
	vma->vm_ops = &qxl_ttm_vm_ops;
	return 0;
}

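/*
 * Userspace reaches qxl_mmap() by calling mmap() on the DRM fd with the fake
 * offset previously obtained from the kernel. An illustrative (not
 * driver-specific) call looks like:
 *
 *	ptr = mmap(NULL, bo_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   drm_fd, bo_mmap_offset);
 */
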
static int qxl_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

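/*
 * qxl addresses memory through "memslots": the slot id and generation live in
 * the high bits of the 64-bit addresses handed to the device. That is why
 * gpu_offset_shift below is derived from slot_gen_bits and slot_id_bits, and
 * why each manager's gpu_offset tags buffer offsets with the owning slot.
 */
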
static int qxl_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			     struct ttm_mem_type_manager *man)
{
	struct qxl_device *qdev = qxl_get_qdev(bdev);
	unsigned int gpu_offset_shift =
		64 - (qdev->rom->slot_gen_bits + qdev->rom->slot_id_bits + 8);
	struct qxl_memslot *slot;

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
	case TTM_PL_PRIV:
		/* "On-card" video ram */
		slot = (type == TTM_PL_VRAM) ?
			&qdev->main_slot : &qdev->surfaces_slot;
		slot->gpu_offset = (uint64_t)type << gpu_offset_shift;
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = slot->gpu_offset;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type);
		return -EINVAL;
	}
	return 0;
}

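/*
 * Worked example, assuming the common ROM values slot_gen_bits == 8 and
 * slot_id_bits == 8: gpu_offset_shift = 64 - (8 + 8 + 8) = 40, so the VRAM
 * manager (type TTM_PL_VRAM == 2) is assigned gpu_offset 2ULL << 40 and the
 * surfaces manager (TTM_PL_PRIV == 3) 3ULL << 40.
 */
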
static void qxl_evict_flags(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement)
{
	struct qxl_bo *qbo;
	static const struct ttm_place placements = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
	};

	if (!qxl_ttm_bo_is_qxl_bo(bo)) {
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}
	qbo = to_qxl_bo(bo);
	qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU, false);
	*placement = qbo->placement;
}

static int qxl_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct qxl_bo *qbo = to_qxl_bo(bo);

	return drm_vma_node_verify_access(&qbo->gem_base.vma_node,
					  filp->private_data);
}

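/*
 * io_mem_reserve translates a placement into a physical bus window for CPU
 * mapping: system memory needs no window, while VRAM and surface RAM are
 * fixed apertures whose bases come from the PCI BARs.
 */
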
static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
				  struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct qxl_device *qdev = qxl_get_qdev(bdev);

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		return 0;
	case TTM_PL_VRAM:
		mem->bus.is_iomem = true;
		mem->bus.base = qdev->vram_base;
		mem->bus.offset = mem->start << PAGE_SHIFT;
		break;
	case TTM_PL_PRIV:
		mem->bus.is_iomem = true;
		mem->bus.base = qdev->surfaceram_base;
		mem->bus.offset = mem->start << PAGE_SHIFT;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void qxl_ttm_io_mem_free(struct ttm_bo_device *bdev,
				struct ttm_mem_reg *mem)
{
}

/*
 * TTM backend functions.
 */
struct qxl_ttm_tt {
	struct ttm_tt		ttm;	/* must stay first: cast from ttm_tt */
	struct qxl_device	*qdev;
	u64			offset;
};

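/*
 * qxl has no GTT-style aperture to bind system pages into, so the bind and
 * unbind hooks below are stubs; only destroy does real work.
 */
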
static int qxl_ttm_backend_bind(struct ttm_tt *ttm,
				struct ttm_mem_reg *bo_mem)
{
	struct qxl_ttm_tt *gtt = (void *)ttm;

	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}
	/* Not implemented */
	return -1;
}

static int qxl_ttm_backend_unbind(struct ttm_tt *ttm)
{
	/* Not implemented */
	return -1;
}

static void qxl_ttm_backend_destroy(struct ttm_tt *ttm)
{
	struct qxl_ttm_tt *gtt = (void *)ttm;

	ttm_tt_fini(&gtt->ttm);
	kfree(gtt);
}

static struct ttm_backend_func qxl_backend_func = {
	.bind = &qxl_ttm_backend_bind,
	.unbind = &qxl_ttm_backend_unbind,
	.destroy = &qxl_ttm_backend_destroy,
};

static struct ttm_tt *qxl_ttm_tt_create(struct ttm_buffer_object *bo,
					uint32_t page_flags)
{
	struct qxl_device *qdev;
	struct qxl_ttm_tt *gtt;

	qdev = qxl_get_qdev(bo->bdev);
	gtt = kzalloc(sizeof(struct qxl_ttm_tt), GFP_KERNEL);
	if (gtt == NULL)
		return NULL;
	gtt->ttm.func = &qxl_backend_func;
	gtt->qdev = qdev;
	if (ttm_tt_init(&gtt->ttm, bo, page_flags)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm;
}

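/*
 * A "null" move: when the source is unpopulated system memory, adopting the
 * new placement is a plain struct copy and ownership of new_mem's mm_node
 * passes to the BO.
 */
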
static void qxl_move_null(struct ttm_buffer_object *bo,
			  struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}

static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
	if (ret)
		return ret;
	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		qxl_move_null(bo, new_mem);
		return 0;
	}
	return ttm_bo_move_memcpy(bo, ctx, new_mem);
}

static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
			       bool evict,
			       struct ttm_mem_reg *new_mem)
{
	struct qxl_bo *qbo;
	struct qxl_device *qdev;

	if (!qxl_ttm_bo_is_qxl_bo(bo))
		return;
	qbo = to_qxl_bo(bo);
	qdev = qbo->gem_base.dev->dev_private;
	if (bo->mem.mem_type == TTM_PL_PRIV && qbo->surface_id)
		qxl_surface_evict(qdev, qbo, new_mem ? true : false);
}

static struct ttm_bo_driver qxl_bo_driver = {
	.ttm_tt_create = &qxl_ttm_tt_create,
	.invalidate_caches = &qxl_invalidate_caches,
	.init_mem_type = &qxl_init_mem_type,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = &qxl_evict_flags,
	.move = &qxl_bo_move,
	.verify_access = &qxl_verify_access,
	.io_mem_reserve = &qxl_ttm_io_mem_reserve,
	.io_mem_free = &qxl_ttm_io_mem_free,
	.move_notify = &qxl_bo_move_notify,
};

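/*
 * Driver setup: qxl_ttm_init() registers qxl_bo_driver and creates two fixed
 * heaps, TTM_PL_VRAM for the main RAM BAR (including surface 0) and
 * TTM_PL_PRIV for the surfaces BAR; qxl_ttm_fini() tears them down again.
 */
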
int qxl_ttm_init(struct qxl_device *qdev)
{
	int r;
	int num_io_pages; /* != rom->num_io_pages, we include surface0 */

	/* No other users of the address space, so set it to 0 */
	r = ttm_bo_device_init(&qdev->mman.bdev,
			       &qxl_bo_driver,
			       qdev->ddev.anon_inode->i_mapping,
			       DRM_FILE_PAGE_OFFSET, 0);
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}
	/* NOTE: this includes the framebuffer (aka surface 0) */
	num_io_pages = qdev->rom->ram_header_offset / PAGE_SIZE;
	r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_VRAM,
			   num_io_pages);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}
	r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_PRIV,
			   qdev->surfaceram_size / PAGE_SIZE);
	if (r) {
		DRM_ERROR("Failed initializing Surfaces heap.\n");
		return r;
	}
	DRM_INFO("qxl: %uM of VRAM memory size\n",
		 (unsigned int)qdev->vram_size / (1024 * 1024));
	DRM_INFO("qxl: %luM of IO pages memory ready (VRAM domain)\n",
		 ((unsigned long)num_io_pages * PAGE_SIZE) / (1024 * 1024));
	DRM_INFO("qxl: %uM of Surface memory size\n",
		 (unsigned int)qdev->surfaceram_size / (1024 * 1024));
	return 0;
}

void qxl_ttm_fini(struct qxl_device *qdev)
{
	ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV);
	ttm_bo_device_release(&qdev->mman.bdev);
	DRM_INFO("qxl: ttm finalized\n");
}

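/*
 * Debugfs support: one file per heap, each dumping the drm_mm allocator
 * state for that heap under the TTM global LRU lock.
 */
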
#define QXL_DEBUGFS_MEM_TYPES 2

#if defined(CONFIG_DEBUG_FS)
static int qxl_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct qxl_device *rdev = dev->dev_private;
	struct ttm_bo_global *glob = rdev->mman.bdev.glob;
	struct drm_printer p = drm_seq_file_printer(m);

	spin_lock(&glob->lru_lock);
	drm_mm_print(mm, &p);
	spin_unlock(&glob->lru_lock);
	return 0;
}
#endif

int qxl_ttm_debugfs_init(struct qxl_device *qdev)
{
#if defined(CONFIG_DEBUG_FS)
	static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
	static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
	unsigned int i;

	for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
		if (i == 0)
			sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
		else
			sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
		qxl_mem_types_list[i].name = qxl_mem_types_names[i];
		qxl_mem_types_list[i].show = &qxl_mm_dump_table;
		qxl_mem_types_list[i].driver_features = 0;
		if (i == 0)
			qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
		else
			qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV].priv;
	}
	return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
#else
	return 0;
#endif
}