// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_pool.h"
#include "gt/intel_gt.h"
#include "i915_gem_clflush.h"
#include "i915_gem_object_blt.h"

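/*
 * Fill and copy blits are emitted in blocks of at most S16_MAX pages:
 * the blit commands encode the region height as a 16-bit row count,
 * with each row spanning one page (see the GEM_BUG_ON on size below).
 * Large objects are therefore split into block_size chunks, with an
 * MI_ARB_CHECK between chunks so long batches remain preemptible.
 */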
struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
					 struct i915_vma *vma,
					 u32 value)
{
	struct drm_i915_private *i915 = ce->vm->i915;
	const u32 block_size = S16_MAX * PAGE_SIZE;
	struct intel_engine_pool_node *pool;
	struct i915_vma *batch;
	u64 offset;
	u64 count;
	u64 rem;
	u32 size;
	u32 *cmd;
	int err;

	GEM_BUG_ON(intel_engine_is_virtual(ce->engine));
	intel_engine_pm_get(ce->engine);

	/* 8 dwords per block; round up so a trailing partial block fits */
	count = div_u64(round_up(vma->size, block_size), block_size);
	size = (1 + 8 * count) * sizeof(u32);
	size = round_up(size, PAGE_SIZE);
	pool = intel_engine_pool_get(&ce->engine->pool, size);
	if (IS_ERR(pool)) {
		err = PTR_ERR(pool);
		goto out_pm;
	}

	cmd = i915_gem_object_pin_map(pool->obj, I915_MAP_WC);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto out_put;
	}

	rem = vma->size;
	offset = vma->node.start;

	do {
		u32 size = min_t(u64, rem, block_size);

		GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);

		if (INTEL_GEN(i915) >= 8) {
			*cmd++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (7 - 2);
			*cmd++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
			*cmd++ = 0;
			*cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
			*cmd++ = lower_32_bits(offset);
			*cmd++ = upper_32_bits(offset);
			*cmd++ = value;
		} else {
			*cmd++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
			*cmd++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
			*cmd++ = 0;
			*cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
			*cmd++ = offset;
			*cmd++ = value;
		}

		/* Allow ourselves to be preempted in between blocks. */
		*cmd++ = MI_ARB_CHECK;

		offset += size;
		rem -= size;
	} while (rem);

	*cmd = MI_BATCH_BUFFER_END;
	intel_gt_chipset_flush(ce->vm->gt);

	i915_gem_object_unpin_map(pool->obj);

	batch = i915_vma_instance(pool->obj, ce->vm, NULL);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_put;
	}

	err = i915_vma_pin(batch, 0, 0, PIN_USER);
	if (unlikely(err))
		goto out_put;

	batch->private = pool;
	return batch;

out_put:
	intel_engine_pool_put(pool);
out_pm:
	intel_engine_pm_put(ce->engine);
	return ERR_PTR(err);
}

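/*
 * The batch returned above borrows a buffer from the engine pool, stashed
 * in batch->private. It must be marked active against the request that
 * consumes it, and released again once that request has been submitted.
 */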
int intel_emit_vma_mark_active(struct i915_vma *vma, struct i915_request *rq)
{
	int err;

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, 0);
	i915_vma_unlock(vma);
	if (unlikely(err))
		return err;

	return intel_engine_pool_mark_active(vma->private, rq);
}

void intel_emit_vma_release(struct intel_context *ce, struct i915_vma *vma)
{
	i915_vma_unpin(vma);
	intel_engine_pool_put(vma->private);
	intel_engine_pm_put(ce->engine);
}

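/*
 * Fill the object with a 32-bit value using the blitter: flush any dirty
 * CPU cachelines not covered by coherency, build the fill batch, then
 * queue it on a fresh request with the object tracked as a GPU write.
 */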
int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
			     struct intel_context *ce,
			     u32 value)
{
	struct i915_request *rq;
	struct i915_vma *batch;
	struct i915_vma *vma;
	int err;

	vma = i915_vma_instance(obj, ce->vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (unlikely(err))
		return err;

	if (obj->cache_dirty & ~obj->cache_coherent) {
		i915_gem_object_lock(obj);
		i915_gem_clflush_object(obj, 0);
		i915_gem_object_unlock(obj);
	}

	batch = intel_emit_vma_fill_blt(ce, vma, value);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_unpin;
	}

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out_batch;
	}

	err = intel_emit_vma_mark_active(batch, rq);
	if (unlikely(err))
		goto out_request;

	err = i915_request_await_object(rq, obj, true);
	if (unlikely(err))
		goto out_request;

	if (ce->engine->emit_init_breadcrumb) {
		err = ce->engine->emit_init_breadcrumb(rq);
		if (unlikely(err))
			goto out_request;
	}

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (unlikely(err))
		goto out_request;

	err = ce->engine->emit_bb_start(rq,
					batch->node.start, batch->node.size,
					0);
out_request:
	if (unlikely(err))
		i915_request_skip(rq, err);

	i915_request_add(rq);
out_batch:
	intel_emit_vma_release(ce, batch);
out_unpin:
	i915_vma_unpin(vma);
	return err;
}

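/*
 * As with the fill above, the copy batch is emitted in per-block chunks.
 * Gen9+ uses the XY_FAST_COPY blit, gen8 the 64-bit-address XY_SRC_COPY,
 * and older generations fall back to the 32-bit SRC_COPY blit.
 */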
struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
					 struct i915_vma *src,
					 struct i915_vma *dst)
{
	struct drm_i915_private *i915 = ce->vm->i915;
	const u32 block_size = S16_MAX * PAGE_SIZE;
	struct intel_engine_pool_node *pool;
	struct i915_vma *batch;
	u64 src_offset, dst_offset;
	u64 count, rem;
	u32 size, *cmd;
	int err;

	GEM_BUG_ON(src->size != dst->size);

	GEM_BUG_ON(intel_engine_is_virtual(ce->engine));
	intel_engine_pm_get(ce->engine);

	/* 11 dwords per block; round up so a trailing partial block fits */
	count = div_u64(round_up(dst->size, block_size), block_size);
	size = (1 + 11 * count) * sizeof(u32);
	size = round_up(size, PAGE_SIZE);
	pool = intel_engine_pool_get(&ce->engine->pool, size);
	if (IS_ERR(pool)) {
		err = PTR_ERR(pool);
		goto out_pm;
	}

	cmd = i915_gem_object_pin_map(pool->obj, I915_MAP_WC);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto out_put;
	}

	rem = src->size;
	src_offset = src->node.start;
	dst_offset = dst->node.start;

	do {
		size = min_t(u64, rem, block_size);
		GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);

		if (INTEL_GEN(i915) >= 9) {
			*cmd++ = GEN9_XY_FAST_COPY_BLT_CMD | (10 - 2);
			*cmd++ = BLT_DEPTH_32 | PAGE_SIZE;
			*cmd++ = 0;
			*cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
			*cmd++ = lower_32_bits(dst_offset);
			*cmd++ = upper_32_bits(dst_offset);
			*cmd++ = 0;
			*cmd++ = PAGE_SIZE;
			*cmd++ = lower_32_bits(src_offset);
			*cmd++ = upper_32_bits(src_offset);
		} else if (INTEL_GEN(i915) >= 8) {
			*cmd++ = XY_SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (10 - 2);
			*cmd++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE;
			*cmd++ = 0;
			*cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
			*cmd++ = lower_32_bits(dst_offset);
			*cmd++ = upper_32_bits(dst_offset);
			*cmd++ = 0;
			*cmd++ = PAGE_SIZE;
			*cmd++ = lower_32_bits(src_offset);
			*cmd++ = upper_32_bits(src_offset);
		} else {
			*cmd++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
			*cmd++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE;
			*cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE;
			*cmd++ = dst_offset;
			*cmd++ = PAGE_SIZE;
			*cmd++ = src_offset;
		}

		/* Allow ourselves to be preempted in between blocks. */
		*cmd++ = MI_ARB_CHECK;

		src_offset += size;
		dst_offset += size;
		rem -= size;
	} while (rem);

	*cmd = MI_BATCH_BUFFER_END;
	intel_gt_chipset_flush(ce->vm->gt);

	i915_gem_object_unpin_map(pool->obj);

	batch = i915_vma_instance(pool->obj, ce->vm, NULL);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_put;
	}

	err = i915_vma_pin(batch, 0, 0, PIN_USER);
	if (unlikely(err))
		goto out_put;

	batch->private = pool;
	return batch;

out_put:
	intel_engine_pool_put(pool);
out_pm:
	intel_engine_pm_put(ce->engine);
	return ERR_PTR(err);
}

static int move_to_gpu(struct i915_vma *vma, struct i915_request *rq, bool write)
{
	struct drm_i915_gem_object *obj = vma->obj;

	if (obj->cache_dirty & ~obj->cache_coherent)
		i915_gem_clflush_object(obj, 0);

	return i915_request_await_object(rq, obj, write);
}

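/*
 * Copy the contents of src into dst via the blitter. Both objects are
 * locked together under a single ww acquire context so their reservations
 * can be updated atomically: src is tracked as a read, dst as a write.
 */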
int i915_gem_object_copy_blt(struct drm_i915_gem_object *src,
			     struct drm_i915_gem_object *dst,
			     struct intel_context *ce)
{
	struct drm_gem_object *objs[] = { &src->base, &dst->base };
	struct i915_address_space *vm = ce->vm;
	struct i915_vma *vma[2], *batch;
	struct ww_acquire_ctx acquire;
	struct i915_request *rq;
	int err, i;

	vma[0] = i915_vma_instance(src, vm, NULL);
	if (IS_ERR(vma[0]))
		return PTR_ERR(vma[0]);

	err = i915_vma_pin(vma[0], 0, 0, PIN_USER);
	if (unlikely(err))
		return err;

	vma[1] = i915_vma_instance(dst, vm, NULL);
	if (IS_ERR(vma[1])) {
		err = PTR_ERR(vma[1]);
		goto out_unpin_src;
	}

	err = i915_vma_pin(vma[1], 0, 0, PIN_USER);
	if (unlikely(err))
		goto out_unpin_src;

	batch = intel_emit_vma_copy_blt(ce, vma[0], vma[1]);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_unpin_dst;
	}

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out_batch;
	}

	err = intel_emit_vma_mark_active(batch, rq);
	if (unlikely(err))
		goto out_request;

	err = drm_gem_lock_reservations(objs, ARRAY_SIZE(objs), &acquire);
	if (unlikely(err))
		goto out_request;

	for (i = 0; i < ARRAY_SIZE(vma); i++) {
		err = move_to_gpu(vma[i], rq, i);
		if (unlikely(err))
			goto out_unlock;
	}

	for (i = 0; i < ARRAY_SIZE(vma); i++) {
		unsigned int flags = i ? EXEC_OBJECT_WRITE : 0;

		err = i915_vma_move_to_active(vma[i], rq, flags);
		if (unlikely(err))
			goto out_unlock;
	}

	if (rq->engine->emit_init_breadcrumb) {
		err = rq->engine->emit_init_breadcrumb(rq);
		if (unlikely(err))
			goto out_unlock;
	}

	err = rq->engine->emit_bb_start(rq,
					batch->node.start, batch->node.size,
					0);
out_unlock:
	drm_gem_unlock_reservations(objs, ARRAY_SIZE(objs), &acquire);
out_request:
	if (unlikely(err))
		i915_request_skip(rq, err);

	i915_request_add(rq);
out_batch:
	intel_emit_vma_release(ce, batch);
out_unpin_dst:
	i915_vma_unpin(vma[1]);
out_unpin_src:
	i915_vma_unpin(vma[0]);
	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_object_blt.c"
#endif