/*
* Copyright © 2019 Intel Corporation
*/
-#include "i915_gem_client_blt.h"
+#include "i915_drv.h"
+#include "gt/intel_context.h"
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_engine_pool.h"
+#include "i915_gem_client_blt.h"
#include "i915_gem_object_blt.h"
-#include "intel_drv.h"
struct i915_sleeve {
struct i915_vma *vma;
vma->ops = &proxy_vma_ops;
sleeve->vma = vma;
- sleeve->obj = i915_gem_object_get(obj);
sleeve->pages = pages;
sleeve->page_sizes = *page_sizes;
static void destroy_sleeve(struct i915_sleeve *sleeve)
{
- i915_gem_object_put(sleeve->obj);
kfree(sleeve);
}
static void clear_pages_worker(struct work_struct *work)
{
struct clear_pages_work *w = container_of(work, typeof(*w), work);
- struct drm_i915_private *i915 = w->ce->gem_context->i915;
- struct drm_i915_gem_object *obj = w->sleeve->obj;
+ struct drm_i915_private *i915 = w->ce->engine->i915;
+ struct drm_i915_gem_object *obj = w->sleeve->vma->obj;
struct i915_vma *vma = w->sleeve->vma;
struct i915_request *rq;
+ struct i915_vma *batch;
int err = w->dma.error;
if (unlikely(err))
goto out_signal;
if (obj->cache_dirty) {
- obj->write_domain = 0;
if (i915_gem_object_has_struct_page(obj))
drm_clflush_sg(w->sleeve->pages);
obj->cache_dirty = false;
}
+ obj->read_domains = I915_GEM_GPU_DOMAINS;
+ obj->write_domain = 0;
/* XXX: we need to kill this */
mutex_lock(&i915->drm.struct_mutex);
if (unlikely(err))
goto out_unlock;
- rq = i915_request_create(w->ce);
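+ /* Build the fill batch up front; it is submitted below via emit_bb_start(). */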
+ batch = intel_emit_vma_fill_blt(w->ce, vma, w->value);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_unpin;
+ }
+
+ rq = intel_context_create_request(w->ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto out_unpin;
+ goto out_batch;
}
/* There's no way the fence has signalled */
if (dma_fence_add_callback(&w->dma, &w->cb, clear_pages_dma_fence_cb))
GEM_BUG_ON(1);
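+ /* Keep the pooled batch vma alive (active) until this request is retired. */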
+ err = intel_emit_vma_mark_active(batch, rq);
+ if (unlikely(err))
+ goto out_request;
+
if (w->ce->engine->emit_init_breadcrumb) {
err = w->ce->engine->emit_init_breadcrumb(rq);
if (unlikely(err))
goto out_request;
}
- /* XXX: more feverish nightmares await */
- i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
- i915_vma_unlock(vma);
+ /*
+ * w->dma is already exported via (vma|obj)->resv; we need only
+ * keep track of the GPU activity within this vma/request, and
+ * propagate the signal from the request to w->dma.
+ */
+ err = i915_active_ref(&vma->active, rq->timeline, rq);
if (err)
goto out_request;
- err = intel_emit_vma_fill_blt(rq, vma, w->value);
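+ /* Submit the pre-built fill batch on this request. */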
+ err = w->ce->engine->emit_bb_start(rq,
+ batch->node.start, batch->node.size,
+ 0);
out_request:
if (unlikely(err)) {
i915_request_skip(rq, err);
}
i915_request_add(rq);
+out_batch:
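+ /* Unpin the batch and return it to the engine's buffer pool. */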
+ intel_emit_vma_release(w->ce, batch);
out_unpin:
i915_vma_unpin(vma);
out_unlock:
struct i915_page_sizes *page_sizes,
u32 value)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_gem_context *ctx = ce->gem_context;
- struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
struct clear_pages_work *work;
struct i915_sleeve *sleeve;
int err;
- sleeve = create_sleeve(vm, obj, pages, page_sizes);
+ sleeve = create_sleeve(ce->vm, obj, pages, page_sizes);
if (IS_ERR(sleeve))
return PTR_ERR(sleeve);
init_irq_work(&work->irq_work, clear_pages_signal_irq_worker);
- dma_fence_init(&work->dma,
- &clear_pages_work_ops,
- &fence_lock,
- i915->mm.unordered_timeline,
- 0);
+ dma_fence_init(&work->dma, &clear_pages_work_ops, &fence_lock, 0, 0);
i915_sw_fence_init(&work->wait, clear_pages_work_notify);
i915_gem_object_lock(obj);
if (err < 0) {
dma_fence_set_error(&work->dma, err);
} else {
- reservation_object_add_excl_fence(obj->base.resv, &work->dma);
+ dma_resv_add_excl_fence(obj->base.resv, &work->dma);
err = 0;
}
i915_gem_object_unlock(obj);