From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Tue, 2 Aug 2016 21:50:39 +0000 (+0100)
Subject: drm/i915: Simplify calling engine->sync_to
X-Git-Tag: v4.9-rc1~41^2~40^2~97
X-Git-Url: https://asedeno.scripts.mit.edu/gitweb/?a=commitdiff_plain;h=ddf07be7a2aeb80aa159a7eeade01b7b5e1e3e43;p=linux.git

drm/i915: Simplify calling engine->sync_to

Since requests can no longer be generated as a side-effect of
intel_ring_begin(), we know that the seqno will be unchanged during
ring-emission. This predictability then means we do not have to check
for the seqno wrapping around whilst emitting the semaphore for
engine->sync_to().

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1469432687-22756-31-git-send-email-chris@chris-wilson.co.uk
Link: http://patchwork.freedesktop.org/patch/msgid/1470174640-18242-22-git-send-email-chris@chris-wilson.co.uk
---

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 49ce21a82c29..9b18b9c875ec 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1757,7 +1757,7 @@ struct drm_i915_private {
 	struct i915_gem_context *kernel_context;
 	struct intel_engine_cs engine[I915_NUM_ENGINES];
 	struct drm_i915_gem_object *semaphore_obj;
-	uint32_t last_seqno, next_seqno;
+	u32 next_seqno;
 
 	struct drm_dma_handle *status_page_dmah;
 	struct resource mch_res;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d79b949fb4c4..3df6b485d2d4 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2867,22 +2867,15 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
 		i915_gem_object_retire_request(obj, from);
 	} else {
 		int idx = intel_engine_sync_index(from->engine, to->engine);
-		u32 seqno = i915_gem_request_get_seqno(from);
-
-		if (seqno <= from->engine->semaphore.sync_seqno[idx])
+		if (from->fence.seqno <= from->engine->semaphore.sync_seqno[idx])
 			return 0;
 
 		trace_i915_gem_ring_sync_to(to, from);
-		ret = to->engine->semaphore.sync_to(to, from->engine, seqno);
+		ret = to->engine->semaphore.sync_to(to, from);
 		if (ret)
 			return ret;
 
-		/* We use last_read_req because sync_to()
-		 * might have just caused seqno wrap under
-		 * the radar.
-		 */
-		from->engine->semaphore.sync_seqno[idx] =
-			i915_gem_request_get_seqno(obj->last_read_req[from->engine->id]);
+		from->engine->semaphore.sync_seqno[idx] = from->fence.seqno;
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index e378eb61979b..11c19e7f82fa 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -264,14 +264,7 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
 	if (ret)
 		return ret;
 
-	/* Carefully set the last_seqno value so that wrap
-	 * detection still works
-	 */
 	dev_priv->next_seqno = seqno;
-	dev_priv->last_seqno = seqno - 1;
-	if (dev_priv->last_seqno == 0)
-		dev_priv->last_seqno--;
-
 	return 0;
 }
 
@@ -288,7 +281,7 @@ static int i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
 		dev_priv->next_seqno = 1;
 	}
 
-	*seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
+	*seqno = dev_priv->next_seqno++;
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 0b6f12c36e58..51f3123b500c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1496,12 +1496,6 @@ static int gen8_render_emit_request(struct drm_i915_gem_request *req)
 	return 0;
 }
 
-static inline bool i915_gem_has_seqno_wrapped(struct drm_i915_private *dev_priv,
-					      u32 seqno)
-{
-	return dev_priv->last_seqno < seqno;
-}
-
 /**
  * intel_ring_sync - sync the waiter to the signaller on seqno
  *
@@ -1511,24 +1505,23 @@ static inline bool i915_gem_has_seqno_wrapped(struct drm_i915_private *dev_priv,
  */
 
 static int
-gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
-	       struct intel_engine_cs *signaller,
-	       u32 seqno)
+gen8_ring_sync(struct drm_i915_gem_request *wait,
+	       struct drm_i915_gem_request *signal)
 {
-	struct intel_ring *waiter = waiter_req->ring;
-	struct drm_i915_private *dev_priv = waiter_req->i915;
-	u64 offset = GEN8_WAIT_OFFSET(waiter_req->engine, signaller->id);
+	struct intel_ring *waiter = wait->ring;
+	struct drm_i915_private *dev_priv = wait->i915;
+	u64 offset = GEN8_WAIT_OFFSET(wait->engine, signal->engine->id);
 	struct i915_hw_ppgtt *ppgtt;
 	int ret;
 
-	ret = intel_ring_begin(waiter_req, 4);
+	ret = intel_ring_begin(wait, 4);
 	if (ret)
 		return ret;
 
 	intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
 				MI_SEMAPHORE_GLOBAL_GTT |
 				MI_SEMAPHORE_SAD_GTE_SDD);
-	intel_ring_emit(waiter, seqno);
+	intel_ring_emit(waiter, signal->fence.seqno);
 	intel_ring_emit(waiter, lower_32_bits(offset));
 	intel_ring_emit(waiter, upper_32_bits(offset));
 	intel_ring_advance(waiter);
@@ -1538,48 +1531,37 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
 	 * We do this on the i915_switch_context() following the wait and
 	 * before the dispatch.
 	 */
-	ppgtt = waiter_req->ctx->ppgtt;
-	if (ppgtt && waiter_req->engine->id != RCS)
-		ppgtt->pd_dirty_rings |= intel_engine_flag(waiter_req->engine);
+	ppgtt = wait->ctx->ppgtt;
+	if (ppgtt && wait->engine->id != RCS)
+		ppgtt->pd_dirty_rings |= intel_engine_flag(wait->engine);
 	return 0;
 }
 
 static int
-gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
-	       struct intel_engine_cs *signaller,
-	       u32 seqno)
+gen6_ring_sync(struct drm_i915_gem_request *wait,
+	       struct drm_i915_gem_request *signal)
 {
-	struct intel_ring *waiter = waiter_req->ring;
+	struct intel_ring *waiter = wait->ring;
 	u32 dw1 = MI_SEMAPHORE_MBOX |
 		  MI_SEMAPHORE_COMPARE |
 		  MI_SEMAPHORE_REGISTER;
-	u32 wait_mbox = signaller->semaphore.mbox.wait[waiter_req->engine->id];
+	u32 wait_mbox = signal->engine->semaphore.mbox.wait[wait->engine->id];
 	int ret;
 
-	/* Throughout all of the GEM code, seqno passed implies our current
-	 * seqno is >= the last seqno executed. However for hardware the
-	 * comparison is strictly greater than.
-	 */
-	seqno -= 1;
-
 	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
 
-	ret = intel_ring_begin(waiter_req, 4);
+	ret = intel_ring_begin(wait, 4);
 	if (ret)
 		return ret;
 
-	/* If seqno wrap happened, omit the wait with no-ops */
-	if (likely(!i915_gem_has_seqno_wrapped(waiter_req->i915, seqno))) {
-		intel_ring_emit(waiter, dw1 | wait_mbox);
-		intel_ring_emit(waiter, seqno);
-		intel_ring_emit(waiter, 0);
-		intel_ring_emit(waiter, MI_NOOP);
-	} else {
-		intel_ring_emit(waiter, MI_NOOP);
-		intel_ring_emit(waiter, MI_NOOP);
-		intel_ring_emit(waiter, MI_NOOP);
-		intel_ring_emit(waiter, MI_NOOP);
-	}
+	intel_ring_emit(waiter, dw1 | wait_mbox);
+	/* Throughout all of the GEM code, seqno passed implies our current
+	 * seqno is >= the last seqno executed. However for hardware the
+	 * comparison is strictly greater than.
+	 */
+	intel_ring_emit(waiter, signal->fence.seqno - 1);
+	intel_ring_emit(waiter, 0);
+	intel_ring_emit(waiter, MI_NOOP);
 	intel_ring_advance(waiter);
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index c3d4b88615b3..c0b7ce325e86 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -277,9 +277,8 @@ struct intel_engine_cs {
 		};
 
 		/* AKA wait() */
-		int	(*sync_to)(struct drm_i915_gem_request *to_req,
-				   struct intel_engine_cs *from,
-				   u32 seqno);
+		int	(*sync_to)(struct drm_i915_gem_request *to,
+				   struct drm_i915_gem_request *from);
 		int	(*signal)(struct drm_i915_gem_request *signaller_req);
 	} semaphore;
 
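As an aside on the gen6_ring_sync() change above: the snippet below is an
illustrative, self-contained sketch (not kernel code; semaphore_released()
and the values are invented for the example) of why the wait packet now
carries signal->fence.seqno - 1. The gen6 MI_SEMAPHORE_MBOX compare releases
the waiter only when the signaller's mailbox value is strictly greater than
the emitted value, whereas GEM treats a request as complete once the current
seqno is >= its seqno; subtracting one reconciles the two conventions. The
old wrap handling disappears because, as the commit message explains, the
seqno can no longer change between request construction and ring emission.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

/*
 * Models the gen6 hardware compare: the semaphore wait completes when the
 * signaller's mailbox value is strictly greater than the value emitted in
 * the MI_SEMAPHORE_MBOX packet.
 */
static bool semaphore_released(u32 mbox_value, u32 emitted_wait_value)
{
	return mbox_value > emitted_wait_value;
}

int main(void)
{
	u32 signal_seqno = 42;			/* seqno of the signalling request */
	u32 emitted = signal_seqno - 1;		/* value the waiter emits, as in gen6_ring_sync() */

	/* Mailbox still holds an older seqno: waiter stays blocked. */
	printf("mbox=41 released=%d\n", semaphore_released(41, emitted));
	/* Mailbox has advanced to the request's seqno: waiter is released,
	 * i.e. ">=" semantics achieved with a ">" compare.
	 */
	printf("mbox=42 released=%d\n", semaphore_released(42, emitted));
	return 0;
}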