/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Changbin Du <changbin.du@intel.com>
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 */

#include <linux/kthread.h>

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"

#include "i915_drv.h"
#include "gvt.h"

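/* Byte offset of register @x within the execlist ring context image. */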
#define RING_CTX_OFF(x) \
        offsetof(struct execlist_ring_context, x)

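/*
 * The ring context stores the PDP root pointers as mmio/value pairs from
 * PDP3_UDW down to PDP0_LDW, while pdp[] holds the eight dwords in
 * ascending order; hence the pdp[7 - i] reversal below.
 */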
static void set_context_pdp_root_pointer(
                struct execlist_ring_context *ring_context,
                u32 pdp[8])
{
        int i;

        for (i = 0; i < 8; i++)
                ring_context->pdps[i].val = pdp[7 - i];
}

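/*
 * Point the shadow ring context at the shadow PPGTT root tables, so the
 * hardware walks GVT's shadow page tables rather than the guest's own.
 */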
static void update_shadow_pdps(struct intel_vgpu_workload *workload)
{
        struct drm_i915_gem_object *ctx_obj =
                workload->req->hw_context->state->obj;
        struct execlist_ring_context *shadow_ring_context;
        struct page *page;

        if (WARN_ON(!workload->shadow_mm))
                return;

        if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount)))
                return;

        page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
        shadow_ring_context = kmap(page);
        set_context_pdp_root_pointer(shadow_ring_context,
                        (void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
        kunmap(page);
}

/*
 * When populating the shadow context from the guest, we should not override
 * OA-related registers, so that guest OA configs do not clobber them. This
 * makes it possible to capture OA data from the host for both the host and
 * the guests.
 */
static void sr_oa_regs(struct intel_vgpu_workload *workload,
                u32 *reg_state, bool save)
{
        struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
        u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
        u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
        int i = 0;
        u32 flex_mmio[] = {
                i915_mmio_reg_offset(EU_PERF_CNTL0),
                i915_mmio_reg_offset(EU_PERF_CNTL1),
                i915_mmio_reg_offset(EU_PERF_CNTL2),
                i915_mmio_reg_offset(EU_PERF_CNTL3),
                i915_mmio_reg_offset(EU_PERF_CNTL4),
                i915_mmio_reg_offset(EU_PERF_CNTL5),
                i915_mmio_reg_offset(EU_PERF_CNTL6),
        };

        if (workload->ring_id != RCS0)
                return;

        if (save) {
                workload->oactxctrl = reg_state[ctx_oactxctrl + 1];

                for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
                        u32 state_offset = ctx_flexeu0 + i * 2;

                        workload->flex_mmio[i] = reg_state[state_offset + 1];
                }
        } else {
                reg_state[ctx_oactxctrl] =
                        i915_mmio_reg_offset(GEN8_OACTXCONTROL);
                reg_state[ctx_oactxctrl + 1] = workload->oactxctrl;

                for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
                        u32 state_offset = ctx_flexeu0 + i * 2;
                        u32 mmio = flex_mmio[i];

                        reg_state[state_offset] = mmio;
                        reg_state[state_offset + 1] = workload->flex_mmio[i];
                }
        }
}

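/*
 * Fill the shadow context image from the guest's ring context: copy the
 * ring/context control registers (with their mask bits forced on), the
 * remainder of the first context page, and then the guest's remaining
 * context pages.
 */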
static int populate_shadow_context(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
        int ring_id = workload->ring_id;
        struct drm_i915_gem_object *ctx_obj =
                workload->req->hw_context->state->obj;
        struct execlist_ring_context *shadow_ring_context;
        struct page *page;
        void *dst;
        unsigned long context_gpa, context_page_num;
        int i;

        page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
        shadow_ring_context = kmap(page);

        sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
#define COPY_REG(name) \
        intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
                + RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
#define COPY_REG_MASKED(name) {\
                intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
                        + RING_CTX_OFF(name.val),\
                        &shadow_ring_context->name.val, 4);\
                shadow_ring_context->name.val |= 0xffff << 16;\
        }

        COPY_REG_MASKED(ctx_ctrl);
        COPY_REG(ctx_timestamp);

        if (ring_id == RCS0) {
                COPY_REG(bb_per_ctx_ptr);
                COPY_REG(rcs_indirect_ctx);
                COPY_REG(rcs_indirect_ctx_offset);
        }
#undef COPY_REG
#undef COPY_REG_MASKED

        intel_gvt_hypervisor_read_gpa(vgpu,
                        workload->ring_context_gpa +
                        sizeof(*shadow_ring_context),
                        (void *)shadow_ring_context +
                        sizeof(*shadow_ring_context),
                        I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

        sr_oa_regs(workload, (u32 *)shadow_ring_context, false);

        /* A restore-inhibit context will not load the remaining pages. */
        if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val)) {
                kunmap(page);
                return 0;
        }
        kunmap(page);

        gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
                      workload->ctx_desc.lrca);

        context_page_num = gvt->dev_priv->engine[ring_id]->context_size;

        context_page_num = context_page_num >> PAGE_SHIFT;

        if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS0)
                context_page_num = 19;

        i = 2;
        while (i < context_page_num) {
                context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
                                (u32)((workload->ctx_desc.lrca + i) <<
                                I915_GTT_PAGE_SHIFT));
                if (context_gpa == INTEL_GVT_INVALID_ADDR) {
                        gvt_vgpu_err("Invalid guest context descriptor\n");
                        return -EFAULT;
                }

                page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
                dst = kmap(page);
                intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
                                I915_GTT_PAGE_SIZE);
                kunmap(page);
                i++;
        }
        return 0;
}

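/*
 * GVT submits its shadow requests on contexts created with forced single
 * submission, so that flag distinguishes them from native i915 requests.
 */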
static inline bool is_gvt_request(struct i915_request *req)
{
        return i915_gem_context_force_single_submission(req->gem_context);
}

static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
{
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        u32 ring_base = dev_priv->engine[ring_id]->mmio_base;
        i915_reg_t reg;

        reg = RING_INSTDONE(ring_base);
        vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
        reg = RING_ACTHD(ring_base);
        vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
        reg = RING_ACTHD_UDW(ring_base);
        vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
}

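/*
 * Context status notifier: on schedule-in/out of a shadow context, switch
 * the engine's MMIO state between host and vGPU owners and track whether
 * the shadow context is currently active on the hardware.
 */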
static int shadow_context_status_change(struct notifier_block *nb,
                unsigned long action, void *data)
{
        struct i915_request *req = data;
        struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
                                shadow_ctx_notifier_block[req->engine->id]);
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        enum intel_engine_id ring_id = req->engine->id;
        struct intel_vgpu_workload *workload;
        unsigned long flags;

        if (!is_gvt_request(req)) {
                spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
                if (action == INTEL_CONTEXT_SCHEDULE_IN &&
                    scheduler->engine_owner[ring_id]) {
                        /* Switch ring from vGPU to host. */
                        intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
                                              NULL, ring_id);
                        scheduler->engine_owner[ring_id] = NULL;
                }
                spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);

                return NOTIFY_OK;
        }

        workload = scheduler->current_workload[ring_id];
        if (unlikely(!workload))
                return NOTIFY_OK;

        switch (action) {
        case INTEL_CONTEXT_SCHEDULE_IN:
                spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
                if (workload->vgpu != scheduler->engine_owner[ring_id]) {
                        /* Switch ring from host to vGPU or vGPU to vGPU. */
                        intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
                                              workload->vgpu, ring_id);
                        scheduler->engine_owner[ring_id] = workload->vgpu;
                } else
                        gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
                                      ring_id, workload->vgpu->id);
                spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
                atomic_set(&workload->shadow_ctx_active, 1);
                break;
        case INTEL_CONTEXT_SCHEDULE_OUT:
                save_ring_hw_state(workload->vgpu, ring_id);
                atomic_set(&workload->shadow_ctx_active, 0);
                break;
        case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
                save_ring_hw_state(workload->vgpu, ring_id);
                break;
        default:
                WARN_ON(1);
                return NOTIFY_OK;
        }
        wake_up(&workload->shadow_ctx_status_wq);
        return NOTIFY_OK;
}

static void
shadow_context_descriptor_update(struct intel_context *ce,
                struct intel_vgpu_workload *workload)
{
        u64 desc = ce->lrc_desc;

        /*
         * Update bits 0-11 of the context descriptor which includes flags
         * like GEN8_CTX_* cached in desc_template
         */
        desc &= U64_MAX << 12;
        desc |= ce->gem_context->desc_template & ((1ULL << 12) - 1);

        desc &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
        desc |= workload->ctx_desc.addressing_mode <<
                GEN8_CTX_ADDRESSING_MODE_SHIFT;

        ce->lrc_desc = desc;
}

static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        struct i915_request *req = workload->req;
        void *shadow_ring_buffer_va;
        u32 *cs;
        int err;

        if (IS_GEN(req->i915, 9) && is_inhibit_context(req->hw_context))
                intel_vgpu_restore_inhibit_context(vgpu, req);

        /*
         * To track whether a request has started on HW, we can emit a
         * breadcrumb at the beginning of the request and check its
         * timeline's HWSP to see if the breadcrumb has advanced past the
         * start of this request. The request must carry an init breadcrumb
         * if its timeline has has_init_breadcrumb set, or the scheduler
         * might read a wrong state of it during reset. Since requests from
         * GVT always set the has_init_breadcrumb flag, emit the init
         * breadcrumb for all requests here.
         */
        if (req->engine->emit_init_breadcrumb) {
                err = req->engine->emit_init_breadcrumb(req);
                if (err) {
                        gvt_vgpu_err("fail to emit init breadcrumb\n");
                        return err;
                }
        }

        /* allocate shadow ring buffer */
        cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
        if (IS_ERR(cs)) {
                gvt_vgpu_err("fail to alloc size =%ld shadow ring buffer\n",
                             workload->rb_len);
                return PTR_ERR(cs);
        }

        shadow_ring_buffer_va = workload->shadow_ring_buffer_va;

        /* get shadow ring buffer va */
        workload->shadow_ring_buffer_va = cs;

        memcpy(cs, shadow_ring_buffer_va,
               workload->rb_len);

        cs += workload->rb_len / sizeof(u32);
        intel_ring_advance(workload->req, cs);

        return 0;
}

static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
        if (!wa_ctx->indirect_ctx.obj)
                return;

        i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
        i915_gem_object_put(wa_ctx->indirect_ctx.obj);

        wa_ctx->indirect_ctx.obj = NULL;
        wa_ctx->indirect_ctx.shadow_va = NULL;
}

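/*
 * Redirect the i915 shadow context's PPGTT root pointers at the shadow
 * page tables maintained by GVT, so the workload executes against the
 * translated (shadowed) guest address space.
 */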
static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
                struct i915_gem_context *ctx)
{
        struct intel_vgpu_mm *mm = workload->shadow_mm;
        struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ctx->vm);
        int i = 0;

        if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed)
                return -EINVAL;

        if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
                px_dma(&ppgtt->pml4) = mm->ppgtt_mm.shadow_pdps[0];
        } else {
                for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) {
                        px_dma(ppgtt->pdp.page_directory[i]) =
                                mm->ppgtt_mm.shadow_pdps[i];
                }
        }

        return 0;
}

static int
intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        struct i915_request *rq;

        lockdep_assert_held(&dev_priv->drm.struct_mutex);

        if (workload->req)
                return 0;

        rq = i915_request_create(s->shadow[workload->ring_id]);
        if (IS_ERR(rq)) {
                gvt_vgpu_err("fail to allocate gem request\n");
                return PTR_ERR(rq);
        }

        workload->req = i915_request_get(rq);
        return 0;
}

/**
 * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
 * shadowing it as well, including the ring buffer, wa_ctx and ctx.
 * @workload: an abstract entity for each execlist submission.
 *
 * This function is called before the workload is submitted to i915, to make
 * sure the content of the workload is valid.
 */
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        int ret;

        lockdep_assert_held(&dev_priv->drm.struct_mutex);

        if (workload->shadow)
                return 0;

        if (!test_and_set_bit(workload->ring_id, s->shadow_ctx_desc_updated))
                shadow_context_descriptor_update(s->shadow[workload->ring_id],
                                                 workload);

        ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
        if (ret)
                return ret;

        if (workload->ring_id == RCS0 && workload->wa_ctx.indirect_ctx.size) {
                ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
                if (ret)
                        goto err_shadow;
        }

        workload->shadow = true;
        return 0;
err_shadow:
        release_shadow_wa_ctx(&workload->wa_ctx);
        return ret;
}

static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload);

static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
        struct intel_gvt *gvt = workload->vgpu->gvt;
        const int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
        struct intel_vgpu_shadow_bb *bb;
        int ret;

        list_for_each_entry(bb, &workload->shadow_bb, list) {
                /*
                 * For a privileged batch buffer that is not a wa_ctx,
                 * bb_start_cmd_va was only written into the ring_scan_buffer,
                 * not the real ring address allocated later in
                 * copy_workload_to_ring_buffer. Note that
                 * shadow_ring_buffer_va now points at the real ring buffer
                 * va set up by copy_workload_to_ring_buffer, so rebase the
                 * command va onto it here.
                 */
                if (bb->bb_offset)
                        bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
                                + bb->bb_offset;

                if (bb->ppgtt) {
                        /*
                         * For a non-privileged batch buffer, scan & shadow is
                         * only for debugging purposes, so the contents of the
                         * shadow bb are the same as the original bb. Rather
                         * than switching to the shadow bb's gma address, use
                         * the original batch buffer's gma address directly
                         * and send the original bb to hardware.
                         */
                        if (bb->clflush & CLFLUSH_AFTER) {
                                drm_clflush_virt_range(bb->va,
                                                bb->obj->base.size);
                                bb->clflush &= ~CLFLUSH_AFTER;
                        }
                        i915_gem_object_finish_access(bb->obj);
                        bb->accessing = false;
                } else {
                        bb->vma = i915_gem_object_ggtt_pin(bb->obj,
                                        NULL, 0, 0, 0);
                        if (IS_ERR(bb->vma)) {
                                ret = PTR_ERR(bb->vma);
                                goto err;
                        }

                        /* relocate shadow batch buffer */
                        bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
                        if (gmadr_bytes == 8)
                                bb->bb_start_cmd_va[2] = 0;

                        /* No one is going to touch shadow bb from now on. */
                        if (bb->clflush & CLFLUSH_AFTER) {
                                drm_clflush_virt_range(bb->va,
                                                bb->obj->base.size);
                                bb->clflush &= ~CLFLUSH_AFTER;
                        }

                        ret = i915_gem_object_set_to_gtt_domain(bb->obj,
                                                                false);
                        if (ret)
                                goto err;

                        ret = i915_vma_move_to_active(bb->vma,
                                                      workload->req,
                                                      0);
                        if (ret)
                                goto err;

                        i915_gem_object_finish_access(bb->obj);
                        bb->accessing = false;
                }
        }
        return 0;
err:
        release_shadow_batch_buffer(workload);
        return ret;
}

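/*
 * Patch the shadowed per-context and indirect context buffer addresses
 * into the shadow ring context image of the request.
 */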
static void update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
        struct intel_vgpu_workload *workload =
                container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx);
        struct i915_request *rq = workload->req;
        struct execlist_ring_context *shadow_ring_context =
                (struct execlist_ring_context *)rq->hw_context->lrc_reg_state;

        shadow_ring_context->bb_per_ctx_ptr.val =
                (shadow_ring_context->bb_per_ctx_ptr.val &
                (~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
        shadow_ring_context->rcs_indirect_ctx.val =
                (shadow_ring_context->rcs_indirect_ctx.val &
                (~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
}

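/*
 * Pin the shadow indirect context in the GGTT and fix up the addresses in
 * the shadow ring context. The per-ctx pointer appears to be stashed in
 * the cacheline that follows the indirect context in the shadow buffer,
 * which is read out and then cleared below.
 */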
static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
        struct i915_vma *vma;
        unsigned char *per_ctx_va =
                (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
                wa_ctx->indirect_ctx.size;

        if (wa_ctx->indirect_ctx.size == 0)
                return 0;

        vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
                                       0, CACHELINE_BYTES, 0);
        if (IS_ERR(vma))
                return PTR_ERR(vma);

        /* FIXME: we are not tracking our pinned VMA leaving it
         * up to the core to fix up the stray pin_count upon
         * free.
         */

        wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);

        wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
        memset(per_ctx_va, 0, CACHELINE_BYTES);

        update_wa_ctx_2_shadow_ctx(wa_ctx);
        return 0;
}

static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        struct intel_vgpu_shadow_bb *bb, *pos;

        if (list_empty(&workload->shadow_bb))
                return;

        bb = list_first_entry(&workload->shadow_bb,
                        struct intel_vgpu_shadow_bb, list);

        mutex_lock(&dev_priv->drm.struct_mutex);

        list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
                if (bb->obj) {
                        if (bb->accessing)
                                i915_gem_object_finish_access(bb->obj);

                        if (bb->va && !IS_ERR(bb->va))
                                i915_gem_object_unpin_map(bb->obj);

                        if (bb->vma && !IS_ERR(bb->vma)) {
                                i915_vma_unpin(bb->vma);
                                i915_vma_close(bb->vma);
                        }
                        i915_gem_object_put(bb->obj);
                }
                list_del(&bb->list);
                kfree(bb);
        }

        mutex_unlock(&dev_priv->drm.struct_mutex);
}

static int prepare_workload(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        int ret = 0;

        ret = intel_vgpu_pin_mm(workload->shadow_mm);
        if (ret) {
                gvt_vgpu_err("fail to vgpu pin mm\n");
                return ret;
        }

        update_shadow_pdps(workload);

        ret = intel_vgpu_sync_oos_pages(workload->vgpu);
        if (ret) {
                gvt_vgpu_err("fail to vgpu sync oos pages\n");
                goto err_unpin_mm;
        }

        ret = intel_vgpu_flush_post_shadow(workload->vgpu);
        if (ret) {
                gvt_vgpu_err("fail to flush post shadow\n");
                goto err_unpin_mm;
        }

        ret = copy_workload_to_ring_buffer(workload);
        if (ret) {
                gvt_vgpu_err("fail to generate request\n");
                goto err_unpin_mm;
        }

        ret = prepare_shadow_batch_buffer(workload);
        if (ret) {
                gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
                goto err_unpin_mm;
        }

        ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
        if (ret) {
                gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n");
                goto err_shadow_batch;
        }

        if (workload->prepare) {
                ret = workload->prepare(workload);
                if (ret)
                        goto err_shadow_wa_ctx;
        }

        return 0;
err_shadow_wa_ctx:
        release_shadow_wa_ctx(&workload->wa_ctx);
err_shadow_batch:
        release_shadow_batch_buffer(workload);
err_unpin_mm:
        intel_vgpu_unpin_mm(workload->shadow_mm);
        return ret;
}

static int dispatch_workload(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct i915_request *rq;
        int ring_id = workload->ring_id;
        int ret;

        gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
                      ring_id, workload);

        mutex_lock(&vgpu->vgpu_lock);
        mutex_lock(&dev_priv->drm.struct_mutex);

        ret = set_context_ppgtt_from_shadow(workload,
                                            s->shadow[ring_id]->gem_context);
        if (ret < 0) {
                gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
                goto err_req;
        }

        ret = intel_gvt_workload_req_alloc(workload);
        if (ret)
                goto err_req;

        ret = intel_gvt_scan_and_shadow_workload(workload);
        if (ret)
                goto out;

        ret = populate_shadow_context(workload);
        if (ret) {
                release_shadow_wa_ctx(&workload->wa_ctx);
                goto out;
        }

        ret = prepare_workload(workload);
out:
        if (ret) {
                /*
                 * We might still need to add the request with a clean
                 * context in order to retire it properly.
                 */
                rq = fetch_and_zero(&workload->req);
                i915_request_put(rq);
        }

        if (!IS_ERR_OR_NULL(workload->req)) {
                gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
                              ring_id, workload->req);
                i915_request_add(workload->req);
                workload->dispatched = true;
        }
err_req:
        if (ret)
                workload->status = ret;
        mutex_unlock(&dev_priv->drm.struct_mutex);
        mutex_unlock(&vgpu->vgpu_lock);
        return ret;
}

static struct intel_vgpu_workload *pick_next_workload(
                struct intel_gvt *gvt, int ring_id)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload = NULL;

        mutex_lock(&gvt->sched_lock);

        /*
         * no current vgpu / will be scheduled out / no workload
         * bail out
         */
        if (!scheduler->current_vgpu) {
                gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
                goto out;
        }

        if (scheduler->need_reschedule) {
                gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
                goto out;
        }

        if (!scheduler->current_vgpu->active ||
            list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
                goto out;

        /*
         * still have current workload, maybe the workload dispatcher
         * failed to submit it for some reason, resubmit it.
         */
        if (scheduler->current_workload[ring_id]) {
                workload = scheduler->current_workload[ring_id];
                gvt_dbg_sched("ring id %d still have current workload %p\n",
                              ring_id, workload);
                goto out;
        }

        /*
         * pick a workload as current workload
         * once current workload is set, the scheduling policy routines
         * will wait until the current workload is finished when trying to
         * schedule out a vgpu.
         */
        scheduler->current_workload[ring_id] = container_of(
                        workload_q_head(scheduler->current_vgpu, ring_id)->next,
                        struct intel_vgpu_workload, list);

        workload = scheduler->current_workload[ring_id];

        gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);

        atomic_inc(&workload->vgpu->submission.running_workload_num);
out:
        mutex_unlock(&gvt->sched_lock);
        return workload;
}

static void update_guest_context(struct intel_vgpu_workload *workload)
{
        struct i915_request *rq = workload->req;
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
        struct drm_i915_gem_object *ctx_obj = rq->hw_context->state->obj;
        struct execlist_ring_context *shadow_ring_context;
        struct page *page;
        void *src;
        unsigned long context_gpa, context_page_num;
        int i;

        gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
                      workload->ctx_desc.lrca);

        context_page_num = rq->engine->context_size;
        context_page_num = context_page_num >> PAGE_SHIFT;

        if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS0)
                context_page_num = 19;

        i = 2;

        while (i < context_page_num) {
                context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
                                (u32)((workload->ctx_desc.lrca + i) <<
                                        I915_GTT_PAGE_SHIFT));
                if (context_gpa == INTEL_GVT_INVALID_ADDR) {
                        gvt_vgpu_err("invalid guest context descriptor\n");
                        return;
                }

                page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
                src = kmap(page);
                intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
                                I915_GTT_PAGE_SIZE);
                kunmap(page);
                i++;
        }

        intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
                RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);

        page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
        shadow_ring_context = kmap(page);

#define COPY_REG(name) \
        intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
                RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

        COPY_REG(ctx_ctrl);
        COPY_REG(ctx_timestamp);

#undef COPY_REG

        intel_gvt_hypervisor_write_gpa(vgpu,
                        workload->ring_context_gpa +
                        sizeof(*shadow_ring_context),
                        (void *)shadow_ring_context +
                        sizeof(*shadow_ring_context),
                        I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

        kunmap(page);
}

void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
                                intel_engine_mask_t engine_mask)
{
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        struct intel_engine_cs *engine;
        struct intel_vgpu_workload *pos, *n;
        intel_engine_mask_t tmp;

        /* free the unsubmitted workloads in the queues. */
        for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
                list_for_each_entry_safe(pos, n,
                        &s->workload_q_head[engine->id], list) {
                        list_del_init(&pos->list);
                        intel_vgpu_destroy_workload(pos);
                }
                clear_bit(engine->id, s->shadow_ctx_desc_updated);
        }
}

static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload =
                scheduler->current_workload[ring_id];
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct i915_request *rq = workload->req;
        int event;

        mutex_lock(&vgpu->vgpu_lock);
        mutex_lock(&gvt->sched_lock);

        /*
         * For a workload with a request, wait for the context switch to
         * make sure the request is completed.
         * For a workload without a request, complete it directly.
         */
        if (rq) {
                wait_event(workload->shadow_ctx_status_wq,
                           !atomic_read(&workload->shadow_ctx_active));

                /*
                 * If this request caused a GPU hang, req->fence.error will
                 * be set to -EIO. Propagate -EIO into the workload status,
                 * so that no context switch interrupt is injected into the
                 * guest for a hung request.
                 */
                if (likely(workload->status == -EINPROGRESS)) {
                        if (workload->req->fence.error == -EIO)
                                workload->status = -EIO;
                        else
                                workload->status = 0;
                }

                if (!workload->status &&
                    !(vgpu->resetting_eng & BIT(ring_id))) {
                        update_guest_context(workload);

                        for_each_set_bit(event, workload->pending_events,
                                         INTEL_GVT_EVENT_MAX)
                                intel_vgpu_trigger_virtual_event(vgpu, event);
                }

                i915_request_put(fetch_and_zero(&workload->req));
        }

        gvt_dbg_sched("ring id %d complete workload %p status %d\n",
                      ring_id, workload, workload->status);

        scheduler->current_workload[ring_id] = NULL;

        list_del_init(&workload->list);

        if (workload->status || vgpu->resetting_eng & BIT(ring_id)) {
                /*
                 * If workload->status is not successful, the GPU hung or
                 * something went wrong in i915/GVT, and GVT won't inject a
                 * context switch interrupt into the guest. To the guest,
                 * this error is effectively a vGPU hang, so emulate one:
                 * if there are pending workloads already submitted by the
                 * guest, clean them up the way real hardware would.
                 *
                 * If we are in the middle of an engine reset, the pending
                 * workloads won't be submitted to the hardware and will be
                 * cleaned up later during the reset, so cleaning them up
                 * here has no impact.
                 */
                intel_vgpu_clean_workloads(vgpu, BIT(ring_id));
        }

        workload->complete(workload);

        atomic_dec(&s->running_workload_num);
        wake_up(&scheduler->workload_complete_wq);

        if (gvt->scheduler.need_reschedule)
                intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);

        mutex_unlock(&gvt->sched_lock);
        mutex_unlock(&vgpu->vgpu_lock);
}

struct workload_thread_param {
        struct intel_gvt *gvt;
        int ring_id;
};

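/*
 * Per-engine scheduler thread: pick the next workload from the current
 * vGPU's queue, dispatch it to i915, wait for its request to complete,
 * then hand the results back to the guest via complete_current_workload().
 */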
static int workload_thread(void *priv)
{
        struct workload_thread_param *p = (struct workload_thread_param *)priv;
        struct intel_gvt *gvt = p->gvt;
        int ring_id = p->ring_id;
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload = NULL;
        struct intel_vgpu *vgpu = NULL;
        int ret;
        bool need_force_wake = (INTEL_GEN(gvt->dev_priv) >= 9);
        DEFINE_WAIT_FUNC(wait, woken_wake_function);

        gvt_dbg_core("workload thread for ring %d started\n", ring_id);

        while (!kthread_should_stop()) {
                add_wait_queue(&scheduler->waitq[ring_id], &wait);
                do {
                        workload = pick_next_workload(gvt, ring_id);
                        if (workload)
                                break;
                        wait_woken(&wait, TASK_INTERRUPTIBLE,
                                   MAX_SCHEDULE_TIMEOUT);
                } while (!kthread_should_stop());
                remove_wait_queue(&scheduler->waitq[ring_id], &wait);

                if (!workload)
                        break;

                gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
                              workload->ring_id, workload,
                              workload->vgpu->id);

                gvt_dbg_sched("ring id %d will dispatch workload %p\n",
                              workload->ring_id, workload);

                if (need_force_wake)
                        intel_uncore_forcewake_get(&gvt->dev_priv->uncore,
                                                   FORCEWAKE_ALL);

                ret = dispatch_workload(workload);

                if (ret) {
                        vgpu = workload->vgpu;
                        gvt_vgpu_err("fail to dispatch workload, skip\n");
                        goto complete;
                }

                gvt_dbg_sched("ring id %d wait workload %p\n",
                              workload->ring_id, workload);
                i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);

complete:
                gvt_dbg_sched("will complete workload %p, status: %d\n",
                              workload, workload->status);

                complete_current_workload(gvt, ring_id);

                if (need_force_wake)
                        intel_uncore_forcewake_put(&gvt->dev_priv->uncore,
                                                   FORCEWAKE_ALL);

                if (ret && (vgpu_is_vm_unhealthy(ret)))
                        enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
        }
        return 0;
}

void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
{
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;

        if (atomic_read(&s->running_workload_num)) {
                gvt_dbg_sched("wait vgpu idle\n");

                wait_event(scheduler->workload_complete_wq,
                           !atomic_read(&s->running_workload_num));
        }
}

void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_engine_cs *engine;
        enum intel_engine_id i;

        gvt_dbg_core("clean workload scheduler\n");

        for_each_engine(engine, gvt->dev_priv, i) {
                atomic_notifier_chain_unregister(
                                &engine->context_status_notifier,
                                &gvt->shadow_ctx_notifier_block[i]);
                kthread_stop(scheduler->thread[i]);
        }
}

int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct workload_thread_param *param = NULL;
        struct intel_engine_cs *engine;
        enum intel_engine_id i;
        int ret;

        gvt_dbg_core("init workload scheduler\n");

        init_waitqueue_head(&scheduler->workload_complete_wq);

        for_each_engine(engine, gvt->dev_priv, i) {
                init_waitqueue_head(&scheduler->waitq[i]);

                param = kzalloc(sizeof(*param), GFP_KERNEL);
                if (!param) {
                        ret = -ENOMEM;
                        goto err;
                }

                param->gvt = gvt;
                param->ring_id = i;

                scheduler->thread[i] = kthread_run(workload_thread, param,
                        "gvt workload %d", i);
                if (IS_ERR(scheduler->thread[i])) {
                        gvt_err("fail to create workload thread\n");
                        ret = PTR_ERR(scheduler->thread[i]);
                        goto err;
                }

                gvt->shadow_ctx_notifier_block[i].notifier_call =
                                        shadow_context_status_change;
                atomic_notifier_chain_register(&engine->context_status_notifier,
                                        &gvt->shadow_ctx_notifier_block[i]);
        }
        return 0;
err:
        intel_gvt_clean_workload_scheduler(gvt);
        kfree(param);
        param = NULL;
        return ret;
}

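/*
 * GVT redirects the shadow context's PPGTT roots at its own shadow page
 * tables (see set_context_ppgtt_from_shadow()); save the host i915 values
 * at setup time and restore them at teardown so the context is handed
 * back to i915 intact.
 */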
static void
i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s,
                struct i915_ppgtt *ppgtt)
{
        int i;

        if (i915_vm_is_4lvl(&ppgtt->vm)) {
                px_dma(&ppgtt->pml4) = s->i915_context_pml4;
        } else {
                for (i = 0; i < GEN8_3LVL_PDPES; i++)
                        px_dma(ppgtt->pdp.page_directory[i]) =
                                        s->i915_context_pdps[i];
        }
}

/**
 * intel_vgpu_clean_submission - free submission-related resource for vGPU
 * @vgpu: a vGPU
 *
 * This function is called when a vGPU is being destroyed.
 *
 */
void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
{
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);

        i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->gem_context->vm));
        for_each_engine(engine, vgpu->gvt->dev_priv, id)
                intel_context_unpin(s->shadow[id]);

        kmem_cache_destroy(s->workloads);
}

/**
 * intel_vgpu_reset_submission - reset submission-related resource for vGPU
 * @vgpu: a vGPU
 * @engine_mask: engines expected to be reset
 *
 * This function is called when a vGPU is being reset.
 *
 */
void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
                intel_engine_mask_t engine_mask)
{
        struct intel_vgpu_submission *s = &vgpu->submission;

        if (!s->active)
                return;

        intel_vgpu_clean_workloads(vgpu, engine_mask);
        s->ops->reset(vgpu, engine_mask);
}

static void
i915_context_ppgtt_root_save(struct intel_vgpu_submission *s,
                struct i915_ppgtt *ppgtt)
{
        int i;

        if (i915_vm_is_4lvl(&ppgtt->vm)) {
                s->i915_context_pml4 = px_dma(&ppgtt->pml4);
        } else {
                for (i = 0; i < GEN8_3LVL_PDPES; i++)
                        s->i915_context_pdps[i] =
                                px_dma(ppgtt->pdp.page_directory[i]);
        }
}

/**
 * intel_vgpu_setup_submission - setup submission-related resource for vGPU
 * @vgpu: a vGPU
 *
 * This function is called when a vGPU is being created.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 *
 */
int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
{
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct intel_engine_cs *engine;
        struct i915_gem_context *ctx;
        enum intel_engine_id i;
        int ret;

        ctx = i915_gem_context_create_gvt(&vgpu->gvt->dev_priv->drm);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        i915_context_ppgtt_root_save(s, i915_vm_to_ppgtt(ctx->vm));

        for_each_engine(engine, vgpu->gvt->dev_priv, i) {
                struct intel_context *ce;

                INIT_LIST_HEAD(&s->workload_q_head[i]);
                s->shadow[i] = ERR_PTR(-EINVAL);

                ce = i915_gem_context_get_engine(ctx, i);
                if (IS_ERR(ce)) {
                        ret = PTR_ERR(ce);
                        goto out_shadow_ctx;
                }

                ret = intel_context_pin(ce);
                intel_context_put(ce);
                if (ret)
                        goto out_shadow_ctx;

                s->shadow[i] = ce;
        }

        bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);

        s->workloads = kmem_cache_create_usercopy("gvt-g_vgpu_workload",
                                                  sizeof(struct intel_vgpu_workload), 0,
                                                  SLAB_HWCACHE_ALIGN,
                                                  offsetof(struct intel_vgpu_workload, rb_tail),
                                                  sizeof_field(struct intel_vgpu_workload, rb_tail),
                                                  NULL);

        if (!s->workloads) {
                ret = -ENOMEM;
                goto out_shadow_ctx;
        }

        atomic_set(&s->running_workload_num, 0);
        bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);

        i915_gem_context_put(ctx);
        return 0;

out_shadow_ctx:
        i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(ctx->vm));
        for_each_engine(engine, vgpu->gvt->dev_priv, i) {
                if (IS_ERR(s->shadow[i]))
                        break;

                intel_context_unpin(s->shadow[i]);
        }
        i915_gem_context_put(ctx);
        return ret;
}

/**
 * intel_vgpu_select_submission_ops - select virtual submission interface
 * @vgpu: a vGPU
 * @engine_mask: either ALL_ENGINES or target engine mask
 * @interface: expected vGPU virtual submission interface
 *
 * This function is called when guest configures submission interface.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 *
 */
int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
                                     intel_engine_mask_t engine_mask,
                                     unsigned int interface)
{
        struct intel_vgpu_submission *s = &vgpu->submission;
        const struct intel_vgpu_submission_ops *ops[] = {
                [INTEL_VGPU_EXECLIST_SUBMISSION] =
                        &intel_vgpu_execlist_submission_ops,
        };
        int ret;

        if (WARN_ON(interface >= ARRAY_SIZE(ops)))
                return -EINVAL;

        if (WARN_ON(interface == 0 && engine_mask != ALL_ENGINES))
                return -EINVAL;

        if (s->active)
                s->ops->clean(vgpu, engine_mask);

        if (interface == 0) {
                s->ops = NULL;
                s->virtual_submission_interface = 0;
                s->active = false;
                gvt_dbg_core("vgpu%d: remove submission ops\n", vgpu->id);
                return 0;
        }

        ret = ops[interface]->init(vgpu, engine_mask);
        if (ret)
                return ret;

        s->ops = ops[interface];
        s->virtual_submission_interface = interface;
        s->active = true;

        gvt_dbg_core("vgpu%d: activate ops [ %s ]\n",
                     vgpu->id, s->ops->name);

        return 0;
}

/**
 * intel_vgpu_destroy_workload - destroy a vGPU workload
 * @workload: workload to destroy
 *
 * This function is called when destroying a vGPU workload.
 *
 */
void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu_submission *s = &workload->vgpu->submission;

        release_shadow_batch_buffer(workload);
        release_shadow_wa_ctx(&workload->wa_ctx);

        if (workload->shadow_mm)
                intel_vgpu_mm_put(workload->shadow_mm);

        kmem_cache_free(s->workloads, workload);
}

static struct intel_vgpu_workload *
alloc_workload(struct intel_vgpu *vgpu)
{
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct intel_vgpu_workload *workload;

        workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
        if (!workload)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&workload->list);
        INIT_LIST_HEAD(&workload->shadow_bb);

        init_waitqueue_head(&workload->shadow_ctx_status_wq);
        atomic_set(&workload->shadow_ctx_active, 0);

        workload->status = -EINPROGRESS;
        workload->vgpu = vgpu;

        return workload;
}

#define RING_CTX_OFF(x) \
        offsetof(struct execlist_ring_context, x)

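/*
 * Read the eight PDP dwords from the guest ring context. The guest image
 * stores them from PDP3_UDW down to PDP0_LDW, so they are reversed into
 * pdp[] here (see set_context_pdp_root_pointer() for the write-back side).
 */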
static void read_guest_pdps(struct intel_vgpu *vgpu,
                u64 ring_context_gpa, u32 pdp[8])
{
        u64 gpa;
        int i;

        gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);

        for (i = 0; i < 8; i++)
                intel_gvt_hypervisor_read_gpa(vgpu,
                                gpa + i * 8, &pdp[7 - i], 4);
}

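/*
 * Resolve the guest's PPGTT for this workload: map the context descriptor's
 * addressing mode to a root table type (legacy 32-bit uses a 3-level walk,
 * legacy 64-bit a 4-level walk) and look up or create the shadow mm.
 */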
static int prepare_mm(struct intel_vgpu_workload *workload)
{
        struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
        struct intel_vgpu_mm *mm;
        struct intel_vgpu *vgpu = workload->vgpu;
        enum intel_gvt_gtt_type root_entry_type;
        u64 pdps[GVT_RING_CTX_NR_PDPS];

        switch (desc->addressing_mode) {
        case 1: /* legacy 32-bit */
                root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
                break;
        case 3: /* legacy 64-bit */
                root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
                break;
        default:
                gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
                return -EINVAL;
        }

        read_guest_pdps(workload->vgpu, workload->ring_context_gpa, (void *)pdps);

        mm = intel_vgpu_get_ppgtt_mm(workload->vgpu, root_entry_type, pdps);
        if (IS_ERR(mm))
                return PTR_ERR(mm);

        workload->shadow_mm = mm;
        return 0;
}

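/*
 * Two descriptors denote the same context when both the context id and the
 * LRCA match; get_last_workload() peeks at the tail of a workload queue.
 */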
#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
                ((a)->lrca == (b)->lrca))

#define get_last_workload(q) \
        (list_empty(q) ? NULL : container_of(q->prev, \
        struct intel_vgpu_workload, list))

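/*
 * Workload lifecycle, in brief: the execlist emulation calls
 * intel_vgpu_create_workload() when the guest submits a context, the
 * workload is queued via intel_vgpu_queue_workload(), a per-ring
 * workload_thread picks it up and dispatches it to i915, and once the
 * request retires, complete_current_workload() writes the results back
 * to the guest.
 */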
/**
 * intel_vgpu_create_workload - create a vGPU workload
 * @vgpu: a vGPU
 * @ring_id: ring index
 * @desc: a guest context descriptor
 *
 * This function is called when creating a vGPU workload.
 *
 * Returns:
 * struct intel_vgpu_workload * on success, negative error code in
 * pointer if failed.
 *
 */
struct intel_vgpu_workload *
intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
                           struct execlist_ctx_descriptor_format *desc)
{
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct list_head *q = workload_q_head(vgpu, ring_id);
        struct intel_vgpu_workload *last_workload = get_last_workload(q);
        struct intel_vgpu_workload *workload = NULL;
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        u64 ring_context_gpa;
        u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
        int ret;

        ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
                        (u32)((desc->lrca + 1) << I915_GTT_PAGE_SHIFT));
        if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
                gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
                return ERR_PTR(-EINVAL);
        }

        intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                        RING_CTX_OFF(ring_header.val), &head, 4);

        intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                        RING_CTX_OFF(ring_tail.val), &tail, 4);

        head &= RB_HEAD_OFF_MASK;
        tail &= RB_TAIL_OFF_MASK;

        if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
                gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
                gvt_dbg_el("ctx head %x real head %lx\n", head,
                           last_workload->rb_tail);
                /*
                 * cannot use guest context head pointer here,
                 * as it might not be updated at this time
                 */
                head = last_workload->rb_tail;
        }

        gvt_dbg_el("ring id %d begin a new workload\n", ring_id);

        /* record some ring buffer register values for scan and shadow */
        intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                        RING_CTX_OFF(rb_start.val), &start, 4);
        intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                        RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
        intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                        RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);

        workload = alloc_workload(vgpu);
        if (IS_ERR(workload))
                return workload;

        workload->ring_id = ring_id;
        workload->ctx_desc = *desc;
        workload->ring_context_gpa = ring_context_gpa;
        workload->rb_head = head;
        workload->rb_tail = tail;
        workload->rb_start = start;
        workload->rb_ctl = ctl;

        if (ring_id == RCS0) {
                intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                        RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
                intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                        RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);

                workload->wa_ctx.indirect_ctx.guest_gma =
                        indirect_ctx & INDIRECT_CTX_ADDR_MASK;
                workload->wa_ctx.indirect_ctx.size =
                        (indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
                        CACHELINE_BYTES;
                workload->wa_ctx.per_ctx.guest_gma =
                        per_ctx & PER_CTX_ADDR_MASK;
                workload->wa_ctx.per_ctx.valid = per_ctx & 1;
        }

        gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
                   workload, ring_id, head, tail, start, ctl);

        ret = prepare_mm(workload);
        if (ret) {
                kmem_cache_free(s->workloads, workload);
                return ERR_PTR(ret);
        }

        /*
         * Only scan and shadow the first workload in the queue
         * as there is only one pre-allocated buf-obj for shadow.
         */
        if (list_empty(workload_q_head(vgpu, ring_id))) {
                intel_runtime_pm_get(dev_priv);
                mutex_lock(&dev_priv->drm.struct_mutex);
                ret = intel_gvt_scan_and_shadow_workload(workload);
                mutex_unlock(&dev_priv->drm.struct_mutex);
                intel_runtime_pm_put_unchecked(dev_priv);
        }

        if (ret) {
                if (vgpu_is_vm_unhealthy(ret))
                        enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
                intel_vgpu_destroy_workload(workload);
                return ERR_PTR(ret);
        }

        return workload;
}

/**
 * intel_vgpu_queue_workload - Queue a vGPU workload
 * @workload: the workload to queue in
 */
void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
{
        list_add_tail(&workload->list,
                      workload_q_head(workload->vgpu, workload->ring_id));
        intel_gvt_kick_schedule(workload->vgpu->gvt);
        wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->ring_id]);
}