/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"

#define _EL_OFFSET_STATUS       0x234
#define _EL_OFFSET_STATUS_BUF   0x370
#define _EL_OFFSET_STATUS_PTR   0x3A0

#define execlist_ring_mmio(gvt, ring_id, offset) \
        (gvt->dev_priv->engine[ring_id]->mmio_base + (offset))

#define valid_context(ctx) ((ctx)->valid)
#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
                ((a)->lrca == (b)->lrca))

static int context_switch_events[] = {
        [RCS] = RCS_AS_CONTEXT_SWITCH,
        [BCS] = BCS_AS_CONTEXT_SWITCH,
        [VCS] = VCS_AS_CONTEXT_SWITCH,
        [VCS2] = VCS2_AS_CONTEXT_SWITCH,
        [VECS] = VECS_AS_CONTEXT_SWITCH,
};

static int ring_id_to_context_switch_event(int ring_id)
{
        if (WARN_ON(ring_id < RCS ||
                        ring_id >= ARRAY_SIZE(context_switch_events)))
                return -EINVAL;

        return context_switch_events[ring_id];
}

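/*
 * Each virtual engine keeps two execlist slots, mirroring the two ELSP
 * ports of the hardware. Promote the pending slot to running; the new
 * running context becomes element 0 of the promoted slot when a context
 * was in flight, otherwise NULL.
 */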
static void switch_virtual_execlist_slot(struct intel_vgpu_execlist *execlist)
{
        gvt_dbg_el("[before] running slot %d/context %x pending slot %d\n",
                        execlist->running_slot ?
                        execlist->running_slot->index : -1,
                        execlist->running_context ?
                        execlist->running_context->context_id : 0,
                        execlist->pending_slot ?
                        execlist->pending_slot->index : -1);

        execlist->running_slot = execlist->pending_slot;
        execlist->pending_slot = NULL;
        execlist->running_context = execlist->running_context ?
                &execlist->running_slot->ctx[0] : NULL;

        gvt_dbg_el("[after] running slot %d/context %x pending slot %d\n",
                        execlist->running_slot ?
                        execlist->running_slot->index : -1,
                        execlist->running_context ?
                        execlist->running_context->context_id : 0,
                        execlist->pending_slot ?
                        execlist->pending_slot->index : -1);
}

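/*
 * Rebuild the vGPU's view of the EXECLIST_STATUS register from the
 * current slot state: which element is active/valid, where the next
 * ELSP write will land, and whether the queue is full (a pending slot
 * already exists). The context ID reported is that of the running
 * context, if any.
 */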
static void emulate_execlist_status(struct intel_vgpu_execlist *execlist)
{
        struct intel_vgpu_execlist_slot *running = execlist->running_slot;
        struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
        struct execlist_ctx_descriptor_format *desc = execlist->running_context;
        struct intel_vgpu *vgpu = execlist->vgpu;
        struct execlist_status_format status;
        int ring_id = execlist->ring_id;
        u32 status_reg = execlist_ring_mmio(vgpu->gvt,
                        ring_id, _EL_OFFSET_STATUS);

        status.ldw = vgpu_vreg(vgpu, status_reg);
        status.udw = vgpu_vreg(vgpu, status_reg + 4);

        if (running) {
                status.current_execlist_pointer = !!running->index;
                status.execlist_write_pointer = !running->index;
                status.execlist_0_active = status.execlist_0_valid =
                        !running->index;
                status.execlist_1_active = status.execlist_1_valid =
                        !!running->index;
        } else {
                status.context_id = 0;
                status.execlist_0_active = status.execlist_0_valid = 0;
                status.execlist_1_active = status.execlist_1_valid = 0;
        }

        status.context_id = desc ? desc->context_id : 0;
        status.execlist_queue_full = !!pending;

        vgpu_vreg(vgpu, status_reg) = status.ldw;
        vgpu_vreg(vgpu, status_reg + 4) = status.udw;

        gvt_dbg_el("vgpu%d: status reg offset %x ldw %x udw %x\n",
                        vgpu->id, status_reg, status.ldw, status.udw);
}

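/*
 * Append one context status event to the vGPU's context status buffer
 * (CSB) and advance the virtual write pointer. The CSB holds six
 * entries (0-5); 0x7 is the after-reset value, so the first event
 * after a reset lands in entry 0. Unless the caller asks to defer it,
 * a context-switch interrupt is injected so the guest notices the new
 * event.
 */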
static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
                struct execlist_context_status_format *status,
                bool trigger_interrupt_later)
{
        struct intel_vgpu *vgpu = execlist->vgpu;
        int ring_id = execlist->ring_id;
        struct execlist_context_status_pointer_format ctx_status_ptr;
        u32 write_pointer;
        u32 ctx_status_ptr_reg, ctx_status_buf_reg, offset;

        ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
                        _EL_OFFSET_STATUS_PTR);
        ctx_status_buf_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
                        _EL_OFFSET_STATUS_BUF);

        ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);

        write_pointer = ctx_status_ptr.write_ptr;

        if (write_pointer == 0x7) {
                write_pointer = 0;
        } else {
                ++write_pointer;
                write_pointer %= 0x6;
        }

        offset = ctx_status_buf_reg + write_pointer * 8;

        vgpu_vreg(vgpu, offset) = status->ldw;
        vgpu_vreg(vgpu, offset + 4) = status->udw;

        ctx_status_ptr.write_ptr = write_pointer;
        vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;

        gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n",
                        vgpu->id, write_pointer, offset, status->ldw, status->udw);

        if (trigger_interrupt_later)
                return;

        intel_vgpu_trigger_virtual_event(vgpu,
                        ring_id_to_context_switch_event(execlist->ring_id));
}

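/*
 * Emulate the hardware's schedule-out of a context and report it to
 * the guest through status/CSB updates. Three cases are handled:
 * element switch (the second element of the running slot takes over),
 * active-to-idle (the last element finished with nothing pending), and
 * context-complete followed by idle-to-active when a pending slot is
 * promoted.
 */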
static int emulate_execlist_ctx_schedule_out(
                struct intel_vgpu_execlist *execlist,
                struct execlist_ctx_descriptor_format *ctx)
{
        struct intel_vgpu_execlist_slot *running = execlist->running_slot;
        struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
        struct execlist_ctx_descriptor_format *ctx0 = &running->ctx[0];
        struct execlist_ctx_descriptor_format *ctx1 = &running->ctx[1];
        struct execlist_context_status_format status;

        memset(&status, 0, sizeof(status));

        gvt_dbg_el("schedule out context id %x\n", ctx->context_id);

        if (WARN_ON(!same_context(ctx, execlist->running_context))) {
                gvt_err("schedule out context is not running context, "
                                "ctx id %x running ctx id %x\n",
                                ctx->context_id,
                                execlist->running_context->context_id);
                return -EINVAL;
        }

        /* ctx1 is valid, ctx0/ctx is scheduled-out -> element switch */
        if (valid_context(ctx1) && same_context(ctx0, ctx)) {
                gvt_dbg_el("ctx 1 valid, ctx/ctx 0 is scheduled-out\n");

                execlist->running_context = ctx1;

                emulate_execlist_status(execlist);

                status.context_complete = status.element_switch = 1;
                status.context_id = ctx->context_id;

                emulate_csb_update(execlist, &status, false);
        /*
         * ctx1 is not valid, ctx == ctx0
         * ctx1 is valid, ctx1 == ctx
         *      --> the last element is finished
         * emulate:
         *      active-to-idle if there is *no* pending execlist
         *      context-complete if there *is* a pending execlist
         */
        } else if ((!valid_context(ctx1) && same_context(ctx0, ctx))
                        || (valid_context(ctx1) && same_context(ctx1, ctx))) {
                gvt_dbg_el("need to switch virtual execlist slot\n");

                switch_virtual_execlist_slot(execlist);

                emulate_execlist_status(execlist);

                status.context_complete = status.active_to_idle = 1;
                status.context_id = ctx->context_id;

                if (!pending) {
                        emulate_csb_update(execlist, &status, false);
                } else {
                        emulate_csb_update(execlist, &status, true);

                        memset(&status, 0, sizeof(status));

                        status.idle_to_active = 1;
                        status.context_id = 0;

                        emulate_csb_update(execlist, &status, false);
                }
        } else {
                WARN_ON(1);
                return -EINVAL;
        }

        return 0;
}

static struct intel_vgpu_execlist_slot *get_next_execlist_slot(
                struct intel_vgpu_execlist *execlist)
{
        struct intel_vgpu *vgpu = execlist->vgpu;
        int ring_id = execlist->ring_id;
        u32 status_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
                        _EL_OFFSET_STATUS);
        struct execlist_status_format status;

        status.ldw = vgpu_vreg(vgpu, status_reg);
        status.udw = vgpu_vreg(vgpu, status_reg + 4);

        if (status.execlist_queue_full) {
                gvt_err("virtual execlist slots are full\n");
                return NULL;
        }

        return &execlist->slot[status.execlist_write_pointer];
}

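/*
 * Emulate an ELSP write landing in a free slot. If nothing is running,
 * the new slot becomes the running execlist (idle-to-active). If the
 * new submission continues the context that is about to retire, a
 * lite-restore + preempted event is emulated and the slots are
 * switched immediately; otherwise the slot is parked as pending and
 * only the queue-full status is updated.
 */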
static int emulate_execlist_schedule_in(struct intel_vgpu_execlist *execlist,
                struct execlist_ctx_descriptor_format ctx[2])
{
        struct intel_vgpu_execlist_slot *running = execlist->running_slot;
        struct intel_vgpu_execlist_slot *slot =
                get_next_execlist_slot(execlist);

        struct execlist_ctx_descriptor_format *ctx0, *ctx1;
        struct execlist_context_status_format status;

        gvt_dbg_el("emulate schedule-in\n");

        if (!slot) {
                gvt_err("no available execlist slot\n");
                return -EINVAL;
        }

        memset(&status, 0, sizeof(status));
        memset(slot->ctx, 0, sizeof(slot->ctx));

        slot->ctx[0] = ctx[0];
        slot->ctx[1] = ctx[1];

        gvt_dbg_el("alloc slot index %d ctx 0 %x ctx 1 %x\n",
                        slot->index, ctx[0].context_id,
                        ctx[1].context_id);

        /*
         * no running execlist, make this write bundle the running execlist
         * -> idle-to-active
         */
        if (!running) {
                gvt_dbg_el("no current running execlist\n");

                execlist->running_slot = slot;
                execlist->pending_slot = NULL;
                execlist->running_context = &slot->ctx[0];

                gvt_dbg_el("running slot index %d running context %x\n",
                                execlist->running_slot->index,
                                execlist->running_context->context_id);

                emulate_execlist_status(execlist);

                status.idle_to_active = 1;
                status.context_id = 0;

                emulate_csb_update(execlist, &status, false);
                return 0;
        }

        ctx0 = &running->ctx[0];
        ctx1 = &running->ctx[1];

        gvt_dbg_el("current running slot index %d ctx 0 %x ctx 1 %x\n",
                        running->index, ctx0->context_id, ctx1->context_id);

        /*
         * already has a running execlist
         *      a. running ctx1 is valid,
         *         ctx0 is finished, and running ctx1 == new execlist ctx[0]
         *      b. running ctx1 is not valid,
         *         ctx0 == new execlist ctx[0]
         * ----> lite-restore + preempted
         */
        if ((valid_context(ctx1) && same_context(ctx1, &slot->ctx[0]) &&
                        /* condition a */
                        !same_context(ctx0, execlist->running_context)) ||
                        (!valid_context(ctx1) &&
                         same_context(ctx0, &slot->ctx[0]))) { /* condition b */
                gvt_dbg_el("need to switch virtual execlist slot\n");

                execlist->pending_slot = slot;
                switch_virtual_execlist_slot(execlist);

                emulate_execlist_status(execlist);

                status.lite_restore = status.preempted = 1;
                status.context_id = ctx[0].context_id;

                emulate_csb_update(execlist, &status, false);
        } else {
                gvt_dbg_el("emulate as pending slot\n");
                /*
                 * otherwise
                 * --> emulate a pending execlist with no preemption
                 */
                execlist->pending_slot = slot;
                emulate_execlist_status(execlist);
        }

        return 0;
}

static void free_workload(struct intel_vgpu_workload *workload)
{
        intel_vgpu_unpin_mm(workload->shadow_mm);
        intel_gvt_mm_unreference(workload->shadow_mm);
        kmem_cache_free(workload->vgpu->workloads, workload);
}

#define get_desc_from_elsp_dwords(ed, i) \
        ((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))

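/*
 * Patch the graphics memory address in a shadowed batch-buffer-start
 * command so it points at the shadow batch buffer. The low dword keeps
 * address bits [31:2]; on platforms with 8-byte addresses in commands,
 * the second dword carries the upper 16 address bits.
 */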
#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
static int set_gma_to_bb_cmd(struct intel_shadow_bb_entry *entry_obj,
                unsigned long add, int gmadr_bytes)
{
        if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
                return -1;

        *((u32 *)(entry_obj->bb_start_cmd_va + (1 << 2))) = add &
                BATCH_BUFFER_ADDR_MASK;
        if (gmadr_bytes == 8) {
                *((u32 *)(entry_obj->bb_start_cmd_va + (2 << 2))) =
                        add & BATCH_BUFFER_ADDR_HIGH_MASK;
        }

        return 0;
}

static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
        int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;

        /* pin the gem objects to ggtt */
        if (!list_empty(&workload->shadow_bb)) {
                struct intel_shadow_bb_entry *entry_obj =
                        list_first_entry(&workload->shadow_bb,
                                        struct intel_shadow_bb_entry,
                                        list);
                struct intel_shadow_bb_entry *temp;

                list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
                                list) {
                        struct i915_vma *vma;

                        vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0,
                                        4, 0);
                        if (IS_ERR(vma)) {
                                gvt_err("Cannot pin\n");
                                return;
                        }

                        /* FIXME: we are not tracking our pinned VMA leaving it
                         * up to the core to fix up the stray pin_count upon
                         * free.
                         */

                        /* update the relocated gma with the shadow batch buffer */
                        set_gma_to_bb_cmd(entry_obj,
                                        i915_ggtt_offset(vma),
                                        gmadr_bytes);
                }
        }
}

static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
        int ring_id = wa_ctx->workload->ring_id;
        struct i915_gem_context *shadow_ctx =
                wa_ctx->workload->vgpu->shadow_ctx;
        struct drm_i915_gem_object *ctx_obj =
                shadow_ctx->engine[ring_id].state->obj;
        struct execlist_ring_context *shadow_ring_context;
        struct page *page;

        page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
        shadow_ring_context = kmap_atomic(page);

        shadow_ring_context->bb_per_ctx_ptr.val =
                (shadow_ring_context->bb_per_ctx_ptr.val &
                 (~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
        shadow_ring_context->rcs_indirect_ctx.val =
                (shadow_ring_context->rcs_indirect_ctx.val &
                 (~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;

        kunmap_atomic(shadow_ring_context);
        return 0;
}

static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
        struct i915_vma *vma;
        unsigned char *per_ctx_va =
                (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
                wa_ctx->indirect_ctx.size;

        if (wa_ctx->indirect_ctx.size == 0)
                return;

        vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
                        0, CACHELINE_BYTES, 0);
        if (IS_ERR(vma)) {
                gvt_err("Cannot pin indirect ctx obj\n");
                return;
        }

        /* FIXME: we are not tracking our pinned VMA leaving it
         * up to the core to fix up the stray pin_count upon
         * free.
         */

        wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);

        wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
        memset(per_ctx_va, 0, CACHELINE_BYTES);

        update_wa_ctx_2_shadow_ctx(wa_ctx);
}

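/*
 * Make a workload ready for submission: pin its shadow page tables,
 * synchronize out-of-sync guest page table pages, flush post-shadowed
 * updates, pin and relocate the shadow batch buffer and the WA
 * context, and, for the first submission of an ELSP write, emulate
 * the schedule-in event towards the guest.
 */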
static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        struct execlist_ctx_descriptor_format ctx[2];
        int ring_id = workload->ring_id;

        intel_vgpu_pin_mm(workload->shadow_mm);
        intel_vgpu_sync_oos_pages(workload->vgpu);
        intel_vgpu_flush_post_shadow(workload->vgpu);
        prepare_shadow_batch_buffer(workload);
        prepare_shadow_wa_ctx(&workload->wa_ctx);
        if (!workload->emulate_schedule_in)
                return 0;

        ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
        ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);

        return emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
}

static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
        /* release all the shadow batch buffers */
        if (!list_empty(&workload->shadow_bb)) {
                struct intel_shadow_bb_entry *entry_obj =
                        list_first_entry(&workload->shadow_bb,
                                        struct intel_shadow_bb_entry,
                                        list);
                struct intel_shadow_bb_entry *temp;

                list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
                                list) {
                        i915_gem_object_unpin_map(entry_obj->obj);
                        i915_gem_object_put(entry_obj->obj);
                        list_del(&entry_obj->list);
                        kfree(entry_obj);
                }
        }
}

static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
        if (wa_ctx->indirect_ctx.size == 0)
                return;

        i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
        i915_gem_object_put(wa_ctx->indirect_ctx.obj);
}

static int complete_execlist_workload(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_vgpu_execlist *execlist =
                &vgpu->execlist[workload->ring_id];
        struct intel_vgpu_workload *next_workload;
        struct list_head *next = workload_q_head(vgpu, workload->ring_id)->next;
        bool lite_restore = false;
        int ret;

        gvt_dbg_el("complete workload %p status %d\n", workload,
                        workload->status);

        release_shadow_batch_buffer(workload);
        release_shadow_wa_ctx(&workload->wa_ctx);

        if (workload->status || vgpu->resetting)
                goto out;

        if (!list_empty(workload_q_head(vgpu, workload->ring_id))) {
                struct execlist_ctx_descriptor_format *this_desc, *next_desc;

                next_workload = container_of(next,
                                struct intel_vgpu_workload, list);
                this_desc = &workload->ctx_desc;
                next_desc = &next_workload->ctx_desc;

                lite_restore = same_context(this_desc, next_desc);
        }

        if (lite_restore) {
                gvt_dbg_el("next context == current - no schedule-out\n");
                free_workload(workload);
                return 0;
        }

        ret = emulate_execlist_ctx_schedule_out(execlist, &workload->ctx_desc);
        if (ret)
                goto err;
out:
        free_workload(workload);
        return 0;
err:
        free_workload(workload);
        return ret;
}

#define RING_CTX_OFF(x) \
        offsetof(struct execlist_ring_context, x)

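/*
 * Read the eight page directory pointer dwords out of the guest ring
 * context. The context stores them starting at pdp3_UDW and going
 * down, so they are copied in reverse to fill pdp[0]..pdp[7] in
 * ascending order.
 */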
static void read_guest_pdps(struct intel_vgpu *vgpu,
                u64 ring_context_gpa, u32 pdp[8])
{
        u64 gpa;
        int i;

        gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val);

        for (i = 0; i < 8; i++)
                intel_gvt_hypervisor_read_gpa(vgpu,
                                gpa + i * 8, &pdp[7 - i], 4);
}

static int prepare_mm(struct intel_vgpu_workload *workload)
{
        struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
        struct intel_vgpu_mm *mm;
        int page_table_level;
        u32 pdp[8];

        if (desc->addressing_mode == 1) { /* legacy 32-bit */
                page_table_level = 3;
        } else if (desc->addressing_mode == 3) { /* legacy 64-bit */
                page_table_level = 4;
        } else {
                gvt_err("Advanced Context mode (SVM) is not supported!\n");
                return -EINVAL;
        }

        read_guest_pdps(workload->vgpu, workload->ring_context_gpa, pdp);

        mm = intel_vgpu_find_ppgtt_mm(workload->vgpu, page_table_level, pdp);
        if (mm) {
                intel_gvt_mm_reference(mm);
        } else {
                mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
                                pdp, page_table_level, 0);
                if (IS_ERR(mm)) {
                        gvt_err("fail to create mm object.\n");
                        return PTR_ERR(mm);
                }
        }
        workload->shadow_mm = mm;
        return 0;
}

#define get_last_workload(q) \
        (list_empty(q) ? NULL : container_of(q->prev, \
        struct intel_vgpu_workload, list))

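/*
 * Build a workload from a guest context descriptor: locate the guest
 * ring context through the GGTT, snapshot the ring buffer registers
 * needed by the scan-and-shadow path, pick up the RCS workaround
 * context pointers, resolve (or create) the shadow PPGTT, and queue
 * the workload to the scheduler.
 */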
static int submit_context(struct intel_vgpu *vgpu, int ring_id,
                struct execlist_ctx_descriptor_format *desc,
                bool emulate_schedule_in)
{
        struct list_head *q = workload_q_head(vgpu, ring_id);
        struct intel_vgpu_workload *last_workload = get_last_workload(q);
        struct intel_vgpu_workload *workload = NULL;
        u64 ring_context_gpa;
        u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
        int ret;

        ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
                        (u32)((desc->lrca + 1) << GTT_PAGE_SHIFT));
        if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
                gvt_err("invalid guest context LRCA: %x\n", desc->lrca);
                return -EINVAL;
        }

        intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                        RING_CTX_OFF(ring_header.val), &head, 4);

        intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                        RING_CTX_OFF(ring_tail.val), &tail, 4);

        head &= RB_HEAD_OFF_MASK;
        tail &= RB_TAIL_OFF_MASK;

        if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
                gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
                gvt_dbg_el("ctx head %x real head %lx\n", head,
                                last_workload->rb_tail);
                /*
                 * cannot use the guest context head pointer here,
                 * as it might not be updated at this time
                 */
                head = last_workload->rb_tail;
        }

        gvt_dbg_el("ring id %d begin a new workload\n", ring_id);

        workload = kmem_cache_zalloc(vgpu->workloads, GFP_KERNEL);
        if (!workload)
                return -ENOMEM;

        /* record some ring buffer register values for scan and shadow */
        intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                        RING_CTX_OFF(rb_start.val), &start, 4);
        intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                        RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
        intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                        RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);

        INIT_LIST_HEAD(&workload->list);
        INIT_LIST_HEAD(&workload->shadow_bb);

        init_waitqueue_head(&workload->shadow_ctx_status_wq);
        atomic_set(&workload->shadow_ctx_active, 0);

        workload->vgpu = vgpu;
        workload->ring_id = ring_id;
        workload->ctx_desc = *desc;
        workload->ring_context_gpa = ring_context_gpa;
        workload->rb_head = head;
        workload->rb_tail = tail;
        workload->rb_start = start;
        workload->rb_ctl = ctl;
        workload->prepare = prepare_execlist_workload;
        workload->complete = complete_execlist_workload;
        workload->status = -EINPROGRESS;
        workload->emulate_schedule_in = emulate_schedule_in;

        if (ring_id == RCS) {
                intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                                RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
                intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                                RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);

                workload->wa_ctx.indirect_ctx.guest_gma =
                        indirect_ctx & INDIRECT_CTX_ADDR_MASK;
                workload->wa_ctx.indirect_ctx.size =
                        (indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
                        CACHELINE_BYTES;
                workload->wa_ctx.per_ctx.guest_gma =
                        per_ctx & PER_CTX_ADDR_MASK;
                workload->wa_ctx.workload = workload;

                WARN_ON(workload->wa_ctx.indirect_ctx.size && !(per_ctx & 0x1));
        }

        if (emulate_schedule_in)
                memcpy(&workload->elsp_dwords,
                                &vgpu->execlist[ring_id].elsp_dwords,
                                sizeof(workload->elsp_dwords));

        gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
                        workload, ring_id, head, tail, start, ctl);

        gvt_dbg_el("workload %p emulate schedule_in %d\n", workload,
                        emulate_schedule_in);

        ret = prepare_mm(workload);
        if (ret) {
                kmem_cache_free(vgpu->workloads, workload);
                return ret;
        }

        queue_workload(workload);
        return 0;
}

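/*
 * Handle a guest ELSP write: validate the two context descriptors
 * (descriptor 0 must be valid, and valid descriptors must use
 * privileged PPGTT access), then submit a workload per valid
 * descriptor. Only the first workload of the write emulates the
 * schedule-in event.
 */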
int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
{
        struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
        struct execlist_ctx_descriptor_format *desc[2], valid_desc[2];
        unsigned long valid_desc_bitmap = 0;
        bool emulate_schedule_in = true;
        int ret;
        int i;

        memset(valid_desc, 0, sizeof(valid_desc));

        desc[0] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 1);
        desc[1] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 0);

        for (i = 0; i < 2; i++) {
                if (!desc[i]->valid)
                        continue;

                if (!desc[i]->privilege_access) {
                        gvt_err("vgpu%d: unexpected GGTT elsp submission\n",
                                        vgpu->id);
                        return -EINVAL;
                }

                /* TODO: add more guest context checks here. */
                set_bit(i, &valid_desc_bitmap);
                valid_desc[i] = *desc[i];
        }

        if (!valid_desc_bitmap) {
                gvt_err("vgpu%d: no valid desc in an elsp submission\n",
                                vgpu->id);
                return -EINVAL;
        }

        if (!test_bit(0, (void *)&valid_desc_bitmap) &&
                        test_bit(1, (void *)&valid_desc_bitmap)) {
                gvt_err("vgpu%d: weird elsp submission, desc 0 is not valid\n",
                                vgpu->id);
                return -EINVAL;
        }

        /* submit workload */
        for_each_set_bit(i, (void *)&valid_desc_bitmap, 2) {
                ret = submit_context(vgpu, ring_id, &valid_desc[i],
                                emulate_schedule_in);
                if (ret) {
                        gvt_err("vgpu%d: fail to schedule workload\n",
                                        vgpu->id);
                        return ret;
                }
                emulate_schedule_in = false;
        }

        return 0;
}

static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
{
        struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
        struct execlist_context_status_pointer_format ctx_status_ptr;
        u32 ctx_status_ptr_reg;

        memset(execlist, 0, sizeof(*execlist));

        execlist->vgpu = vgpu;
        execlist->ring_id = ring_id;
        execlist->slot[0].index = 0;
        execlist->slot[1].index = 1;

        ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
                        _EL_OFFSET_STATUS_PTR);

        ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
        ctx_status_ptr.read_ptr = ctx_status_ptr.write_ptr = 0x7;
        vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
}

void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
{
        kmem_cache_destroy(vgpu->workloads);
}

int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
{
        enum intel_engine_id i;
        struct intel_engine_cs *engine;

        /* each ring has a virtual execlist engine */
        for_each_engine(engine, vgpu->gvt->dev_priv, i) {
                init_vgpu_execlist(vgpu, i);
                INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
        }

        vgpu->workloads = kmem_cache_create("gvt-g vgpu workload",
                        sizeof(struct intel_vgpu_workload), 0,
                        SLAB_HWCACHE_ALIGN,
                        NULL);
        if (!vgpu->workloads)
                return -ENOMEM;

        return 0;
}

void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
                unsigned long engine_mask)
{
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        struct intel_engine_cs *engine;
        struct intel_vgpu_workload *pos, *n;
        unsigned int tmp;

        for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
                /* free the unsubmitted workloads in the queue */
                list_for_each_entry_safe(pos, n,
                                &vgpu->workload_q_head[engine->id], list) {
                        list_del_init(&pos->list);
                        free_workload(pos);
                }

                init_vgpu_execlist(vgpu, engine->id);
        }
}