4 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
26 * Zhi Wang <zhi.a.wang@intel.com>
27 * Zhenyu Wang <zhenyuw@linux.intel.com>
28 * Xiao Zheng <xiao.zheng@intel.com>
31 * Min He <min.he@intel.com>
32 * Bing Niu <bing.niu@intel.com>
38 #include "i915_pvinfo.h"
41 static bool enable_out_of_sync = false;
42 static int preallocated_oos_pages = 8192;
45 * validate a gm address and related range size,
46 * translate it to host gm address
48 bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
50 if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
51 && !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
52 gvt_err("vgpu%d: invalid range gmadr 0x%llx size 0x%x\n",
53 vgpu->id, addr, size);
59 /* translate a guest gmadr to host gmadr */
60 int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
62 if (WARN(!vgpu_gmadr_is_valid(vgpu, g_addr),
63 "invalid guest gmadr %llx\n", g_addr))
66 if (vgpu_gmadr_is_aperture(vgpu, g_addr))
67 *h_addr = vgpu_aperture_gmadr_base(vgpu)
68 + (g_addr - vgpu_aperture_offset(vgpu));
70 *h_addr = vgpu_hidden_gmadr_base(vgpu)
71 + (g_addr - vgpu_hidden_offset(vgpu));
75 /* translate a host gmadr to guest gmadr */
76 int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
78 if (WARN(!gvt_gmadr_is_valid(vgpu->gvt, h_addr),
79 "invalid host gmadr %llx\n", h_addr))
82 if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
83 *g_addr = vgpu_aperture_gmadr_base(vgpu)
84 + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
86 *g_addr = vgpu_hidden_gmadr_base(vgpu)
87 + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
91 int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
92 unsigned long *h_index)
97 ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << GTT_PAGE_SHIFT,
102 *h_index = h_addr >> GTT_PAGE_SHIFT;
106 int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
107 unsigned long *g_index)
112 ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << GTT_PAGE_SHIFT,
117 *g_index = g_addr >> GTT_PAGE_SHIFT;
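/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * round-tripping a guest GGTT page index through the two index translation
 * helpers above.
 */
static int __maybe_unused example_roundtrip_ggtt_index(struct intel_vgpu *vgpu,
		unsigned long g_index)
{
	unsigned long h_index, g_index_again;
	int ret;

	ret = intel_gvt_ggtt_index_g2h(vgpu, g_index, &h_index);
	if (ret)
		return ret;

	/* Translating back should recover the original guest index. */
	ret = intel_gvt_ggtt_h2g_index(vgpu, h_index, &g_index_again);
	if (ret)
		return ret;

	return g_index_again == g_index ? 0 : -EINVAL;
}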
121 #define gtt_type_is_entry(type) \
122 (type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \
123 && type != GTT_TYPE_PPGTT_PTE_ENTRY \
124 && type != GTT_TYPE_PPGTT_ROOT_ENTRY)
126 #define gtt_type_is_pt(type) \
127 (type >= GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX)
129 #define gtt_type_is_pte_pt(type) \
130 (type == GTT_TYPE_PPGTT_PTE_PT)
132 #define gtt_type_is_root_pointer(type) \
133 (gtt_type_is_entry(type) && type > GTT_TYPE_PPGTT_ROOT_ENTRY)
135 #define gtt_init_entry(e, t, p, v) do { \
138 memcpy(&(e)->val64, &v, sizeof(v)); \
142 * Mappings between GTT_TYPE* enumerations.
143 * Following information can be found according to the given type:
144 * - type of next level page table
145 * - type of entry inside this level page table
146 * - type of entry with PSE set
148 * If the given type doesn't carry a piece of information, e.g. asking
149 * for the PSE type of an L4 root entry, or for the next-level page
150 * table type of a PTE page table, GTT_TYPE_INVALID is returned:
151 * an L4 root entry has no PSE bit, and a PTE page table has no
152 * next-level page table type.
153 * This is useful when traversing a
157 struct gtt_type_table_entry {
163 #define GTT_TYPE_TABLE_ENTRY(type, e_type, npt_type, pse_type) \
165 .entry_type = e_type, \
166 .next_pt_type = npt_type, \
167 .pse_entry_type = pse_type, \
170 static struct gtt_type_table_entry gtt_type_table[] = {
171 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
172 GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
173 GTT_TYPE_PPGTT_PML4_PT,
175 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
176 GTT_TYPE_PPGTT_PML4_ENTRY,
177 GTT_TYPE_PPGTT_PDP_PT,
179 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
180 GTT_TYPE_PPGTT_PML4_ENTRY,
181 GTT_TYPE_PPGTT_PDP_PT,
183 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
184 GTT_TYPE_PPGTT_PDP_ENTRY,
185 GTT_TYPE_PPGTT_PDE_PT,
186 GTT_TYPE_PPGTT_PTE_1G_ENTRY),
187 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
188 GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
189 GTT_TYPE_PPGTT_PDE_PT,
190 GTT_TYPE_PPGTT_PTE_1G_ENTRY),
191 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
192 GTT_TYPE_PPGTT_PDP_ENTRY,
193 GTT_TYPE_PPGTT_PDE_PT,
194 GTT_TYPE_PPGTT_PTE_1G_ENTRY),
195 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
196 GTT_TYPE_PPGTT_PDE_ENTRY,
197 GTT_TYPE_PPGTT_PTE_PT,
198 GTT_TYPE_PPGTT_PTE_2M_ENTRY),
199 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
200 GTT_TYPE_PPGTT_PDE_ENTRY,
201 GTT_TYPE_PPGTT_PTE_PT,
202 GTT_TYPE_PPGTT_PTE_2M_ENTRY),
203 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
204 GTT_TYPE_PPGTT_PTE_4K_ENTRY,
207 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
208 GTT_TYPE_PPGTT_PTE_4K_ENTRY,
211 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
212 GTT_TYPE_PPGTT_PDE_ENTRY,
214 GTT_TYPE_PPGTT_PTE_2M_ENTRY),
215 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
216 GTT_TYPE_PPGTT_PDP_ENTRY,
218 GTT_TYPE_PPGTT_PTE_1G_ENTRY),
219 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
225 static inline int get_next_pt_type(int type)
227 return gtt_type_table[type].next_pt_type;
230 static inline int get_entry_type(int type)
232 return gtt_type_table[type].entry_type;
235 static inline int get_pse_type(int type)
237 return gtt_type_table[type].pse_entry_type;
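/*
 * Illustrative sketch (hypothetical helper, not from the original file):
 * the type helpers above let a table walk decide whether it can descend
 * one more level; leaf tables map to GTT_TYPE_INVALID and stop the walk.
 */
static inline bool __maybe_unused example_can_descend(int type)
{
	return gtt_type_is_pt(get_next_pt_type(type));
}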
240 static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
242 void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
247 static void write_pte64(struct drm_i915_private *dev_priv,
248 unsigned long index, u64 pte)
250 void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
254 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
255 POSTING_READ(GFX_FLSH_CNTL_GEN6);
258 static inline struct intel_gvt_gtt_entry *gtt_get_entry64(void *pt,
259 struct intel_gvt_gtt_entry *e,
260 unsigned long index, bool hypervisor_access, unsigned long gpa,
261 struct intel_vgpu *vgpu)
263 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
266 if (WARN_ON(info->gtt_entry_size != 8))
269 if (hypervisor_access) {
270 ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
271 (index << info->gtt_entry_size_shift),
275 e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
277 e->val64 = *((u64 *)pt + index);
282 static inline struct intel_gvt_gtt_entry *gtt_set_entry64(void *pt,
283 struct intel_gvt_gtt_entry *e,
284 unsigned long index, bool hypervisor_access, unsigned long gpa,
285 struct intel_vgpu *vgpu)
287 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
290 if (WARN_ON(info->gtt_entry_size != 8))
293 if (hypervisor_access) {
294 ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
295 (index << info->gtt_entry_size_shift),
299 write_pte64(vgpu->gvt->dev_priv, index, e->val64);
301 *((u64 *)pt + index) = e->val64;
308 #define ADDR_1G_MASK (((1UL << (GTT_HAW - 30 + 1)) - 1) << 30)
309 #define ADDR_2M_MASK (((1UL << (GTT_HAW - 21 + 1)) - 1) << 21)
310 #define ADDR_4K_MASK (((1UL << (GTT_HAW - 12 + 1)) - 1) << 12)
312 static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
316 if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY)
317 pfn = (e->val64 & ADDR_1G_MASK) >> 12;
318 else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
319 pfn = (e->val64 & ADDR_2M_MASK) >> 12;
321 pfn = (e->val64 & ADDR_4K_MASK) >> 12;
325 static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
327 if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
328 e->val64 &= ~ADDR_1G_MASK;
329 pfn &= (ADDR_1G_MASK >> 12);
330 } else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
331 e->val64 &= ~ADDR_2M_MASK;
332 pfn &= (ADDR_2M_MASK >> 12);
334 e->val64 &= ~ADDR_4K_MASK;
335 pfn &= (ADDR_4K_MASK >> 12);
338 e->val64 |= (pfn << 12);
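/*
 * Illustrative sketch (hypothetical helper): the pfn accessors above only
 * touch the address bits of an entry, so flag bits such as _PAGE_PRESENT
 * survive a set_pfn()/get_pfn() round trip on a 4K entry.
 */
static void __maybe_unused example_pfn_roundtrip(void)
{
	struct intel_gvt_gtt_entry e = {
		.val64 = _PAGE_PRESENT | _PAGE_RW,
		.type = GTT_TYPE_PPGTT_PTE_4K_ENTRY,
	};

	gen8_gtt_set_pfn(&e, 0x12345);

	WARN_ON(gen8_gtt_get_pfn(&e) != 0x12345);
	WARN_ON(!(e.val64 & _PAGE_PRESENT));
}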
341 static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
343 /* Entry doesn't have PSE bit. */
344 if (get_pse_type(e->type) == GTT_TYPE_INVALID)
347 e->type = get_entry_type(e->type);
348 if (!(e->val64 & (1 << 7)))
351 e->type = get_pse_type(e->type);
355 static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
358 * i915 writes PDP root pointer registers without the present bit;
359 * this still works, so we need to treat root pointer entries
362 if (e->type == GTT_TYPE_PPGTT_ROOT_L3_ENTRY
363 || e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
364 return (e->val64 != 0);
366 return (e->val64 & (1 << 0));
369 static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
371 e->val64 &= ~(1 << 0);
375 * Per-platform GMA routines.
377 static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
379 unsigned long x = (gma >> GTT_PAGE_SHIFT);
381 trace_gma_index(__func__, gma, x);
385 #define DEFINE_PPGTT_GMA_TO_INDEX(prefix, ename, exp) \
386 static unsigned long prefix##_gma_to_##ename##_index(unsigned long gma) \
388 unsigned long x = (exp); \
389 trace_gma_index(__func__, gma, x); \
393 DEFINE_PPGTT_GMA_TO_INDEX(gen8, pte, (gma >> 12 & 0x1ff));
394 DEFINE_PPGTT_GMA_TO_INDEX(gen8, pde, (gma >> 21 & 0x1ff));
395 DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3));
396 DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff));
397 DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff));
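/*
 * Illustrative layout of a gen8 48-bit graphics memory address, matching
 * the shift/mask pairs used by the macros above:
 *
 *  47      39 38      30 29      21 20      12 11       0
 * +----------+----------+----------+----------+----------+
 * |   PML4   |   PDP    |   PDE    |   PTE    |  offset  |
 * |  index   |  index   |  index   |  index   | in page  |
 * +----------+----------+----------+----------+----------+
 *
 * (For 3-level PPGTT the l3_pdp index only uses bits 31:30.)
 */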
399 static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
400 .get_entry = gtt_get_entry64,
401 .set_entry = gtt_set_entry64,
402 .clear_present = gtt_entry_clear_present,
403 .test_present = gen8_gtt_test_present,
404 .test_pse = gen8_gtt_test_pse,
405 .get_pfn = gen8_gtt_get_pfn,
406 .set_pfn = gen8_gtt_set_pfn,
409 static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
410 .gma_to_ggtt_pte_index = gma_to_ggtt_pte_index,
411 .gma_to_pte_index = gen8_gma_to_pte_index,
412 .gma_to_pde_index = gen8_gma_to_pde_index,
413 .gma_to_l3_pdp_index = gen8_gma_to_l3_pdp_index,
414 .gma_to_l4_pdp_index = gen8_gma_to_l4_pdp_index,
415 .gma_to_pml4_index = gen8_gma_to_pml4_index,
418 static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p,
419 struct intel_gvt_gtt_entry *m)
421 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
422 unsigned long gfn, mfn;
426 if (!ops->test_present(p))
429 gfn = ops->get_pfn(p);
431 mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
432 if (mfn == INTEL_GVT_INVALID_ADDR) {
433 gvt_err("fail to translate gfn: 0x%lx\n", gfn);
437 ops->set_pfn(m, mfn);
444 struct intel_gvt_gtt_entry *intel_vgpu_mm_get_entry(struct intel_vgpu_mm *mm,
445 void *page_table, struct intel_gvt_gtt_entry *e,
448 struct intel_gvt *gvt = mm->vgpu->gvt;
449 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
451 e->type = mm->page_table_entry_type;
453 ops->get_entry(page_table, e, index, false, 0, mm->vgpu);
458 struct intel_gvt_gtt_entry *intel_vgpu_mm_set_entry(struct intel_vgpu_mm *mm,
459 void *page_table, struct intel_gvt_gtt_entry *e,
462 struct intel_gvt *gvt = mm->vgpu->gvt;
463 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
465 return ops->set_entry(page_table, e, index, false, 0, mm->vgpu);
469 * PPGTT shadow page table helpers.
471 static inline struct intel_gvt_gtt_entry *ppgtt_spt_get_entry(
472 struct intel_vgpu_ppgtt_spt *spt,
473 void *page_table, int type,
474 struct intel_gvt_gtt_entry *e, unsigned long index,
477 struct intel_gvt *gvt = spt->vgpu->gvt;
478 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
480 e->type = get_entry_type(type);
482 if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
485 ops->get_entry(page_table, e, index, guest,
486 spt->guest_page.gfn << GTT_PAGE_SHIFT,
492 static inline struct intel_gvt_gtt_entry *ppgtt_spt_set_entry(
493 struct intel_vgpu_ppgtt_spt *spt,
494 void *page_table, int type,
495 struct intel_gvt_gtt_entry *e, unsigned long index,
498 struct intel_gvt *gvt = spt->vgpu->gvt;
499 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
501 if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
504 return ops->set_entry(page_table, e, index, guest,
505 spt->guest_page.gfn << GTT_PAGE_SHIFT,
509 #define ppgtt_get_guest_entry(spt, e, index) \
510 ppgtt_spt_get_entry(spt, NULL, \
511 spt->guest_page_type, e, index, true)
513 #define ppgtt_set_guest_entry(spt, e, index) \
514 ppgtt_spt_set_entry(spt, NULL, \
515 spt->guest_page_type, e, index, true)
517 #define ppgtt_get_shadow_entry(spt, e, index) \
518 ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
519 spt->shadow_page.type, e, index, false)
521 #define ppgtt_set_shadow_entry(spt, e, index) \
522 ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
523 spt->shadow_page.type, e, index, false)
526 * intel_vgpu_init_guest_page - init a guest page data structure
528 * @p: a guest page data structure
529 * @gfn: guest memory page frame number
530 * @handler: function that will be called when the target guest memory page has
533 * This function is called when user wants to track a guest memory page.
536 * Zero on success, negative error code if failed.
538 int intel_vgpu_init_guest_page(struct intel_vgpu *vgpu,
539 struct intel_vgpu_guest_page *p,
541 int (*handler)(void *, u64, void *, int),
544 INIT_HLIST_NODE(&p->node);
546 p->writeprotection = false;
548 p->handler = handler;
553 hash_add(vgpu->gtt.guest_page_hash_table, &p->node, p->gfn);
557 static int detach_oos_page(struct intel_vgpu *vgpu,
558 struct intel_vgpu_oos_page *oos_page);
561 * intel_vgpu_clean_guest_page - release the resource owned by guest page data
564 * @p: a tracked guest page
566 * This function is called when user tries to stop tracking a guest memory
569 void intel_vgpu_clean_guest_page(struct intel_vgpu *vgpu,
570 struct intel_vgpu_guest_page *p)
572 if (!hlist_unhashed(&p->node))
576 detach_oos_page(vgpu, p->oos_page);
578 if (p->writeprotection)
579 intel_gvt_hypervisor_unset_wp_page(vgpu, p);
583 * intel_vgpu_find_guest_page - find a guest page data structure by GFN.
585 * @gfn: guest memory page frame number
587 * This function is called when emulation logic wants to know if a trapped GFN
588 * is a tracked guest page.
591 * Pointer to guest page data structure, NULL if failed.
593 struct intel_vgpu_guest_page *intel_vgpu_find_guest_page(
594 struct intel_vgpu *vgpu, unsigned long gfn)
596 struct intel_vgpu_guest_page *p;
598 hash_for_each_possible(vgpu->gtt.guest_page_hash_table,
606 static inline int init_shadow_page(struct intel_vgpu *vgpu,
607 struct intel_vgpu_shadow_page *p, int type)
609 p->vaddr = page_address(p->page);
612 INIT_HLIST_NODE(&p->node);
614 p->mfn = intel_gvt_hypervisor_virt_to_mfn(p->vaddr);
615 if (p->mfn == INTEL_GVT_INVALID_ADDR)
618 hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn);
622 static inline void clean_shadow_page(struct intel_vgpu_shadow_page *p)
624 if (!hlist_unhashed(&p->node))
628 static inline struct intel_vgpu_shadow_page *find_shadow_page(
629 struct intel_vgpu *vgpu, unsigned long mfn)
631 struct intel_vgpu_shadow_page *p;
633 hash_for_each_possible(vgpu->gtt.shadow_page_hash_table,
641 #define guest_page_to_ppgtt_spt(ptr) \
642 container_of(ptr, struct intel_vgpu_ppgtt_spt, guest_page)
644 #define shadow_page_to_ppgtt_spt(ptr) \
645 container_of(ptr, struct intel_vgpu_ppgtt_spt, shadow_page)
647 static void *alloc_spt(gfp_t gfp_mask)
649 struct intel_vgpu_ppgtt_spt *spt;
651 spt = kzalloc(sizeof(*spt), gfp_mask);
655 spt->shadow_page.page = alloc_page(gfp_mask);
656 if (!spt->shadow_page.page) {
663 static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
665 __free_page(spt->shadow_page.page);
669 static void ppgtt_free_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
671 trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type);
673 clean_shadow_page(&spt->shadow_page);
674 intel_vgpu_clean_guest_page(spt->vgpu, &spt->guest_page);
675 list_del_init(&spt->post_shadow_list);
680 static void ppgtt_free_all_shadow_page(struct intel_vgpu *vgpu)
682 struct hlist_node *n;
683 struct intel_vgpu_shadow_page *sp;
686 hash_for_each_safe(vgpu->gtt.shadow_page_hash_table, i, n, sp, node)
687 ppgtt_free_shadow_page(shadow_page_to_ppgtt_spt(sp));
690 static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
691 u64 pa, void *p_data, int bytes);
693 static int ppgtt_write_protection_handler(void *gp, u64 pa,
694 void *p_data, int bytes)
696 struct intel_vgpu_guest_page *gpt = (struct intel_vgpu_guest_page *)gp;
699 if (bytes != 4 && bytes != 8)
702 if (!gpt->writeprotection)
705 ret = ppgtt_handle_guest_write_page_table_bytes(gp,
712 static int reclaim_one_mm(struct intel_gvt *gvt);
714 static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_shadow_page(
715 struct intel_vgpu *vgpu, int type, unsigned long gfn)
717 struct intel_vgpu_ppgtt_spt *spt = NULL;
721 spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
723 if (reclaim_one_mm(vgpu->gvt))
726 gvt_err("fail to allocate ppgtt shadow page\n");
727 return ERR_PTR(-ENOMEM);
731 spt->guest_page_type = type;
732 atomic_set(&spt->refcount, 1);
733 INIT_LIST_HEAD(&spt->post_shadow_list);
736 * TODO: the guest page type may differ from the shadow page type
737 * once we support PSE pages in the future.
739 ret = init_shadow_page(vgpu, &spt->shadow_page, type);
741 gvt_err("fail to initialize shadow page for spt\n");
745 ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page,
746 gfn, ppgtt_write_protection_handler, NULL);
748 gvt_err("fail to initialize guest page for spt\n");
752 trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
755 ppgtt_free_shadow_page(spt);
759 static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
760 struct intel_vgpu *vgpu, unsigned long mfn)
762 struct intel_vgpu_shadow_page *p = find_shadow_page(vgpu, mfn);
765 return shadow_page_to_ppgtt_spt(p);
767 gvt_err("vgpu%d: fail to find ppgtt shadow page: 0x%lx\n",
772 #define pt_entry_size_shift(spt) \
773 ((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)
775 #define pt_entries(spt) \
776 (GTT_PAGE_SIZE >> pt_entry_size_shift(spt))
778 #define for_each_present_guest_entry(spt, e, i) \
779 for (i = 0; i < pt_entries(spt); i++) \
780 if (spt->vgpu->gvt->gtt.pte_ops->test_present( \
781 ppgtt_get_guest_entry(spt, e, i)))
783 #define for_each_present_shadow_entry(spt, e, i) \
784 for (i = 0; i < pt_entries(spt); i++) \
785 if (spt->vgpu->gvt->gtt.pte_ops->test_present( \
786 ppgtt_get_shadow_entry(spt, e, i)))
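/*
 * Illustrative sketch (hypothetical helper): counting the present entries
 * of a shadow page table with the iterator defined above.
 */
static int __maybe_unused example_count_present_shadow_entries(
		struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_gvt_gtt_entry e;
	unsigned long i;
	int count = 0;

	for_each_present_shadow_entry(spt, &e, i)
		count++;

	return count;
}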
788 static void ppgtt_get_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
790 int v = atomic_read(&spt->refcount);
792 trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));
794 atomic_inc(&spt->refcount);
797 static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);
799 static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
800 struct intel_gvt_gtt_entry *e)
802 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
803 struct intel_vgpu_ppgtt_spt *s;
804 intel_gvt_gtt_type_t cur_pt_type;
806 if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(e->type))))
809 if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
810 && e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
811 cur_pt_type = get_next_pt_type(e->type) + 1;
812 if (ops->get_pfn(e) ==
813 vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
816 s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
818 gvt_err("vgpu%d: fail to find shadow page: mfn: 0x%lx\n",
819 vgpu->id, ops->get_pfn(e));
822 return ppgtt_invalidate_shadow_page(s);
825 static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
827 struct intel_gvt_gtt_entry e;
830 int v = atomic_read(&spt->refcount);
832 trace_spt_change(spt->vgpu->id, "die", spt,
833 spt->guest_page.gfn, spt->shadow_page.type);
835 trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
837 if (atomic_dec_return(&spt->refcount) > 0)
840 if (gtt_type_is_pte_pt(spt->shadow_page.type))
843 for_each_present_shadow_entry(spt, &e, index) {
844 if (!gtt_type_is_pt(get_next_pt_type(e.type))) {
845 gvt_err("GVT doesn't support pse bit for now\n");
848 ret = ppgtt_invalidate_shadow_page_by_shadow_entry(
854 trace_spt_change(spt->vgpu->id, "release", spt,
855 spt->guest_page.gfn, spt->shadow_page.type);
856 ppgtt_free_shadow_page(spt);
859 gvt_err("vgpu%d: fail: shadow page %p shadow entry 0x%llx type %d\n",
860 spt->vgpu->id, spt, e.val64, e.type);
864 static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);
866 static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
867 struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
869 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
870 struct intel_vgpu_ppgtt_spt *s = NULL;
871 struct intel_vgpu_guest_page *g;
874 if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(we->type)))) {
879 g = intel_vgpu_find_guest_page(vgpu, ops->get_pfn(we));
881 s = guest_page_to_ppgtt_spt(g);
882 ppgtt_get_shadow_page(s);
884 int type = get_next_pt_type(we->type);
886 s = ppgtt_alloc_shadow_page(vgpu, type, ops->get_pfn(we));
892 ret = intel_gvt_hypervisor_set_wp_page(vgpu, &s->guest_page);
896 ret = ppgtt_populate_shadow_page(s);
900 trace_spt_change(vgpu->id, "new", s, s->guest_page.gfn,
901 s->shadow_page.type);
905 gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
906 vgpu->id, s, we->val64, we->type);
910 static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
911 struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge)
913 struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;
916 se->val64 = ge->val64;
918 ops->set_pfn(se, s->shadow_page.mfn);
921 static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
923 struct intel_vgpu *vgpu = spt->vgpu;
924 struct intel_vgpu_ppgtt_spt *s;
925 struct intel_gvt_gtt_entry se, ge;
929 trace_spt_change(spt->vgpu->id, "born", spt,
930 spt->guest_page.gfn, spt->shadow_page.type);
932 if (gtt_type_is_pte_pt(spt->shadow_page.type)) {
933 for_each_present_guest_entry(spt, &ge, i) {
934 ret = gtt_entry_p2m(vgpu, &ge, &se);
937 ppgtt_set_shadow_entry(spt, &se, i);
942 for_each_present_guest_entry(spt, &ge, i) {
943 if (!gtt_type_is_pt(get_next_pt_type(ge.type))) {
944 gvt_err("GVT doesn't support pse bit now\n");
949 s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
954 ppgtt_get_shadow_entry(spt, &se, i);
955 ppgtt_generate_shadow_entry(&se, s, &ge);
956 ppgtt_set_shadow_entry(spt, &se, i);
960 gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
961 vgpu->id, spt, ge.val64, ge.type);
965 static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
968 struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
969 struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
970 struct intel_vgpu *vgpu = spt->vgpu;
971 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
972 struct intel_gvt_gtt_entry e;
975 ppgtt_get_shadow_entry(spt, &e, index);
977 trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type, e.val64,
980 if (!ops->test_present(&e))
983 if (ops->get_pfn(&e) == vgpu->gtt.scratch_pt[sp->type].page_mfn)
986 if (gtt_type_is_pt(get_next_pt_type(e.type))) {
987 struct intel_vgpu_ppgtt_spt *s =
988 ppgtt_find_shadow_page(vgpu, ops->get_pfn(&e));
990 gvt_err("fail to find guest page\n");
994 ret = ppgtt_invalidate_shadow_page(s);
998 ops->set_pfn(&e, vgpu->gtt.scratch_pt[sp->type].page_mfn);
999 ppgtt_set_shadow_entry(spt, &e, index);
1002 gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
1003 vgpu->id, spt, e.val64, e.type);
1007 static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt,
1008 struct intel_gvt_gtt_entry *we, unsigned long index)
1010 struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
1011 struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
1012 struct intel_vgpu *vgpu = spt->vgpu;
1013 struct intel_gvt_gtt_entry m;
1014 struct intel_vgpu_ppgtt_spt *s;
1017 trace_gpt_change(spt->vgpu->id, "add", spt, sp->type,
1020 if (gtt_type_is_pt(get_next_pt_type(we->type))) {
1021 s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, we);
1026 ppgtt_get_shadow_entry(spt, &m, index);
1027 ppgtt_generate_shadow_entry(&m, s, we);
1028 ppgtt_set_shadow_entry(spt, &m, index);
1030 ret = gtt_entry_p2m(vgpu, we, &m);
1033 ppgtt_set_shadow_entry(spt, &m, index);
1037 gvt_err("vgpu%d: fail: spt %p guest entry 0x%llx type %d\n", vgpu->id,
1038 spt, we->val64, we->type);
1042 static int sync_oos_page(struct intel_vgpu *vgpu,
1043 struct intel_vgpu_oos_page *oos_page)
1045 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
1046 struct intel_gvt *gvt = vgpu->gvt;
1047 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
1048 struct intel_vgpu_ppgtt_spt *spt =
1049 guest_page_to_ppgtt_spt(oos_page->guest_page);
1050 struct intel_gvt_gtt_entry old, new, m;
1054 trace_oos_change(vgpu->id, "sync", oos_page->id,
1055 oos_page->guest_page, spt->guest_page_type);
1057 old.type = new.type = get_entry_type(spt->guest_page_type);
1058 old.val64 = new.val64 = 0;
1060 for (index = 0; index < (GTT_PAGE_SIZE >> info->gtt_entry_size_shift);
1062 ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
1063 ops->get_entry(NULL, &new, index, true,
1064 oos_page->guest_page->gfn << PAGE_SHIFT, vgpu);
1066 if (old.val64 == new.val64
1067 && !test_and_clear_bit(index, spt->post_shadow_bitmap))
1070 trace_oos_sync(vgpu->id, oos_page->id,
1071 oos_page->guest_page, spt->guest_page_type,
1074 ret = gtt_entry_p2m(vgpu, &new, &m);
1078 ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
1079 ppgtt_set_shadow_entry(spt, &m, index);
1082 oos_page->guest_page->write_cnt = 0;
1083 list_del_init(&spt->post_shadow_list);
1087 static int detach_oos_page(struct intel_vgpu *vgpu,
1088 struct intel_vgpu_oos_page *oos_page)
1090 struct intel_gvt *gvt = vgpu->gvt;
1091 struct intel_vgpu_ppgtt_spt *spt =
1092 guest_page_to_ppgtt_spt(oos_page->guest_page);
1094 trace_oos_change(vgpu->id, "detach", oos_page->id,
1095 oos_page->guest_page, spt->guest_page_type);
1097 oos_page->guest_page->write_cnt = 0;
1098 oos_page->guest_page->oos_page = NULL;
1099 oos_page->guest_page = NULL;
1101 list_del_init(&oos_page->vm_list);
1102 list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head);
1107 static int attach_oos_page(struct intel_vgpu *vgpu,
1108 struct intel_vgpu_oos_page *oos_page,
1109 struct intel_vgpu_guest_page *gpt)
1111 struct intel_gvt *gvt = vgpu->gvt;
1114 ret = intel_gvt_hypervisor_read_gpa(vgpu, gpt->gfn << GTT_PAGE_SHIFT,
1115 oos_page->mem, GTT_PAGE_SIZE);
1119 oos_page->guest_page = gpt;
1120 gpt->oos_page = oos_page;
1122 list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head);
1124 trace_oos_change(vgpu->id, "attach", gpt->oos_page->id,
1125 gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
1129 static int ppgtt_set_guest_page_sync(struct intel_vgpu *vgpu,
1130 struct intel_vgpu_guest_page *gpt)
1134 ret = intel_gvt_hypervisor_set_wp_page(vgpu, gpt);
1138 trace_oos_change(vgpu->id, "set page sync", gpt->oos_page->id,
1139 gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
1141 list_del_init(&gpt->oos_page->vm_list);
1142 return sync_oos_page(vgpu, gpt->oos_page);
1145 static int ppgtt_allocate_oos_page(struct intel_vgpu *vgpu,
1146 struct intel_vgpu_guest_page *gpt)
1148 struct intel_gvt *gvt = vgpu->gvt;
1149 struct intel_gvt_gtt *gtt = &gvt->gtt;
1150 struct intel_vgpu_oos_page *oos_page = gpt->oos_page;
1153 WARN(oos_page, "shadow PPGTT page already has an oos page\n");
1155 if (list_empty(&gtt->oos_page_free_list_head)) {
1156 oos_page = container_of(gtt->oos_page_use_list_head.next,
1157 struct intel_vgpu_oos_page, list);
1158 ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
1161 ret = detach_oos_page(vgpu, oos_page);
1165 oos_page = container_of(gtt->oos_page_free_list_head.next,
1166 struct intel_vgpu_oos_page, list);
1167 return attach_oos_page(vgpu, oos_page, gpt);
1170 static int ppgtt_set_guest_page_oos(struct intel_vgpu *vgpu,
1171 struct intel_vgpu_guest_page *gpt)
1173 struct intel_vgpu_oos_page *oos_page = gpt->oos_page;
1175 if (WARN(!oos_page, "shadow PPGTT page should have an oos page\n"))
1178 trace_oos_change(vgpu->id, "set page out of sync", gpt->oos_page->id,
1179 gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
1181 list_add_tail(&oos_page->vm_list, &vgpu->gtt.oos_page_list_head);
1182 return intel_gvt_hypervisor_unset_wp_page(vgpu, gpt);
1186 * intel_vgpu_sync_oos_pages - sync all the out-of-sync shadow pages for a vGPU
1189 * This function is called before submitting a guest workload to the host,
1190 * to sync all the out-of-sync shadow pages for the vGPU.
1193 * Zero on success, negative error code if failed.
1195 int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
1197 struct list_head *pos, *n;
1198 struct intel_vgpu_oos_page *oos_page;
1201 if (!enable_out_of_sync)
1204 list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
1205 oos_page = container_of(pos,
1206 struct intel_vgpu_oos_page, vm_list);
1207 ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
1215 * The heart of PPGTT shadow page table.
1217 static int ppgtt_handle_guest_write_page_table(
1218 struct intel_vgpu_guest_page *gpt,
1219 struct intel_gvt_gtt_entry *we, unsigned long index)
1221 struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
1222 struct intel_vgpu *vgpu = spt->vgpu;
1223 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1228 new_present = ops->test_present(we);
1230 ret = ppgtt_handle_guest_entry_removal(gpt, index);
1235 ret = ppgtt_handle_guest_entry_add(gpt, we, index);
1241 gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d.\n",
1242 vgpu->id, spt, we->val64, we->type);
1246 static inline bool can_do_out_of_sync(struct intel_vgpu_guest_page *gpt)
1248 return enable_out_of_sync
1249 && gtt_type_is_pte_pt(
1250 guest_page_to_ppgtt_spt(gpt)->guest_page_type)
1251 && gpt->write_cnt >= 2;
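/*
 * Note on the heuristic above: a guest PTE page is only considered for
 * out-of-sync handling after it has triggered at least two write-protection
 * faults (write_cnt >= 2), so rarely-written pages stay write-protected and
 * are shadowed synchronously.
 */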
1254 static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt,
1255 unsigned long index)
1257 set_bit(index, spt->post_shadow_bitmap);
1258 if (!list_empty(&spt->post_shadow_list))
1261 list_add_tail(&spt->post_shadow_list,
1262 &spt->vgpu->gtt.post_shadow_list_head);
1266 * intel_vgpu_flush_post_shadow - flush the post shadow transactions
1269 * This function is called before submitting a guest workload to the host,
1270 * to flush all the pending post-shadow transactions for a vGPU.
1273 * Zero on success, negative error code if failed.
1275 int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
1277 struct list_head *pos, *n;
1278 struct intel_vgpu_ppgtt_spt *spt;
1279 struct intel_gvt_gtt_entry ge;
1280 unsigned long index;
1283 list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) {
1284 spt = container_of(pos, struct intel_vgpu_ppgtt_spt,
1287 for_each_set_bit(index, spt->post_shadow_bitmap,
1288 GTT_ENTRY_NUM_IN_ONE_PAGE) {
1289 ppgtt_get_guest_entry(spt, &ge, index);
1291 ret = ppgtt_handle_guest_write_page_table(
1292 &spt->guest_page, &ge, index);
1295 clear_bit(index, spt->post_shadow_bitmap);
1297 list_del_init(&spt->post_shadow_list);
1302 static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
1303 u64 pa, void *p_data, int bytes)
1305 struct intel_vgpu_guest_page *gpt = (struct intel_vgpu_guest_page *)gp;
1306 struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
1307 struct intel_vgpu *vgpu = spt->vgpu;
1308 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1309 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
1310 struct intel_gvt_gtt_entry we;
1311 unsigned long index;
1314 index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift;
1316 ppgtt_get_guest_entry(spt, &we, index);
1320 if (bytes == info->gtt_entry_size) {
1321 ret = ppgtt_handle_guest_write_page_table(gpt, &we, index);
1325 if (!test_bit(index, spt->post_shadow_bitmap)) {
1326 ret = ppgtt_handle_guest_entry_removal(gpt, index);
1331 ppgtt_set_post_shadow(spt, index);
1334 if (!enable_out_of_sync)
1340 ops->set_entry(gpt->oos_page->mem, &we, index,
1343 if (can_do_out_of_sync(gpt)) {
1345 ppgtt_allocate_oos_page(vgpu, gpt);
1347 ret = ppgtt_set_guest_page_oos(vgpu, gpt);
1355 * mm page table allocation policy for bdw+
1356 * - for ggtt, only a virtual page table is allocated.
1357 * - for ppgtt, dedicated virtual/shadow page tables are allocated.
1359 static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm)
1361 struct intel_vgpu *vgpu = mm->vgpu;
1362 struct intel_gvt *gvt = vgpu->gvt;
1363 const struct intel_gvt_device_info *info = &gvt->device_info;
1366 if (mm->type == INTEL_GVT_MM_PPGTT) {
1367 mm->page_table_entry_cnt = 4;
1368 mm->page_table_entry_size = mm->page_table_entry_cnt *
1369 info->gtt_entry_size;
1370 mem = kzalloc(mm->has_shadow_page_table ?
1371 mm->page_table_entry_size * 2
1372 : mm->page_table_entry_size, GFP_KERNEL);
1375 mm->virtual_page_table = mem;
1376 if (!mm->has_shadow_page_table)
1378 mm->shadow_page_table = mem + mm->page_table_entry_size;
1379 } else if (mm->type == INTEL_GVT_MM_GGTT) {
1380 mm->page_table_entry_cnt =
1381 (gvt_ggtt_gm_sz(gvt) >> GTT_PAGE_SHIFT);
1382 mm->page_table_entry_size = mm->page_table_entry_cnt *
1383 info->gtt_entry_size;
1384 mem = vzalloc(mm->page_table_entry_size);
1387 mm->virtual_page_table = mem;
1392 static void gen8_mm_free_page_table(struct intel_vgpu_mm *mm)
1394 if (mm->type == INTEL_GVT_MM_PPGTT) {
1395 kfree(mm->virtual_page_table);
1396 } else if (mm->type == INTEL_GVT_MM_GGTT) {
1397 if (mm->virtual_page_table)
1398 vfree(mm->virtual_page_table);
1400 mm->virtual_page_table = mm->shadow_page_table = NULL;
1403 static void invalidate_mm(struct intel_vgpu_mm *mm)
1405 struct intel_vgpu *vgpu = mm->vgpu;
1406 struct intel_gvt *gvt = vgpu->gvt;
1407 struct intel_gvt_gtt *gtt = &gvt->gtt;
1408 struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
1409 struct intel_gvt_gtt_entry se;
1412 if (WARN_ON(!mm->has_shadow_page_table || !mm->shadowed))
1415 for (i = 0; i < mm->page_table_entry_cnt; i++) {
1416 ppgtt_get_shadow_root_entry(mm, &se, i);
1417 if (!ops->test_present(&se))
1419 ppgtt_invalidate_shadow_page_by_shadow_entry(
1422 ppgtt_set_shadow_root_entry(mm, &se, i);
1424 trace_gpt_change(vgpu->id, "destroy root pointer",
1425 NULL, se.type, se.val64, i);
1427 mm->shadowed = false;
1431 * intel_vgpu_destroy_mm - destroy a mm object
1432 * @mm: a kref object
1434 * This function is used to destroy a mm object for vGPU
1437 void intel_vgpu_destroy_mm(struct kref *mm_ref)
1439 struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);
1440 struct intel_vgpu *vgpu = mm->vgpu;
1441 struct intel_gvt *gvt = vgpu->gvt;
1442 struct intel_gvt_gtt *gtt = &gvt->gtt;
1444 if (!mm->initialized)
1447 list_del(&mm->list);
1448 list_del(&mm->lru_list);
1450 if (mm->has_shadow_page_table)
1453 gtt->mm_free_page_table(mm);
1458 static int shadow_mm(struct intel_vgpu_mm *mm)
1460 struct intel_vgpu *vgpu = mm->vgpu;
1461 struct intel_gvt *gvt = vgpu->gvt;
1462 struct intel_gvt_gtt *gtt = &gvt->gtt;
1463 struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
1464 struct intel_vgpu_ppgtt_spt *spt;
1465 struct intel_gvt_gtt_entry ge, se;
1469 if (WARN_ON(!mm->has_shadow_page_table || mm->shadowed))
1472 mm->shadowed = true;
1474 for (i = 0; i < mm->page_table_entry_cnt; i++) {
1475 ppgtt_get_guest_root_entry(mm, &ge, i);
1476 if (!ops->test_present(&ge))
1479 trace_gpt_change(vgpu->id, __func__, NULL,
1480 ge.type, ge.val64, i);
1482 spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
1484 gvt_err("fail to populate guest root pointer\n");
1488 ppgtt_generate_shadow_entry(&se, spt, &ge);
1489 ppgtt_set_shadow_root_entry(mm, &se, i);
1491 trace_gpt_change(vgpu->id, "populate root pointer",
1492 NULL, se.type, se.val64, i);
1501 * intel_vgpu_create_mm - create a mm object for a vGPU
1503 * @mm_type: mm object type, should be PPGTT or GGTT
1504 * @virtual_page_table: page table root pointers. Could be NULL if user wants
1505 * to populate shadow later.
1506 * @page_table_level: describe the page table level of the mm object
1507 * @pde_base_index: pde root pointer base in GGTT MMIO.
1509 * This function is used to create a mm object for a vGPU.
1512 * Pointer to the mm object on success, ERR_PTR-encoded error code if failed.
1514 struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
1515 int mm_type, void *virtual_page_table, int page_table_level,
1518 struct intel_gvt *gvt = vgpu->gvt;
1519 struct intel_gvt_gtt *gtt = &gvt->gtt;
1520 struct intel_vgpu_mm *mm;
1523 mm = kzalloc(sizeof(*mm), GFP_KERNEL);
1531 if (page_table_level == 1)
1532 mm->page_table_entry_type = GTT_TYPE_GGTT_PTE;
1533 else if (page_table_level == 3)
1534 mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
1535 else if (page_table_level == 4)
1536 mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
1543 mm->page_table_level = page_table_level;
1544 mm->pde_base_index = pde_base_index;
1547 mm->has_shadow_page_table = !!(mm_type == INTEL_GVT_MM_PPGTT);
1549 kref_init(&mm->ref);
1550 atomic_set(&mm->pincount, 0);
1551 INIT_LIST_HEAD(&mm->list);
1552 INIT_LIST_HEAD(&mm->lru_list);
1553 list_add_tail(&mm->list, &vgpu->gtt.mm_list_head);
1555 ret = gtt->mm_alloc_page_table(mm);
1557 gvt_err("fail to allocate page table for mm\n");
1561 mm->initialized = true;
1563 if (virtual_page_table)
1564 memcpy(mm->virtual_page_table, virtual_page_table,
1565 mm->page_table_entry_size);
1567 if (mm->has_shadow_page_table) {
1568 ret = shadow_mm(mm);
1571 list_add_tail(&mm->lru_list, &gvt->gtt.mm_lru_list_head);
1575 gvt_err("fail to create mm\n");
1577 intel_gvt_mm_unreference(mm);
1578 return ERR_PTR(ret);
1582 * intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object
1583 * @mm: a vGPU mm object
1585 * This function is called when user doesn't want to use a vGPU mm object
1587 void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
1589 if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
1592 atomic_dec(&mm->pincount);
1596 * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object
1599 * This function is called when the user wants to use a vGPU mm object. If this
1600 * mm object hasn't been shadowed yet, the shadow will be populated at this
1604 * Zero on success, negative error code if failed.
1606 int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
1610 if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
1613 atomic_inc(&mm->pincount);
1615 if (!mm->shadowed) {
1616 ret = shadow_mm(mm);
1621 list_del_init(&mm->lru_list);
1622 list_add_tail(&mm->lru_list, &mm->vgpu->gvt->gtt.mm_lru_list_head);
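/*
 * Illustrative sketch (hypothetical helper): the expected pin/unpin pairing
 * around any use of a shadowed PPGTT mm object, e.g. while a workload that
 * references it is being prepared and submitted.
 */
static int __maybe_unused example_use_ppgtt_mm(struct intel_vgpu_mm *mm)
{
	int ret = intel_vgpu_pin_mm(mm);

	if (ret)
		return ret;

	/* ... walk or translate through mm's shadow page table here ... */

	intel_vgpu_unpin_mm(mm);
	return 0;
}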
1626 static int reclaim_one_mm(struct intel_gvt *gvt)
1628 struct intel_vgpu_mm *mm;
1629 struct list_head *pos, *n;
1631 list_for_each_safe(pos, n, &gvt->gtt.mm_lru_list_head) {
1632 mm = container_of(pos, struct intel_vgpu_mm, lru_list);
1634 if (mm->type != INTEL_GVT_MM_PPGTT)
1636 if (atomic_read(&mm->pincount))
1639 list_del_init(&mm->lru_list);
1647 * GMA translation APIs.
1649 static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
1650 struct intel_gvt_gtt_entry *e, unsigned long index, bool guest)
1652 struct intel_vgpu *vgpu = mm->vgpu;
1653 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1654 struct intel_vgpu_ppgtt_spt *s;
1656 if (WARN_ON(!mm->has_shadow_page_table))
1659 s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
1664 ppgtt_get_shadow_entry(s, e, index);
1666 ppgtt_get_guest_entry(s, e, index);
1671 * intel_vgpu_gma_to_gpa - translate a gma to GPA
1672 * @mm: mm object. could be a PPGTT or GGTT mm object
1673 * @gma: graphics memory address in this mm object
1675 * This function is used to translate a graphics memory address in a specific
1676 * graphics memory space to a guest physical address.
1679 * Guest physical address on success, INTEL_GVT_INVALID_ADDR if failed.
1681 unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
1683 struct intel_vgpu *vgpu = mm->vgpu;
1684 struct intel_gvt *gvt = vgpu->gvt;
1685 struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops;
1686 struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops;
1687 unsigned long gpa = INTEL_GVT_INVALID_ADDR;
1688 unsigned long gma_index[4];
1689 struct intel_gvt_gtt_entry e;
1693 if (mm->type != INTEL_GVT_MM_GGTT && mm->type != INTEL_GVT_MM_PPGTT)
1694 return INTEL_GVT_INVALID_ADDR;
1696 if (mm->type == INTEL_GVT_MM_GGTT) {
1697 if (!vgpu_gmadr_is_valid(vgpu, gma))
1700 ggtt_get_guest_entry(mm, &e,
1701 gma_ops->gma_to_ggtt_pte_index(gma));
1702 gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
1703 + (gma & ~GTT_PAGE_MASK);
1705 trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
1709 switch (mm->page_table_level) {
1711 ppgtt_get_shadow_root_entry(mm, &e, 0);
1712 gma_index[0] = gma_ops->gma_to_pml4_index(gma);
1713 gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
1714 gma_index[2] = gma_ops->gma_to_pde_index(gma);
1715 gma_index[3] = gma_ops->gma_to_pte_index(gma);
1719 ppgtt_get_shadow_root_entry(mm, &e,
1720 gma_ops->gma_to_l3_pdp_index(gma));
1721 gma_index[0] = gma_ops->gma_to_pde_index(gma);
1722 gma_index[1] = gma_ops->gma_to_pte_index(gma);
1726 ppgtt_get_shadow_root_entry(mm, &e,
1727 gma_ops->gma_to_pde_index(gma));
1728 gma_index[0] = gma_ops->gma_to_pte_index(gma);
1736 /* walk into the shadow page table and get gpa from guest entry */
1737 for (i = 0; i < index; i++) {
1738 ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
1744 gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
1745 + (gma & ~GTT_PAGE_MASK);
1747 trace_gma_translate(vgpu->id, "ppgtt", 0,
1748 mm->page_table_level, gma, gpa);
1751 gvt_err("invalid mm type: %d gma %lx\n", mm->type, gma);
1752 return INTEL_GVT_INVALID_ADDR;
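/*
 * Illustrative sketch (hypothetical helper): translating a guest graphics
 * memory address with intel_vgpu_gma_to_gpa() and converting the sentinel
 * value into a conventional error code.
 */
static int __maybe_unused example_gma_to_gpa(struct intel_vgpu_mm *mm,
		unsigned long gma, unsigned long *gpa)
{
	unsigned long addr = intel_vgpu_gma_to_gpa(mm, gma);

	if (addr == INTEL_GVT_INVALID_ADDR)
		return -EFAULT;

	*gpa = addr;
	return 0;
}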
1755 static int emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
1756 unsigned int off, void *p_data, unsigned int bytes)
1758 struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
1759 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
1760 unsigned long index = off >> info->gtt_entry_size_shift;
1761 struct intel_gvt_gtt_entry e;
1763 if (bytes != 4 && bytes != 8)
1766 ggtt_get_guest_entry(ggtt_mm, &e, index);
1767 memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
1773 * intel_vgpu_emulate_gtt_mmio_read - emulate GTT MMIO register read
1775 * @off: register offset
1776 * @p_data: data will be returned to guest
1777 * @bytes: data length
1779 * This function is used to emulate the GTT MMIO register read
1782 * Zero on success, error code if failed.
1784 int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
1785 void *p_data, unsigned int bytes)
1787 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
1790 if (bytes != 4 && bytes != 8)
1793 off -= info->gtt_start_offset;
1794 ret = emulate_gtt_mmio_read(vgpu, off, p_data, bytes);
1798 static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
1799 void *p_data, unsigned int bytes)
1801 struct intel_gvt *gvt = vgpu->gvt;
1802 const struct intel_gvt_device_info *info = &gvt->device_info;
1803 struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
1804 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
1805 unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
1807 struct intel_gvt_gtt_entry e, m;
1810 if (bytes != 4 && bytes != 8)
1813 gma = g_gtt_index << GTT_PAGE_SHIFT;
1815 /* the VM may configure the whole GM space when ballooning is used */
1816 if (WARN_ONCE(!vgpu_gmadr_is_valid(vgpu, gma),
1817 "vgpu%d: found oob ggtt write, offset %x\n",
1822 ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index);
1824 memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
1827 if (ops->test_present(&e)) {
1828 ret = gtt_entry_p2m(vgpu, &e, &m);
1830 gvt_err("vgpu%d: fail to translate guest gtt entry\n",
1839 ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
1840 ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
1845 * intel_vgpu_emulate_gtt_mmio_write - emulate GTT MMIO register write
1847 * @off: register offset
1848 * @p_data: data from guest write
1849 * @bytes: data length
1851 * This function is used to emulate the GTT MMIO register write
1854 * Zero on success, error code if failed.
1856 int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
1857 void *p_data, unsigned int bytes)
1859 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
1862 if (bytes != 4 && bytes != 8)
1865 off -= info->gtt_start_offset;
1866 ret = emulate_gtt_mmio_write(vgpu, off, p_data, bytes);
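/*
 * Illustrative note: with 8-byte GTT entries (gtt_entry_size_shift == 3), a
 * guest write at GTT MMIO offset gtt_start_offset + 0x40 lands on guest GGTT
 * index 8, i.e. guest graphics address 8 << GTT_PAGE_SHIFT.
 */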
1870 static int alloc_scratch_pages(struct intel_vgpu *vgpu,
1871 intel_gvt_gtt_type_t type)
1873 struct intel_vgpu_gtt *gtt = &vgpu->gtt;
1874 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1875 int page_entry_num = GTT_PAGE_SIZE >>
1876 vgpu->gvt->device_info.gtt_entry_size_shift;
1881 if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
1884 scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
1886 gvt_err("fail to allocate scratch page\n");
1890 mfn = intel_gvt_hypervisor_virt_to_mfn(scratch_pt);
1891 if (mfn == INTEL_GVT_INVALID_ADDR) {
1892 gvt_err("fail to translate vaddr:0x%lx\n", (unsigned long)scratch_pt);
1893 free_page((unsigned long)scratch_pt);
1896 gtt->scratch_pt[type].page_mfn = mfn;
1897 gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
1898 gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
1899 vgpu->id, type, mfn);
1901 /* Build the tree by filling the scratch pt with entries which
1902 * point to the next level scratch pt or scratch page. The
1903 * scratch_pt[type] indicates the scratch pt/scratch page used by the
1905 * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by a
1906 * GTT_TYPE_PPGTT_PDE_PT level pt, which means this scratch_pt itself
1907 * is of type GTT_TYPE_PPGTT_PTE_PT, and is filled with the scratch page mfn.
1909 if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
1910 struct intel_gvt_gtt_entry se;
1912 memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
1913 se.type = get_entry_type(type - 1);
1914 ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn);
1916 /* The entry parameters such as present/writeable/cache type are
1917 * set to the same values as i915's scratch page tree.
1919 se.val64 |= _PAGE_PRESENT | _PAGE_RW;
1920 if (type == GTT_TYPE_PPGTT_PDE_PT)
1921 se.val64 |= PPAT_CACHED_INDEX;
1923 for (i = 0; i < page_entry_num; i++)
1924 ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
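/*
 * Illustrative sketch of the resulting scratch hierarchy, assuming the
 * type ordering PTE_PT < PDE_PT < PDP_PT < PML4_PT:
 *
 *   scratch_pt[PML4_PT] entries -> scratch_pt[PDP_PT] page
 *   scratch_pt[PDP_PT]  entries -> scratch_pt[PDE_PT] page
 *   scratch_pt[PDE_PT]  entries -> scratch_pt[PTE_PT] page
 *   scratch_pt[PTE_PT]  entries are left zeroed (not present)
 */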
1930 static int release_scratch_page_tree(struct intel_vgpu *vgpu)
1934 for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
1935 if (vgpu->gtt.scratch_pt[i].page != NULL) {
1936 __free_page(vgpu->gtt.scratch_pt[i].page);
1937 vgpu->gtt.scratch_pt[i].page = NULL;
1938 vgpu->gtt.scratch_pt[i].page_mfn = 0;
1945 static int create_scratch_page_tree(struct intel_vgpu *vgpu)
1949 for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
1950 ret = alloc_scratch_pages(vgpu, i);
1958 release_scratch_page_tree(vgpu);
1963 * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virtualization
1966 * This function is used to initialize per-vGPU graphics memory virtualization
1970 * Zero on success, error code if failed.
1972 int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
1974 struct intel_vgpu_gtt *gtt = &vgpu->gtt;
1975 struct intel_vgpu_mm *ggtt_mm;
1977 hash_init(gtt->guest_page_hash_table);
1978 hash_init(gtt->shadow_page_hash_table);
1980 INIT_LIST_HEAD(&gtt->mm_list_head);
1981 INIT_LIST_HEAD(&gtt->oos_page_list_head);
1982 INIT_LIST_HEAD(&gtt->post_shadow_list_head);
1984 intel_vgpu_reset_ggtt(vgpu);
1986 ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
1988 if (IS_ERR(ggtt_mm)) {
1989 gvt_err("fail to create mm for ggtt.\n");
1990 return PTR_ERR(ggtt_mm);
1993 gtt->ggtt_mm = ggtt_mm;
1995 return create_scratch_page_tree(vgpu);
1999 * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
2002 * This function is used to clean up per-vGPU graphics memory virtualization
2006 * Zero on success, error code if failed.
2008 void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
2010 struct list_head *pos, *n;
2011 struct intel_vgpu_mm *mm;
2013 ppgtt_free_all_shadow_page(vgpu);
2014 release_scratch_page_tree(vgpu);
2016 list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
2017 mm = container_of(pos, struct intel_vgpu_mm, list);
2018 vgpu->gvt->gtt.mm_free_page_table(mm);
2019 list_del(&mm->list);
2020 list_del(&mm->lru_list);
2025 static void clean_spt_oos(struct intel_gvt *gvt)
2027 struct intel_gvt_gtt *gtt = &gvt->gtt;
2028 struct list_head *pos, *n;
2029 struct intel_vgpu_oos_page *oos_page;
2031 WARN(!list_empty(&gtt->oos_page_use_list_head),
2032 "someone is still using oos page\n");
2034 list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
2035 oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
2036 list_del(&oos_page->list);
2041 static int setup_spt_oos(struct intel_gvt *gvt)
2043 struct intel_gvt_gtt *gtt = &gvt->gtt;
2044 struct intel_vgpu_oos_page *oos_page;
2048 INIT_LIST_HEAD(&gtt->oos_page_free_list_head);
2049 INIT_LIST_HEAD(&gtt->oos_page_use_list_head);
2051 for (i = 0; i < preallocated_oos_pages; i++) {
2052 oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
2054 gvt_err("fail to pre-allocate oos page\n");
2059 INIT_LIST_HEAD(&oos_page->list);
2060 INIT_LIST_HEAD(&oos_page->vm_list);
2062 list_add_tail(&oos_page->list, &gtt->oos_page_free_list_head);
2065 gvt_dbg_mm("%d oos pages preallocated\n", i);
2074 * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
2076 * @page_table_level: PPGTT page table level
2077 * @root_entry: PPGTT page table root pointers
2079 * This function is used to find a PPGTT mm object from mm object pool
2082 * pointer to mm object on success, NULL if failed.
2084 struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
2085 int page_table_level, void *root_entry)
2087 struct list_head *pos;
2088 struct intel_vgpu_mm *mm;
2091 list_for_each(pos, &vgpu->gtt.mm_list_head) {
2092 mm = container_of(pos, struct intel_vgpu_mm, list);
2093 if (mm->type != INTEL_GVT_MM_PPGTT)
2096 if (mm->page_table_level != page_table_level)
2100 dst = mm->virtual_page_table;
2102 if (page_table_level == 3) {
2103 if (src[0] == dst[0]
2106 && src[3] == dst[3])
2109 if (src[0] == dst[0])
2117 * intel_vgpu_g2v_create_ppgtt_mm - create a PPGTT mm object from
2120 * @page_table_level: PPGTT page table level
2122 * This function is used to create a PPGTT mm object from a guest to GVT-g
2126 * Zero on success, negative error code if failed.
2128 int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
2129 int page_table_level)
2131 u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
2132 struct intel_vgpu_mm *mm;
2134 if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
2137 mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
2139 intel_gvt_mm_reference(mm);
2141 mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT,
2142 pdp, page_table_level, 0);
2144 gvt_err("fail to create mm\n");
2152 * intel_vgpu_g2v_destroy_ppgtt_mm - destroy a PPGTT mm object from
2155 * @page_table_level: PPGTT page table level
2157 * This function is used to destroy a PPGTT mm object from a guest to GVT-g
2161 * Zero on success, negative error code if failed.
2163 int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
2164 int page_table_level)
2166 u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
2167 struct intel_vgpu_mm *mm;
2169 if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
2172 mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
2174 gvt_err("fail to find ppgtt instance.\n");
2177 intel_gvt_mm_unreference(mm);
2182 * intel_gvt_init_gtt - initialize mm components of a GVT device
2185 * This function is called at the initialization stage, to initialize
2186 * the mm components of a GVT device.
2189 * zero on success, negative error code if failed.
2191 int intel_gvt_init_gtt(struct intel_gvt *gvt)
2196 gvt_dbg_core("init gtt\n");
2198 if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
2199 gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
2200 gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
2201 gvt->gtt.mm_alloc_page_table = gen8_mm_alloc_page_table;
2202 gvt->gtt.mm_free_page_table = gen8_mm_free_page_table;
2207 page = (void *)get_zeroed_page(GFP_KERNEL);
2209 gvt_err("fail to allocate scratch ggtt page\n");
2212 gvt->gtt.scratch_ggtt_page = virt_to_page(page);
2214 gvt->gtt.scratch_ggtt_mfn = intel_gvt_hypervisor_virt_to_mfn(page);
2215 if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) {
2216 gvt_err("fail to translate scratch ggtt page\n");
2217 __free_page(gvt->gtt.scratch_ggtt_page);
2221 if (enable_out_of_sync) {
2222 ret = setup_spt_oos(gvt);
2224 gvt_err("fail to initialize SPT oos\n");
2228 INIT_LIST_HEAD(&gvt->gtt.mm_lru_list_head);
2233 * intel_gvt_clean_gtt - clean up mm components of a GVT device
2236 * This function is called at the driver unloading stage, to clean up
2237 * the mm components of a GVT device.
2240 void intel_gvt_clean_gtt(struct intel_gvt *gvt)
2242 __free_page(gvt->gtt.scratch_ggtt_page);
2244 if (enable_out_of_sync)
2249 * intel_vgpu_reset_ggtt - reset the GGTT entries
2252 * This function is called at the vGPU create stage
2253 * to reset all the GGTT entries.
2256 void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
2258 struct intel_gvt *gvt = vgpu->gvt;
2259 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
2263 struct intel_gvt_gtt_entry e;
2265 memset(&e, 0, sizeof(struct intel_gvt_gtt_entry));
2266 e.type = GTT_TYPE_GGTT_PTE;
2267 ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn);
2268 e.val64 |= _PAGE_PRESENT;
2270 index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
2271 num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
2272 for (offset = 0; offset < num_entries; offset++)
2273 ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
2275 index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
2276 num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
2277 for (offset = 0; offset < num_entries; offset++)
2278 ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
2282 * intel_vgpu_reset_gtt - reset all the GTT related status
2284 * @dmlr: true for vGPU Device Model Level Reset, false for GT Reset
2286 * This function is called from the vfio core to reset all
2287 * GTT related status, including GGTT, PPGTT and scratch pages.
2290 void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr)
2294 ppgtt_free_all_shadow_page(vgpu);
2298 intel_vgpu_reset_ggtt(vgpu);
2300 /* clear scratch page for security */
2301 for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
2302 if (vgpu->gtt.scratch_pt[i].page != NULL)
2303 memset(page_address(vgpu->gtt.scratch_pt[i].page),