/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Eddie Dong <eddie.dong@intel.com>
 *
 * Contributors:
 *    Niu Bing <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */
#ifndef _GVT_H_
#define _GVT_H_

#include "debug.h"
#include "hypercall.h"
#include "mmio.h"
#include "reg.h"
#include "interrupt.h"
#include "gtt.h"
#include "display.h"
#include "edid.h"
#include "execlist.h"
#include "scheduler.h"
#include "sched_policy.h"
#define GVT_MAX_VGPU 8

enum {
	INTEL_GVT_HYPERVISOR_XEN = 0,
	INTEL_GVT_HYPERVISOR_KVM,
};

struct intel_gvt_host {
	bool initialized;
	int hypervisor_type;
	struct intel_gvt_mpt *mpt;
};

extern struct intel_gvt_host intel_gvt_host;
/* Describe per-platform limitations. */
struct intel_gvt_device_info {
	u32 max_support_vgpus;
	unsigned long msi_cap_offset;
	u32 gtt_entry_size_shift;
};
/* GM resources owned by a vGPU */
struct intel_vgpu_gm {
	u64 aperture_sz;
	u64 hidden_sz;
	struct drm_mm_node low_gm_node;
	struct drm_mm_node high_gm_node;
};
#define INTEL_GVT_MAX_NUM_FENCES 32

/* Fences owned by a vGPU */
struct intel_vgpu_fence {
	struct drm_i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
	u32 base;
	u32 size;
};
struct intel_vgpu_mmio {
	void *vreg;
	void *sreg;
	bool disable_warn_untrack;
};
#define INTEL_GVT_MAX_CFG_SPACE_SZ 256
#define INTEL_GVT_MAX_BAR_NUM 4

struct intel_vgpu_pci_bar {
	u64 size;
	bool tracked;
};

struct intel_vgpu_cfg_space {
	unsigned char virtual_cfg_space[INTEL_GVT_MAX_CFG_SPACE_SZ];
	struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
};

#define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)
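
/*
 * Illustrative sketch only (not part of the driver): emulated config-space
 * reads can be served straight from the virtual_cfg_space backing store,
 * e.g. fetching the guest-visible vendor ID of a vGPU:
 *
 *	u16 vendor_id;
 *
 *	memcpy(&vendor_id, vgpu_cfg_space(vgpu) + PCI_VENDOR_ID,
 *	       sizeof(vendor_id));
 */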
#define INTEL_GVT_MAX_PIPE 4

struct intel_vgpu_irq {
	bool irq_warn_once[INTEL_GVT_EVENT_MAX];
	DECLARE_BITMAP(flip_done_event[INTEL_GVT_MAX_PIPE],
		       INTEL_GVT_EVENT_MAX);
};
struct intel_vgpu_opregion {
	u32 gfn[INTEL_GVT_OPREGION_PAGES];
	struct page *pages[INTEL_GVT_OPREGION_PAGES];
};

#define vgpu_opregion(vgpu) (&(vgpu->opregion))
#define INTEL_GVT_MAX_PORT 5

struct intel_vgpu_display {
	struct intel_vgpu_i2c_edid i2c_edid;
	struct intel_vgpu_port ports[INTEL_GVT_MAX_PORT];
	struct intel_vgpu_sbi sbi;
};
struct intel_vgpu {
	struct intel_gvt *gvt;
	int id;
	unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
	bool active;

	struct intel_vgpu_fence fence;
	struct intel_vgpu_gm gm;
	struct intel_vgpu_cfg_space cfg_space;
	struct intel_vgpu_mmio mmio;
	struct intel_vgpu_irq irq;
	struct intel_vgpu_gtt gtt;
	struct intel_vgpu_opregion opregion;
	struct intel_vgpu_display display;
	struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
	struct list_head workload_q_head[I915_NUM_ENGINES];
	struct kmem_cache *workloads;
	atomic_t running_workload_num;
	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
	struct i915_gem_context *shadow_ctx;
	struct notifier_block shadow_ctx_notifier_block;
};
struct intel_gvt_gm {
	unsigned long vgpu_allocated_low_gm_size;
	unsigned long vgpu_allocated_high_gm_size;
};
struct intel_gvt_fence {
	unsigned long vgpu_allocated_fence_num;
};
#define INTEL_GVT_MMIO_HASH_BITS 9

struct intel_gvt_mmio {
	DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
};
struct intel_gvt_firmware {
	bool firmware_loaded;
};
struct intel_gvt_opregion {
	void *opregion_va;
	u32 opregion_pa;
};

struct intel_gvt {
	struct drm_i915_private *dev_priv;
	struct idr vgpu_idr;	/* vGPU IDR pool */

	struct intel_gvt_device_info device_info;
	struct intel_gvt_gm gm;
	struct intel_gvt_fence fence;
	struct intel_gvt_mmio mmio;
	struct intel_gvt_firmware firmware;
	struct intel_gvt_irq irq;
	struct intel_gvt_gtt gtt;
	struct intel_gvt_opregion opregion;
	struct intel_gvt_workload_scheduler scheduler;

	struct task_struct *service_thread;
	wait_queue_head_t service_thread_wq;
	unsigned long service_request;
};
enum {
	INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,
};
static inline void intel_gvt_request_service(struct intel_gvt *gvt,
		int service)
{
	set_bit(service, (void *)&gvt->service_request);
	wake_up(&gvt->service_thread_wq);
}
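
/*
 * Illustrative sketch only (not part of the driver): a periodic timer
 * callback that wants the service thread to emulate a vblank for every
 * vGPU would simply set the request bit and wake the thread:
 *
 *	intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EMULATE_VBLANK);
 */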
void intel_gvt_free_firmware(struct intel_gvt *gvt);
int intel_gvt_load_firmware(struct intel_gvt *gvt);
/* Aperture/GM space definitions for GVT device */
#define gvt_aperture_sz(gvt)		(gvt->dev_priv->ggtt.mappable_end)
#define gvt_aperture_pa_base(gvt)	(gvt->dev_priv->ggtt.mappable_base)

#define gvt_ggtt_gm_sz(gvt)		(gvt->dev_priv->ggtt.base.total)
#define gvt_ggtt_sz(gvt) \
	((gvt->dev_priv->ggtt.base.total >> PAGE_SHIFT) << 3)
#define gvt_hidden_sz(gvt)		(gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))

#define gvt_aperture_gmadr_base(gvt) (0)
#define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \
				     + gvt_aperture_sz(gvt) - 1)

#define gvt_hidden_gmadr_base(gvt) (gvt_aperture_gmadr_base(gvt) \
				    + gvt_aperture_sz(gvt))
#define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
				   + gvt_hidden_sz(gvt) - 1)

#define gvt_fence_sz(gvt) (gvt->dev_priv->num_fence_regs)
/* Aperture/GM space definitions for vGPU */
#define vgpu_aperture_offset(vgpu)	((vgpu)->gm.low_gm_node.start)
#define vgpu_hidden_offset(vgpu)	((vgpu)->gm.high_gm_node.start)
#define vgpu_aperture_sz(vgpu)		((vgpu)->gm.aperture_sz)
#define vgpu_hidden_sz(vgpu)		((vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_base(vgpu) \
	(gvt_aperture_pa_base(vgpu->gvt) + vgpu_aperture_offset(vgpu))

#define vgpu_ggtt_gm_sz(vgpu) ((vgpu)->gm.aperture_sz + (vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_end(vgpu) \
	(vgpu_aperture_pa_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_aperture_gmadr_base(vgpu) (vgpu_aperture_offset(vgpu))
#define vgpu_aperture_gmadr_end(vgpu) \
	(vgpu_aperture_gmadr_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_hidden_gmadr_base(vgpu) (vgpu_hidden_offset(vgpu))
#define vgpu_hidden_gmadr_end(vgpu) \
	(vgpu_hidden_gmadr_base(vgpu) + vgpu_hidden_sz(vgpu) - 1)

#define vgpu_fence_base(vgpu) (vgpu->fence.base)
#define vgpu_fence_sz(vgpu) (vgpu->fence.size)
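
/*
 * Illustrative sketch only (not part of the driver): the per-vGPU macros
 * above describe one contiguous [base, end] range for the mappable aperture
 * and one for the hidden (high) GM, so a hypothetical debug dump of a
 * vGPU's partitioning could look like this (gvt_dbg_core() from debug.h
 * is assumed to take printf-style arguments):
 *
 *	gvt_dbg_core("aperture [%llx, %llx], hidden [%llx, %llx]\n",
 *		     (u64)vgpu_aperture_gmadr_base(vgpu),
 *		     (u64)vgpu_aperture_gmadr_end(vgpu),
 *		     (u64)vgpu_hidden_gmadr_base(vgpu),
 *		     (u64)vgpu_hidden_gmadr_end(vgpu));
 */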
struct intel_vgpu_creation_params {
	__u64 low_gm_sz;  /* in MB */
	__u64 high_gm_sz; /* in MB */
};
int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
			      struct intel_vgpu_creation_params *param);
void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
			    u32 fence, u64 value);
/* Macros for easily accessing vGPU virtual/shadow register */
#define vgpu_vreg(vgpu, reg) \
	(*(u32 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_vreg8(vgpu, reg) \
	(*(u8 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_vreg16(vgpu, reg) \
	(*(u16 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_vreg64(vgpu, reg) \
	(*(u64 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg(vgpu, reg) \
	(*(u32 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg8(vgpu, reg) \
	(*(u8 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg16(vgpu, reg) \
	(*(u16 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg64(vgpu, reg) \
	(*(u64 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
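
/*
 * Illustrative sketch only (not part of the driver): a device-model MMIO
 * write handler would typically store the guest-visible value through
 * vgpu_vreg(); the handler name and signature below are hypothetical:
 *
 *	static int sample_mmio_write(struct intel_vgpu *vgpu,
 *			unsigned int offset, void *p_data, unsigned int bytes)
 *	{
 *		vgpu_vreg(vgpu, offset) = *(u32 *)p_data;
 *		return 0;
 *	}
 */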
#define for_each_active_vgpu(gvt, vgpu, id) \
	idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
		for_each_if(vgpu->active)
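
/*
 * Illustrative sketch only (not part of the driver): walking every vGPU
 * that is currently marked active, e.g. to post a virtual display event to
 * each of them (intel_vgpu_trigger_virtual_event() and PIPE_A_VBLANK are
 * assumed here to come from interrupt.h):
 *
 *	struct intel_vgpu *vgpu;
 *	int id;
 *
 *	for_each_active_vgpu(gvt, vgpu, id)
 *		intel_vgpu_trigger_virtual_event(vgpu, PIPE_A_VBLANK);
 */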
static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
		u32 offset, u32 val, bool low)
{
	u32 *pval;

	/* BAR offset should be 32 bits aligned */
	offset = rounddown(offset, 4);
	pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);

	if (low) {
		/*
		 * only update bit 31 - bit 4,
		 * leave the bit 3 - bit 0 unchanged.
		 */
		*pval = (val & GENMASK(31, 4)) | (*pval & GENMASK(3, 0));
	}
}
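
/*
 * Illustrative sketch only (not part of the driver): a config-space write
 * handler emulating a 64-bit BAR would update the two 32-bit halves
 * separately; bar0_offset and size_mask are hypothetical names for the
 * BAR's config-space offset and the value being written back:
 *
 *	intel_vgpu_write_pci_bar(vgpu, bar0_offset,
 *				 lower_32_bits(size_mask), true);
 *	intel_vgpu_write_pci_bar(vgpu, bar0_offset + 4,
 *				 upper_32_bits(size_mask), false);
 */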
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
					 struct intel_vgpu_creation_params *
					 param);

void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
/* validating GM functions */
#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
	((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \
	 (gmadr <= vgpu_aperture_gmadr_end(vgpu)))

#define vgpu_gmadr_is_hidden(vgpu, gmadr) \
	((gmadr >= vgpu_hidden_gmadr_base(vgpu)) && \
	 (gmadr <= vgpu_hidden_gmadr_end(vgpu)))

#define vgpu_gmadr_is_valid(vgpu, gmadr) \
	((vgpu_gmadr_is_aperture(vgpu, gmadr) || \
	 (vgpu_gmadr_is_hidden(vgpu, gmadr))))

#define gvt_gmadr_is_aperture(gvt, gmadr) \
	((gmadr >= gvt_aperture_gmadr_base(gvt)) && \
	 (gmadr <= gvt_aperture_gmadr_end(gvt)))

#define gvt_gmadr_is_hidden(gvt, gmadr) \
	((gmadr >= gvt_hidden_gmadr_base(gvt)) && \
	 (gmadr <= gvt_hidden_gmadr_end(gvt)))

#define gvt_gmadr_is_valid(gvt, gmadr) \
	(gvt_gmadr_is_aperture(gvt, gmadr) || \
	 gvt_gmadr_is_hidden(gvt, gmadr))
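
/*
 * Illustrative sketch only (not part of the driver): before translating a
 * guest graphics memory address, callers normally range-check it against
 * the owning vGPU's partition and then convert guest to host:
 *
 *	u64 h_addr;
 *
 *	if (!vgpu_gmadr_is_valid(vgpu, g_addr))
 *		return -EINVAL;
 *	if (intel_gvt_ggtt_gmadr_g2h(vgpu, g_addr, &h_addr))
 *		return -EINVAL;
 */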
bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size);
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr);
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr);
int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
			     unsigned long *h_index);
int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
			     unsigned long *g_index);
int intel_vgpu_emulate_cfg_read(void *__vgpu, unsigned int offset,
				void *p_data, unsigned int bytes);

int intel_vgpu_emulate_cfg_write(void *__vgpu, unsigned int offset,
				 void *p_data, unsigned int bytes);

void intel_gvt_clean_opregion(struct intel_gvt *gvt);
int intel_gvt_init_opregion(struct intel_gvt *gvt);

void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);

int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);