drm/i915/gvt: vGPU HW resource management
author Zhi Wang <zhi.a.wang@intel.com>
Fri, 2 Sep 2016 04:41:29 +0000 (12:41 +0800)
committer Zhenyu Wang <zhenyuw@linux.intel.com>
Fri, 14 Oct 2016 10:11:19 +0000 (18:11 +0800)
This patch introduces the GVT-g vGPU HW resource management. Under the
GVT-g virtualization environment, each vGPU requires a portion of the
HW resources, including aperture, hidden GM space, and fence registers.

When creating a vGPU, GVT-g requests these HW resources from the host
and returns them to the host after the vGPU is destroyed.
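
For orientation, below is a minimal caller sketch of the two entry points added by this patch, intel_vgpu_alloc_resource() and intel_vgpu_free_resource(). The example_* wrappers and the parameter values are illustrative only and are not part of this patch.

#include "gvt.h"

/* Illustrative only: the sizes below are arbitrary example values. */
static int example_create_vgpu_resource(struct intel_vgpu *vgpu)
{
        struct intel_vgpu_creation_params param = {
                .low_gm_sz  = 64,   /* aperture (low GM) size, in MB */
                .high_gm_sz = 448,  /* hidden (high GM) size, in MB */
                .fence_sz   = 4,    /* number of fence registers */
        };

        /* Carves low/high GM out of the host GGTT and reserves free fences. */
        return intel_vgpu_alloc_resource(vgpu, &param);
}

static void example_destroy_vgpu_resource(struct intel_vgpu *vgpu)
{
        /* Returns the GM space and fence registers to the host. */
        intel_vgpu_free_resource(vgpu);
}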

Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
drivers/gpu/drm/i915/gvt/Makefile
drivers/gpu/drm/i915/gvt/aperture_gm.c [new file with mode: 0644]
drivers/gpu/drm/i915/gvt/debug.h
drivers/gpu/drm/i915/gvt/gvt.c
drivers/gpu/drm/i915/gvt/gvt.h
drivers/gpu/drm/i915/intel_gvt.h

diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile
index d0f21a6ad60d54608fd7f84b54969be998366d35..867910902b825d66d14e7d93f82b7834174fd87b 100644
--- a/drivers/gpu/drm/i915/gvt/Makefile
+++ b/drivers/gpu/drm/i915/gvt/Makefile
@@ -1,5 +1,5 @@
 GVT_DIR := gvt
-GVT_SOURCE := gvt.o
+GVT_SOURCE := gvt.o aperture_gm.o
 
 ccflags-y                      += -I$(src) -I$(src)/$(GVT_DIR) -Wall
 i915-y                        += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
new file mode 100644
index 0000000..e0211f8
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -0,0 +1,341 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Kevin Tian <kevin.tian@intel.com>
+ *    Dexuan Cui
+ *
+ * Contributors:
+ *    Pei Zhang <pei.zhang@intel.com>
+ *    Min He <min.he@intel.com>
+ *    Niu Bing <bing.niu@intel.com>
+ *    Yulei Zhang <yulei.zhang@intel.com>
+ *    Zhenyu Wang <zhenyuw@linux.intel.com>
+ *    Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+
+#define MB_TO_BYTES(mb) ((mb) << 20ULL)
+#define BYTES_TO_MB(b) ((b) >> 20ULL)
+
+#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
+#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
+#define HOST_FENCE 4
+
+static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
+{
+       struct intel_gvt *gvt = vgpu->gvt;
+       struct drm_i915_private *dev_priv = gvt->dev_priv;
+       u32 alloc_flag, search_flag;
+       u64 start, end, size;
+       struct drm_mm_node *node;
+       int retried = 0;
+       int ret;
+
+       if (high_gm) {
+               search_flag = DRM_MM_SEARCH_BELOW;
+               alloc_flag = DRM_MM_CREATE_TOP;
+               node = &vgpu->gm.high_gm_node;
+               size = vgpu_hidden_sz(vgpu);
+               start = gvt_hidden_gmadr_base(gvt);
+               end = gvt_hidden_gmadr_end(gvt);
+       } else {
+               search_flag = DRM_MM_SEARCH_DEFAULT;
+               alloc_flag = DRM_MM_CREATE_DEFAULT;
+               node = &vgpu->gm.low_gm_node;
+               size = vgpu_aperture_sz(vgpu);
+               start = gvt_aperture_gmadr_base(gvt);
+               end = gvt_aperture_gmadr_end(gvt);
+       }
+
+       mutex_lock(&dev_priv->drm.struct_mutex);
+search_again:
+       ret = drm_mm_insert_node_in_range_generic(&dev_priv->ggtt.base.mm,
+                                                 node, size, 4096, 0,
+                                                 start, end, search_flag,
+                                                 alloc_flag);
+       if (ret) {
+               ret = i915_gem_evict_something(&dev_priv->ggtt.base,
+                                              size, 4096, 0, start, end, 0);
+               if (ret == 0 && ++retried < 3)
+                       goto search_again;
+
+               gvt_err("fail to alloc %s gm space from host, retried %d\n",
+                               high_gm ? "high" : "low", retried);
+       }
+       mutex_unlock(&dev_priv->drm.struct_mutex);
+       return ret;
+}
+
+static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
+{
+       struct intel_gvt *gvt = vgpu->gvt;
+       struct drm_i915_private *dev_priv = gvt->dev_priv;
+       int ret;
+
+       ret = alloc_gm(vgpu, false);
+       if (ret)
+               return ret;
+
+       ret = alloc_gm(vgpu, true);
+       if (ret)
+               goto out_free_aperture;
+
+       gvt_dbg_core("vgpu%d: alloc low GM start %llx size %llx\n", vgpu->id,
+                    vgpu_aperture_offset(vgpu), vgpu_aperture_sz(vgpu));
+
+       gvt_dbg_core("vgpu%d: alloc high GM start %llx size %llx\n", vgpu->id,
+                    vgpu_hidden_offset(vgpu), vgpu_hidden_sz(vgpu));
+
+       return 0;
+out_free_aperture:
+       mutex_lock(&dev_priv->drm.struct_mutex);
+       drm_mm_remove_node(&vgpu->gm.low_gm_node);
+       mutex_unlock(&dev_priv->drm.struct_mutex);
+       return ret;
+}
+
+static void free_vgpu_gm(struct intel_vgpu *vgpu)
+{
+       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+       mutex_lock(&dev_priv->drm.struct_mutex);
+       drm_mm_remove_node(&vgpu->gm.low_gm_node);
+       drm_mm_remove_node(&vgpu->gm.high_gm_node);
+       mutex_unlock(&dev_priv->drm.struct_mutex);
+}
+
+/**
+ * intel_vgpu_write_fence - write fence registers owned by a vGPU
+ * @vgpu: vGPU instance
+ * @fence: vGPU fence register number
+ * @value: Fence register value to be written
+ *
+ * This function is used to write fence registers owned by a vGPU. The vGPU
+ * fence register number will be translated into HW fence register number.
+ *
+ */
+void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
+               u32 fence, u64 value)
+{
+       struct intel_gvt *gvt = vgpu->gvt;
+       struct drm_i915_private *dev_priv = gvt->dev_priv;
+       struct drm_i915_fence_reg *reg;
+       i915_reg_t fence_reg_lo, fence_reg_hi;
+
+       if (WARN_ON(fence >= vgpu_fence_sz(vgpu)))
+               return;
+
+       reg = vgpu->fence.regs[fence];
+       if (WARN_ON(!reg))
+               return;
+
+       fence_reg_lo = FENCE_REG_GEN6_LO(reg->id);
+       fence_reg_hi = FENCE_REG_GEN6_HI(reg->id);
+
+       I915_WRITE(fence_reg_lo, 0);
+       POSTING_READ(fence_reg_lo);
+
+       I915_WRITE(fence_reg_hi, upper_32_bits(value));
+       I915_WRITE(fence_reg_lo, lower_32_bits(value));
+       POSTING_READ(fence_reg_lo);
+}
+
+static void free_vgpu_fence(struct intel_vgpu *vgpu)
+{
+       struct intel_gvt *gvt = vgpu->gvt;
+       struct drm_i915_private *dev_priv = gvt->dev_priv;
+       struct drm_i915_fence_reg *reg;
+       u32 i;
+
+       if (WARN_ON(!vgpu_fence_sz(vgpu)))
+               return;
+
+       mutex_lock(&dev_priv->drm.struct_mutex);
+       for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
+               reg = vgpu->fence.regs[i];
+               intel_vgpu_write_fence(vgpu, i, 0);
+               list_add_tail(&reg->link,
+                             &dev_priv->mm.fence_list);
+       }
+       mutex_unlock(&dev_priv->drm.struct_mutex);
+}
+
+static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
+{
+       struct intel_gvt *gvt = vgpu->gvt;
+       struct drm_i915_private *dev_priv = gvt->dev_priv;
+       struct drm_i915_fence_reg *reg;
+       int i;
+       struct list_head *pos, *q;
+
+       /* Request fences from host */
+       mutex_lock(&dev_priv->drm.struct_mutex);
+       i = 0;
+       list_for_each_safe(pos, q, &dev_priv->mm.fence_list) {
+               reg = list_entry(pos, struct drm_i915_fence_reg, link);
+               if (reg->pin_count || reg->vma)
+                       continue;
+               list_del(pos);
+               vgpu->fence.regs[i] = reg;
+               intel_vgpu_write_fence(vgpu, i, 0);
+               if (++i == vgpu_fence_sz(vgpu))
+                       break;
+       }
+       if (i != vgpu_fence_sz(vgpu))
+               goto out_free_fence;
+
+       mutex_unlock(&dev_priv->drm.struct_mutex);
+       return 0;
+out_free_fence:
+       /* Return fences to the host on failure */
+       for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
+               reg = vgpu->fence.regs[i];
+               if (!reg)
+                       continue;
+               list_add_tail(&reg->link,
+                             &dev_priv->mm.fence_list);
+       }
+       mutex_unlock(&dev_priv->drm.struct_mutex);
+       return -ENOSPC;
+}
+
+static void free_resource(struct intel_vgpu *vgpu)
+{
+       struct intel_gvt *gvt = vgpu->gvt;
+
+       gvt->gm.vgpu_allocated_low_gm_size -= vgpu_aperture_sz(vgpu);
+       gvt->gm.vgpu_allocated_high_gm_size -= vgpu_hidden_sz(vgpu);
+       gvt->fence.vgpu_allocated_fence_num -= vgpu_fence_sz(vgpu);
+}
+
+static int alloc_resource(struct intel_vgpu *vgpu,
+               struct intel_vgpu_creation_params *param)
+{
+       struct intel_gvt *gvt = vgpu->gvt;
+       unsigned long request, avail, max, taken;
+       const char *item;
+
+       if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) {
+               gvt_err("Invalid vGPU creation params\n");
+               return -EINVAL;
+       }
+
+       item = "low GM space";
+       max = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
+       taken = gvt->gm.vgpu_allocated_low_gm_size;
+       avail = max - taken;
+       request = MB_TO_BYTES(param->low_gm_sz);
+
+       if (request > avail)
+               goto no_enough_resource;
+
+       vgpu_aperture_sz(vgpu) = request;
+
+       item = "high GM space";
+       max = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
+       taken = gvt->gm.vgpu_allocated_high_gm_size;
+       avail = max - taken;
+       request = MB_TO_BYTES(param->high_gm_sz);
+
+       if (request > avail)
+               goto no_enough_resource;
+
+       vgpu_hidden_sz(vgpu) = request;
+
+       item = "fence";
+       max = gvt_fence_sz(gvt) - HOST_FENCE;
+       taken = gvt->fence.vgpu_allocated_fence_num;
+       avail = max - taken;
+       request = param->fence_sz;
+
+       if (request > avail)
+               goto no_enough_resource;
+
+       vgpu_fence_sz(vgpu) = request;
+
+       gvt->gm.vgpu_allocated_low_gm_size += MB_TO_BYTES(param->low_gm_sz);
+       gvt->gm.vgpu_allocated_high_gm_size += MB_TO_BYTES(param->high_gm_sz);
+       gvt->fence.vgpu_allocated_fence_num += param->fence_sz;
+       return 0;
+
+no_enough_resource:
+       gvt_err("vgpu%d: fail to allocate resource %s\n", vgpu->id, item);
+       gvt_err("vgpu%d: request %luMB avail %luMB max %luMB taken %luMB\n",
+               vgpu->id, BYTES_TO_MB(request), BYTES_TO_MB(avail),
+               BYTES_TO_MB(max), BYTES_TO_MB(taken));
+       return -ENOSPC;
+}
+
+/**
+ * intel_vgpu_free_resource - free HW resource owned by a vGPU
+ * @vgpu: a vGPU
+ *
+ * This function is used to free the HW resource owned by a vGPU.
+ *
+ */
+void intel_vgpu_free_resource(struct intel_vgpu *vgpu)
+{
+       free_vgpu_gm(vgpu);
+       free_vgpu_fence(vgpu);
+       free_resource(vgpu);
+}
+
+/**
+ * intel_vgpu_alloc_resource - allocate HW resource for a vGPU
+ * @vgpu: vGPU
+ * @param: vGPU creation params
+ *
+ * This function is used to allocate HW resource for a vGPU. User specifies
+ * the resource configuration through the creation params.
+ *
+ * Returns:
+ * zero on success, negative error code if failed.
+ *
+ */
+int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
+               struct intel_vgpu_creation_params *param)
+{
+       int ret;
+
+       ret = alloc_resource(vgpu, param);
+       if (ret)
+               return ret;
+
+       ret = alloc_vgpu_gm(vgpu);
+       if (ret)
+               goto out_free_resource;
+
+       ret = alloc_vgpu_fence(vgpu);
+       if (ret)
+               goto out_free_vgpu_gm;
+
+       return 0;
+
+out_free_vgpu_gm:
+       free_vgpu_gm(vgpu);
+out_free_resource:
+       free_resource(vgpu);
+       return ret;
+}
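
To make the bookkeeping in alloc_resource() above concrete, here is a worked example with hypothetical hardware sizes (512 MB aperture, 4 GB total GGTT, 32 fence registers); the actual values come from dev_priv->ggtt and dev_priv->num_fence_regs.

/*
 * Hypothetical host: 512 MB aperture, 4 GB total GGTT, 32 fence registers.
 *
 *   low GM available  = gvt_aperture_sz - HOST_LOW_GM_SIZE  - already taken
 *                     = 512 MB - 128 MB - 0                 = 384 MB
 *   high GM available = gvt_hidden_sz   - HOST_HIGH_GM_SIZE - already taken
 *                     = (4096 - 512) MB - 384 MB - 0        = 3200 MB
 *   fences available  = gvt_fence_sz    - HOST_FENCE        - already taken
 *                     = 32 - 4 - 0                          = 28
 *
 * A request of low_gm_sz = 64 MB, high_gm_sz = 448 MB, fence_sz = 4 fits;
 * the first item whose request exceeds the remaining headroom makes
 * alloc_resource() return -ENOSPC.
 */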
diff --git a/drivers/gpu/drm/i915/gvt/debug.h b/drivers/gpu/drm/i915/gvt/debug.h
index 7ef412be665fc0c585720c746d86afcb933da6c4..f9f0923feb9e4c625955ce023658a4eb0fa55a42 100644
--- a/drivers/gpu/drm/i915/gvt/debug.h
+++ b/drivers/gpu/drm/i915/gvt/debug.h
@@ -24,6 +24,9 @@
 #ifndef __GVT_DEBUG_H__
 #define __GVT_DEBUG_H__
 
+#define gvt_err(fmt, args...) \
+       DRM_ERROR("gvt: "fmt, ##args)
+
 #define gvt_dbg_core(fmt, args...) \
        DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args)
 
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 927f4579f5b6231618f03abf916b1381d4f36e88..6ec5b937af6309c6a3b74f70a0c8e4c561d214ea 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -84,7 +84,7 @@ int intel_gvt_init_host(void)
 
 static void init_device_info(struct intel_gvt *gvt)
 {
-       if (IS_BROADWELL(gvt->dev_priv))
+       if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv))
                gvt->device_info.max_support_vgpus = 8;
        /* This function will grow large in GVT device model patches. */
 }
@@ -135,6 +135,9 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
 
        gvt_dbg_core("init gvt device\n");
 
+       mutex_init(&gvt->lock);
+       gvt->dev_priv = dev_priv;
+
        init_device_info(gvt);
        /*
         * Other initialization of GVT components will be introduce here.
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index fb619a6e519dede2fffe7b744f6c0a23b38e5d6a..f42cdf74d5774ca47badb63a8273444e0cd69d06 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -48,10 +48,39 @@ struct intel_gvt_device_info {
        /* This data structure will grow bigger in GVT device model patches */
 };
 
+/* GM resources owned by a vGPU */
+struct intel_vgpu_gm {
+       u64 aperture_sz;
+       u64 hidden_sz;
+       struct drm_mm_node low_gm_node;
+       struct drm_mm_node high_gm_node;
+};
+
+#define INTEL_GVT_MAX_NUM_FENCES 32
+
+/* Fences owned by a vGPU */
+struct intel_vgpu_fence {
+       struct drm_i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
+       u32 base;
+       u32 size;
+};
+
 struct intel_vgpu {
        struct intel_gvt *gvt;
        int id;
        unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
+
+       struct intel_vgpu_fence fence;
+       struct intel_vgpu_gm gm;
+};
+
+struct intel_gvt_gm {
+       unsigned long vgpu_allocated_low_gm_size;
+       unsigned long vgpu_allocated_high_gm_size;
+};
+
+struct intel_gvt_fence {
+       unsigned long vgpu_allocated_fence_num;
 };
 
 struct intel_gvt {
@@ -62,8 +91,68 @@ struct intel_gvt {
        struct idr vgpu_idr;    /* vGPU IDR pool */
 
        struct intel_gvt_device_info device_info;
+       struct intel_gvt_gm gm;
+       struct intel_gvt_fence fence;
 };
 
+/* Aperture/GM space definitions for GVT device */
+#define gvt_aperture_sz(gvt)     (gvt->dev_priv->ggtt.mappable_end)
+#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.mappable_base)
+
+#define gvt_ggtt_gm_sz(gvt)      (gvt->dev_priv->ggtt.base.total)
+#define gvt_hidden_sz(gvt)       (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))
+
+#define gvt_aperture_gmadr_base(gvt) (0)
+#define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \
+                                    + gvt_aperture_sz(gvt) - 1)
+
+#define gvt_hidden_gmadr_base(gvt) (gvt_aperture_gmadr_base(gvt) \
+                                   + gvt_aperture_sz(gvt))
+#define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
+                                  + gvt_hidden_sz(gvt) - 1)
+
+#define gvt_fence_sz(gvt) (gvt->dev_priv->num_fence_regs)
+
+/* Aperture/GM space definitions for vGPU */
+#define vgpu_aperture_offset(vgpu)     ((vgpu)->gm.low_gm_node.start)
+#define vgpu_hidden_offset(vgpu)       ((vgpu)->gm.high_gm_node.start)
+#define vgpu_aperture_sz(vgpu)         ((vgpu)->gm.aperture_sz)
+#define vgpu_hidden_sz(vgpu)           ((vgpu)->gm.hidden_sz)
+
+#define vgpu_aperture_pa_base(vgpu) \
+       (gvt_aperture_pa_base(vgpu->gvt) + vgpu_aperture_offset(vgpu))
+
+#define vgpu_ggtt_gm_sz(vgpu) ((vgpu)->gm.aperture_sz + (vgpu)->gm.hidden_sz)
+
+#define vgpu_aperture_pa_end(vgpu) \
+       (vgpu_aperture_pa_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)
+
+#define vgpu_aperture_gmadr_base(vgpu) (vgpu_aperture_offset(vgpu))
+#define vgpu_aperture_gmadr_end(vgpu) \
+       (vgpu_aperture_gmadr_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)
+
+#define vgpu_hidden_gmadr_base(vgpu) (vgpu_hidden_offset(vgpu))
+#define vgpu_hidden_gmadr_end(vgpu) \
+       (vgpu_hidden_gmadr_base(vgpu) + vgpu_hidden_sz(vgpu) - 1)
+
+#define vgpu_fence_base(vgpu) (vgpu->fence.base)
+#define vgpu_fence_sz(vgpu) (vgpu->fence.size)
+
+struct intel_vgpu_creation_params {
+       __u64 handle;
+       __u64 low_gm_sz;  /* in MB */
+       __u64 high_gm_sz; /* in MB */
+       __u64 fence_sz;
+       __s32 primary;
+       __u64 vgpu_id;
+};
+
+int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
+                             struct intel_vgpu_creation_params *param);
+void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
+void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
+       u32 fence, u64 value);
+
 #include "mpt.h"
 
 #endif
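
For reference, a sketch of the global GM address map implied by the gvt.h macros above, using the same hypothetical sizes; the numbers are illustrative, but the layout follows directly from the definitions.

/*
 * Hypothetical host: 512 MB aperture, 4 GB total GGTT.
 *
 *   gvt_aperture_gmadr_base = 0
 *   gvt_aperture_gmadr_end  = 512 MB - 1   (CPU-mappable "low" GM)
 *   gvt_hidden_gmadr_base   = 512 MB
 *   gvt_hidden_gmadr_end    = 4 GB - 1     (GPU-only "high"/hidden GM)
 *
 * Each vGPU's low_gm_node/high_gm_node is a drm_mm_node carved out of one
 * of these two ranges, so vgpu_aperture_gmadr_base()/vgpu_hidden_gmadr_base()
 * are simply the node start offsets within the shared GGTT address space.
 */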
diff --git a/drivers/gpu/drm/i915/intel_gvt.h b/drivers/gpu/drm/i915/intel_gvt.h
index 960211df74db02587c479920fdedf516d3df8ff6..0f00105f4c5ddeb1ddcf300e5f3d72c0c1aad5c3 100644
--- a/drivers/gpu/drm/i915/intel_gvt.h
+++ b/drivers/gpu/drm/i915/intel_gvt.h
@@ -24,6 +24,7 @@
 #ifndef _INTEL_GVT_H_
 #define _INTEL_GVT_H_
 
+#include "i915_pvinfo.h"
 #include "gvt/gvt.h"
 
 #ifdef CONFIG_DRM_I915_GVT