drm/amdgpu: correctly sign extend 48bit addresses v3
author Christian König <christian.koenig@amd.com>
Mon, 27 Aug 2018 16:22:31 +0000 (18:22 +0200)
committer Alex Deucher <alexander.deucher@amd.com>
Tue, 11 Sep 2018 03:41:24 +0000 (22:41 -0500)
Correctly sign extend the GMC addresses to 48 bits.

v2: sign extending turned out to be easier than expected.
v3: clean up the defines and move them into amdgpu_gmc.h as well

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
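
To illustrate what the patch does, here is a minimal stand-alone sketch of the
48-bit sign extension, assuming only the constants introduced below in
amdgpu_gmc.h; the demo main() and its printf output are illustrative only and
not part of the kernel code:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Constants as added to amdgpu_gmc.h by this patch. */
#define AMDGPU_GMC_HOLE_START	0x0000800000000000ULL
#define AMDGPU_GMC_HOLE_END	0xffff800000000000ULL
#define AMDGPU_GMC_HOLE_MASK	0x0000ffffffffffffULL

/* Mirror of the new amdgpu_gmc_sign_extend() helper. */
static inline uint64_t gmc_sign_extend(uint64_t addr)
{
	/* An address with bit 47 set gets its upper 16 bits filled in. */
	if (addr >= AMDGPU_GMC_HOLE_START)
		addr |= AMDGPU_GMC_HOLE_END;

	return addr;
}

int main(void)
{
	/* Low half of the 48-bit space: unchanged. */
	printf("0x%016" PRIx64 "\n", gmc_sign_extend(0x00007ffffffff000ULL));
	/* High half: upper 16 bits become all ones (canonical form). */
	printf("0x%016" PRIx64 "\n", gmc_sign_extend(0x0000800000001000ULL));
	/* Masking with AMDGPU_GMC_HOLE_MASK removes the extension again. */
	printf("0x%016" PRIx64 "\n",
	       gmc_sign_extend(0x0000800000001000ULL) & AMDGPU_GMC_HOLE_MASK);
	return 0;
}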

index 8bee9a0a1dec52f196afb5a7b55654e440099389..db9872f83d0381be1ac50d1d1feb4d9ccf1e018c 100644 (file)
@@ -135,7 +135,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
                        .num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe,
                        .gpuvm_size = min(adev->vm_manager.max_pfn
                                          << AMDGPU_GPU_PAGE_SHIFT,
-                                         AMDGPU_VA_HOLE_START),
+                                         AMDGPU_GMC_HOLE_START),
                        .drm_render_minor = adev->ddev->render->index
                };
 
index 04a2733b5ccc4817151dd15ecb325fc466a19616..135d9d8c95067269e586d49b41fc99b63dfa7473 100644 (file)
@@ -835,7 +835,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
                        if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
                                continue;
 
-                       va_start = chunk_ib->va_start & AMDGPU_VA_HOLE_MASK;
+                       va_start = chunk_ib->va_start & AMDGPU_GMC_HOLE_MASK;
                        r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
                        if (r) {
                                DRM_ERROR("IB va_start is invalid\n");
index 71792d820ae0cba1306b721744b0d915cbe4da84..d30a0838851b6b1ed3528994230a00ee5f006d65 100644 (file)
@@ -572,16 +572,16 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
                return -EINVAL;
        }
 
-       if (args->va_address >= AMDGPU_VA_HOLE_START &&
-           args->va_address < AMDGPU_VA_HOLE_END) {
+       if (args->va_address >= AMDGPU_GMC_HOLE_START &&
+           args->va_address < AMDGPU_GMC_HOLE_END) {
                dev_dbg(&dev->pdev->dev,
                        "va_address 0x%LX is in VA hole 0x%LX-0x%LX\n",
-                       args->va_address, AMDGPU_VA_HOLE_START,
-                       AMDGPU_VA_HOLE_END);
+                       args->va_address, AMDGPU_GMC_HOLE_START,
+                       AMDGPU_GMC_HOLE_END);
                return -EINVAL;
        }
 
-       args->va_address &= AMDGPU_VA_HOLE_MASK;
+       args->va_address &= AMDGPU_GMC_HOLE_MASK;
 
        if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
                dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
index 588a62f7aebc5c3b8d2e5aa03a0617f074a5d8a8..d84ef1634eb211754ccae6c26a455f7fbcca1b4d 100644 (file)
 
 #include "amdgpu_irq.h"
 
+/* VA hole for 48bit addresses on Vega10 */
+#define AMDGPU_GMC_HOLE_START  0x0000800000000000ULL
+#define AMDGPU_GMC_HOLE_END    0xffff800000000000ULL
+
+/*
+ * Hardware is programmed as if the hole doesn't exist, with start and end
+ * address values.
+ *
+ * This mask is used to remove the upper 16 bits of the VA and so come up
+ * with the linear address value.
+ */
+#define AMDGPU_GMC_HOLE_MASK   0x0000ffffffffffffULL
+
 struct firmware;
 
 /*
@@ -133,6 +146,19 @@ static inline bool amdgpu_gmc_vram_full_visible(struct amdgpu_gmc *gmc)
        return (gmc->real_vram_size == gmc->visible_vram_size);
 }
 
+/**
+ * amdgpu_gmc_sign_extend - sign extend the given gmc address
+ *
+ * @addr: address to extend
+ */
+static inline uint64_t amdgpu_gmc_sign_extend(uint64_t addr)
+{
+       if (addr >= AMDGPU_GMC_HOLE_START)
+               addr |= AMDGPU_GMC_HOLE_END;
+
+       return addr;
+}
+
 void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
                               uint64_t *addr, uint64_t *flags);
 uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo);
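
As a hedged sketch of how the defines and the new helper above are meant to be
used together (the constants come from the patch; va_in_hole(), va_to_hw() and
hw_to_va() are hypothetical helpers for illustration, not driver API):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define AMDGPU_GMC_HOLE_START	0x0000800000000000ULL
#define AMDGPU_GMC_HOLE_END	0xffff800000000000ULL
#define AMDGPU_GMC_HOLE_MASK	0x0000ffffffffffffULL

/* Canonical-form check mirroring the one in amdgpu_gem_va_ioctl():
 * addresses inside [HOLE_START, HOLE_END) are rejected. */
static bool va_in_hole(uint64_t va)
{
	return va >= AMDGPU_GMC_HOLE_START && va < AMDGPU_GMC_HOLE_END;
}

/* Hardware is programmed as if the hole did not exist, so the upper
 * 16 bits are stripped before an address is handed to the GMC. */
static uint64_t va_to_hw(uint64_t va)
{
	return va & AMDGPU_GMC_HOLE_MASK;
}

/* Going back to the CPU-visible form is the sign extension added by
 * this patch (compare amdgpu_gmc_sign_extend() above). */
static uint64_t hw_to_va(uint64_t addr)
{
	return addr >= AMDGPU_GMC_HOLE_START ? addr | AMDGPU_GMC_HOLE_END : addr;
}

int main(void)
{
	uint64_t high_va = 0xffff800000400000ULL;	/* canonical high half */

	assert(!va_in_hole(high_va));
	assert(va_in_hole(0x0000900000000000ULL));	/* non-canonical */
	assert(hw_to_va(va_to_hw(high_va)) == high_va);	/* lossless round trip */
	return 0;
}

The remaining hunks below switch existing users from the old AMDGPU_VA_* names
to the AMDGPU_GMC_* ones and to amdgpu_gmc_sign_extend().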
index ad7978bab5fce29f4af3b0b2f5160f074a338773..86e8772b6852baa2b317048df5e6707937cec8dd 100644 (file)
@@ -655,11 +655,11 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 
                dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
                dev_info.virtual_address_max =
-                       min(vm_size, AMDGPU_VA_HOLE_START);
+                       min(vm_size, AMDGPU_GMC_HOLE_START);
 
-               if (vm_size > AMDGPU_VA_HOLE_START) {
-                       dev_info.high_va_offset = AMDGPU_VA_HOLE_END;
-                       dev_info.high_va_max = AMDGPU_VA_HOLE_END | vm_size;
+               if (vm_size > AMDGPU_GMC_HOLE_START) {
+                       dev_info.high_va_offset = AMDGPU_GMC_HOLE_END;
+                       dev_info.high_va_max = AMDGPU_GMC_HOLE_END | vm_size;
                }
                dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
                dev_info.pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
index b5f20b42439e19c050731ec2fdc1de8f265c2c4f..0cbf651a88a6a97cc095ec0825ab7b56a49229ae 100644 (file)
@@ -1368,7 +1368,7 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
        WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
                     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
 
-       return bo->tbo.offset;
+       return amdgpu_gmc_sign_extend(bo->tbo.offset);
 }
 
 /**
index 38856365580d7b418bb2a33c63835086c6567b0c..f2f358aa059717194fd02fe41fbfc0c04bc0b26a 100644 (file)
@@ -28,9 +28,7 @@ uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
        uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;
 
        addr -= AMDGPU_VA_RESERVED_SIZE;
-
-       if (addr >= AMDGPU_VA_HOLE_START)
-               addr |= AMDGPU_VA_HOLE_END;
+       addr = amdgpu_gmc_sign_extend(addr);
 
        return addr;
 }
@@ -73,7 +71,7 @@ void amdgpu_free_static_csa(struct amdgpu_device *adev) {
 int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                          struct amdgpu_bo_va **bo_va)
 {
-       uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_VA_HOLE_MASK;
+       uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
        struct ww_acquire_ctx ticket;
        struct list_head list;
        struct amdgpu_bo_list_entry pd;
index 1f79a0ddc78a9209fcb59983585877d351e8eb45..3163351508cf2201fb41aaeafabfcc781b26ab1e 100644 (file)
@@ -492,7 +492,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
                if (level == adev->vm_manager.root_level) {
                        ats_entries = amdgpu_vm_level_shift(adev, level);
                        ats_entries += AMDGPU_GPU_PAGE_SHIFT;
-                       ats_entries = AMDGPU_VA_HOLE_START >> ats_entries;
+                       ats_entries = AMDGPU_GMC_HOLE_START >> ats_entries;
                        ats_entries = min(ats_entries, entries);
                        entries -= ats_entries;
                } else {
@@ -722,7 +722,7 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
        eaddr = saddr + size - 1;
 
        if (vm->pte_support_ats)
-               ats = saddr < AMDGPU_VA_HOLE_START;
+               ats = saddr < AMDGPU_GMC_HOLE_START;
 
        saddr /= AMDGPU_GPU_PAGE_SIZE;
        eaddr /= AMDGPU_GPU_PAGE_SIZE;
@@ -2016,7 +2016,8 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
                        struct amdgpu_bo_va_mapping, list);
                list_del(&mapping->list);
 
-               if (vm->pte_support_ats && mapping->start < AMDGPU_VA_HOLE_START)
+               if (vm->pte_support_ats &&
+                   mapping->start < AMDGPU_GMC_HOLE_START)
                        init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
 
                r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
index 6ea162ca296a3a00c41bfbc37e15c7035b842a3b..e275ee7c1bc1f2d9cf9b50fbe3504537ac9952ab 100644 (file)
@@ -101,19 +101,6 @@ struct amdgpu_bo_list_entry;
 /* hardcode that limit for now */
 #define AMDGPU_VA_RESERVED_SIZE                        (1ULL << 20)
 
-/* VA hole for 48bit addresses on Vega10 */
-#define AMDGPU_VA_HOLE_START                   0x0000800000000000ULL
-#define AMDGPU_VA_HOLE_END                     0xffff800000000000ULL
-
-/*
- * Hardware is programmed as if the hole doesn't exists with start and end
- * address values.
- *
- * This mask is used to remove the upper 16bits of the VA and so come up with
- * the linear addr value.
- */
-#define AMDGPU_VA_HOLE_MASK                    0x0000ffffffffffffULL
-
 /* max vmids dedicated for process */
 #define AMDGPU_VM_MAX_RESERVED_VMID    1