Merge tag 'drm-intel-fixes-2019-08-08' of git://anongit.freedesktop.org/drm/drm-intel...
author Dave Airlie <airlied@redhat.com>
Fri, 9 Aug 2019 05:46:09 +0000 (15:46 +1000)
committer Dave Airlie <airlied@redhat.com>
Fri, 9 Aug 2019 05:46:10 +0000 (15:46 +1000)
drm/i915 fixes for v5.3-rc4:
- Fix GLK DSI escape clock setting
- Fix a memleak on HDCP revoked Ksv error path

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/87pnlghz79.fsf@intel.com
15 files changed:
drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
drivers/gpu/drm/amd/powerplay/navi10_ppt.c
drivers/gpu/drm/amd/powerplay/smu_v11_0.c
drivers/gpu/drm/drm_modes.c
drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
drivers/gpu/drm/tegra/output.c
drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
include/uapi/linux/kfd_ioctl.h

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
index df8a2355483172fa6bc45303d93439036f681f2c..f6ac1e9548f276d4279d2830a15e7ca45428973f 100644
@@ -32,7 +32,6 @@ struct amdgpu_gds {
        uint32_t gws_size;
        uint32_t oa_size;
        uint32_t gds_compute_max_wave_id;
-       uint32_t vgt_gs_max_wave_id;
 };
 
 struct amdgpu_gds_reg_offset {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 99f14fcc1460550c66e0bf7dfdfedcfc8b297723..19661c645703e56809c584b61916d141d5ed32d0 100644
@@ -30,6 +30,7 @@
 #define AMDGPU_VCN_FIRMWARE_OFFSET     256
 #define AMDGPU_VCN_MAX_ENC_RINGS       3
 
+#define VCN_DEC_KMD_CMD                0x80000000
 #define VCN_DEC_CMD_FENCE              0x00000000
 #define VCN_DEC_CMD_TRAP               0x00000001
 #define VCN_DEC_CMD_WRITE_REG          0x00000004
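
The new VCN_DEC_KMD_CMD value sets bit 31, which presumably tags decode commands issued by the kernel-mode driver; the vcn_v2_0.c hunks further down OR it into each shifted command word. A minimal sketch of that pattern, assuming the defines above and using a hypothetical helper name that is not part of this patch:

#include <stdint.h>

/* Hypothetical helper (not in the patch): build a KMD-tagged VCN decode
 * command word the same way the vcn_v2_0.c changes below do inline.
 */
static inline uint32_t vcn_dec_kmd_cmd(uint32_t cmd)
{
        /* bit 31 = VCN_DEC_KMD_CMD, low bits = command code shifted by 1 */
        return VCN_DEC_KMD_CMD | (cmd << 1);
}

/* e.g. amdgpu_ring_write(ring, vcn_dec_kmd_cmd(VCN_DEC_CMD_FENCE)); */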
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 32773b7523d204a8b29266931fb4a9e577c571a4..f41287f9000da599accefb3382575d405c8a030b 100644
@@ -4206,15 +4206,6 @@ static void gfx_v10_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
        unsigned vmid = AMDGPU_JOB_GET_VMID(job);
        u32 header, control = 0;
 
-       /* Prevent a hw deadlock due to a wave ID mismatch between ME and GDS.
-        * This resets the wave ID counters. (needed by transform feedback)
-        * TODO: This might only be needed on a VMID switch when we change
-        *       the GDS OA mapping, not sure.
-        */
-       amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-       amdgpu_ring_write(ring, mmVGT_GS_MAX_WAVE_ID);
-       amdgpu_ring_write(ring, ring->adev->gds.vgt_gs_max_wave_id);
-
        if (ib->flags & AMDGPU_IB_FLAG_CE)
                header = PACKET3(PACKET3_INDIRECT_BUFFER_CNST, 2);
        else
@@ -4961,7 +4952,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
                5 + /* HDP_INVL */
                8 + 8 + /* FENCE x2 */
                2, /* SWITCH_BUFFER */
-       .emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_gfx */
+       .emit_ib_size = 4, /* gfx_v10_0_ring_emit_ib_gfx */
        .emit_ib = gfx_v10_0_ring_emit_ib_gfx,
        .emit_fence = gfx_v10_0_ring_emit_fence,
        .emit_pipeline_sync = gfx_v10_0_ring_emit_pipeline_sync,
@@ -5112,7 +5103,6 @@ static void gfx_v10_0_set_gds_init(struct amdgpu_device *adev)
        default:
                adev->gds.gds_size = 0x10000;
                adev->gds.gds_compute_max_wave_id = 0x4ff;
-               adev->gds.vgt_gs_max_wave_id = 0x3ff;
                break;
        }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 751567f78567357c0099cf426c550fe7d9d72b3f..ee1ccdcf2d30e128751b82f9839397618bc2fb08 100644
@@ -1321,6 +1321,39 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
        return 0;
 }
 
+static int gfx_v8_0_csb_vram_pin(struct amdgpu_device *adev)
+{
+       int r;
+
+       r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+       if (unlikely(r != 0))
+               return r;
+
+       r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
+                       AMDGPU_GEM_DOMAIN_VRAM);
+       if (!r)
+               adev->gfx.rlc.clear_state_gpu_addr =
+                       amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);
+
+       amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+
+       return r;
+}
+
+static void gfx_v8_0_csb_vram_unpin(struct amdgpu_device *adev)
+{
+       int r;
+
+       if (!adev->gfx.rlc.clear_state_obj)
+               return;
+
+       r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
+       if (likely(r == 0)) {
+               amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
+               amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+       }
+}
+
 static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
 {
        amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
@@ -4785,6 +4818,10 @@ static int gfx_v8_0_hw_init(void *handle)
        gfx_v8_0_init_golden_registers(adev);
        gfx_v8_0_constants_init(adev);
 
+       r = gfx_v8_0_csb_vram_pin(adev);
+       if (r)
+               return r;
+
        r = adev->gfx.rlc.funcs->resume(adev);
        if (r)
                return r;
@@ -4901,6 +4938,9 @@ static int gfx_v8_0_hw_fini(void *handle)
        else
                pr_err("rlc is busy, skip halt rlc\n");
        amdgpu_gfx_rlc_exit_safe_mode(adev);
+
+       gfx_v8_0_csb_vram_unpin(adev);
+
        return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 1cfc2620b2dd46b4105d5e69bae8e55551f1905b..dfde886cc6bd161928835e9c79f23b176329d15a 100644
@@ -1485,7 +1485,7 @@ static void vcn_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring)
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0));
-       amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_START << 1);
+       amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
 }
 
 /**
@@ -1498,7 +1498,7 @@ static void vcn_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring)
 static void vcn_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring)
 {
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0));
-       amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_END << 1);
+       amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_END << 1));
 }
 
 /**
@@ -1543,7 +1543,7 @@ static void vcn_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64
        amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
 
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0));
-       amdgpu_ring_write(ring, VCN_DEC_CMD_FENCE << 1);
+       amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_FENCE << 1));
 
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET, 0));
        amdgpu_ring_write(ring, 0);
@@ -1553,7 +1553,7 @@ static void vcn_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64
 
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0));
 
-       amdgpu_ring_write(ring, VCN_DEC_CMD_TRAP << 1);
+       amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_TRAP << 1));
 }
 
 /**
@@ -1597,7 +1597,7 @@ static void vcn_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring,
 
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0));
 
-       amdgpu_ring_write(ring, VCN_DEC_CMD_REG_READ_COND_WAIT << 1);
+       amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_REG_READ_COND_WAIT << 1));
 }
 
 static void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
@@ -1626,7 +1626,7 @@ static void vcn_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
 
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0));
 
-       amdgpu_ring_write(ring, VCN_DEC_CMD_WRITE_REG << 1);
+       amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_WRITE_REG << 1));
 }
 
 /**
@@ -2079,6 +2079,36 @@ static int vcn_v2_0_process_interrupt(struct amdgpu_device *adev,
        return 0;
 }
 
+static int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+       uint32_t tmp = 0;
+       unsigned i;
+       int r;
+
+       WREG32(adev->vcn.external.scratch9, 0xCAFEDEAD);
+       r = amdgpu_ring_alloc(ring, 4);
+       if (r)
+               return r;
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0));
+       amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
+       amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
+       amdgpu_ring_write(ring, 0xDEADBEEF);
+       amdgpu_ring_commit(ring);
+       for (i = 0; i < adev->usec_timeout; i++) {
+               tmp = RREG32(adev->vcn.external.scratch9);
+               if (tmp == 0xDEADBEEF)
+                       break;
+               DRM_UDELAY(1);
+       }
+
+       if (i >= adev->usec_timeout)
+               r = -ETIMEDOUT;
+
+       return r;
+}
+
+
 static int vcn_v2_0_set_powergating_state(void *handle,
                                          enum amd_powergating_state state)
 {
@@ -2142,7 +2172,7 @@ static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = {
        .emit_ib = vcn_v2_0_dec_ring_emit_ib,
        .emit_fence = vcn_v2_0_dec_ring_emit_fence,
        .emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
-       .test_ring = amdgpu_vcn_dec_ring_test_ring,
+       .test_ring = vcn_v2_0_dec_ring_test_ring,
        .test_ib = amdgpu_vcn_dec_ring_test_ib,
        .insert_nop = vcn_v2_0_dec_ring_insert_nop,
        .insert_start = vcn_v2_0_dec_ring_insert_start,
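
The new vcn_v2_0_dec_ring_test_ring() above follows the usual ring-test pattern: seed the scratch register with 0xCAFEDEAD, submit a KMD-tagged packet that writes 0xDEADBEEF, then poll until the new value appears or adev->usec_timeout microseconds elapse. A standalone sketch of the same poll-with-timeout idea, with hypothetical callbacks standing in for RREG32()/DRM_UDELAY():

#include <stdint.h>

/* Sketch only: read_scratch()/delay_us() are hypothetical stand-ins for the
 * register read and microsecond delay used by the real ring test.
 */
static int poll_scratch(uint32_t (*read_scratch)(void *ctx), void *ctx,
                        uint32_t expected, unsigned int timeout_us,
                        void (*delay_us)(unsigned int usecs))
{
        unsigned int i;

        for (i = 0; i < timeout_us; i++) {
                if (read_scratch(ctx) == expected)
                        return 0;       /* sentinel was overwritten: ring works */
                delay_us(1);
        }

        return -1;      /* the caller would map this to -ETIMEDOUT */
}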
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 26b15cc56c31c60b624ba13eaf566cfad3525747..1d3cd5c50d5f2b06191e5bf8ff1fee7bed7a6225 100644
@@ -1567,32 +1567,6 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
        return err;
 }
 
-static int kfd_ioctl_alloc_queue_gws(struct file *filep,
-               struct kfd_process *p, void *data)
-{
-       int retval;
-       struct kfd_ioctl_alloc_queue_gws_args *args = data;
-       struct kfd_dev *dev;
-
-       if (!hws_gws_support)
-               return -ENODEV;
-
-       dev = kfd_device_by_id(args->gpu_id);
-       if (!dev) {
-               pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
-               return -ENODEV;
-       }
-       if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
-               return -ENODEV;
-
-       mutex_lock(&p->mutex);
-       retval = pqm_set_gws(&p->pqm, args->queue_id, args->num_gws ? dev->gws : NULL);
-       mutex_unlock(&p->mutex);
-
-       args->first_gws = 0;
-       return retval;
-}
-
 static int kfd_ioctl_get_dmabuf_info(struct file *filep,
                struct kfd_process *p, void *data)
 {
@@ -1795,8 +1769,6 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
        AMDKFD_IOCTL_DEF(AMDKFD_IOC_IMPORT_DMABUF,
                                kfd_ioctl_import_dmabuf, 0),
 
-       AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_QUEUE_GWS,
-                       kfd_ioctl_alloc_queue_gws, 0),
 };
 
 #define AMDKFD_CORE_IOCTL_COUNT        ARRAY_SIZE(amdkfd_ioctls)
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 0685a3388e38ce7ae2667319a22726b0b26c947e..8a3eadeebdcb611c4bd685376514802679d48601 100644
@@ -315,6 +315,8 @@ int smu_get_power_num_states(struct smu_context *smu,
 int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
                           void *data, uint32_t *size)
 {
+       struct smu_power_context *smu_power = &smu->smu_power;
+       struct smu_power_gate *power_gate = &smu_power->power_gate;
        int ret = 0;
 
        switch (sensor) {
@@ -339,7 +341,7 @@ int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
-               *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT) ? 1 : 0;
+               *(uint32_t *)data = power_gate->vcn_gated ? 0 : 1;
                *size = 4;
                break;
        default:
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 208e6711d5068fc16d7359acaa198190ea7a5a94..a0f52c86d8c7efa608221d4f56627894cfd2a244 100644
@@ -451,6 +451,7 @@ struct smu_dpm_context {
 struct smu_power_gate {
        bool uvd_gated;
        bool vce_gated;
+       bool vcn_gated;
 };
 
 struct smu_power_context {
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index cc0a3b2256aff71ff8d2f4c03499d45190b088fe..b81c7e715dc943b00b3c0d456c17d42f5c01240e 100644
@@ -502,6 +502,8 @@ static int navi10_store_powerplay_table(struct smu_context *smu)
 
 static int navi10_tables_init(struct smu_context *smu, struct smu_table *tables)
 {
+       struct smu_table_context *smu_table = &smu->smu_table;
+
        SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
        SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
@@ -516,9 +518,35 @@ static int navi10_tables_init(struct smu_context *smu, struct smu_table *tables)
                       sizeof(DpmActivityMonitorCoeffInt_t), PAGE_SIZE,
                       AMDGPU_GEM_DOMAIN_VRAM);
 
+       smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
+       if (!smu_table->metrics_table)
+               return -ENOMEM;
+       smu_table->metrics_time = 0;
+
        return 0;
 }
 
+static int navi10_get_metrics_table(struct smu_context *smu,
+                                   SmuMetrics_t *metrics_table)
+{
+       struct smu_table_context *smu_table = &smu->smu_table;
+       int ret = 0;
+
+       if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + HZ / 1000)) {
+               ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
+                               (void *)smu_table->metrics_table, false);
+               if (ret) {
+                       pr_info("Failed to export SMU metrics table!\n");
+                       return ret;
+               }
+               smu_table->metrics_time = jiffies;
+       }
+
+       memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));
+
+       return ret;
+}
+
 static int navi10_allocate_dpm_context(struct smu_context *smu)
 {
        struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
@@ -577,20 +605,27 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
 
 static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
 {
+       struct smu_power_context *smu_power = &smu->smu_power;
+       struct smu_power_gate *power_gate = &smu_power->power_gate;
        int ret = 0;
 
        if (enable) {
-               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1);
-               if (ret)
-                       return ret;
+               /* vcn dpm on is a prerequisite for vcn power gate messages */
+               if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+                       ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1);
+                       if (ret)
+                               return ret;
+               }
+               power_gate->vcn_gated = false;
        } else {
-               ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
-               if (ret)
-                       return ret;
+               if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+                       ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
+                       if (ret)
+                               return ret;
+               }
+               power_gate->vcn_gated = true;
        }
 
-       ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, enable);
-
        return ret;
 }
 
@@ -598,15 +633,10 @@ static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
                                       enum smu_clk_type clk_type,
                                       uint32_t *value)
 {
-       static SmuMetrics_t metrics;
        int ret = 0, clk_id = 0;
+       SmuMetrics_t metrics;
 
-       if (!value)
-               return -EINVAL;
-
-       memset(&metrics, 0, sizeof(metrics));
-
-       ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)&metrics, false);
+       ret = navi10_get_metrics_table(smu, &metrics);
        if (ret)
                return ret;
 
@@ -894,8 +924,9 @@ static int navi10_get_gpu_power(struct smu_context *smu, uint32_t *value)
        if (!value)
                return -EINVAL;
 
-       ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)&metrics,
-                              false);
+       ret = navi10_get_metrics_table(smu, &metrics);
+       if (ret)
+               return ret;
        if (ret)
                return ret;
 
@@ -914,10 +945,7 @@ static int navi10_get_current_activity_percent(struct smu_context *smu,
        if (!value)
                return -EINVAL;
 
-       msleep(1);
-
-       ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
-                              (void *)&metrics, false);
+       ret = navi10_get_metrics_table(smu, &metrics);
        if (ret)
                return ret;
 
@@ -956,10 +984,9 @@ static int navi10_get_fan_speed_rpm(struct smu_context *smu,
        if (!speed)
                return -EINVAL;
 
-       memset(&metrics, 0, sizeof(metrics));
-
-       ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
-                              (void *)&metrics, false);
+       ret = navi10_get_metrics_table(smu, &metrics);
+       if (ret)
+               return ret;
        if (ret)
                return ret;
 
@@ -1307,7 +1334,7 @@ static int navi10_thermal_get_temperature(struct smu_context *smu,
        if (!value)
                return -EINVAL;
 
-       ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)&metrics, false);
+       ret = navi10_get_metrics_table(smu, &metrics);
        if (ret)
                return ret;
 
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index ac5b26228e753e2071d32c10e3562aabc97c449f..5fde5cf65b4240a7428e3c89be0dabc0ea12902f 100644
@@ -1391,7 +1391,7 @@ smu_v11_0_smc_fan_control(struct smu_context *smu, bool start)
 {
        int ret = 0;
 
-       if (smu_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
+       if (!smu_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
                return 0;
 
        ret = smu_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, start);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 80fcd5dc155889dbdbc64d422980b5566a00674e..b0369e690f36cf67a9d60eac23f7b767dbb66074 100644
@@ -1770,7 +1770,9 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
        }
 
        if (named_mode) {
-               strncpy(mode->name, name, mode_end);
+               if (mode_end + 1 > DRM_DISPLAY_MODE_LEN)
+                       return false;
+               strscpy(mode->name, name, mode_end + 1);
        } else {
                ret = drm_mode_parse_cmdline_res_mode(name, mode_end,
                                                      parse_extras,
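
In the named-mode path above, the old strncpy(mode->name, name, mode_end) neither checked that the name fits in mode->name nor guaranteed NUL termination. The fix bounds the copy to mode_end + 1 bytes with strscpy() (which always NUL-terminates) and rejects names that would not fit in the DRM_DISPLAY_MODE_LEN-byte buffer. A userspace sketch of the same bounded copy, with hypothetical names:

#include <stdbool.h>
#include <string.h>

#define DISPLAY_MODE_LEN 32     /* stands in for DRM_DISPLAY_MODE_LEN */

/* Copy a name of name_len characters into a fixed-size buffer, refusing
 * (rather than silently truncating) anything that does not fit together
 * with its NUL terminator.
 */
static bool copy_mode_name(char dst[DISPLAY_MODE_LEN],
                           const char *name, size_t name_len)
{
        if (name_len + 1 > DISPLAY_MODE_LEN)
                return false;

        memcpy(dst, name, name_len);
        dst[name_len] = '\0';
        return true;
}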
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index 95e5c517a15f766132981eebc148f488e13a4074..9aae3d8e99ef426743d21533f3b1168449a56fe5 100644
@@ -432,7 +432,7 @@ static int rockchip_dp_resume(struct device *dev)
 
 static const struct dev_pm_ops rockchip_dp_pm_ops = {
 #ifdef CONFIG_PM_SLEEP
-       .suspend = rockchip_dp_suspend,
+       .suspend_late = rockchip_dp_suspend,
        .resume_early = rockchip_dp_resume,
 #endif
 };
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index 274cb955e2e19f496ed0be39c0610fe98609d645..bdcaa4c7168cfac967290347808bf08642dbe576 100644
@@ -126,8 +126,12 @@ int tegra_output_probe(struct tegra_output *output)
                                                       "nvidia,hpd-gpio", 0,
                                                       GPIOD_IN,
                                                       "HDMI hotplug detect");
-       if (IS_ERR(output->hpd_gpio))
-               return PTR_ERR(output->hpd_gpio);
+       if (IS_ERR(output->hpd_gpio)) {
+               if (PTR_ERR(output->hpd_gpio) != -ENOENT)
+                       return PTR_ERR(output->hpd_gpio);
+
+               output->hpd_gpio = NULL;
+       }
 
        if (output->hpd_gpio) {
                err = gpiod_to_irq(output->hpd_gpio);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index e4e09d47c5c0e001934a14a2df103e13a7a328aa..59e9d05ab928b49f2c6ca1f3a0cd5fbecdd56a1b 100644
@@ -389,8 +389,10 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
                break;
        }
 
-       if (retries == RETRIES)
+       if (retries == RETRIES) {
+               kfree(reply);
                return -EINVAL;
+       }
 
        *msg_len = reply_len;
        *msg     = reply;
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index 070d1bc7e725df318d2297ae671f2a34f8dd5089..20917c59f39c9a2a27bf1b62e907d5e2a31f88a6 100644
@@ -410,21 +410,6 @@ struct kfd_ioctl_unmap_memory_from_gpu_args {
        __u32 n_success;                /* to/from KFD */
 };
 
-/* Allocate GWS for specific queue
- *
- * @gpu_id:      device identifier
- * @queue_id:    queue's id that GWS is allocated for
- * @num_gws:     how many GWS to allocate
- * @first_gws:   index of the first GWS allocated.
- *               only support contiguous GWS allocation
- */
-struct kfd_ioctl_alloc_queue_gws_args {
-       __u32 gpu_id;           /* to KFD */
-       __u32 queue_id;         /* to KFD */
-       __u32 num_gws;          /* to KFD */
-       __u32 first_gws;        /* from KFD */
-};
-
 struct kfd_ioctl_get_dmabuf_info_args {
        __u64 size;             /* from KFD */
        __u64 metadata_ptr;     /* to KFD */
@@ -544,10 +529,7 @@ enum kfd_mmio_remap {
 #define AMDKFD_IOC_IMPORT_DMABUF               \
                AMDKFD_IOWR(0x1D, struct kfd_ioctl_import_dmabuf_args)
 
-#define AMDKFD_IOC_ALLOC_QUEUE_GWS             \
-               AMDKFD_IOWR(0x1E, struct kfd_ioctl_alloc_queue_gws_args)
-
 #define AMDKFD_COMMAND_START           0x01
-#define AMDKFD_COMMAND_END             0x1F
+#define AMDKFD_COMMAND_END             0x1E
 
 #endif