/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"

uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
{
        uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;

        addr -= AMDGPU_VA_RESERVED_SIZE;

        if (addr >= AMDGPU_VA_HOLE_START)
                addr |= AMDGPU_VA_HOLE_END;

        return addr;
}
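
/*
 * Worked example for amdgpu_csa_vaddr() above (illustrative numbers; the
 * AMDGPU_VA_* constants live in headers, not in this file): with a 48-bit
 * VA space, max_pfn << AMDGPU_GPU_PAGE_SHIFT is 1ULL << 48.  Subtracting
 * AMDGPU_VA_RESERVED_SIZE places the CSA at the top of the address space,
 * and since that address lies above AMDGPU_VA_HOLE_START, OR-ing in
 * AMDGPU_VA_HOLE_END sign-extends it into the canonical upper half that
 * the GPU VA hardware expects.
 */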

bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
        /*
         * By now all MMIO pages except the mailbox are blocked if blocking
         * is enabled in the hypervisor.  Choose SCRATCH_REG0 to test.
         */
        return RREG32_NO_KIQ(0xc040) == 0xffffffff;
}

int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
{
        int r;
        void *ptr;

        r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj,
                                    &adev->virt.csa_vmid0_addr, &ptr);
        if (r)
                return r;

        memset(ptr, 0, AMDGPU_CSA_SIZE);
        return 0;
}

void amdgpu_free_static_csa(struct amdgpu_device *adev)
{
        amdgpu_bo_free_kernel(&adev->virt.csa_obj,
                              &adev->virt.csa_vmid0_addr,
                              NULL);
}

/*
 * amdgpu_map_static_csa() should be called during amdgpu_vm_init().
 * It maps the virtual address returned by amdgpu_csa_vaddr() into this VM,
 * and each GFX command submission should use that virtual address within
 * its META_DATA init package to support SR-IOV gfx preemption.
 */
int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                          struct amdgpu_bo_va **bo_va)
{
        uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_VA_HOLE_MASK;
        struct ww_acquire_ctx ticket;
        struct list_head list;
        struct amdgpu_bo_list_entry pd;
        struct ttm_validate_buffer csa_tv;
        int r;

        INIT_LIST_HEAD(&list);
        INIT_LIST_HEAD(&csa_tv.head);
        csa_tv.bo = &adev->virt.csa_obj->tbo;
        csa_tv.shared = true;

        list_add(&csa_tv.head, &list);
        amdgpu_vm_get_pd_bo(vm, &list, &pd);

        r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
        if (r) {
                DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
                return r;
        }

        *bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
        if (!*bo_va) {
                ttm_eu_backoff_reservation(&ticket, &list);
                DRM_ERROR("failed to create bo_va for static CSA\n");
                return -ENOMEM;
        }

        r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr,
                                AMDGPU_CSA_SIZE);
        if (r) {
                DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
                amdgpu_vm_bo_rmv(adev, *bo_va);
                ttm_eu_backoff_reservation(&ticket, &list);
                return r;
        }

        r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, AMDGPU_CSA_SIZE,
                             AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
                             AMDGPU_PTE_EXECUTABLE);

        if (r) {
                DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
                amdgpu_vm_bo_rmv(adev, *bo_va);
                ttm_eu_backoff_reservation(&ticket, &list);
                return r;
        }

        ttm_eu_backoff_reservation(&ticket, &list);
        return 0;
}
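
/*
 * Usage sketch for amdgpu_map_static_csa() (illustrative; "fpriv" and the
 * error label are hypothetical stand-ins for whatever the caller's VM init
 * path provides):
 *
 *	if (amdgpu_sriov_vf(adev)) {
 *		r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va);
 *		if (r)
 *			goto error_vm;
 *	}
 *
 * The returned bo_va is what each GFX command submission later references
 * in its META_DATA init package.
 */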

void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
        /* enable virtual display */
        adev->mode_info.num_crtc = 1;
        adev->enable_virtual_display = true;
        adev->cg_flags = 0;
        adev->pg_flags = 0;
}

uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
        signed long r, cnt = 0;
        unsigned long flags;
        uint32_t seq;
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        struct amdgpu_ring *ring = &kiq->ring;

        BUG_ON(!ring->funcs->emit_rreg);

        spin_lock_irqsave(&kiq->ring_lock, flags);
        amdgpu_ring_alloc(ring, 32);
        amdgpu_ring_emit_rreg(ring, reg);
        amdgpu_fence_emit_polling(ring, &seq);
        amdgpu_ring_commit(ring);
        spin_unlock_irqrestore(&kiq->ring_lock, flags);

        r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

        /* Don't retry during GPU reset, because that could block the
         * gpu_recover() routine forever: e.g. this virt_kiq_rreg may be
         * triggered from TTM, and ttm_bo_lock_delayed_workqueue() would
         * never return if we kept waiting here, which causes gpu_recover()
         * to hang.
         *
         * Also don't retry in IRQ context, where we must not sleep.
         */
        if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
                goto failed_kiq_read;

        might_sleep();
        while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
                msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
                r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
        }

        if (cnt > MAX_KIQ_REG_TRY)
                goto failed_kiq_read;

        return adev->wb.wb[adev->virt.reg_val_offs];

failed_kiq_read:
        pr_err("failed to read reg:%x\n", reg);
        return ~0;
}
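
/*
 * How amdgpu_virt_kiq_rreg() is reached (simplified sketch; the real
 * dispatch lives in the MMIO register helpers, not in this file): once a
 * VF is in runtime mode, i.e. AMDGPU_SRIOV_CAPS_RUNTIME is set, a plain
 * register read is routed through the KIQ ring instead of direct MMIO:
 *
 *	if (amdgpu_sriov_runtime(adev))
 *		return amdgpu_virt_kiq_rreg(adev, reg);
 *	return readl(adev->rmmio + (reg * 4));
 */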

void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
        signed long r, cnt = 0;
        unsigned long flags;
        uint32_t seq;
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        struct amdgpu_ring *ring = &kiq->ring;

        BUG_ON(!ring->funcs->emit_wreg);

        spin_lock_irqsave(&kiq->ring_lock, flags);
        amdgpu_ring_alloc(ring, 32);
        amdgpu_ring_emit_wreg(ring, reg, v);
        amdgpu_fence_emit_polling(ring, &seq);
        amdgpu_ring_commit(ring);
        spin_unlock_irqrestore(&kiq->ring_lock, flags);

        r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

        /* Don't retry during GPU reset or in IRQ context; see the
         * matching comment in amdgpu_virt_kiq_rreg() above.
         */
        if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
                goto failed_kiq_write;

        might_sleep();
        while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
                msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
                r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
        }

        if (cnt > MAX_KIQ_REG_TRY)
                goto failed_kiq_write;

        return;

failed_kiq_write:
        pr_err("failed to write reg:%x\n", reg);
}

/**
 * amdgpu_virt_request_full_gpu() - request full gpu access
 * @adev: amdgpu device.
 * @init: true when called during driver init/fini.
 *
 * Full gpu access must be requested before driver init/fini begins.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
        struct amdgpu_virt *virt = &adev->virt;
        int r;

        if (virt->ops && virt->ops->req_full_gpu) {
                r = virt->ops->req_full_gpu(adev, init);
                if (r)
                        return r;

                adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
        }

        return 0;
}

/**
 * amdgpu_virt_release_full_gpu() - release full gpu access
 * @adev: amdgpu device.
 * @init: true when called during driver init/fini.
 *
 * Full gpu access must be released when driver init/fini finishes.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
        struct amdgpu_virt *virt = &adev->virt;
        int r;

        if (virt->ops && virt->ops->rel_full_gpu) {
                r = virt->ops->rel_full_gpu(adev, init);
                if (r)
                        return r;

                adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
        }
        return 0;
}
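
/*
 * Typical init-time bracket (illustrative sketch; the real sequence in the
 * device init code is more involved):
 *
 *	r = amdgpu_virt_request_full_gpu(adev, true);
 *	if (r)
 *		return r;
 *	... program the hardware with full register access ...
 *	amdgpu_virt_release_full_gpu(adev, true);
 *
 * While full access is held, AMDGPU_SRIOV_CAPS_RUNTIME is cleared, so
 * register reads and writes bypass the KIQ helpers above; releasing full
 * access sets the flag again.
 */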

/**
 * amdgpu_virt_reset_gpu() - reset gpu
 * @adev: amdgpu device.
 *
 * Send a reset command to the GPU hypervisor to reset the GPU this VM
 * is using.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
        struct amdgpu_virt *virt = &adev->virt;
        int r;

        if (virt->ops && virt->ops->reset_gpu) {
                r = virt->ops->reset_gpu(adev);
                if (r)
                        return r;

                adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
        }

        return 0;
}

/**
 * amdgpu_virt_wait_reset() - wait for gpu reset to complete
 * @adev: amdgpu device.
 *
 * Wait for the GPU reset to complete.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
{
        struct amdgpu_virt *virt = &adev->virt;

        if (!virt->ops || !virt->ops->wait_reset)
                return -EINVAL;

        return virt->ops->wait_reset(adev);
}

/**
 * amdgpu_virt_alloc_mm_table() - allocate memory for the mm table
 * @adev: amdgpu device.
 *
 * The MM table is used by UVD and VCE for their initialization.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
        int r;

        if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
                return 0;

        r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM,
                                    &adev->virt.mm_table.bo,
                                    &adev->virt.mm_table.gpu_addr,
                                    (void *)&adev->virt.mm_table.cpu_addr);
        if (r) {
                DRM_ERROR("failed to alloc mm table, error = %d.\n", r);
                return r;
        }

        memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
        DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
                 adev->virt.mm_table.gpu_addr,
                 adev->virt.mm_table.cpu_addr);
        return 0;
}

/**
 * amdgpu_virt_free_mm_table() - free mm table memory
 * @adev: amdgpu device.
 *
 * Free the MM table memory.
 */
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
        if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
                return;

        amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
                              &adev->virt.mm_table.gpu_addr,
                              (void *)&adev->virt.mm_table.cpu_addr);
        adev->virt.mm_table.gpu_addr = 0;
}
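
/*
 * Lifecycle sketch (illustrative; the exact call sites live in the UVD/VCE
 * code, not in this file): under SR-IOV the table is allocated once during
 * hw init and freed during hw fini:
 *
 *	r = amdgpu_virt_alloc_mm_table(adev);
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_virt_free_mm_table(adev);
 */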

int amdgpu_virt_fw_reserve_get_checksum(void *obj,
                                        unsigned long obj_size,
                                        unsigned int key,
                                        unsigned int chksum)
{
        unsigned int ret = key;
        unsigned long i = 0;
        unsigned char *pos;

        pos = (unsigned char *)obj;
        /* calculate checksum */
        for (i = 0; i < obj_size; ++i)
                ret += *(pos + i);
        /* minus the chksum itself */
        pos = (unsigned char *)&chksum;
        for (i = 0; i < sizeof(chksum); ++i)
                ret -= *(pos + i);
        return ret;
}
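
/*
 * Worked example (illustrative numbers): assume the object embeds its own
 * 32-bit checksum field, key = 0x10, the non-checksum bytes sum to 0x03,
 * and the stored checksum is 0x13.  The first loop yields
 * 0x10 + 0x03 + 0x13 = 0x26; the second loop subtracts the bytes of the
 * chksum argument (0x13), so the function returns 0x13, which matches the
 * stored checksum and validates the block.  This is exactly how
 * amdgpu_virt_init_data_exchange() below uses it.
 */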

void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
        uint32_t pf2vf_size = 0;
        uint32_t checksum = 0;
        uint32_t checkval;
        char *str;

        adev->virt.fw_reserve.p_pf2vf = NULL;
        adev->virt.fw_reserve.p_vf2pf = NULL;

        if (adev->fw_vram_usage.va != NULL) {
                adev->virt.fw_reserve.p_pf2vf =
                        (struct amdgim_pf2vf_info_header *)(
                        adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
                AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
                AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
                AMDGPU_FW_VRAM_PF2VF_READ(adev, feature_flags, &adev->virt.gim_feature);

                /* the pf2vf message must fit within one 4KB page */
                if (pf2vf_size > 0 && pf2vf_size < 4096) {
                        checkval = amdgpu_virt_fw_reserve_get_checksum(
                                adev->virt.fw_reserve.p_pf2vf, pf2vf_size,
                                adev->virt.fw_reserve.checksum_key, checksum);
                        if (checkval == checksum) {
                                adev->virt.fw_reserve.p_vf2pf =
                                        ((void *)adev->virt.fw_reserve.p_pf2vf +
                                        pf2vf_size);
                                memset((void *)adev->virt.fw_reserve.p_vf2pf, 0,
                                        sizeof(amdgim_vf2pf_info));
                                AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version,
                                        AMDGPU_FW_VRAM_VF2PF_VER);
                                AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size,
                                        sizeof(amdgim_vf2pf_info));
                                AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
                                        &str);
#ifdef MODULE
                                if (THIS_MODULE->version != NULL)
                                        strcpy(str, THIS_MODULE->version);
                                else
#endif
                                        strcpy(str, "N/A");
                                AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
                                        0);
                                AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
                                        amdgpu_virt_fw_reserve_get_checksum(
                                        adev->virt.fw_reserve.p_vf2pf,
                                        pf2vf_size,
                                        adev->virt.fw_reserve.checksum_key, 0));
                        }
                }
        }
}