/*
 * drm/i915/uc: Unify uc_fw status tracking
 * drivers/gpu/drm/i915/gt/uc/intel_uc.c
 */
1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24
25 #include "gt/intel_gt.h"
26 #include "gt/intel_reset.h"
27 #include "intel_guc.h"
28 #include "intel_guc_ads.h"
29 #include "intel_guc_submission.h"
30 #include "intel_uc.h"
31
32 #include "i915_drv.h"
33
34 static void guc_free_load_err_log(struct intel_guc *guc);
35
36 /* Reset GuC providing us with fresh state for both GuC and HuC.
37  */
38 static int __intel_uc_reset_hw(struct intel_uc *uc)
39 {
40         struct intel_gt *gt = uc_to_gt(uc);
41         int ret;
42         u32 guc_status;
43
44         ret = intel_reset_guc(gt);
45         if (ret) {
46                 DRM_ERROR("Failed to reset GuC, ret = %d\n", ret);
47                 return ret;
48         }
49
50         guc_status = intel_uncore_read(gt->uncore, GUC_STATUS);
51         WARN(!(guc_status & GS_MIA_IN_RESET),
52              "GuC status: 0x%x, MIA core expected to be in reset\n",
53              guc_status);
54
55         return ret;
56 }
57
58 static int __get_platform_enable_guc(struct intel_uc *uc)
59 {
60         struct intel_uc_fw *guc_fw = &uc->guc.fw;
61         struct intel_uc_fw *huc_fw = &uc->huc.fw;
62         int enable_guc = 0;
63
64         if (!HAS_GT_UC(uc_to_gt(uc)->i915))
65                 return 0;
66
67         /* We don't want to enable GuC/HuC on pre-Gen11 by default */
68         if (INTEL_GEN(uc_to_gt(uc)->i915) < 11)
69                 return 0;
70
71         if (intel_uc_fw_supported(guc_fw) && intel_uc_fw_supported(huc_fw))
72                 enable_guc |= ENABLE_GUC_LOAD_HUC;
73
74         return enable_guc;
75 }
76
77 static int __get_default_guc_log_level(struct intel_uc *uc)
78 {
79         int guc_log_level;
80
81         if (!intel_uc_fw_supported(&uc->guc.fw) || !intel_uc_is_using_guc(uc))
82                 guc_log_level = GUC_LOG_LEVEL_DISABLED;
83         else if (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
84                  IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
85                 guc_log_level = GUC_LOG_LEVEL_MAX;
86         else
87                 guc_log_level = GUC_LOG_LEVEL_NON_VERBOSE;
88
89         /* Any platform specific fine-tuning can be done here */
90
91         return guc_log_level;
92 }
93
/**
 * sanitize_options_early - sanitize uC related modparam options
 * @uc: the intel_uc structure
 *
 * In case of "enable_guc" option this function will attempt to modify
 * it only if it was initially set to "auto(-1)". Default value for this
 * modparam varies between platforms and it is hardcoded in driver code.
 * Any other modparam value is only monitored against availability of the
 * related hardware or firmware definitions.
 *
 * In case of "guc_log_level" option this function will attempt to modify
 * it only if it was initially set to "auto(-1)" or if initial value was
 * "enable(1..4)" on platforms without the GuC. Default value for this
 * modparam varies between platforms and is usually set to "disable(0)"
 * unless GuC is enabled on given platform and the driver is compiled with
 * debug config when this modparam will default to "enable(1..4)".
 */
static void sanitize_options_early(struct intel_uc *uc)
{
	struct intel_uc_fw *guc_fw = &uc->guc.fw;
	struct intel_uc_fw *huc_fw = &uc->huc.fw;

	/* A negative value means "use platform default" */
	if (i915_modparams.enable_guc < 0)
		i915_modparams.enable_guc = __get_platform_enable_guc(uc);

	DRM_DEBUG_DRIVER("enable_guc=%d (submission:%s huc:%s)\n",
			 i915_modparams.enable_guc,
			 yesno(intel_uc_is_using_guc_submission(uc)),
			 yesno(intel_uc_is_using_huc(uc)));

	/* Verify GuC firmware availability */
	if (intel_uc_is_using_guc(uc) && !intel_uc_fw_supported(guc_fw)) {
		DRM_WARN("Incompatible option detected: enable_guc=%d, "
			 "but GuC is not supported!\n",
			 i915_modparams.enable_guc);
		DRM_INFO("Disabling GuC/HuC loading!\n");
		i915_modparams.enable_guc = 0;
	}

	/* Verify HuC firmware availability */
	if (intel_uc_is_using_huc(uc) && !intel_uc_fw_supported(huc_fw)) {
		DRM_WARN("Incompatible option detected: enable_guc=%d, "
			 "but HuC is not supported!\n",
			 i915_modparams.enable_guc);
		DRM_INFO("Disabling HuC loading!\n");
		i915_modparams.enable_guc &= ~ENABLE_GUC_LOAD_HUC;
	}

	/* XXX: GuC submission is unavailable for now */
	if (intel_uc_is_using_guc_submission(uc)) {
		DRM_INFO("Incompatible option detected: enable_guc=%d, "
			 "but GuC submission is not supported!\n",
			 i915_modparams.enable_guc);
		DRM_INFO("Switching to non-GuC submission mode!\n");
		i915_modparams.enable_guc &= ~ENABLE_GUC_SUBMISSION;
	}

	/* A negative value means "use platform/config default" */
	if (i915_modparams.guc_log_level < 0)
		i915_modparams.guc_log_level =
			__get_default_guc_log_level(uc);

	/* GuC logging is meaningless when the GuC itself is not in use */
	if (i915_modparams.guc_log_level > 0 && !intel_uc_is_using_guc(uc)) {
		DRM_WARN("Incompatible option detected: guc_log_level=%d, "
			 "but GuC is not enabled!\n",
			 i915_modparams.guc_log_level);
		i915_modparams.guc_log_level = 0;
	}

	/* Clamp overly verbose requests down to the supported maximum */
	if (i915_modparams.guc_log_level > GUC_LOG_LEVEL_MAX) {
		DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
			 "guc_log_level", i915_modparams.guc_log_level,
			 "verbosity too high");
		i915_modparams.guc_log_level = GUC_LOG_LEVEL_MAX;
	}

	DRM_DEBUG_DRIVER("guc_log_level=%d (enabled:%s, verbose:%s, verbosity:%d)\n",
			 i915_modparams.guc_log_level,
			 yesno(i915_modparams.guc_log_level),
			 yesno(GUC_LOG_LEVEL_IS_VERBOSE(i915_modparams.guc_log_level)),
			 GUC_LOG_LEVEL_TO_VERBOSITY(i915_modparams.guc_log_level));

	/* Make sure that sanitization was done */
	GEM_BUG_ON(i915_modparams.enable_guc < 0);
	GEM_BUG_ON(i915_modparams.guc_log_level < 0);
}
181
/*
 * Early uC setup: initialize GuC and HuC software state, then sanitize the
 * uC-related modparams so that later init stages see consistent values.
 */
void intel_uc_init_early(struct intel_uc *uc)
{
	intel_guc_init_early(&uc->guc);
	intel_huc_init_early(&uc->huc);

	sanitize_options_early(uc);
}
189
/* Counterpart of intel_uc_init_early(): release the captured GuC error log. */
void intel_uc_cleanup_early(struct intel_uc *uc)
{
	guc_free_load_err_log(&uc->guc);
}
194
/**
 * intel_uc_init_mmio - setup uC MMIO access
 * @uc: the intel_uc structure
 *
 * Setup minimal state necessary for MMIO accesses later in the
 * initialization sequence.
 */
void intel_uc_init_mmio(struct intel_uc *uc)
{
	/* Only the GuC send registers are needed at this early stage */
	intel_guc_init_send_regs(&uc->guc);
}
206
207 static void guc_capture_load_err_log(struct intel_guc *guc)
208 {
209         if (!guc->log.vma || !intel_guc_log_get_level(&guc->log))
210                 return;
211
212         if (!guc->load_err_log)
213                 guc->load_err_log = i915_gem_object_get(guc->log.vma->obj);
214
215         return;
216 }
217
218 static void guc_free_load_err_log(struct intel_guc *guc)
219 {
220         if (guc->load_err_log)
221                 i915_gem_object_put(guc->load_err_log);
222 }
223
/*
 * Events triggered while CT buffers are disabled are logged in the SCRATCH_15
 * register using the same bits used in the CT message payload. Since our
 * communication channel with guc is turned off at this point, we can save the
 * message and handle it after we turn it back on.
 */
static void guc_clear_mmio_msg(struct intel_guc *guc)
{
	/* Zero the whole scratch register so no stale event bits survive */
	intel_uncore_write(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15), 0);
}
234
/*
 * Fold any pending SOFT_SCRATCH(15) event bits we care about into
 * guc->mmio_msg, then clear the register. Runs under guc->irq_lock;
 * NOTE(review): the lock presumably also guards mmio_msg updates from the
 * interrupt path — confirm against the GuC irq handler.
 */
static void guc_get_mmio_msg(struct intel_guc *guc)
{
	u32 val;

	spin_lock_irq(&guc->irq_lock);

	val = intel_uncore_read(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15));
	guc->mmio_msg |= val & guc->msg_enabled_mask;

	/*
	 * clear all events, including the ones we're not currently servicing,
	 * to make sure we don't try to process a stale message if we enable
	 * handling of more events later.
	 */
	guc_clear_mmio_msg(guc);

	spin_unlock_irq(&guc->irq_lock);
}
253
/*
 * Process a message stashed in guc->mmio_msg by guc_get_mmio_msg(), then
 * clear it. Must only be called once communication is enabled again.
 */
static void guc_handle_mmio_msg(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

	/* we need communication to be enabled to reply to GuC */
	GEM_BUG_ON(guc->handler == intel_guc_to_host_event_handler_nop);

	if (!guc->mmio_msg)
		return;

	/* Deliver the saved message while holding the i915 irq lock */
	spin_lock_irq(&i915->irq_lock);
	intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1);
	spin_unlock_irq(&i915->irq_lock);

	guc->mmio_msg = 0;
}
270
/* Thin wrappers around the platform-specific GuC interrupt callbacks */

static void guc_reset_interrupts(struct intel_guc *guc)
{
	guc->interrupts.reset(guc);
}

static void guc_enable_interrupts(struct intel_guc *guc)
{
	guc->interrupts.enable(guc);
}

static void guc_disable_interrupts(struct intel_guc *guc)
{
	guc->interrupts.disable(guc);
}
285
/*
 * Bring up host<->GuC communication: enable the CT buffers, install the
 * CT-based send/handler vfuncs, replay any messages that arrived via mmio
 * or CT while communication was down, and enable GuC interrupts.
 * Returns 0 on success or the CT enable error.
 */
static int guc_enable_communication(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
	int ret;

	ret = intel_guc_ct_enable(&guc->ct);
	if (ret)
		return ret;

	/* Switch the send/receive paths over to the CT implementations */
	guc->send = intel_guc_send_ct;
	guc->handler = intel_guc_to_host_event_handler_ct;

	/* check for mmio messages received before/during the CT enable */
	guc_get_mmio_msg(guc);
	guc_handle_mmio_msg(guc);

	guc_enable_interrupts(guc);

	/* check for CT messages received before we enabled interrupts */
	spin_lock_irq(&i915->irq_lock);
	intel_guc_to_host_event_handler_ct(guc);
	spin_unlock_irq(&i915->irq_lock);

	DRM_INFO("GuC communication enabled\n");

	return 0;
}
313
/*
 * Stop CT processing and fall back to the nop send/handler vfuncs,
 * discarding any stale mmio message. Used on the reset-prepare path,
 * where no orderly disable handshake with the GuC is wanted.
 */
static void guc_stop_communication(struct intel_guc *guc)
{
	intel_guc_ct_stop(&guc->ct);

	guc->send = intel_guc_send_nop;
	guc->handler = intel_guc_to_host_event_handler_nop;

	guc_clear_mmio_msg(guc);
}
323
/* Orderly shutdown of host<->GuC communication: interrupts, vfuncs, CT. */
static void guc_disable_communication(struct intel_guc *guc)
{
	/*
	 * Events generated during or after CT disable are logged by the GuC
	 * via mmio. Make sure the register is clear before disabling CT since
	 * all events we cared about have already been processed via CT.
	 */
	guc_clear_mmio_msg(guc);

	guc_disable_interrupts(guc);

	/* Route further send/receive through the nop implementations */
	guc->send = intel_guc_send_nop;
	guc->handler = intel_guc_to_host_event_handler_nop;

	intel_guc_ct_disable(&guc->ct);

	/*
	 * Check for messages received during/after the CT disable. We do not
	 * expect any messages to have arrived via CT between the interrupt
	 * disable and the CT disable because GuC should've been idle until we
	 * triggered the CT disable protocol.
	 */
	guc_get_mmio_msg(guc);

	DRM_INFO("GuC communication disabled\n");
}
350
351 void intel_uc_fetch_firmwares(struct intel_uc *uc)
352 {
353         struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
354
355         if (!intel_uc_is_using_guc(uc))
356                 return;
357
358         intel_uc_fw_fetch(i915, &uc->guc.fw);
359
360         if (intel_uc_is_using_huc(uc))
361                 intel_uc_fw_fetch(i915, &uc->huc.fw);
362 }
363
364 void intel_uc_cleanup_firmwares(struct intel_uc *uc)
365 {
366         if (!intel_uc_is_using_guc(uc))
367                 return;
368
369         if (intel_uc_is_using_huc(uc))
370                 intel_uc_fw_cleanup_fetch(&uc->huc.fw);
371
372         intel_uc_fw_cleanup_fetch(&uc->guc.fw);
373 }
374
/*
 * Initialize GuC (and optionally HuC and GuC submission) software state.
 * Returns 0 when the GuC is not in use, -ENODEV when the requested GuC
 * firmware is not supported, or the first failing init's error code after
 * unwinding anything already initialized (via the goto chain below).
 */
int intel_uc_init(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;
	struct intel_huc *huc = &uc->huc;
	int ret;

	if (!intel_uc_is_using_guc(uc))
		return 0;

	if (!intel_uc_fw_supported(&guc->fw))
		return -ENODEV;

	/* XXX: GuC submission is unavailable for now */
	GEM_BUG_ON(intel_uc_is_using_guc_submission(uc));

	ret = intel_guc_init(guc);
	if (ret)
		return ret;

	if (intel_uc_is_using_huc(uc)) {
		ret = intel_huc_init(huc);
		if (ret)
			goto err_guc;
	}

	if (intel_uc_is_using_guc_submission(uc)) {
		/*
		 * This is stuff we need to have available at fw load time
		 * if we are planning to enable submission later
		 */
		ret = intel_guc_submission_init(guc);
		if (ret)
			goto err_huc;
	}

	return 0;

	/* Unwind in reverse order of initialization */
err_huc:
	if (intel_uc_is_using_huc(uc))
		intel_huc_fini(huc);
err_guc:
	intel_guc_fini(guc);
	return ret;
}
419
/* Release uC software state in reverse order of intel_uc_init(). */
void intel_uc_fini(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	if (!intel_uc_is_using_guc(uc))
		return;

	GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw));

	if (intel_uc_is_using_guc_submission(uc))
		intel_guc_submission_fini(guc);

	if (intel_uc_is_using_huc(uc))
		intel_huc_fini(&uc->huc);

	intel_guc_fini(guc);
}
437
/*
 * Sanitize HuC and GuC software state, then reset the hardware via
 * __intel_uc_reset_hw() so it matches. Caller must know the GuC firmware
 * is supported (asserted below).
 */
static void __uc_sanitize(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;
	struct intel_huc *huc = &uc->huc;

	GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw));

	intel_huc_sanitize(huc);
	intel_guc_sanitize(guc);

	__intel_uc_reset_hw(uc);
}
450
/* Public sanitize entry point: only acts when the GuC is in use. */
void intel_uc_sanitize(struct intel_uc *uc)
{
	if (intel_uc_is_using_guc(uc))
		__uc_sanitize(uc);
}
458
/*
 * intel_uc_init_hw - load and start the uC firmware
 *
 * Resets the GuC, uploads the HuC (when in use) and GuC firmware images
 * (with retries on Gen9), enables host<->GuC communication, authenticates
 * the HuC, and optionally enables GuC submission. On failure the uC state
 * is sanitized and an error returned — there is no non-GuC fallback.
 */
int intel_uc_init_hw(struct intel_uc *uc)
{
	struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
	struct intel_guc *guc = &uc->guc;
	struct intel_huc *huc = &uc->huc;
	int ret, attempts;

	if (!intel_uc_is_using_guc(uc))
		return 0;

	GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw));

	guc_reset_interrupts(guc);

	/* WaEnableuKernelHeaderValidFix:skl */
	/* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
	if (IS_GEN(i915, 9))
		attempts = 3;
	else
		attempts = 1;

	while (attempts--) {
		/*
		 * Always reset the GuC just before (re)loading, so
		 * that the state and timing are fairly predictable
		 */
		ret = __intel_uc_reset_hw(uc);
		if (ret)
			goto err_out;

		/* HuC image is uploaded first; it is authenticated later via intel_huc_auth() */
		if (intel_uc_is_using_huc(uc)) {
			ret = intel_huc_fw_upload(huc);
			if (ret)
				goto err_out;
		}

		intel_guc_ads_reset(guc);
		intel_guc_write_params(guc);
		ret = intel_guc_fw_upload(guc);
		if (ret == 0)
			break;

		DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and "
				 "retry %d more time(s)\n", ret, attempts);
	}

	/* Did we succeed or run out of retries? */
	if (ret)
		goto err_log_capture;

	ret = guc_enable_communication(guc);
	if (ret)
		goto err_log_capture;

	if (intel_uc_is_using_huc(uc)) {
		ret = intel_huc_auth(huc);
		if (ret)
			goto err_communication;
	}

	ret = intel_guc_sample_forcewake(guc);
	if (ret)
		goto err_communication;

	if (intel_uc_is_using_guc_submission(uc)) {
		ret = intel_guc_submission_enable(guc);
		if (ret)
			goto err_communication;
	}

	dev_info(i915->drm.dev, "GuC firmware version %u.%u\n",
		 guc->fw.major_ver_found, guc->fw.minor_ver_found);
	dev_info(i915->drm.dev, "GuC submission %s\n",
		 enableddisabled(intel_uc_is_using_guc_submission(uc)));
	dev_info(i915->drm.dev, "HuC %s\n",
		 enableddisabled(intel_uc_is_using_huc(uc)));

	return 0;

	/*
	 * We've failed to load the firmware :(
	 */
err_communication:
	guc_disable_communication(guc);
err_log_capture:
	guc_capture_load_err_log(guc);
err_out:
	__uc_sanitize(uc);

	/*
	 * Note that there is no fallback as either user explicitly asked for
	 * the GuC or driver default option was to run with the GuC enabled.
	 */
	if (GEM_WARN_ON(ret == -EIO))
		ret = -EINVAL;

	dev_err(i915->drm.dev, "GuC initialization failed %d\n", ret);
	return ret;
}
558
/*
 * Tear down the uC run-time state brought up in intel_uc_init_hw():
 * disable submission (when enabled), shut down communication and sanitize.
 * Safe no-op when the GuC is not running.
 */
void intel_uc_fini_hw(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	if (!intel_guc_is_running(guc))
		return;

	GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw));

	if (intel_uc_is_using_guc_submission(uc))
		intel_guc_submission_disable(guc);

	guc_disable_communication(guc);
	__uc_sanitize(uc);
}
574
/**
 * intel_uc_reset_prepare - Prepare for reset
 * @uc: the intel_uc structure
 *
 * Preparing for full gpu reset.
 */
void intel_uc_reset_prepare(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	if (!intel_guc_is_running(guc))
		return;

	/* Stop (rather than orderly disable) communication, then sanitize */
	guc_stop_communication(guc);
	__uc_sanitize(uc);
}
591
592 void intel_uc_runtime_suspend(struct intel_uc *uc)
593 {
594         struct intel_guc *guc = &uc->guc;
595         int err;
596
597         if (!intel_guc_is_running(guc))
598                 return;
599
600         err = intel_guc_suspend(guc);
601         if (err)
602                 DRM_DEBUG_DRIVER("Failed to suspend GuC, err=%d", err);
603
604         guc_disable_communication(guc);
605 }
606
/*
 * System-suspend entry point: takes a runtime PM wakeref so that the
 * runtime-suspend path can be reused to quiesce the GuC.
 */
void intel_uc_suspend(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;
	intel_wakeref_t wakeref;

	if (!intel_guc_is_running(guc))
		return;

	with_intel_runtime_pm(&uc_to_gt(uc)->i915->runtime_pm, wakeref)
		intel_uc_runtime_suspend(uc);
}
618
619 int intel_uc_resume(struct intel_uc *uc)
620 {
621         struct intel_guc *guc = &uc->guc;
622         int err;
623
624         if (!intel_guc_is_running(guc))
625                 return 0;
626
627         guc_enable_communication(guc);
628
629         err = intel_guc_resume(guc);
630         if (err) {
631                 DRM_DEBUG_DRIVER("Failed to resume GuC, err=%d", err);
632                 return err;
633         }
634
635         return 0;
636 }