2 * Copyright © 2016 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 #include "gt/intel_gt.h"
26 #include "gt/intel_reset.h"
27 #include "intel_guc.h"
28 #include "intel_guc_ads.h"
29 #include "intel_guc_submission.h"
34 static void guc_free_load_err_log(struct intel_guc *guc);
36 /* Reset GuC providing us with fresh state for both GuC and HuC.
 *
 * NOTE(review): this view of the file is truncated (braces, local
 * declarations, error checks and returns are missing between visible lines);
 * comments describe only what the visible lines establish.
 */
38 static int __intel_uc_reset_hw(struct intel_uc *uc)
40 struct intel_gt *gt = uc_to_gt(uc);
/* Trigger a GuC-only reset via the GT reset infrastructure. */
44 ret = intel_reset_guc(gt);
46 DRM_ERROR("Failed to reset GuC, ret = %d\n", ret);
/* After a successful reset the MIA core should report itself in reset. */
50 guc_status = intel_uncore_read(gt->uncore, GUC_STATUS);
51 WARN(!(guc_status & GS_MIA_IN_RESET),
52 "GuC status: 0x%x, MIA core expected to be in reset\n",
/*
 * Compute the platform-default value for the "enable_guc" modparam.
 * Returns a bitmask (e.g. ENABLE_GUC_LOAD_HUC); used only when the user
 * left enable_guc at its "auto" (-1) setting.
 */
58 static int __get_platform_enable_guc(struct intel_uc *uc)
60 struct intel_uc_fw *guc_fw = &uc->guc.fw;
61 struct intel_uc_fw *huc_fw = &uc->huc.fw;
/* No GuC/HuC hardware at all -> nothing to enable by default. */
64 if (!HAS_GT_UC(uc_to_gt(uc)->i915))
67 /* We don't want to enable GuC/HuC on pre-Gen11 by default */
68 if (INTEL_GEN(uc_to_gt(uc)->i915) < 11)
/* Default to loading HuC (via GuC) only when both firmwares are supported. */
71 if (intel_uc_fw_supported(guc_fw) && intel_uc_fw_supported(huc_fw))
72 enable_guc |= ENABLE_GUC_LOAD_HUC;
/*
 * Compute the default "guc_log_level" modparam value: disabled when GuC is
 * unsupported or not in use, maximum verbosity on debug kernel configs,
 * non-verbose otherwise.
 */
77 static int __get_default_guc_log_level(struct intel_uc *uc)
81 if (!intel_uc_fw_supported(&uc->guc.fw) || !intel_uc_is_using_guc(uc))
82 guc_log_level = GUC_LOG_LEVEL_DISABLED;
83 else if (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
84 IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
85 guc_log_level = GUC_LOG_LEVEL_MAX;
87 guc_log_level = GUC_LOG_LEVEL_NON_VERBOSE;
89 /* Any platform specific fine-tuning can be done here */
/**
95 * sanitize_options_early - sanitize uC related modparam options
96 * @uc: the intel_uc structure
 *
98 * In case of "enable_guc" option this function will attempt to modify
99 * it only if it was initially set to "auto(-1)". Default value for this
100 * modparam varies between platforms and it is hardcoded in driver code.
101 * Any other modparam value is only monitored against availability of the
102 * related hardware or firmware definitions.
 *
104 * In case of "guc_log_level" option this function will attempt to modify
105 * it only if it was initially set to "auto(-1)" or if initial value was
106 * "enable(1..4)" on platforms without the GuC. Default value for this
107 * modparam varies between platforms and is usually set to "disable(0)"
108 * unless GuC is enabled on given platform and the driver is compiled with
109 * debug config when this modparam will default to "enable(1..4)".
 */
111 static void sanitize_options_early(struct intel_uc *uc)
113 struct intel_uc_fw *guc_fw = &uc->guc.fw;
114 struct intel_uc_fw *huc_fw = &uc->huc.fw;
116 /* A negative value means "use platform default" */
117 if (i915_modparams.enable_guc < 0)
118 i915_modparams.enable_guc = __get_platform_enable_guc(uc);
120 DRM_DEBUG_DRIVER("enable_guc=%d (submission:%s huc:%s)\n",
121 i915_modparams.enable_guc,
122 yesno(intel_uc_is_using_guc_submission(uc)),
123 yesno(intel_uc_is_using_huc(uc)));
125 /* Verify GuC firmware availability */
126 if (intel_uc_is_using_guc(uc) && !intel_uc_fw_supported(guc_fw)) {
127 DRM_WARN("Incompatible option detected: enable_guc=%d, "
128 "but GuC is not supported!\n",
129 i915_modparams.enable_guc);
130 DRM_INFO("Disabling GuC/HuC loading!\n");
131 i915_modparams.enable_guc = 0;
134 /* Verify HuC firmware availability */
135 if (intel_uc_is_using_huc(uc) && !intel_uc_fw_supported(huc_fw)) {
136 DRM_WARN("Incompatible option detected: enable_guc=%d, "
137 "but HuC is not supported!\n",
138 i915_modparams.enable_guc);
139 DRM_INFO("Disabling HuC loading!\n");
/* Keep GuC enablement, only strip the HuC-load bit. */
140 i915_modparams.enable_guc &= ~ENABLE_GUC_LOAD_HUC;
143 /* XXX: GuC submission is unavailable for now */
144 if (intel_uc_is_using_guc_submission(uc)) {
145 DRM_INFO("Incompatible option detected: enable_guc=%d, "
146 "but GuC submission is not supported!\n",
147 i915_modparams.enable_guc);
148 DRM_INFO("Switching to non-GuC submission mode!\n");
149 i915_modparams.enable_guc &= ~ENABLE_GUC_SUBMISSION;
152 /* A negative value means "use platform/config default" */
153 if (i915_modparams.guc_log_level < 0)
154 i915_modparams.guc_log_level =
155 __get_default_guc_log_level(uc);
/* Logging requested but GuC itself disabled: force logging off. */
157 if (i915_modparams.guc_log_level > 0 && !intel_uc_is_using_guc(uc)) {
158 DRM_WARN("Incompatible option detected: guc_log_level=%d, "
159 "but GuC is not enabled!\n",
160 i915_modparams.guc_log_level);
161 i915_modparams.guc_log_level = 0;
/* Clamp overly verbose user settings to the supported maximum. */
164 if (i915_modparams.guc_log_level > GUC_LOG_LEVEL_MAX) {
165 DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
166 "guc_log_level", i915_modparams.guc_log_level,
167 "verbosity too high");
168 i915_modparams.guc_log_level = GUC_LOG_LEVEL_MAX;
171 DRM_DEBUG_DRIVER("guc_log_level=%d (enabled:%s, verbose:%s, verbosity:%d)\n",
172 i915_modparams.guc_log_level,
173 yesno(i915_modparams.guc_log_level),
174 yesno(GUC_LOG_LEVEL_IS_VERBOSE(i915_modparams.guc_log_level)),
175 GUC_LOG_LEVEL_TO_VERBOSITY(i915_modparams.guc_log_level));
177 /* Make sure that sanitization was done */
178 GEM_BUG_ON(i915_modparams.enable_guc < 0);
179 GEM_BUG_ON(i915_modparams.guc_log_level < 0);
/*
 * Early (pre-MMIO) init of the uC state: set up GuC and HuC software state,
 * then sanitize the enable_guc/guc_log_level modparams.
 */
182 void intel_uc_init_early(struct intel_uc *uc)
184 intel_guc_init_early(&uc->guc);
185 intel_huc_init_early(&uc->huc);
187 sanitize_options_early(uc);
/* Counterpart of intel_uc_init_early(): release the captured GuC error log. */
190 void intel_uc_cleanup_early(struct intel_uc *uc)
192 guc_free_load_err_log(&uc->guc);
/**
196 * intel_uc_init_mmio - setup uC MMIO access
197 * @uc: the intel_uc structure
 *
199 * Setup minimal state necessary for MMIO accesses later in the
200 * initialization sequence.
 */
202 void intel_uc_init_mmio(struct intel_uc *uc)
204 intel_guc_init_send_regs(&uc->guc);
/*
 * Pin the GuC log buffer object so it can be inspected after a failed
 * firmware load. No-op if there is no log vma, logging is disabled, or a
 * previous error log was already captured.
 */
207 static void guc_capture_load_err_log(struct intel_guc *guc)
209 if (!guc->log.vma || !intel_guc_log_get_level(&guc->log))
/* Take an extra reference only once; released in guc_free_load_err_log(). */
212 if (!guc->load_err_log)
213 guc->load_err_log = i915_gem_object_get(guc->log.vma->obj);
/* Drop the reference taken by guc_capture_load_err_log(), if any. */
218 static void guc_free_load_err_log(struct intel_guc *guc)
220 if (guc->load_err_log)
221 i915_gem_object_put(guc->load_err_log);
/*
225 * Events triggered while CT buffers are disabled are logged in the SCRATCH_15
226 * register using the same bits used in the CT message payload. Since our
227 * communication channel with guc is turned off at this point, we can save the
228 * message and handle it after we turn it back on.
 */
/* Clear any pending mmio-logged GuC event bits in SOFT_SCRATCH(15). */
230 static void guc_clear_mmio_msg(struct intel_guc *guc)
232 intel_uncore_write(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15), 0);
/*
 * Accumulate mmio-logged GuC events (masked by msg_enabled_mask) into
 * guc->mmio_msg under irq_lock, then clear the scratch register.
 */
235 static void guc_get_mmio_msg(struct intel_guc *guc)
239 spin_lock_irq(&guc->irq_lock);
241 val = intel_uncore_read(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15));
242 guc->mmio_msg |= val & guc->msg_enabled_mask;
/*
245 * clear all events, including the ones we're not currently servicing,
246 * to make sure we don't try to process a stale message if we enable
247 * handling of more events later.
 */
249 guc_clear_mmio_msg(guc);
251 spin_unlock_irq(&guc->irq_lock);
/*
 * Process a message previously saved in guc->mmio_msg, via the normal
 * recv-msg path. Must only run once CT communication is back up, since a
 * reply to GuC may be required.
 */
254 static void guc_handle_mmio_msg(struct intel_guc *guc)
256 struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
258 /* we need communication to be enabled to reply to GuC */
259 GEM_BUG_ON(guc->handler == intel_guc_to_host_event_handler_nop);
/* i915->irq_lock serializes against the interrupt-driven recv path. */
264 spin_lock_irq(&i915->irq_lock);
265 intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1);
266 spin_unlock_irq(&i915->irq_lock);
/* Thin wrapper: dispatch to the platform-specific interrupt reset hook. */
271 static void guc_reset_interrupts(struct intel_guc *guc)
273 guc->interrupts.reset(guc);
/* Thin wrapper: dispatch to the platform-specific interrupt enable hook. */
276 static void guc_enable_interrupts(struct intel_guc *guc)
278 guc->interrupts.enable(guc);
/* Thin wrapper: dispatch to the platform-specific interrupt disable hook. */
281 static void guc_disable_interrupts(struct intel_guc *guc)
283 guc->interrupts.disable(guc);
/*
 * Bring up host<->GuC communication: enable the CT channel, install the
 * CT-based send/handler vfuncs, replay any mmio-logged messages, then enable
 * interrupts and drain CT messages that arrived before interrupts were on.
 */
286 static int guc_enable_communication(struct intel_guc *guc)
288 struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
291 ret = intel_guc_ct_enable(&guc->ct);
/* From here on, send/receive go through the CT buffers. */
295 guc->send = intel_guc_send_ct;
296 guc->handler = intel_guc_to_host_event_handler_ct;
298 /* check for mmio messages received before/during the CT enable */
299 guc_get_mmio_msg(guc);
300 guc_handle_mmio_msg(guc);
302 guc_enable_interrupts(guc);
304 /* check for CT messages received before we enabled interrupts */
305 spin_lock_irq(&i915->irq_lock);
306 intel_guc_to_host_event_handler_ct(guc);
307 spin_unlock_irq(&i915->irq_lock);
309 DRM_INFO("GuC communication enabled\n");
/*
 * Abruptly stop communication (used on reset-prepare): halt CT processing,
 * swap in the nop send/handler vfuncs and discard any mmio-logged message.
 */
314 static void guc_stop_communication(struct intel_guc *guc)
316 intel_guc_ct_stop(&guc->ct);
318 guc->send = intel_guc_send_nop;
319 guc->handler = intel_guc_to_host_event_handler_nop;
321 guc_clear_mmio_msg(guc);
/*
 * Orderly teardown of host<->GuC communication: clear stale mmio state,
 * disable interrupts, install nop vfuncs, run the CT disable protocol, and
 * finally pick up any event GuC logged via mmio during the teardown.
 */
324 static void guc_disable_communication(struct intel_guc *guc)
/*
327 * Events generated during or after CT disable are logged by guc in
328 * via mmio. Make sure the register is clear before disabling CT since
329 * all events we cared about have already been processed via CT.
 */
331 guc_clear_mmio_msg(guc)
333 guc_disable_interrupts(guc);
335 guc->send = intel_guc_send_nop;
336 guc->handler = intel_guc_to_host_event_handler_nop;
338 intel_guc_ct_disable(&guc->ct);
/*
341 * Check for messages received during/after the CT disable. We do not
342 * expect any messages to have arrived via CT between the interrupt
343 * disable and the CT disable because GuC should've been idle until we
344 * triggered the CT disable protocol.
 */
346 guc_get_mmio_msg(guc);
348 DRM_INFO("GuC communication disabled\n");
/*
 * Request the GuC firmware blob (and the HuC blob when HuC is in use).
 * No-op when GuC is not enabled.
 */
351 void intel_uc_fetch_firmwares(struct intel_uc *uc)
353 struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
355 if (!intel_uc_is_using_guc(uc))
358 intel_uc_fw_fetch(i915, &uc->guc.fw);
360 if (intel_uc_is_using_huc(uc))
361 intel_uc_fw_fetch(i915, &uc->huc.fw);
/*
 * Release fetched firmware blobs, in reverse order of the fetch
 * (HuC first, then GuC). No-op when GuC is not enabled.
 */
364 void intel_uc_cleanup_firmwares(struct intel_uc *uc)
366 if (!intel_uc_is_using_guc(uc))
369 if (intel_uc_is_using_huc(uc))
370 intel_uc_fw_cleanup_fetch(&uc->huc.fw);
372 intel_uc_fw_cleanup_fetch(&uc->guc.fw);
/*
 * Full software init of the uC: GuC first, then optionally HuC, then
 * (when enabled) the GuC submission state needed at fw-load time.
 * NOTE(review): error-unwind lines of this function are not visible in this
 * truncated view — verify the unwind order against the upstream source.
 */
375 int intel_uc_init(struct intel_uc *uc)
377 struct intel_guc *guc = &uc->guc;
378 struct intel_huc *huc = &uc->huc;
381 if (!intel_uc_is_using_guc(uc))
384 if (!intel_uc_fw_supported(&guc->fw))
387 /* XXX: GuC submission is unavailable for now */
388 GEM_BUG_ON(intel_uc_is_using_guc_submission(uc));
390 ret = intel_guc_init(guc);
394 if (intel_uc_is_using_huc(uc)) {
395 ret = intel_huc_init(huc);
400 if (intel_uc_is_using_guc_submission(uc)) {
/*
402 * This is stuff we need to have available at fw load time
403 * if we are planning to enable submission later
 */
405 ret = intel_guc_submission_init(guc);
413 if (intel_uc_is_using_huc(uc))
/*
 * Tear down uC software state in reverse init order: submission state
 * first, then HuC, then (on a line outside this view) GuC itself.
 */
420 void intel_uc_fini(struct intel_uc *uc)
422 struct intel_guc *guc = &uc->guc;
424 if (!intel_uc_is_using_guc(uc))
427 GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw));
429 if (intel_uc_is_using_guc_submission(uc))
430 intel_guc_submission_fini(guc);
432 if (intel_uc_is_using_huc(uc))
433 intel_huc_fini(&uc->huc);
/*
 * Reset software tracking of both microcontrollers and then reset the
 * hardware, leaving GuC/HuC in a known "not loaded" state.
 */
438 static void __uc_sanitize(struct intel_uc *uc)
440 struct intel_guc *guc = &uc->guc;
441 struct intel_huc *huc = &uc->huc;
443 GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw));
445 intel_huc_sanitize(huc);
446 intel_guc_sanitize(guc);
448 __intel_uc_reset_hw(uc);
/* Public sanitize entry point; no-op when GuC is not in use. */
451 void intel_uc_sanitize(struct intel_uc *uc)
453 if (!intel_uc_is_using_guc(uc))
/*
 * Hardware init of the uC: reset, upload HuC and GuC firmware (with
 * retries around the GuC load), enable communication, authenticate HuC,
 * sample forcewake and optionally enable GuC submission.
 * NOTE(review): the retry loop header, several error checks and the success
 * return are on lines missing from this truncated view — confirm control
 * flow against the upstream source.
 */
459 int intel_uc_init_hw(struct intel_uc *uc)
461 struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
462 struct intel_guc *guc = &uc->guc;
463 struct intel_huc *huc = &uc->huc;
466 if (!intel_uc_is_using_guc(uc))
469 GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw));
471 guc_reset_interrupts(guc);
473 /* WaEnableuKernelHeaderValidFix:skl */
474 /* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
/*
482 * Always reset the GuC just before (re)loading, so
483 * that the state and timing are fairly predictable
 */
485 ret = __intel_uc_reset_hw(uc);
/* HuC firmware must be uploaded before GuC (GuC authenticates it later). */
489 if (intel_uc_is_using_huc(uc)) {
490 ret = intel_huc_fw_upload(huc);
495 intel_guc_ads_reset(guc);
496 intel_guc_write_params(guc);
497 ret = intel_guc_fw_upload(guc);
501 DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and "
502 "retry %d more time(s)\n", ret, attempts);
505 /* Did we succeded or run out of retries? */
507 goto err_log_capture;
509 ret = guc_enable_communication(guc);
511 goto err_log_capture;
/* With communication up, GuC can authenticate the HuC firmware. */
513 if (intel_uc_is_using_huc(uc)) {
514 ret = intel_huc_auth(huc);
516 goto err_communication;
519 ret = intel_guc_sample_forcewake(guc);
521 goto err_communication;
523 if (intel_uc_is_using_guc_submission(uc)) {
524 ret = intel_guc_submission_enable(guc);
526 goto err_communication;
529 dev_info(i915->drm.dev, "GuC firmware version %u.%u\n",
530 guc->fw.major_ver_found, guc->fw.minor_ver_found);
531 dev_info(i915->drm.dev, "GuC submission %s\n",
532 enableddisabled(intel_uc_is_using_guc_submission(uc)));
533 dev_info(i915->drm.dev, "HuC %s\n",
534 enableddisabled(intel_uc_is_using_huc(uc)));
/*
539 * We've failed to load the firmware :(
 *
 * Error-unwind labels: tear down communication, then capture the GuC
 * log for post-mortem before reporting the failure.
 */
542 guc_disable_communication(guc);
544 guc_capture_load_err_log(guc);
/*
549 * Note that there is no fallback as either user explicitly asked for
550 * the GuC or driver default option was to run with the GuC enabled.
 */
552 if (GEM_WARN_ON(ret == -EIO))
555 dev_err(i915->drm.dev, "GuC initialization failed %d\n", ret);
/*
 * Hardware teardown: disable GuC submission (if enabled) and shut down
 * communication. No-op unless GuC is actually running.
 */
559 void intel_uc_fini_hw(struct intel_uc *uc)
561 struct intel_guc *guc = &uc->guc;
563 if (!intel_guc_is_running(guc))
566 GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw));
568 if (intel_uc_is_using_guc_submission(uc))
569 intel_guc_submission_disable(guc);
571 guc_disable_communication(guc);
/**
576 * intel_uc_reset_prepare - Prepare for reset
577 * @uc: the intel_uc structure
 *
579 * Preparing for full gpu reset.
 */
581 void intel_uc_reset_prepare(struct intel_uc *uc)
583 struct intel_guc *guc = &uc->guc;
585 if (!intel_guc_is_running(guc))
/* Stop (rather than orderly disable) CT: the GPU is about to be reset. */
588 guc_stop_communication(guc);
/*
 * Runtime-suspend path: ask GuC to save its state (failure is only
 * logged, not fatal) and then disable communication.
 */
592 void intel_uc_runtime_suspend(struct intel_uc *uc)
594 struct intel_guc *guc = &uc->guc;
597 if (!intel_guc_is_running(guc))
600 err = intel_guc_suspend(guc);
602 DRM_DEBUG_DRIVER("Failed to suspend GuC, err=%d", err);
604 guc_disable_communication(guc);
/*
 * System-suspend path: reuse the runtime-suspend logic, holding a
 * runtime-pm wakeref for the duration.
 */
607 void intel_uc_suspend(struct intel_uc *uc)
609 struct intel_guc *guc = &uc->guc;
610 intel_wakeref_t wakeref;
612 if (!intel_guc_is_running(guc))
615 with_intel_runtime_pm(&uc_to_gt(uc)->i915->runtime_pm, wakeref)
616 intel_uc_runtime_suspend(uc);
/*
 * Resume path: re-enable communication, then ask GuC to restore its state
 * (failure is only logged). The tail of this function lies beyond this view.
 */
619 int intel_uc_resume(struct intel_uc *uc)
621 struct intel_guc *guc = &uc->guc;
624 if (!intel_guc_is_running(guc))
627 guc_enable_communication(guc);
629 err = intel_guc_resume(guc);
631 DRM_DEBUG_DRIVER("Failed to resume GuC, err=%d", err);