/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */
7 #include "i915_request.h"
9 #include "intel_gt_pm.h"
10 #include "intel_gt_requests.h"
11 #include "intel_timeline.h"
13 static void retire_requests(struct intel_timeline *tl)
15 struct i915_request *rq, *rn;
17 list_for_each_entry_safe(rq, rn, &tl->requests, link)
18 if (!i915_request_retire(rq))
22 long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
24 struct intel_gt_timelines *timelines = >->timelines;
25 struct intel_timeline *tl, *tn;
26 unsigned long active_count = 0;
32 if (unlikely(timeout < 0))
33 timeout = -timeout, interruptible = false;
35 spin_lock_irqsave(&timelines->lock, flags);
36 list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
37 if (!mutex_trylock(&tl->mutex))
40 intel_timeline_get(tl);
41 GEM_BUG_ON(!tl->active_count);
42 tl->active_count++; /* pin the list element */
43 spin_unlock_irqrestore(&timelines->lock, flags);
46 struct dma_fence *fence;
48 fence = i915_active_fence_get(&tl->last_request);
50 timeout = dma_fence_wait_timeout(fence,
59 spin_lock_irqsave(&timelines->lock, flags);
61 /* Resume iteration after dropping lock */
62 list_safe_reset_next(tl, tn, link);
63 if (--tl->active_count)
64 active_count += !!rcu_access_pointer(tl->last_request.fence);
68 mutex_unlock(&tl->mutex);
70 /* Defer the final release to after the spinlock */
71 if (refcount_dec_and_test(&tl->kref.refcount)) {
72 GEM_BUG_ON(tl->active_count);
73 list_add(&tl->link, &free);
76 spin_unlock_irqrestore(&timelines->lock, flags);
78 list_for_each_entry_safe(tl, tn, &free, link)
79 __intel_timeline_free(&tl->kref);
81 return active_count ? timeout : 0;
84 int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
86 /* If the device is asleep, we have no requests outstanding */
87 if (!intel_gt_pm_is_awake(gt))
90 while ((timeout = intel_gt_retire_requests_timeout(gt, timeout)) > 0) {
92 if (signal_pending(current))
99 static void retire_work_handler(struct work_struct *work)
101 struct intel_gt *gt =
102 container_of(work, typeof(*gt), requests.retire_work.work);
104 intel_gt_retire_requests(gt);
105 schedule_delayed_work(>->requests.retire_work,
106 round_jiffies_up_relative(HZ));
109 void intel_gt_init_requests(struct intel_gt *gt)
111 INIT_DELAYED_WORK(>->requests.retire_work, retire_work_handler);
114 void intel_gt_park_requests(struct intel_gt *gt)
116 cancel_delayed_work(>->requests.retire_work);
119 void intel_gt_unpark_requests(struct intel_gt *gt)
121 schedule_delayed_work(>->requests.retire_work,
122 round_jiffies_up_relative(HZ));