drm/amdkfd: queue kfd interrupt work to different CPU
author Philip Yang <Philip.Yang@amd.com>
Wed, 30 Jan 2019 20:29:34 +0000 (15:29 -0500)
committer Alex Deucher <alexander.deucher@amd.com>
Wed, 18 Dec 2019 21:09:05 +0000 (16:09 -0500)
Because queue_work() schedules the work on the same CPU that the interrupt
handler is running on, if there are many interrupts pending, it takes a
long time for the work item to start, or, even worse, the system will hang.

v2: queue the work to the same NUMA node for better cache locality
v3: handle the cpumask_next wraparound case
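
To illustrate the v3 wraparound handling: cpumask_next() returns nr_cpu_ids
once the scan runs past the last online CPU, and the "% nr_cpu_ids" wraps
the search back to CPU 0 so every online CPU is considered before the loop
gives up. Below is a minimal user-space sketch of the selection loop, not
kernel code: NR_CPUS, next_online(), node_of() and the fixed two-node
topology are illustrative stand-ins for nr_cpu_ids, cpumask_next(),
cpu_to_node() and a real machine.

    #include <stdio.h>

    #define NR_CPUS 4    /* stand-in for nr_cpu_ids */

    /*
     * Stand-in for cpumask_next() with every CPU online: returns the
     * next CPU id, or NR_CPUS once the scan runs past the last one,
     * matching cpumask_next()'s past-the-end return value.
     */
    static int next_online(int cpu)
    {
            return cpu + 1;
    }

    /* Stand-in for cpu_to_node(): CPUs 0-1 on node 0, CPUs 2-3 on node 1. */
    static int node_of(int cpu)
    {
            return cpu / 2;
    }

    int main(void)
    {
            int self = 3;                   /* pretend smp_processor_id() */
            int node = node_of(self);       /* pretend numa_node_id() */
            int cpu, new_cpu;

            cpu = new_cpu = self;
            do {
                    /*
                     * The % NR_CPUS wraps the past-the-end value back to
                     * CPU 0 (the v3 fix); without it the scan would walk
                     * off the end of the mask.
                     */
                    new_cpu = next_online(new_cpu) % NR_CPUS;
                    if (node_of(new_cpu) == node)
                            break;
            } while (cpu != new_cpu);

            /* Prints: interrupt on CPU 3, work queued on CPU 2 */
            printf("interrupt on CPU %d, work queued on CPU %d\n",
                   self, new_cpu);
            return 0;
    }

If the current CPU is the only online CPU on its node, the scan comes back
around to the current CPU, the node test matches, and the work is queued
locally, the same behavior as before the patch.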

Signed-off-by: Philip Yang <Philip.Yang@amd.com>
Reviewed-by: Eric Huang <JinhuiEric.Huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 209bfc849352b9f20abb06e9d1930c1257f69914..c6b6901bbda3bcc8b8b44f2cf78907bd5683e0c4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -822,6 +822,21 @@ static int kfd_resume(struct kfd_dev *kfd)
        return err;
 }
 
+static inline void kfd_queue_work(struct workqueue_struct *wq,
+                                 struct work_struct *work)
+{
+       int cpu, new_cpu;
+
+       cpu = new_cpu = smp_processor_id();
+       do {
+               new_cpu = cpumask_next(new_cpu, cpu_online_mask) % nr_cpu_ids;
+               if (cpu_to_node(new_cpu) == numa_node_id())
+                       break;
+       } while (cpu != new_cpu);
+
+       queue_work_on(new_cpu, wq, work);
+}
+
 /* This is called directly from KGD at ISR. */
 void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
 {
@@ -844,7 +859,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
                                   patched_ihre, &is_patched)
            && enqueue_ih_ring_entry(kfd,
                                     is_patched ? patched_ihre : ih_ring_entry))
-               queue_work(kfd->ih_wq, &kfd->interrupt_work);
+               kfd_queue_work(kfd->ih_wq, &kfd->interrupt_work);
 
        spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
 }