// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016-2019 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_guc_ct.h"

#ifdef CONFIG_DRM_I915_DEBUG_GUC
#define CT_DEBUG_DRIVER(...)    DRM_DEBUG_DRIVER(__VA_ARGS__)
#else
#define CT_DEBUG_DRIVER(...)    do { } while (0)
#endif

/* Tracks an in-flight Host-to-GuC request until its response is processed */
struct ct_request {
        struct list_head link;          /* node in ct->requests.pending */
        u32 fence;                      /* fence expected in the response */
        u32 status;                     /* response status, set by the handler */
        u32 response_len;               /* in: buffer size, out: payload len (dwords) */
        u32 *response_buf;              /* optional buffer for the response payload */
};

/* A GuC-to-Host request copied off the RECV buffer for deferred handling */
struct ct_incoming_request {
        struct list_head link;
        u32 msg[];
};

/* Indices into ct->ctbs[] */
enum { CTB_SEND = 0, CTB_RECV = 1 };

enum { CTB_OWNER_HOST = 0 };

static void ct_incoming_request_worker_func(struct work_struct *w);

/**
 * intel_guc_ct_init_early - Initialize CT state without requiring device access
 * @ct: pointer to CT struct
 */
void intel_guc_ct_init_early(struct intel_guc_ct *ct)
{
        spin_lock_init(&ct->requests.lock);
        INIT_LIST_HEAD(&ct->requests.pending);
        INIT_LIST_HEAD(&ct->requests.incoming);
        INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func);
}

static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
{
        return container_of(ct, struct intel_guc, ct);
}

static inline const char *guc_ct_buffer_type_to_str(u32 type)
{
        switch (type) {
        case INTEL_GUC_CT_BUFFER_TYPE_SEND:
                return "SEND";
        case INTEL_GUC_CT_BUFFER_TYPE_RECV:
                return "RECV";
        default:
                return "<invalid>";
        }
}

static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc,
                                    u32 cmds_addr, u32 size)
{
        CT_DEBUG_DRIVER("CT: init addr=%#x size=%u\n", cmds_addr, size);
        memset(desc, 0, sizeof(*desc));
        desc->addr = cmds_addr;
        desc->size = size;
        desc->owner = CTB_OWNER_HOST;
}

static void guc_ct_buffer_desc_reset(struct guc_ct_buffer_desc *desc)
{
        CT_DEBUG_DRIVER("CT: desc %p reset head=%u tail=%u\n",
                        desc, desc->head, desc->tail);
        desc->head = 0;
        desc->tail = 0;
        desc->is_in_error = 0;
}

static int guc_action_register_ct_buffer(struct intel_guc *guc,
                                         u32 desc_addr,
                                         u32 type)
{
        u32 action[] = {
                INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER,
                desc_addr,
                sizeof(struct guc_ct_buffer_desc),
                type
        };
        int err;

        /* Can't use generic send(), CT registration must go over MMIO */
        err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
        if (err)
                DRM_ERROR("CT: register %s buffer failed; err=%d\n",
                          guc_ct_buffer_type_to_str(type), err);
        return err;
}

static int guc_action_deregister_ct_buffer(struct intel_guc *guc,
                                           u32 type)
{
        u32 action[] = {
                INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER,
                CTB_OWNER_HOST,
                type
        };
        int err;

        /* Can't use generic send(), CT deregistration must go over MMIO */
        err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
        if (err)
                DRM_ERROR("CT: deregister %s buffer failed; err=%d\n",
                          guc_ct_buffer_type_to_str(type), err);
        return err;
}

/**
 * intel_guc_ct_init - Init buffer-based communication
 * @ct: pointer to CT struct
 *
 * Allocate memory required for buffer-based communication.
 *
 * Return: 0 on success, a negative errno code on failure.
 */
int intel_guc_ct_init(struct intel_guc_ct *ct)
{
        struct intel_guc *guc = ct_to_guc(ct);
        void *blob;
        int err;
        int i;

        GEM_BUG_ON(ct->vma);

        /* We allocate 1 page to hold both descriptors and both buffers.
         *       ___________.....................
         *      |desc (SEND)|                   :
         *      |___________|                   PAGE/4
         *      :___________....................:
         *      |desc (RECV)|                   :
         *      |___________|                   PAGE/4
         *      :_______________________________:
         *      |cmds (SEND)                    |
         *      |                               PAGE/4
         *      |_______________________________|
         *      |cmds (RECV)                    |
         *      |                               PAGE/4
         *      |_______________________________|
         *
         * Each message can use a maximum of 32 dwords and we don't expect to
         * have more than 1 in flight at any time, so we have enough space.
         * Some logic further ahead will rely on the fact that there is only 1
         * page and that it is always mapped, so if the size is changed the
         * other code will need updating as well.
         */
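
        /*
         * For example, with a 4 KiB page the layout above works out to:
         * SEND desc at blob + 0, RECV desc at blob + 1024, SEND cmds at
         * blob + 2048 and RECV cmds at blob + 3072, matching the
         * blob + PAGE_SIZE/4 * i (+ PAGE_SIZE/2) arithmetic below.
         */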

        err = intel_guc_allocate_and_map_vma(guc, PAGE_SIZE, &ct->vma, &blob);
        if (err) {
                DRM_ERROR("CT: channel allocation failed; err=%d\n", err);
                return err;
        }

        CT_DEBUG_DRIVER("CT: vma base=%#x\n",
                        intel_guc_ggtt_offset(guc, ct->vma));

        /* store pointers to desc and cmds */
        for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) {
                GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
                ct->ctbs[i].desc = blob + PAGE_SIZE/4 * i;
                ct->ctbs[i].cmds = blob + PAGE_SIZE/4 * i + PAGE_SIZE/2;
        }

        return 0;
}

/**
 * intel_guc_ct_fini - Fini buffer-based communication
 * @ct: pointer to CT struct
 *
 * Deallocate memory required for buffer-based communication.
 */
void intel_guc_ct_fini(struct intel_guc_ct *ct)
{
        GEM_BUG_ON(ct->enabled);

        i915_vma_unpin_and_release(&ct->vma, I915_VMA_RELEASE_MAP);
}

/**
 * intel_guc_ct_enable - Enable buffer based command transport.
 * @ct: pointer to CT struct
 *
 * Return: 0 on success, a negative errno code on failure.
 */
int intel_guc_ct_enable(struct intel_guc_ct *ct)
{
        struct intel_guc *guc = ct_to_guc(ct);
        u32 base;
        int err;
        int i;

        GEM_BUG_ON(ct->enabled);

        /* vma should be already allocated and mapped */
        GEM_BUG_ON(!ct->vma);
        base = intel_guc_ggtt_offset(guc, ct->vma);

        /* (re)initialize descriptors;
         * cmds buffers are in the second half of the blob page
         */
        for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) {
                GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
                guc_ct_buffer_desc_init(ct->ctbs[i].desc,
                                        base + PAGE_SIZE/4 * i + PAGE_SIZE/2,
                                        PAGE_SIZE/4);
        }

        /* register buffers, starting with the RECV buffer;
         * descriptors are in the first half of the blob
         */
        err = guc_action_register_ct_buffer(guc,
                                            base + PAGE_SIZE/4 * CTB_RECV,
                                            INTEL_GUC_CT_BUFFER_TYPE_RECV);
        if (unlikely(err))
                goto err_out;

        err = guc_action_register_ct_buffer(guc,
                                            base + PAGE_SIZE/4 * CTB_SEND,
                                            INTEL_GUC_CT_BUFFER_TYPE_SEND);
        if (unlikely(err))
                goto err_deregister;

        ct->enabled = true;

        return 0;

err_deregister:
        guc_action_deregister_ct_buffer(guc,
                                        INTEL_GUC_CT_BUFFER_TYPE_RECV);
err_out:
        DRM_ERROR("CT: can't open channel; err=%d\n", err);
        return err;
}

/**
 * intel_guc_ct_disable - Disable buffer based command transport.
 * @ct: pointer to CT struct
 */
void intel_guc_ct_disable(struct intel_guc_ct *ct)
{
        struct intel_guc *guc = ct_to_guc(ct);

        GEM_BUG_ON(!ct->enabled);

        ct->enabled = false;

        if (intel_guc_is_running(guc)) {
                guc_action_deregister_ct_buffer(guc,
                                                INTEL_GUC_CT_BUFFER_TYPE_SEND);
                guc_action_deregister_ct_buffer(guc,
                                                INTEL_GUC_CT_BUFFER_TYPE_RECV);
        }
}

static u32 ct_get_next_fence(struct intel_guc_ct *ct)
{
        /* For now it's trivial */
        return ++ct->requests.next_fence;
}

/**
 * DOC: CTB Host to GuC request
 *
 * Format of the CTB Host to GuC request message is as follows::
 *
 *      +------------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD                 |
 *      +   HEADER   +---------+---------+---------+---------+
 *      |            |    0    |    1    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+
 *      |  len >= 1  |  FENCE  |     request specific data   |
 *      +------+-----+---------+---------+---------+---------+
 *
 *                   ^-----------------len-------------------^
 */
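
/*
 * Worked example (illustrative): ctb_write() below encodes an action array
 * {A, p0, p1} (len = 3) sent with fence F as the dword stream
 *
 *      msg[0] = header(len = 3, action = A, flags)
 *      msg[1] = F
 *      msg[2] = p0
 *      msg[3] = p1
 *
 * i.e. len counts the fence and the action payload, but not the header.
 */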

static int ctb_write(struct intel_guc_ct_buffer *ctb,
                     const u32 *action,
                     u32 len /* in dwords */,
                     u32 fence,
                     bool want_response)
{
        struct guc_ct_buffer_desc *desc = ctb->desc;
        u32 head = desc->head / 4;      /* in dwords */
        u32 tail = desc->tail / 4;      /* in dwords */
        u32 size = desc->size / 4;      /* in dwords */
        u32 used;                       /* in dwords */
        u32 header;
        u32 *cmds = ctb->cmds;
        unsigned int i;

        GEM_BUG_ON(desc->size % 4);
        GEM_BUG_ON(desc->head % 4);
        GEM_BUG_ON(desc->tail % 4);
        GEM_BUG_ON(tail >= size);

        /*
         * tail == head condition indicates empty. GuC FW does not support
         * using up the entire buffer to get tail == head meaning full.
         */
        if (tail < head)
                used = (size - head) + tail;
        else
                used = tail - head;
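
        /*
         * E.g. with size = 1024 dwords, head = 1000 and tail = 8, the
         * buffer has wrapped and used = (1024 - 1000) + 8 = 32 dwords.
         */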

        /* make sure there is space, including an extra dword for the fence */
        if (unlikely(used + len + 1 >= size))
                return -ENOSPC;

        /*
         * Write the message. The format is the following:
         * DW0: header (including action code)
         * DW1: fence
         * DW2+: action data
         */
        header = (len << GUC_CT_MSG_LEN_SHIFT) |
                 (GUC_CT_MSG_WRITE_FENCE_TO_DESC) |
                 (want_response ? GUC_CT_MSG_SEND_STATUS : 0) |
                 (action[0] << GUC_CT_MSG_ACTION_SHIFT);

        CT_DEBUG_DRIVER("CT: writing %*ph %*ph %*ph\n",
                        4, &header, 4, &fence,
                        4 * (len - 1), &action[1]);

        cmds[tail] = header;
        tail = (tail + 1) % size;

        cmds[tail] = fence;
        tail = (tail + 1) % size;

        for (i = 1; i < len; i++) {
                cmds[tail] = action[i];
                tail = (tail + 1) % size;
        }

        /* now update desc tail (back in bytes) */
        desc->tail = tail * 4;
        GEM_BUG_ON(desc->tail > desc->size);

        return 0;
}

/**
 * wait_for_ctb_desc_update - Wait for the CT buffer descriptor update.
 * @desc:       buffer descriptor
 * @fence:      response fence
 * @status:     placeholder for status
 *
 * GuC will update the CT buffer descriptor with a new fence and status
 * after processing the command identified by the fence. Wait for the
 * specified fence and then read the command's status from the descriptor.
 *
 * Return:
 * *    0 response received (status is valid)
 * *    -ETIMEDOUT no response within hardcoded timeout
 * *    -EPROTO no response, CT buffer is in error
 */
static int wait_for_ctb_desc_update(struct guc_ct_buffer_desc *desc,
                                    u32 fence,
                                    u32 *status)
{
        int err;

        /*
         * Fast commands should complete in less than 10us, so sample quickly
         * up to that length of time, then switch to a slower sleep-wait loop.
         * No GuC command should ever take longer than 10ms.
         */
#define done (READ_ONCE(desc->fence) == fence)
        err = wait_for_us(done, 10);
        if (err)
                err = wait_for(done, 10);
#undef done

        if (unlikely(err)) {
                DRM_ERROR("CT: fence %u failed; reported fence=%u\n",
                          fence, desc->fence);

                if (WARN_ON(desc->is_in_error)) {
                        /* Something went wrong with the messaging, try to
                         * reset the buffer and hope for the best
                         */
                        guc_ct_buffer_desc_reset(desc);
                        err = -EPROTO;
                }
        }

        *status = desc->status;
        return err;
}

/**
 * wait_for_ct_request_update - Wait for CT request state update.
 * @req:        pointer to pending request
 * @status:     placeholder for status
 *
 * For each sent request, GuC shall send back a CT response message.
 * Our message handler will update the status of the tracked request
 * once a response message with the given fence is received. Wait here
 * and check for a valid response status value.
 *
 * Return:
 * *    0 response received (status is valid)
 * *    -ETIMEDOUT no response within hardcoded timeout
 */
static int wait_for_ct_request_update(struct ct_request *req, u32 *status)
{
        int err;

        /*
         * Fast commands should complete in less than 10us, so sample quickly
         * up to that length of time, then switch to a slower sleep-wait loop.
         * No GuC command should ever take longer than 10ms.
         */
#define done INTEL_GUC_MSG_IS_RESPONSE(READ_ONCE(req->status))
        err = wait_for_us(done, 10);
        if (err)
                err = wait_for(done, 10);
#undef done

        if (unlikely(err))
                DRM_ERROR("CT: fence %u err %d\n", req->fence, err);

        *status = req->status;
        return err;
}

static int ct_send(struct intel_guc_ct *ct,
                   const u32 *action,
                   u32 len,
                   u32 *response_buf,
                   u32 response_buf_size,
                   u32 *status)
{
        struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_SEND];
        struct guc_ct_buffer_desc *desc = ctb->desc;
        struct ct_request request;
        unsigned long flags;
        u32 fence;
        int err;

        GEM_BUG_ON(!ct->enabled);
        GEM_BUG_ON(!len);
        GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
        GEM_BUG_ON(!response_buf && response_buf_size);

        fence = ct_get_next_fence(ct);
        request.fence = fence;
        request.status = 0;
        request.response_len = response_buf_size;
        request.response_buf = response_buf;

        spin_lock_irqsave(&ct->requests.lock, flags);
        list_add_tail(&request.link, &ct->requests.pending);
        spin_unlock_irqrestore(&ct->requests.lock, flags);

        err = ctb_write(ctb, action, len, fence, !!response_buf);
        if (unlikely(err))
                goto unlink;

        intel_guc_notify(ct_to_guc(ct));

        if (response_buf)
                err = wait_for_ct_request_update(&request, status);
        else
                err = wait_for_ctb_desc_update(desc, fence, status);
        if (unlikely(err))
                goto unlink;

        if (!INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(*status)) {
                err = -EIO;
                goto unlink;
        }

        if (response_buf) {
                /* There shall be no data in the status */
                WARN_ON(INTEL_GUC_MSG_TO_DATA(request.status));
                /* Return actual response len */
                err = request.response_len;
        } else {
                /* There shall be no response payload */
                WARN_ON(request.response_len);
                /* Return data decoded from the status dword */
                err = INTEL_GUC_MSG_TO_DATA(*status);
        }

unlink:
        spin_lock_irqsave(&ct->requests.lock, flags);
        list_del(&request.link);
        spin_unlock_irqrestore(&ct->requests.lock, flags);

        return err;
}

/*
 * Command Transport (CT) buffer based GuC send function.
 */
int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len,
                      u32 *response_buf, u32 response_buf_size)
{
        struct intel_guc_ct *ct = &guc->ct;
        u32 status = ~0; /* undefined */
        int ret;

        mutex_lock(&guc->send_mutex);

        ret = ct_send(ct, action, len, response_buf, response_buf_size, &status);
        if (unlikely(ret < 0)) {
                DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n",
                          action[0], ret, status);
        } else if (unlikely(ret)) {
                CT_DEBUG_DRIVER("CT: send action %#x returned %d (%#x)\n",
                                action[0], ret, ret);
        }

        mutex_unlock(&guc->send_mutex);
        return ret;
}
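
/*
 * Usage sketch (illustrative only; 0x123 is a hypothetical action code,
 * not a real GuC action):
 *
 *      u32 action[] = { 0x123, param };
 *      u32 resp[8];
 *      int ret;
 *
 *      ret = intel_guc_send_ct(guc, action, ARRAY_SIZE(action),
 *                              resp, ARRAY_SIZE(resp));
 *
 * A negative ret is an error; otherwise ret is the response payload length
 * in dwords (or data decoded from the status dword when no response buffer
 * is passed).
 */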

static inline unsigned int ct_header_get_len(u32 header)
{
        return (header >> GUC_CT_MSG_LEN_SHIFT) & GUC_CT_MSG_LEN_MASK;
}

static inline unsigned int ct_header_get_action(u32 header)
{
        return (header >> GUC_CT_MSG_ACTION_SHIFT) & GUC_CT_MSG_ACTION_MASK;
}

static inline bool ct_header_is_response(u32 header)
{
        return !!(header & GUC_CT_MSG_IS_RESPONSE);
}

static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data)
{
        struct guc_ct_buffer_desc *desc = ctb->desc;
        u32 head = desc->head / 4;      /* in dwords */
        u32 tail = desc->tail / 4;      /* in dwords */
        u32 size = desc->size / 4;      /* in dwords */
        u32 *cmds = ctb->cmds;
        s32 available;                  /* in dwords */
        unsigned int len;
        unsigned int i;

        GEM_BUG_ON(desc->size % 4);
        GEM_BUG_ON(desc->head % 4);
        GEM_BUG_ON(desc->tail % 4);
        GEM_BUG_ON(tail >= size);
        GEM_BUG_ON(head >= size);

        /* tail == head condition indicates empty */
        available = tail - head;
        if (unlikely(available == 0))
                return -ENODATA;

        /* beware of buffer wrap case */
        if (unlikely(available < 0))
                available += size;
        CT_DEBUG_DRIVER("CT: available %d (%u:%u)\n", available, head, tail);
        GEM_BUG_ON(available < 0);

        data[0] = cmds[head];
        head = (head + 1) % size;

        /* message len with header */
        len = ct_header_get_len(data[0]) + 1;
        if (unlikely(len > (u32)available)) {
                DRM_ERROR("CT: incomplete message %*ph %*ph %*ph\n",
                          4, data,
                          4 * (head + available - 1 > size ?
                               size - head : available - 1), &cmds[head],
                          4 * (head + available - 1 > size ?
                               available - 1 - size + head : 0), &cmds[0]);
                return -EPROTO;
        }

        for (i = 1; i < len; i++) {
                data[i] = cmds[head];
                head = (head + 1) % size;
        }
        CT_DEBUG_DRIVER("CT: received %*ph\n", 4 * len, data);

        desc->head = head * 4;
        return 0;
}

/**
 * DOC: CTB GuC to Host response
 *
 * Format of the CTB GuC to Host response message is as follows::
 *
 *      +------------+---------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   [3]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD                           |
 *      +   HEADER   +---------+---------+---------+---------+---------+
 *      |            |    0    |    1    |    2    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+=========+
 *      |  len >= 2  |  FENCE  |  STATUS |   response specific data    |
 *      +------+-----+---------+---------+---------+---------+---------+
 *
 *                   ^-----------------------len-----------------------^
 */
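
/*
 * For example (illustrative), a response carrying two data dwords has
 * len = 4: FENCE, STATUS, data0 and data1 follow the header.
 */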

static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
{
        u32 header = msg[0];
        u32 len = ct_header_get_len(header);
        u32 msglen = len + 1; /* total message length including header */
        u32 fence;
        u32 status;
        u32 datalen;
        struct ct_request *req;
        bool found = false;

        GEM_BUG_ON(!ct_header_is_response(header));
        GEM_BUG_ON(!in_irq());

        /* Response payload shall at least include fence and status */
        if (unlikely(len < 2)) {
                DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg);
                return -EPROTO;
        }

        fence = msg[1];
        status = msg[2];
        datalen = len - 2;

        /* Format of the status follows the RESPONSE message */
        if (unlikely(!INTEL_GUC_MSG_IS_RESPONSE(status))) {
                DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg);
                return -EPROTO;
        }

        CT_DEBUG_DRIVER("CT: response fence %u status %#x\n", fence, status);

        spin_lock(&ct->requests.lock);
        list_for_each_entry(req, &ct->requests.pending, link) {
                if (unlikely(fence != req->fence)) {
                        CT_DEBUG_DRIVER("CT: request %u awaits response\n",
                                        req->fence);
                        continue;
                }
                if (unlikely(datalen > req->response_len)) {
                        DRM_ERROR("CT: response %u too long %*ph\n",
                                  req->fence, 4 * msglen, msg);
                        datalen = 0;
                }
                if (datalen)
                        memcpy(req->response_buf, msg + 3, 4 * datalen);
                req->response_len = datalen;
                WRITE_ONCE(req->status, status);
                found = true;
                break;
        }
        spin_unlock(&ct->requests.lock);

        if (!found)
                DRM_ERROR("CT: unsolicited response %*ph\n", 4 * msglen, msg);
        return 0;
}

static void ct_process_request(struct intel_guc_ct *ct,
                               u32 action, u32 len, const u32 *payload)
{
        struct intel_guc *guc = ct_to_guc(ct);
        int ret;

        CT_DEBUG_DRIVER("CT: request %x %*ph\n", action, 4 * len, payload);

        switch (action) {
        case INTEL_GUC_ACTION_DEFAULT:
                ret = intel_guc_to_host_process_recv_msg(guc, payload, len);
                if (unlikely(ret))
                        goto fail_unexpected;
                break;

        default:
fail_unexpected:
                DRM_ERROR("CT: unexpected request %x %*ph\n",
                          action, 4 * len, payload);
                break;
        }
}

static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
{
        unsigned long flags;
        struct ct_incoming_request *request;
        u32 header;
        u32 *payload;
        bool done;

        spin_lock_irqsave(&ct->requests.lock, flags);
        request = list_first_entry_or_null(&ct->requests.incoming,
                                           struct ct_incoming_request, link);
        if (request)
                list_del(&request->link);
        done = !!list_empty(&ct->requests.incoming);
        spin_unlock_irqrestore(&ct->requests.lock, flags);

        if (!request)
                return true;

        header = request->msg[0];
        payload = &request->msg[1];
        ct_process_request(ct,
                           ct_header_get_action(header),
                           ct_header_get_len(header),
                           payload);

        kfree(request);
        return done;
}

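/*
 * The worker handles one queued request per invocation and re-queues
 * itself while more requests are pending, keeping each work item short.
 */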
static void ct_incoming_request_worker_func(struct work_struct *w)
{
        struct intel_guc_ct *ct =
                container_of(w, struct intel_guc_ct, requests.worker);
        bool done;

        done = ct_process_incoming_requests(ct);
        if (!done)
                queue_work(system_unbound_wq, &ct->requests.worker);
}

/**
 * DOC: CTB GuC to Host request
 *
 * Format of the CTB GuC to Host request message is as follows::
 *
 *      +------------+---------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   [3]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD                           |
 *      +   HEADER   +---------+---------+---------+---------+---------+
 *      |            |    0    |    1    |    2    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+=========+
 *      |     len    |            request specific data                |
 *      +------+-----+---------+---------+---------+---------+---------+
 *
 *                   ^-----------------------len-----------------------^
 */
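
/*
 * For example (illustrative), a request carrying three data dwords has
 * len = 3; the action code travels in the header, not in the payload.
 */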

static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
{
        u32 header = msg[0];
        u32 len = ct_header_get_len(header);
        u32 msglen = len + 1; /* total message length including header */
        struct ct_incoming_request *request;
        unsigned long flags;

        GEM_BUG_ON(ct_header_is_response(header));

        request = kmalloc(sizeof(*request) + 4 * msglen, GFP_ATOMIC);
        if (unlikely(!request)) {
                DRM_ERROR("CT: dropping request %*ph\n", 4 * msglen, msg);
                return 0; /* XXX: -ENOMEM ? */
        }
        memcpy(request->msg, msg, 4 * msglen);

        spin_lock_irqsave(&ct->requests.lock, flags);
        list_add_tail(&request->link, &ct->requests.incoming);
        spin_unlock_irqrestore(&ct->requests.lock, flags);

        queue_work(system_unbound_wq, &ct->requests.worker);
        return 0;
}

/*
 * When we're communicating with the GuC over CT, GuC uses events
 * to notify us about new messages being posted on the RECV buffer.
 */
void intel_guc_to_host_event_handler_ct(struct intel_guc *guc)
{
        struct intel_guc_ct *ct = &guc->ct;
        struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_RECV];
        u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
        int err = 0;

        if (!ct->enabled)
                return;

        do {
                err = ctb_read(ctb, msg);
                if (err)
                        break;

                if (ct_header_is_response(msg[0]))
                        err = ct_handle_response(ct, msg);
                else
                        err = ct_handle_request(ct, msg);
        } while (!err);

        if (GEM_WARN_ON(err == -EPROTO)) {
                DRM_ERROR("CT: corrupted message detected!\n");
                ctb->desc->is_in_error = 1;
        }
}