/*
 * Copyright © 2014 Red Hat
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission.  The copyright holders make no representations
 * about the suitability of this software for any purpose.  It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/seq_file.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

#include "drm_crtc_helper_internal.h"
#include "drm_dp_mst_topology_internal.h"

/**
 * DOC: dp mst helper
 *
 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
 * protocol. The helpers contain a topology manager and bandwidth manager.
 * The helpers encapsulate the sending and receiving of sideband msgs.
 */
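
/*
 * A minimal usage sketch (illustrative only; the variable names are made up
 * and the init arguments vary between drivers and kernel versions): a driver
 * typically creates the topology manager once at connector setup time, and
 * enables MST when it detects an MST-capable branch device on the link:
 *
 *	ret = drm_dp_mst_topology_mgr_init(&mgr, dev, &aux,
 *					   max_dpcd_transaction_bytes,
 *					   max_payloads, conn_base_id);
 *	if (ret)
 *		return ret;
 *	...
 *	ret = drm_dp_mst_topology_mgr_set_mst(&mgr, true);
 */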
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf);

static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id,
				     struct drm_dp_payload *payload);

static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port,
				 int offset, int size, u8 *bytes);
static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes);

static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_mst_branch *mstb);

static void
drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_mst_branch *mstb);

static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid);

static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);

#define DBG_PREFIX "[dp_mst]"

#define DP_STR(x) [DP_ ## x] = #x

static const char *drm_dp_mst_req_type_str(u8 req_type)
{
	static const char * const req_type_str[] = {
		DP_STR(GET_MSG_TRANSACTION_VERSION),
		DP_STR(LINK_ADDRESS),
		DP_STR(CONNECTION_STATUS_NOTIFY),
		DP_STR(ENUM_PATH_RESOURCES),
		DP_STR(ALLOCATE_PAYLOAD),
		DP_STR(QUERY_PAYLOAD),
		DP_STR(RESOURCE_STATUS_NOTIFY),
		DP_STR(CLEAR_PAYLOAD_ID_TABLE),
		DP_STR(REMOTE_DPCD_READ),
		DP_STR(REMOTE_DPCD_WRITE),
		DP_STR(REMOTE_I2C_READ),
		DP_STR(REMOTE_I2C_WRITE),
		DP_STR(POWER_UP_PHY),
		DP_STR(POWER_DOWN_PHY),
		DP_STR(SINK_EVENT_NOTIFY),
		DP_STR(QUERY_STREAM_ENC_STATUS),
	};

	if (req_type >= ARRAY_SIZE(req_type_str) ||
	    !req_type_str[req_type])
		return "unknown";

	return req_type_str[req_type];
}

#undef DP_STR
#define DP_STR(x) [DP_NAK_ ## x] = #x

static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
{
	static const char * const nak_reason_str[] = {
		DP_STR(WRITE_FAILURE),
		DP_STR(INVALID_READ),
		DP_STR(CRC_FAILURE),
		DP_STR(BAD_PARAM),
		DP_STR(DEFER),
		DP_STR(LINK_FAILURE),
		DP_STR(NO_RESOURCES),
		DP_STR(DPCD_FAIL),
		DP_STR(I2C_NAK),
		DP_STR(ALLOCATE_FAIL),
	};

	if (nak_reason >= ARRAY_SIZE(nak_reason_str) ||
	    !nak_reason_str[nak_reason])
		return "unknown";

	return nak_reason_str[nak_reason];
}

#undef DP_STR
#define DP_STR(x) [DRM_DP_SIDEBAND_TX_ ## x] = #x

static const char *drm_dp_mst_sideband_tx_state_str(int state)
{
	static const char * const sideband_reason_str[] = {
		DP_STR(QUEUED),
		DP_STR(START_SEND),
		DP_STR(SENT),
		DP_STR(RX),
		DP_STR(TIMEOUT),
	};

	if (state >= ARRAY_SIZE(sideband_reason_str) ||
	    !sideband_reason_str[state])
		return "unknown";

	return sideband_reason_str[state];
}

static int
drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len)
{
	int i;
	u8 unpacked_rad[16];

	for (i = 0; i < lct; i++) {
		if (i % 2)
			unpacked_rad[i] = rad[i / 2] >> 4;
		else
			unpacked_rad[i] = rad[i / 2] & 0xf;
	}

	/* TODO: Eventually add something to printk so we can format the rad
	 * like this: 1.2.3
	 */
	return snprintf(out, len, "%*phC", lct, unpacked_rad);
}

/* sideband msg handling */
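/*
 * Both CRCs below follow the DisplayPort 1.2 sideband message definitions:
 * a bitwise CRC-4 over the header nibbles (generator polynomial
 * x^4 + x + 1, hence the 0x13 XOR constant) and a bitwise CRC-8 over the
 * message body (generator polynomial x^8 + x^7 + x^6 + x^4 + x^2 + 1,
 * hence 0xd5). The second loop in each function shifts in the zero bits
 * that flush the final remainder out of the shift register.
 */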
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = num_nibbles * 4;
	u8 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x10) == 0x10)
			remainder ^= 0x13;
	}

	number_of_bits = 4;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x10) != 0)
			remainder ^= 0x13;
	}

	return remainder;
}

static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = number_of_bytes * 8;
	u16 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x100) == 0x100)
			remainder ^= 0xd5;
	}

	number_of_bits = 8;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x100) != 0)
			remainder ^= 0xd5;
	}

	return remainder & 0xff;
}

static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
{
	u8 size = 3;

	size += (hdr->lct / 2);
	return size;
}

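/*
 * On-wire sideband message header layout, as produced and consumed by the
 * two helpers below: byte 0 carries LCT (high nibble) and LCR (low nibble),
 * followed by LCT/2 RAD bytes, then one byte with the broadcast and
 * path_msg flags plus the 6-bit message body length, and a final byte with
 * the SOMT/EOMT flags, the sequence number, and the 4-bit header CRC.
 */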
static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int *len)
{
	int idx = 0;
	int i;
	u8 crc4;

	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		buf[idx++] = hdr->rad[i];
	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
		(hdr->msg_len & 0x3f);
	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);

	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
	buf[idx - 1] |= (crc4 & 0xf);

	*len = idx;
}

static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int buflen, u8 *hdrlen)
{
	u8 crc4;
	u8 len;
	int i;
	u8 idx;

	if (buf[0] == 0)
		return false;
	len = 3;
	len += ((buf[0] & 0xf0) >> 4) / 2;
	if (len > buflen)
		return false;
	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);

	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
		DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
		return false;
	}

	hdr->lct = (buf[0] & 0xf0) >> 4;
	hdr->lcr = (buf[0] & 0xf);
	idx = 1;
	for (i = 0; i < (hdr->lct / 2); i++)
		hdr->rad[i] = buf[idx++];
	hdr->broadcast = (buf[idx] >> 7) & 0x1;
	hdr->path_msg = (buf[idx] >> 6) & 0x1;
	hdr->msg_len = buf[idx] & 0x3f;
	idx++;
	hdr->somt = (buf[idx] >> 7) & 0x1;
	hdr->eomt = (buf[idx] >> 6) & 0x1;
	hdr->seqno = (buf[idx] >> 4) & 0x1;
	idx++;
	*hdrlen = idx;
	return true;
}

void
drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
			   struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	int i;
	u8 *buf = raw->msg;

	buf[idx++] = req->req_type & 0x7f;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
		idx++;
		break;
	case DP_ALLOCATE_PAYLOAD:
		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
			(req->u.allocate_payload.number_sdp_streams & 0xf);
		idx++;
		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn >> 8);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
		idx++;
		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
			idx++;
		}
		if (req->u.allocate_payload.number_sdp_streams & 1) {
			i = req->u.allocate_payload.number_sdp_streams - 1;
			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
			idx++;
		}
		break;
	case DP_QUERY_PAYLOAD:
		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
		idx++;
		break;
	case DP_REMOTE_DPCD_READ:
		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_read.num_bytes);
		idx++;
		break;

	case DP_REMOTE_DPCD_WRITE:
		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
		idx += req->u.dpcd_write.num_bytes;
		break;
	case DP_REMOTE_I2C_READ:
		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
		idx++;
		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
			idx++;
			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
			idx++;
			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
			idx += req->u.i2c_read.transactions[i].num_bytes;

			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
			idx++;
		}
		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_read.num_bytes_read);
		idx++;
		break;

	case DP_REMOTE_I2C_WRITE:
		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
		idx += req->u.i2c_write.num_bytes;
		break;
	}
	raw->cur_len = idx;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_encode_sideband_req);

/* Decode a sideband request we've encoded, mainly used for debugging */
int
drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
			   struct drm_dp_sideband_msg_req_body *req)
{
	const u8 *buf = raw->msg;
	int i, idx = 0;

	req->req_type = buf[idx++] & 0x7f;
	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		req->u.port_num.port_number = (buf[idx] >> 4) & 0xf;
		break;
	case DP_ALLOCATE_PAYLOAD:
		{
			struct drm_dp_allocate_payload *a =
				&req->u.allocate_payload;

			a->number_sdp_streams = buf[idx] & 0xf;
			a->port_number = (buf[idx] >> 4) & 0xf;

			WARN_ON(buf[++idx] & 0x80);
			a->vcpi = buf[idx] & 0x7f;

			a->pbn = buf[++idx] << 8;
			a->pbn |= buf[++idx];

			idx++;
			for (i = 0; i < a->number_sdp_streams; i++) {
				a->sdp_stream_sink[i] =
					(buf[idx + (i / 2)] >> ((i % 2) ? 0 : 4)) & 0xf;
			}
		}
		break;
	case DP_QUERY_PAYLOAD:
		req->u.query_payload.port_number = (buf[idx] >> 4) & 0xf;
		WARN_ON(buf[++idx] & 0x80);
		req->u.query_payload.vcpi = buf[idx] & 0x7f;
		break;
	case DP_REMOTE_DPCD_READ:
		{
			struct drm_dp_remote_dpcd_read *r = &req->u.dpcd_read;

			r->port_number = (buf[idx] >> 4) & 0xf;

			r->dpcd_address = (buf[idx] << 16) & 0xf0000;
			r->dpcd_address |= (buf[++idx] << 8) & 0xff00;
			r->dpcd_address |= buf[++idx] & 0xff;

			r->num_bytes = buf[++idx];
		}
		break;
	case DP_REMOTE_DPCD_WRITE:
		{
			struct drm_dp_remote_dpcd_write *w =
				&req->u.dpcd_write;

			w->port_number = (buf[idx] >> 4) & 0xf;

			w->dpcd_address = (buf[idx] << 16) & 0xf0000;
			w->dpcd_address |= (buf[++idx] << 8) & 0xff00;
			w->dpcd_address |= buf[++idx] & 0xff;

			w->num_bytes = buf[++idx];

			w->bytes = kmemdup(&buf[++idx], w->num_bytes,
					   GFP_KERNEL);
			if (!w->bytes)
				return -ENOMEM;
		}
		break;
	case DP_REMOTE_I2C_READ:
		{
			struct drm_dp_remote_i2c_read *r = &req->u.i2c_read;
			struct drm_dp_remote_i2c_read_tx *tx;
			bool failed = false;

			r->num_transactions = buf[idx] & 0x3;
			r->port_number = (buf[idx] >> 4) & 0xf;
			for (i = 0; i < r->num_transactions; i++) {
				tx = &r->transactions[i];

				tx->i2c_dev_id = buf[++idx] & 0x7f;
				tx->num_bytes = buf[++idx];
				tx->bytes = kmemdup(&buf[++idx],
						    tx->num_bytes,
						    GFP_KERNEL);
				if (!tx->bytes) {
					failed = true;
					break;
				}
				idx += tx->num_bytes;
				tx->no_stop_bit = (buf[idx] >> 5) & 0x1;
				tx->i2c_transaction_delay = buf[idx] & 0xf;
			}

			if (failed) {
				for (i = 0; i < r->num_transactions; i++) {
					tx = &r->transactions[i];
					kfree(tx->bytes);
				}
				return -ENOMEM;
			}

			r->read_i2c_device_id = buf[++idx] & 0x7f;
			r->num_bytes_read = buf[++idx];
		}
		break;
	case DP_REMOTE_I2C_WRITE:
		{
			struct drm_dp_remote_i2c_write *w = &req->u.i2c_write;

			w->port_number = (buf[idx] >> 4) & 0xf;
			w->write_i2c_device_id = buf[++idx] & 0x7f;
			w->num_bytes = buf[++idx];
			w->bytes = kmemdup(&buf[++idx], w->num_bytes,
					   GFP_KERNEL);
			if (!w->bytes)
				return -ENOMEM;
		}
		break;
	}

	return 0;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_decode_sideband_req);
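
/*
 * A round-trip sketch of the two helpers above (illustrative only; the
 * request contents are made up):
 *
 *	struct drm_dp_sideband_msg_req_body req = {
 *		.req_type = DP_ENUM_PATH_RESOURCES,
 *		.u.port_num.port_number = 1,
 *	}, decoded;
 *	struct drm_dp_sideband_msg_tx txmsg = {};
 *
 *	drm_dp_encode_sideband_req(&req, &txmsg);
 *	ret = drm_dp_decode_sideband_req(&txmsg, &decoded);
 *
 * On success (ret == 0), decoded mirrors req. For request types carrying a
 * byte payload (DP_REMOTE_DPCD_WRITE, DP_REMOTE_I2C_READ/WRITE) the decoder
 * kmemdup()s the payload, and the caller must kfree() it as
 * drm_dp_mst_dump_sideband_msg_tx() below does.
 */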

void
drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req,
				  int indent, struct drm_printer *printer)
{
	int i;

#define P(f, ...) drm_printf_indent(printer, indent, f, ##__VA_ARGS__)
	if (req->req_type == DP_LINK_ADDRESS) {
		/* No contents to print */
		P("type=%s\n", drm_dp_mst_req_type_str(req->req_type));
		return;
	}

	P("type=%s contents:\n", drm_dp_mst_req_type_str(req->req_type));
	indent++;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		P("port=%d\n", req->u.port_num.port_number);
		break;
	case DP_ALLOCATE_PAYLOAD:
		P("port=%d vcpi=%d pbn=%d sdp_streams=%d %*ph\n",
		  req->u.allocate_payload.port_number,
		  req->u.allocate_payload.vcpi, req->u.allocate_payload.pbn,
		  req->u.allocate_payload.number_sdp_streams,
		  req->u.allocate_payload.number_sdp_streams,
		  req->u.allocate_payload.sdp_stream_sink);
		break;
	case DP_QUERY_PAYLOAD:
		P("port=%d vcpi=%d\n",
		  req->u.query_payload.port_number,
		  req->u.query_payload.vcpi);
		break;
	case DP_REMOTE_DPCD_READ:
		P("port=%d dpcd_addr=%05x len=%d\n",
		  req->u.dpcd_read.port_number, req->u.dpcd_read.dpcd_address,
		  req->u.dpcd_read.num_bytes);
		break;
	case DP_REMOTE_DPCD_WRITE:
		P("port=%d addr=%05x len=%d: %*ph\n",
		  req->u.dpcd_write.port_number,
		  req->u.dpcd_write.dpcd_address,
		  req->u.dpcd_write.num_bytes, req->u.dpcd_write.num_bytes,
		  req->u.dpcd_write.bytes);
		break;
	case DP_REMOTE_I2C_READ:
		P("port=%d num_tx=%d id=%d size=%d:\n",
		  req->u.i2c_read.port_number,
		  req->u.i2c_read.num_transactions,
		  req->u.i2c_read.read_i2c_device_id,
		  req->u.i2c_read.num_bytes_read);

		indent++;
		for (i = 0; i < req->u.i2c_read.num_transactions; i++) {
			const struct drm_dp_remote_i2c_read_tx *rtx =
				&req->u.i2c_read.transactions[i];

			P("%d: id=%03d size=%03d no_stop_bit=%d tx_delay=%03d: %*ph\n",
			  i, rtx->i2c_dev_id, rtx->num_bytes,
			  rtx->no_stop_bit, rtx->i2c_transaction_delay,
			  rtx->num_bytes, rtx->bytes);
		}
		break;
	case DP_REMOTE_I2C_WRITE:
		P("port=%d id=%d size=%d: %*ph\n",
		  req->u.i2c_write.port_number,
		  req->u.i2c_write.write_i2c_device_id,
		  req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes,
		  req->u.i2c_write.bytes);
		break;
	default:
		P("???\n");
		break;
	}
#undef P
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_dump_sideband_msg_req_body);

static inline void
drm_dp_mst_dump_sideband_msg_tx(struct drm_printer *p,
				const struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_sideband_msg_req_body req;
	char buf[64];
	int ret;
	int i;

	drm_dp_mst_rad_to_str(txmsg->dst->rad, txmsg->dst->lct, buf,
			      sizeof(buf));
	drm_printf(p, "txmsg cur_offset=%x cur_len=%x seqno=%x state=%s path_msg=%d dst=%s\n",
		   txmsg->cur_offset, txmsg->cur_len, txmsg->seqno,
		   drm_dp_mst_sideband_tx_state_str(txmsg->state),
		   txmsg->path_msg, buf);

	ret = drm_dp_decode_sideband_req(txmsg, &req);
	if (ret) {
		drm_printf(p, "<failed to decode sideband req: %d>\n", ret);
		return;
	}
	drm_dp_dump_sideband_msg_req_body(&req, 1, p);

	switch (req.req_type) {
	case DP_REMOTE_DPCD_WRITE:
		kfree(req.u.dpcd_write.bytes);
		break;
	case DP_REMOTE_I2C_READ:
		for (i = 0; i < req.u.i2c_read.num_transactions; i++)
			kfree(req.u.i2c_read.transactions[i].bytes);
		break;
	case DP_REMOTE_I2C_WRITE:
		kfree(req.u.i2c_write.bytes);
		break;
	}
}

static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
{
	u8 crc4;

	crc4 = drm_dp_msg_data_crc4(msg, len);
	msg[len] = crc4;
}

static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
					 struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	u8 *buf = raw->msg;

	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);

	raw->cur_len = idx;
}

/* Add a received chunk of a sideband msg to the builder, assembling the final msg */
static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
				      u8 *replybuf, u8 replybuflen, bool hdr)
{
	int ret;
	u8 crc4;

	if (hdr) {
		u8 hdrlen;
		struct drm_dp_sideband_msg_hdr recv_hdr;

		ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
		if (!ret) {
			print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
			return false;
		}

		/*
		 * ignore out-of-order messages or messages that are part of a
		 * failed transaction
		 */
		if (!recv_hdr.somt && !msg->have_somt)
			return false;

		/* get length contained in this portion */
		msg->curchunk_len = recv_hdr.msg_len;
		msg->curchunk_hdrlen = hdrlen;

		/* we have already gotten an somt - don't bother parsing */
		if (recv_hdr.somt && msg->have_somt)
			return false;

		if (recv_hdr.somt) {
			memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
			msg->have_somt = true;
		}
		if (recv_hdr.eomt)
			msg->have_eomt = true;

		/* copy the bytes for the remainder of this header chunk */
		msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
		memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
	} else {
		memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
		msg->curchunk_idx += replybuflen;
	}

	if (msg->curchunk_idx >= msg->curchunk_len) {
		/* do CRC */
		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
		/* copy chunk into bigger msg */
		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
		msg->curlen += msg->curchunk_len - 1;
	}
	return true;
}

static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	int i;

	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
	idx += 16;
	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
		if (raw->msg[idx] & 0x80)
			repmsg->u.link_addr.ports[i].input_port = 1;

		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);

		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
		if (repmsg->u.link_addr.ports[i].input_port == 0)
			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		if (repmsg->u.link_addr.ports[i].input_port == 0) {
			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
			idx++;
			if (idx > raw->curlen)
				goto fail_len;
			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
			idx += 16;
			if (idx > raw->curlen)
				goto fail_len;
			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
			idx++;
		}
		if (idx > raw->curlen)
			goto fail_len;
	}

	return true;
fail_len:
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
						   struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote dpcd read reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
						      struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
	idx++;
	/* TODO check */
	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen) {
		DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n",
			      idx, raw->curlen);
		return false;
	}
	return true;
}

static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
					struct drm_dp_sideband_msg_reply_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
	msg->req_type = (raw->msg[0] & 0x7f);

	if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
		msg->u.nak.reason = raw->msg[17];
		msg->u.nak.nak_data = raw->msg[18];
		return false;
	}

	switch (msg->req_type) {
	case DP_LINK_ADDRESS:
		return drm_dp_sideband_parse_link_address(raw, msg);
	case DP_QUERY_PAYLOAD:
		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
	case DP_REMOTE_DPCD_READ:
		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
	case DP_REMOTE_DPCD_WRITE:
		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
	case DP_REMOTE_I2C_READ:
		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
	case DP_ENUM_PATH_RESOURCES:
		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
	case DP_ALLOCATE_PAYLOAD:
		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
	case DP_CLEAR_PAYLOAD_ID_TABLE:
		return true; /* since there's nothing to parse */
	default:
		DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type,
			  drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}

static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
							   struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
							 struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
				      struct drm_dp_sideband_msg_req_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->req_type = (raw->msg[0] & 0x7f);

	switch (msg->req_type) {
	case DP_CONNECTION_STATUS_NOTIFY:
		return drm_dp_sideband_parse_connection_status_notify(raw, msg);
	case DP_RESOURCE_STATUS_NOTIFY:
		return drm_dp_sideband_parse_resource_status_notify(raw, msg);
	default:
		DRM_ERROR("Got unknown request 0x%02x (%s)\n", msg->req_type,
			  drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}

static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_WRITE;
	req.u.dpcd_write.port_number = port_num;
	req.u.dpcd_write.dpcd_address = offset;
	req.u.dpcd_write.num_bytes = num_bytes;
	req.u.dpcd_write.bytes = bytes;
	drm_dp_encode_sideband_req(&req, msg);

	return 0;
}

static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_LINK_ADDRESS;
	drm_dp_encode_sideband_req(&req, msg);
	return 0;
}

static int build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
	drm_dp_encode_sideband_req(&req, msg);
	return 0;
}

static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_ENUM_PATH_RESOURCES;
	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
				  u8 vcpi, uint16_t pbn,
				  u8 number_sdp_streams,
				  u8 *sdp_stream_sink)
{
	struct drm_dp_sideband_msg_req_body req;

	memset(&req, 0, sizeof(req));
	req.req_type = DP_ALLOCATE_PAYLOAD;
	req.u.allocate_payload.port_number = port_num;
	req.u.allocate_payload.vcpi = vcpi;
	req.u.allocate_payload.pbn = pbn;
	req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
	memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
	       number_sdp_streams);
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static int build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
				  int port_num, bool power_up)
{
	struct drm_dp_sideband_msg_req_body req;

	if (power_up)
		req.req_type = DP_POWER_UP_PHY;
	else
		req.req_type = DP_POWER_DOWN_PHY;

	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

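/*
 * Payload ids and VCPIs are handed out from two small bitmasks guarded by
 * payload_lock. Bit 0 of payload_mask is expected to be pre-set by the
 * manager elsewhere (payload id 0 is not usable), so find_first_zero_bit()
 * returns ids starting at 1 and proposed_vcpis[] is indexed with id - 1.
 */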
static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_vcpi *vcpi)
{
	int ret, vcpi_ret;

	mutex_lock(&mgr->payload_lock);
	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
	if (ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of payload ids %d\n", ret);
		goto out_unlock;
	}

	vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
	if (vcpi_ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
		goto out_unlock;
	}

	set_bit(ret, &mgr->payload_mask);
	set_bit(vcpi_ret, &mgr->vcpi_mask);
	vcpi->vcpi = vcpi_ret + 1;
	mgr->proposed_vcpis[ret - 1] = vcpi;
out_unlock:
	mutex_unlock(&mgr->payload_lock);
	return ret;
}

static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
				      int vcpi)
{
	int i;

	if (vcpi == 0)
		return;

	mutex_lock(&mgr->payload_lock);
	DRM_DEBUG_KMS("putting payload %d\n", vcpi);
	clear_bit(vcpi - 1, &mgr->vcpi_mask);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i] &&
		    mgr->proposed_vcpis[i]->vcpi == vcpi) {
			mgr->proposed_vcpis[i] = NULL;
			clear_bit(i + 1, &mgr->payload_mask);
		}
	}
	mutex_unlock(&mgr->payload_lock);
}

static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_sideband_msg_tx *txmsg)
{
	unsigned int state;

	/*
	 * All updates to txmsg->state are protected by mgr->qlock, and the two
	 * cases we check here are terminal states. For those the barriers
	 * provided by the wake_up/wait_event pair are enough.
	 */
	state = READ_ONCE(txmsg->state);
	return (state == DRM_DP_SIDEBAND_TX_RX ||
		state == DRM_DP_SIDEBAND_TX_TIMEOUT);
}

static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
				    struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	int ret;

	ret = wait_event_timeout(mgr->tx_waitq,
				 check_txmsg_state(mgr, txmsg),
				 (4 * HZ));
	mutex_lock(&mstb->mgr->qlock);
	if (ret > 0) {
		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
			ret = -EIO;
			goto out;
		}
	} else {
		DRM_DEBUG_KMS("timed out msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);

		/* dump some state */
		ret = -EIO;

		/* remove from q */
		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
			list_del(&txmsg->next);
		}

		if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
			mstb->tx_slots[txmsg->seqno] = NULL;
		}
	}
out:
	if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
		struct drm_printer p = drm_debug_printer(DBG_PREFIX);

		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
	}
	mutex_unlock(&mgr->qlock);

	return ret;
}

static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;

	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
	if (!mstb)
		return NULL;

	mstb->lct = lct;
	if (lct > 1)
		memcpy(mstb->rad, rad, lct / 2);
	INIT_LIST_HEAD(&mstb->ports);
	kref_init(&mstb->topology_kref);
	kref_init(&mstb->malloc_kref);
	return mstb;
}

static void drm_dp_free_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb =
		container_of(kref, struct drm_dp_mst_branch, malloc_kref);

	if (mstb->port_parent)
		drm_dp_mst_put_port_malloc(mstb->port_parent);

	kfree(mstb);
}

/**
 * DOC: Branch device and port refcounting
 *
 * Topology refcount overview
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The refcounting schemes for &struct drm_dp_mst_branch and &struct
 * drm_dp_mst_port are somewhat unusual. Both ports and branch devices have
 * two different kinds of refcounts: topology refcounts, and malloc refcounts.
 *
 * Topology refcounts are not exposed to drivers, and are handled internally
 * by the DP MST helpers. The helpers use them in order to prevent the
 * in-memory topology state from being changed in the middle of critical
 * operations like changing the internal state of payload allocations. This
 * means each branch and port will be considered to be connected to the rest
 * of the topology until its topology refcount reaches zero. Additionally,
 * for ports this means that their associated &struct drm_connector will stay
 * registered with userspace until the port's refcount reaches 0.
 *
 * Malloc refcount overview
 * ~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * Malloc references are used to keep a &struct drm_dp_mst_port or &struct
 * drm_dp_mst_branch allocated even after all of its topology references have
 * been dropped, so that the driver or MST helpers can safely access each
 * branch's last known state before it was disconnected from the topology.
 * When the malloc refcount of a port or branch reaches 0, the memory
 * allocation containing the &struct drm_dp_mst_branch or &struct
 * drm_dp_mst_port respectively will be freed.
 *
 * For &struct drm_dp_mst_branch, malloc refcounts are not currently exposed
 * to drivers. As of writing this documentation, there are no drivers that
 * have a usecase for accessing &struct drm_dp_mst_branch outside of the MST
 * helpers. Exposing this API to drivers in a race-free manner would take more
 * tweaking of the refcounting scheme, however patches are welcome provided
 * there is a legitimate driver usecase for this.
 *
 * Refcount relationships in a topology
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * Let's take a look at why the relationship between topology and malloc
 * refcounts is designed the way it is.
 *
 * .. kernel-figure:: dp-mst/topology-figure-1.dot
 *
 *    An example of topology and malloc refs in a DP MST topology with two
 *    active payloads. Topology refcount increments are indicated by solid
 *    lines, and malloc refcount increments are indicated by dashed lines.
 *    Each starts from the branch which incremented the refcount, and ends at
 *    the branch to which the refcount belongs, i.e. the arrow points the
 *    same way as the C pointers used to reference a structure.
 *
 * As you can see in the above figure, every branch increments the topology
 * refcount of its children, and increments the malloc refcount of its
 * parent. Additionally, every payload increments the malloc refcount of its
 * assigned port by 1.
 *
 * So, what would happen if MSTB #3 from the above figure was unplugged from
 * the system, but the driver hadn't yet removed payload #2 from port #3? The
 * topology would start to look like the figure below.
 *
 * .. kernel-figure:: dp-mst/topology-figure-2.dot
 *
 *    Ports and branch devices which have been released from memory are
 *    colored grey, and references which have been removed are colored red.
 *
 * Whenever a port or branch device's topology refcount reaches zero, it will
 * decrement the topology refcounts of all its children, the malloc refcount
 * of its parent, and finally its own malloc refcount. For MSTB #4 and port
 * #4, this means they both have been disconnected from the topology and freed
 * from memory. But, because payload #2 is still holding a reference to port
 * #3, port #3 is removed from the topology but its &struct drm_dp_mst_port
 * is still accessible from memory. This also means port #3 has not yet
 * decremented the malloc refcount of MSTB #3, so its &struct
 * drm_dp_mst_branch will also stay allocated in memory until port #3's
 * malloc refcount reaches 0.
 *
 * This relationship is necessary because in order to release payload #2, we
 * need to be able to figure out the last relative of port #3 that's still
 * connected to the topology. In this case, we would travel up the topology as
 * shown below.
 *
 * .. kernel-figure:: dp-mst/topology-figure-3.dot
 *
 * And finally, remove payload #2 by communicating with port #2 through
 * sideband transactions.
 */
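
/*
 * A sketch of how a driver typically uses the malloc references documented
 * above (illustrative only; the connector plumbing and the function name
 * my_add_connector are made up, but the callback signature matches
 * &drm_dp_mst_topology_cbs.add_connector):
 *
 *	static struct drm_connector *
 *	my_add_connector(struct drm_dp_mst_topology_mgr *mgr,
 *			 struct drm_dp_mst_port *port, const char *path)
 *	{
 *		...allocate and initialize the connector...
 *		drm_dp_mst_get_port_malloc(port);
 *		return connector;
 *	}
 *
 * The driver then drops the reference with drm_dp_mst_put_port_malloc()
 * when the connector is destroyed, keeping the &struct drm_dp_mst_port
 * allocated for as long as the driver can still reach it.
 */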

/**
 * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
 *
 * Increments &drm_dp_mst_branch.malloc_kref. When
 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
 * will be released and @mstb may no longer be used.
 *
 * See also: drm_dp_mst_put_mstb_malloc()
 */
static void
drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	kref_get(&mstb->malloc_kref);
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
}

/**
 * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
 *
 * Decrements &drm_dp_mst_branch.malloc_kref. When
 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
 * will be released and @mstb may no longer be used.
 *
 * See also: drm_dp_mst_get_mstb_malloc()
 */
static void
drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
	kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
}

static void drm_dp_free_mst_port(struct kref *kref)
{
	struct drm_dp_mst_port *port =
		container_of(kref, struct drm_dp_mst_port, malloc_kref);

	drm_dp_mst_put_mstb_malloc(port->parent);
	kfree(port);
}

1371 /**
1372  * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
1373  * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
1374  *
1375  * Increments &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
1376  * reaches 0, the memory allocation for @port will be released and @port may
1377  * no longer be used.
1378  *
1379  * Because @port could potentially be freed at any time by the DP MST helpers
1380  * if &drm_dp_mst_port.malloc_kref reaches 0, including during a call to this
1381  * function, drivers that wish to make use of &struct drm_dp_mst_port should
1382  * ensure that they grab at least one main malloc reference to their MST ports
1383  * in &drm_dp_mst_topology_cbs.add_connector. This callback is called before
1384  * there is any chance for &drm_dp_mst_port.malloc_kref to reach 0.
1385  *
1386  * See also: drm_dp_mst_put_port_malloc()
1387  */
1388 void
1389 drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
1390 {
1391         kref_get(&port->malloc_kref);
1392         DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref));
1393 }
1394 EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);
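
/*
 * Sketch of the add_connector rule documented above; struct my_connector and
 * my_connector_create() are hypothetical driver names:
 *
 *	static struct drm_connector *
 *	my_add_connector(struct drm_dp_mst_topology_mgr *mgr,
 *			 struct drm_dp_mst_port *port, const char *path)
 *	{
 *		struct my_connector *conn = my_connector_create(mgr, port, path);
 *
 *		if (!conn)
 *			return NULL;
 *
 *		conn->port = port;
 *		drm_dp_mst_get_port_malloc(port);
 *		return &conn->base;
 *	}
 */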
1395
1396 /**
1397  * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
1398  * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
1399  *
1400  * Decrements &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
1401  * reaches 0, the memory allocation for @port will be released and @port may
1402  * no longer be used.
1403  *
1404  * See also: drm_dp_mst_get_port_malloc()
1405  */
1406 void
1407 drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
1408 {
1409         DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
1410         kref_put(&port->malloc_kref, drm_dp_free_mst_port);
1411 }
1412 EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);
1413
1414 static void drm_dp_destroy_mst_branch_device(struct kref *kref)
1415 {
1416         struct drm_dp_mst_branch *mstb =
1417                 container_of(kref, struct drm_dp_mst_branch, topology_kref);
1418         struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
1419         struct drm_dp_mst_port *port, *tmp;
1420         bool wake_tx = false;
1421
1422         mutex_lock(&mgr->lock);
1423         list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
1424                 list_del(&port->next);
1425                 drm_dp_mst_topology_put_port(port);
1426         }
1427         mutex_unlock(&mgr->lock);
1428
1429         /* drop any msgs still occupying our tx slots */
1430         mutex_lock(&mstb->mgr->qlock);
1431         if (mstb->tx_slots[0]) {
1432                 mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
1433                 mstb->tx_slots[0] = NULL;
1434                 wake_tx = true;
1435         }
1436         if (mstb->tx_slots[1]) {
1437                 mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
1438                 mstb->tx_slots[1] = NULL;
1439                 wake_tx = true;
1440         }
1441         mutex_unlock(&mstb->mgr->qlock);
1442
1443         if (wake_tx)
1444                 wake_up_all(&mstb->mgr->tx_waitq);
1445
1446         drm_dp_mst_put_mstb_malloc(mstb);
1447 }
1448
1449 /**
1450  * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
1451  * branch device unless it's zero
1452  * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
1453  *
1454  * Attempts to grab a topology reference to @mstb, if it hasn't yet been
1455  * removed from the topology (i.e. &drm_dp_mst_branch.topology_kref has
1456  * reached 0). Holding a topology reference implies that a malloc reference
1457  * will be held to @mstb as long as the user holds the topology reference.
1458  *
1459  * Care should be taken to ensure that the user has at least one malloc
1460  * reference to @mstb. If you already have a topology reference to @mstb, you
1461  * should use drm_dp_mst_topology_get_mstb() instead.
1462  *
1463  * See also:
1464  * drm_dp_mst_topology_get_mstb()
1465  * drm_dp_mst_topology_put_mstb()
1466  *
1467  * Returns:
1468  * * 1: A topology reference was grabbed successfully
1469  * * 0: @mstb is no longer in the topology, no reference was grabbed
1470  */
1471 static int __must_check
1472 drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
1473 {
1474         int ret = kref_get_unless_zero(&mstb->topology_kref);
1475
1476         if (ret)
1477                 DRM_DEBUG("mstb %p (%d)\n", mstb,
1478                           kref_read(&mstb->topology_kref));
1479
1480         return ret;
1481 }
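
/*
 * Sketch of the usual calling pattern for the helper above, as used by e.g.
 * the link probe work further down: grab the pointer under &mgr->lock, try
 * to take a topology reference, and bail out if the branch is already gone:
 *
 *	mutex_lock(&mgr->lock);
 *	mstb = mgr->mst_primary;
 *	if (mstb && !drm_dp_mst_topology_try_get_mstb(mstb))
 *		mstb = NULL;
 *	mutex_unlock(&mgr->lock);
 *	if (!mstb)
 *		return;
 *
 *	... communicate with mstb ...
 *
 *	drm_dp_mst_topology_put_mstb(mstb);
 */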
1482
1483 /**
1484  * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
1485  * branch device
1486  * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
1487  *
1488  * Increments &drm_dp_mst_branch.topology_kref without checking whether or
1489  * not it's already reached 0. This is only valid to use in scenarios where
1490  * you are already guaranteed to have at least one active topology reference
1491  * to @mstb. Otherwise, drm_dp_mst_topology_try_get_mstb() must be used.
1492  *
1493  * See also:
1494  * drm_dp_mst_topology_try_get_mstb()
1495  * drm_dp_mst_topology_put_mstb()
1496  */
1497 static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
1498 {
1499         WARN_ON(kref_read(&mstb->topology_kref) == 0);
1500         kref_get(&mstb->topology_kref);
1501         DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
1502 }
1503
1504 /**
1505  * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
1506  * device
1507  * @mstb: The &struct drm_dp_mst_branch to release the topology reference from
1508  *
1509  * Releases a topology reference from @mstb by decrementing
1510  * &drm_dp_mst_branch.topology_kref.
1511  *
1512  * See also:
1513  * drm_dp_mst_topology_try_get_mstb()
1514  * drm_dp_mst_topology_get_mstb()
1515  */
1516 static void
1517 drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
1518 {
1519         DRM_DEBUG("mstb %p (%d)\n",
1520                   mstb, kref_read(&mstb->topology_kref) - 1);
1521         kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
1522 }
1523
1524 static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
1525 {
1526         struct drm_dp_mst_branch *mstb;
1527
1528         switch (old_pdt) {
1529         case DP_PEER_DEVICE_DP_LEGACY_CONV:
1530         case DP_PEER_DEVICE_SST_SINK:
1531                 /* remove i2c over sideband */
1532                 drm_dp_mst_unregister_i2c_bus(&port->aux);
1533                 break;
1534         case DP_PEER_DEVICE_MST_BRANCHING:
1535                 mstb = port->mstb;
1536                 port->mstb = NULL;
1537                 drm_dp_mst_topology_put_mstb(mstb);
1538                 break;
1539         }
1540 }
1541
1542 static void drm_dp_destroy_port(struct kref *kref)
1543 {
1544         struct drm_dp_mst_port *port =
1545                 container_of(kref, struct drm_dp_mst_port, topology_kref);
1546         struct drm_dp_mst_topology_mgr *mgr = port->mgr;
1547
1548         if (!port->input) {
1549                 kfree(port->cached_edid);
1550
1551                 /*
1552                  * The only time we don't have a connector
1553                  * on an output port is if the connector init
1554                  * fails.
1555                  */
1556                 if (port->connector) {
1557                         /* we can't destroy the connector here, as
1558                          * we might be holding the mode_config.mutex
1559                          * from an EDID retrieval */
1560
1561                         mutex_lock(&mgr->destroy_connector_lock);
1562                         list_add(&port->next, &mgr->destroy_connector_list);
1563                         mutex_unlock(&mgr->destroy_connector_lock);
1564                         schedule_work(&mgr->destroy_connector_work);
1565                         return;
1566                 }
1567                 /* No need to clean up the VCPI here - without a
1568                  * connector we never set one up in the first place. */
1569                 drm_dp_port_teardown_pdt(port, port->pdt);
1570                 port->pdt = DP_PEER_DEVICE_NONE;
1571         }
1572         drm_dp_mst_put_port_malloc(port);
1573 }
1574
1575 /**
1576  * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
1577  * port unless it's zero
1578  * @port: &struct drm_dp_mst_port to increment the topology refcount of
1579  *
1580  * Attempts to grab a topology reference to @port, if it hasn't yet been
1581  * removed from the topology (i.e. &drm_dp_mst_port.topology_kref has reached
1582  * 0). Holding a topology reference implies that a malloc reference will be
1583  * held to @port as long as the user holds the topology reference.
1584  *
1585  * Care should be taken to ensure that the user has at least one malloc
1586  * reference to @port. If you already have a topology reference to @port, you
1587  * should use drm_dp_mst_topology_get_port() instead.
1588  *
1589  * See also:
1590  * drm_dp_mst_topology_get_port()
1591  * drm_dp_mst_topology_put_port()
1592  *
1593  * Returns:
1594  * * 1: A topology reference was grabbed successfully
1595  * * 0: @port is no longer in the topology, no reference was grabbed
1596  */
1597 static int __must_check
1598 drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
1599 {
1600         int ret = kref_get_unless_zero(&port->topology_kref);
1601
1602         if (ret)
1603                 DRM_DEBUG("port %p (%d)\n", port,
1604                           kref_read(&port->topology_kref));
1605
1606         return ret;
1607 }
1608
1609 /**
1610  * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
1611  * @port: The &struct drm_dp_mst_port to increment the topology refcount of
1612  *
1613  * Increments &drm_dp_mst_port.topology_kref without checking whether or
1614  * not it's already reached 0. This is only valid to use in scenarios where
1615  * you are already guaranteed to have at least one active topology reference
1616  * to @port. Otherwise, drm_dp_mst_topology_try_get_port() must be used.
1617  *
1618  * See also:
1619  * drm_dp_mst_topology_try_get_port()
1620  * drm_dp_mst_topology_put_port()
1621  */
1622 static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
1623 {
1624         WARN_ON(kref_read(&port->topology_kref) == 0);
1625         kref_get(&port->topology_kref);
1626         DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref));
1627 }
1628
1629 /**
1630  * drm_dp_mst_topology_put_port() - release a topology reference to a port
1631  * @port: The &struct drm_dp_mst_port to release the topology reference from
1632  *
1633  * Releases a topology reference from @port by decrementing
1634  * &drm_dp_mst_port.topology_kref.
1635  *
1636  * See also:
1637  * drm_dp_mst_topology_try_get_port()
1638  * drm_dp_mst_topology_get_port()
1639  */
1640 static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
1641 {
1642         DRM_DEBUG("port %p (%d)\n",
1643                   port, kref_read(&port->topology_kref) - 1);
1644         kref_put(&port->topology_kref, drm_dp_destroy_port);
1645 }
1646
1647 static struct drm_dp_mst_branch *
1648 drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb,
1649                                               struct drm_dp_mst_branch *to_find)
1650 {
1651         struct drm_dp_mst_port *port;
1652         struct drm_dp_mst_branch *rmstb;
1653
1654         if (to_find == mstb)
1655                 return mstb;
1656
1657         list_for_each_entry(port, &mstb->ports, next) {
1658                 if (port->mstb) {
1659                         rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
1660                             port->mstb, to_find);
1661                         if (rmstb)
1662                                 return rmstb;
1663                 }
1664         }
1665         return NULL;
1666 }
1667
1668 static struct drm_dp_mst_branch *
1669 drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
1670                                        struct drm_dp_mst_branch *mstb)
1671 {
1672         struct drm_dp_mst_branch *rmstb = NULL;
1673
1674         mutex_lock(&mgr->lock);
1675         if (mgr->mst_primary) {
1676                 rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
1677                     mgr->mst_primary, mstb);
1678
1679                 if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb))
1680                         rmstb = NULL;
1681         }
1682         mutex_unlock(&mgr->lock);
1683         return rmstb;
1684 }
1685
1686 static struct drm_dp_mst_port *
1687 drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb,
1688                                               struct drm_dp_mst_port *to_find)
1689 {
1690         struct drm_dp_mst_port *port, *mport;
1691
1692         list_for_each_entry(port, &mstb->ports, next) {
1693                 if (port == to_find)
1694                         return port;
1695
1696                 if (port->mstb) {
1697                         mport = drm_dp_mst_topology_get_port_validated_locked(
1698                             port->mstb, to_find);
1699                         if (mport)
1700                                 return mport;
1701                 }
1702         }
1703         return NULL;
1704 }
1705
1706 static struct drm_dp_mst_port *
1707 drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
1708                                        struct drm_dp_mst_port *port)
1709 {
1710         struct drm_dp_mst_port *rport = NULL;
1711
1712         mutex_lock(&mgr->lock);
1713         if (mgr->mst_primary) {
1714                 rport = drm_dp_mst_topology_get_port_validated_locked(
1715                     mgr->mst_primary, port);
1716
1717                 if (rport && !drm_dp_mst_topology_try_get_port(rport))
1718                         rport = NULL;
1719         }
1720         mutex_unlock(&mgr->lock);
1721         return rport;
1722 }
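
/*
 * The two validated lookups above re-walk the topology from mst_primary
 * under &mgr->lock, so a stale pointer (e.g. one cached before an unplug)
 * comes back as NULL instead of being dereferenced. Sketch of the calling
 * pattern used throughout this file:
 *
 *	port = drm_dp_mst_topology_get_port_validated(mgr, port);
 *	if (!port)
 *		return -EINVAL;
 *
 *	... send sideband messages to port ...
 *
 *	drm_dp_mst_topology_put_port(port);
 */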
1723
1724 static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
1725 {
1726         struct drm_dp_mst_port *port;
1727         int ret;
1728
1729         list_for_each_entry(port, &mstb->ports, next) {
1730                 if (port->port_num == port_num) {
1731                         ret = drm_dp_mst_topology_try_get_port(port);
1732                         return ret ? port : NULL;
1733                 }
1734         }
1735
1736         return NULL;
1737 }
1738
1739 /*
1740  * Calculate a new RAD (relative address) for this MST branch device.
1741  * If the parent has an LCT of 2 it has 1 nibble of RAD; if the parent has
1742  * an LCT of 3 it has 2 nibbles of RAD, and so on.
1743  */
1744 static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
1745                                  u8 *rad)
1746 {
1747         int parent_lct = port->parent->lct;
1748         int shift = 4;
1749         int idx = (parent_lct - 1) / 2;
1750         if (parent_lct > 1) {
1751                 memcpy(rad, port->parent->rad, idx + 1);
1752                 shift = (parent_lct % 2) ? 4 : 0;
1753         } else
1754                 rad[0] = 0;
1755
1756         rad[idx] |= port->port_num << shift;
1757         return parent_lct + 1;
1758 }
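
/*
 * Worked example for drm_dp_calculate_rad(): if @port is port number 3 on a
 * branch with lct 2 and rad[0] == 0x20 (a branch hanging off port 2 of the
 * primary), then parent_lct == 2, idx == 0 and shift == 0, so rad[0] becomes
 * 0x20 | 3 == 0x23 and the returned lct is 3 - the upper nibble addresses
 * port 2, the lower nibble port 3.
 */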
1759
1760 /*
1761  * Returns true if a LINK_ADDRESS message should be sent to the new MSTB.
1762  */
1763 static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
1764 {
1765         int ret;
1766         u8 rad[6], lct;
1767         bool send_link = false;
1768         switch (port->pdt) {
1769         case DP_PEER_DEVICE_DP_LEGACY_CONV:
1770         case DP_PEER_DEVICE_SST_SINK:
1771                 /* add i2c over sideband */
1772                 ret = drm_dp_mst_register_i2c_bus(&port->aux);
1773                 break;
1774         case DP_PEER_DEVICE_MST_BRANCHING:
1775                 lct = drm_dp_calculate_rad(port, rad);
1776
1777                 port->mstb = drm_dp_add_mst_branch_device(lct, rad);
1778                 if (port->mstb) {
1779                         port->mstb->mgr = port->mgr;
1780                         port->mstb->port_parent = port;
1781                         /*
1782                          * Make sure this port's memory allocation stays
1783                          * around until its child MSTB releases it
1784                          */
1785                         drm_dp_mst_get_port_malloc(port);
1786
1787                         send_link = true;
1788                 }
1789                 break;
1790         }
1791         return send_link;
1792 }
1793
1794 /**
1795  * drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband
1796  * @aux: Fake sideband AUX CH
1797  * @offset: address of the (first) register to read
1798  * @buffer: buffer to store the register values
1799  * @size: number of bytes in @buffer
1800  *
1801  * Performs the same functionality for remote devices via
1802  * sideband messaging as drm_dp_dpcd_read() does for local
1803  * devices via actual AUX CH.
1804  *
1805  * Return: Number of bytes read, or negative error code on failure.
1806  */
1807 ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
1808                              unsigned int offset, void *buffer, size_t size)
1809 {
1810         struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
1811                                                     aux);
1812
1813         return drm_dp_send_dpcd_read(port->mgr, port,
1814                                      offset, size, buffer);
1815 }
1816
1817 /**
1818  * drm_dp_mst_dpcd_write() - write a series of bytes to the DPCD via sideband
1819  * @aux: Fake sideband AUX CH
1820  * @offset: address of the (first) register to write
1821  * @buffer: buffer containing the values to write
1822  * @size: number of bytes in @buffer
1823  *
1824  * Performs the same functionality for remote devices via
1825  * sideband messaging as drm_dp_dpcd_write() does for local
1826  * devices via actual AUX CH.
1827  *
1828  * Return: 0 on success, negative error code on failure.
1829  */
1830 ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
1831                               unsigned int offset, void *buffer, size_t size)
1832 {
1833         struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
1834                                                     aux);
1835
1836         return drm_dp_send_dpcd_write(port->mgr, port,
1837                                       offset, size, buffer);
1838 }
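
/*
 * Since &drm_dp_mst_port.aux has &drm_dp_aux.is_remote set, drivers normally
 * don't call the two helpers above directly - the generic drm_dp_dpcd_read()
 * and drm_dp_dpcd_write() helpers route remote accesses here. Sketch of
 * reading a remote sink's DP_SINK_COUNT through the fake AUX channel:
 *
 *	u8 count;
 *	ssize_t ret = drm_dp_dpcd_readb(&port->aux, DP_SINK_COUNT, &count);
 *
 *	if (ret < 0)
 *		return ret;
 */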
1839
1840 static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
1841 {
1842         int ret;
1843
1844         memcpy(mstb->guid, guid, 16);
1845
1846         if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
1847                 if (mstb->port_parent) {
1848                         ret = drm_dp_send_dpcd_write(
1849                                         mstb->mgr,
1850                                         mstb->port_parent,
1851                                         DP_GUID,
1852                                         16,
1853                                         mstb->guid);
1854                 } else {
1856                         ret = drm_dp_dpcd_write(
1857                                         mstb->mgr->aux,
1858                                         DP_GUID,
1859                                         mstb->guid,
1860                                         16);
1861                 }
1862         }
1863 }
1864
1865 static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
1866                                 int pnum,
1867                                 char *proppath,
1868                                 size_t proppath_size)
1869 {
1870         int i;
1871         char temp[8];
1872         snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
1873         for (i = 0; i < (mstb->lct - 1); i++) {
1874                 int shift = (i % 2) ? 0 : 4;
1875                 int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
1876                 snprintf(temp, sizeof(temp), "-%d", port_num);
1877                 strlcat(proppath, temp, proppath_size);
1878         }
1879         snprintf(temp, sizeof(temp), "-%d", pnum);
1880         strlcat(proppath, temp, proppath_size);
1881 }
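
/*
 * Continuing the RAD example above: for an mstb with lct 3 and
 * rad[0] == 0x23 under a manager whose conn_base_id is 5, a port number of 1
 * produces the connector path property "mst:5-2-3-1".
 */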
1882
1883 /**
1884  * drm_dp_mst_connector_late_register() - Late MST connector registration
1885  * @connector: The MST connector
1886  * @port: The MST port for this connector
1887  *
1888  * Helper to register the remote aux device for this MST port. Drivers should
1889  * call this from their mst connector's late_register hook to enable MST aux
1890  * devices.
1891  *
1892  * Return: 0 on success, negative error code on failure.
1893  */
1894 int drm_dp_mst_connector_late_register(struct drm_connector *connector,
1895                                        struct drm_dp_mst_port *port)
1896 {
1897         DRM_DEBUG_KMS("registering %s remote bus for %s\n",
1898                       port->aux.name, connector->kdev->kobj.name);
1899
1900         port->aux.dev = connector->kdev;
1901         return drm_dp_aux_register_devnode(&port->aux);
1902 }
1903 EXPORT_SYMBOL(drm_dp_mst_connector_late_register);
1904
1905 /**
1906  * drm_dp_mst_connector_early_unregister() - Early MST connector unregistration
1907  * @connector: The MST connector
1908  * @port: The MST port for this connector
1909  *
1910  * Helper to unregister the remote aux device for this MST port, registered by
1911  * drm_dp_mst_connector_late_register(). Drivers should call this from their mst
1912  * connector's early_unregister hook.
1913  */
1914 void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
1915                                            struct drm_dp_mst_port *port)
1916 {
1917         DRM_DEBUG_KMS("unregistering %s remote bus for %s\n",
1918                       port->aux.name, connector->kdev->kobj.name);
1919         drm_dp_aux_unregister_devnode(&port->aux);
1920 }
1921 EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister);
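
/*
 * Sketch of how a driver would wire the two helpers above into its connector
 * hooks; the my_*() and to_my_mst_connector() names are hypothetical:
 *
 *	static int my_mst_late_register(struct drm_connector *connector)
 *	{
 *		struct my_mst_connector *c = to_my_mst_connector(connector);
 *
 *		return drm_dp_mst_connector_late_register(connector, c->port);
 *	}
 *
 *	static void my_mst_early_unregister(struct drm_connector *connector)
 *	{
 *		struct my_mst_connector *c = to_my_mst_connector(connector);
 *
 *		drm_dp_mst_connector_early_unregister(connector, c->port);
 *	}
 *
 * with both functions set as the &drm_connector_funcs.late_register and
 * &drm_connector_funcs.early_unregister hooks.
 */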
1922
1923 static void
1924 drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
1925                                     struct drm_device *dev,
1926                                     struct drm_dp_link_addr_reply_port *port_msg)
1927 {
1928         struct drm_dp_mst_port *port;
1929         bool ret;
1930         bool created = false;
1931         int old_pdt = 0;
1932         int old_ddps = 0;
1933
1934         port = drm_dp_get_port(mstb, port_msg->port_number);
1935         if (!port) {
1936                 port = kzalloc(sizeof(*port), GFP_KERNEL);
1937                 if (!port)
1938                         return;
1939                 kref_init(&port->topology_kref);
1940                 kref_init(&port->malloc_kref);
1941                 port->parent = mstb;
1942                 port->port_num = port_msg->port_number;
1943                 port->mgr = mstb->mgr;
1944                 port->aux.name = "DPMST";
1945                 port->aux.dev = dev->dev;
1946                 port->aux.is_remote = true;
1947
1948                 /*
1949                  * Make sure the memory allocation for our parent branch stays
1950                  * around until our own memory allocation is released
1951                  */
1952                 drm_dp_mst_get_mstb_malloc(mstb);
1953
1954                 created = true;
1955         } else {
1956                 old_pdt = port->pdt;
1957                 old_ddps = port->ddps;
1958         }
1959
1960         port->pdt = port_msg->peer_device_type;
1961         port->input = port_msg->input_port;
1962         port->mcs = port_msg->mcs;
1963         port->ddps = port_msg->ddps;
1964         port->ldps = port_msg->legacy_device_plug_status;
1965         port->dpcd_rev = port_msg->dpcd_revision;
1966         port->num_sdp_streams = port_msg->num_sdp_streams;
1967         port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
1968
1969         /* manage mstb port lists with the mgr lock - take a reference
1970          * for this list */
1971         if (created) {
1972                 mutex_lock(&mstb->mgr->lock);
1973                 drm_dp_mst_topology_get_port(port);
1974                 list_add(&port->next, &mstb->ports);
1975                 mutex_unlock(&mstb->mgr->lock);
1976         }
1977
1978         if (old_ddps != port->ddps) {
1979                 if (port->ddps) {
1980                         if (!port->input) {
1981                                 drm_dp_send_enum_path_resources(mstb->mgr,
1982                                                                 mstb, port);
1983                         }
1984                 } else {
1985                         port->available_pbn = 0;
1986                 }
1987         }
1988
1989         if (old_pdt != port->pdt && !port->input) {
1990                 drm_dp_port_teardown_pdt(port, old_pdt);
1991
1992                 ret = drm_dp_port_setup_pdt(port);
1993                 if (ret)
1994                         drm_dp_send_link_address(mstb->mgr, port->mstb);
1995         }
1996
1997         if (created && !port->input) {
1998                 char proppath[255];
1999
2000                 build_mst_prop_path(mstb, port->port_num, proppath,
2001                                     sizeof(proppath));
2002                 port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr,
2003                                                                    port,
2004                                                                    proppath);
2005                 if (!port->connector) {
2006                         /* remove it from the port list */
2007                         mutex_lock(&mstb->mgr->lock);
2008                         list_del(&port->next);
2009                         mutex_unlock(&mstb->mgr->lock);
2010                         /* drop port list reference */
2011                         drm_dp_mst_topology_put_port(port);
2012                         goto out;
2013                 }
2014                 if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
2015                      port->pdt == DP_PEER_DEVICE_SST_SINK) &&
2016                     port->port_num >= DP_MST_LOGICAL_PORT_0) {
2017                         port->cached_edid = drm_get_edid(port->connector,
2018                                                          &port->aux.ddc);
2019                         drm_connector_set_tile_property(port->connector);
2020                 }
2021                 (*mstb->mgr->cbs->register_connector)(port->connector);
2022         }
2023
2024 out:
2025         /* put reference to this port */
2026         drm_dp_mst_topology_put_port(port);
2027 }
2028
2029 static void
2030 drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
2031                             struct drm_dp_connection_status_notify *conn_stat)
2032 {
2033         struct drm_dp_mst_port *port;
2034         int old_pdt;
2035         int old_ddps;
2036         bool dowork = false;
2037         port = drm_dp_get_port(mstb, conn_stat->port_number);
2038         if (!port)
2039                 return;
2040
2041         old_ddps = port->ddps;
2042         old_pdt = port->pdt;
2043         port->pdt = conn_stat->peer_device_type;
2044         port->mcs = conn_stat->message_capability_status;
2045         port->ldps = conn_stat->legacy_device_plug_status;
2046         port->ddps = conn_stat->displayport_device_plug_status;
2047
2048         if (old_ddps != port->ddps) {
2049                 if (port->ddps) {
2050                         dowork = true;
2051                 } else {
2052                         port->available_pbn = 0;
2053                 }
2054         }
2055         if (old_pdt != port->pdt && !port->input) {
2056                 drm_dp_port_teardown_pdt(port, old_pdt);
2057
2058                 if (drm_dp_port_setup_pdt(port))
2059                         dowork = true;
2060         }
2061
2062         drm_dp_mst_topology_put_port(port);
2063         if (dowork)
2064                 queue_work(system_long_wq, &mstb->mgr->work);
2066 }
2067
2068 static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
2069                                                                u8 lct, u8 *rad)
2070 {
2071         struct drm_dp_mst_branch *mstb;
2072         struct drm_dp_mst_port *port;
2073         int i, ret;
2074         /* find the mstb by iterating down the topology */
2075
2076         mutex_lock(&mgr->lock);
2077         mstb = mgr->mst_primary;
2078
2079         if (!mstb)
2080                 goto out;
2081
2082         for (i = 0; i < lct - 1; i++) {
2083                 int shift = (i % 2) ? 0 : 4;
2084                 int port_num = (rad[i / 2] >> shift) & 0xf;
2085
2086                 list_for_each_entry(port, &mstb->ports, next) {
2087                         if (port->port_num == port_num) {
2088                                 mstb = port->mstb;
2089                                 if (!mstb) {
2090                                         DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
2091                                         goto out;
2092                                 }
2093
2094                                 break;
2095                         }
2096                 }
2097         }
2098         ret = drm_dp_mst_topology_try_get_mstb(mstb);
2099         if (!ret)
2100                 mstb = NULL;
2101 out:
2102         mutex_unlock(&mgr->lock);
2103         return mstb;
2104 }
2105
2106 static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
2107         struct drm_dp_mst_branch *mstb,
2108         const uint8_t *guid)
2109 {
2110         struct drm_dp_mst_branch *found_mstb;
2111         struct drm_dp_mst_port *port;
2112
2113         if (memcmp(mstb->guid, guid, 16) == 0)
2114                 return mstb;
2115
2117         list_for_each_entry(port, &mstb->ports, next) {
2118                 if (!port->mstb)
2119                         continue;
2120
2121                 found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
2122
2123                 if (found_mstb)
2124                         return found_mstb;
2125         }
2126
2127         return NULL;
2128 }
2129
2130 static struct drm_dp_mst_branch *
2131 drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
2132                                      const uint8_t *guid)
2133 {
2134         struct drm_dp_mst_branch *mstb;
2135         int ret;
2136
2137         /* walk the topology looking for the mstb with this GUID */
2138         mutex_lock(&mgr->lock);
2139
2140         mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
2141         if (mstb) {
2142                 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2143                 if (!ret)
2144                         mstb = NULL;
2145         }
2146
2147         mutex_unlock(&mgr->lock);
2148         return mstb;
2149 }
2150
2151 static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2152                                                struct drm_dp_mst_branch *mstb)
2153 {
2154         struct drm_dp_mst_port *port;
2155         struct drm_dp_mst_branch *mstb_child;
2156         if (!mstb->link_address_sent)
2157                 drm_dp_send_link_address(mgr, mstb);
2158
2159         list_for_each_entry(port, &mstb->ports, next) {
2160                 if (port->input)
2161                         continue;
2162
2163                 if (!port->ddps)
2164                         continue;
2165
2166                 if (!port->available_pbn)
2167                         drm_dp_send_enum_path_resources(mgr, mstb, port);
2168
2169                 if (port->mstb) {
2170                         mstb_child = drm_dp_mst_topology_get_mstb_validated(
2171                             mgr, port->mstb);
2172                         if (mstb_child) {
2173                                 drm_dp_check_and_send_link_address(mgr, mstb_child);
2174                                 drm_dp_mst_topology_put_mstb(mstb_child);
2175                         }
2176                 }
2177         }
2178 }
2179
2180 static void drm_dp_mst_link_probe_work(struct work_struct *work)
2181 {
2182         struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
2183         struct drm_dp_mst_branch *mstb;
2184         int ret;
2185         bool clear_payload_id_table;
2186
2187         mutex_lock(&mgr->lock);
2188         clear_payload_id_table = !mgr->payload_id_table_cleared;
2189         mgr->payload_id_table_cleared = true;
2190
2191         mstb = mgr->mst_primary;
2192         if (mstb) {
2193                 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2194                 if (!ret)
2195                         mstb = NULL;
2196         }
2197         mutex_unlock(&mgr->lock);
2198         if (!mstb)
2199                 return;
2200
2201         /*
2202          * Certain branch devices seem to incorrectly report an available_pbn
2203          * of 0 on downstream sinks, even after clearing the
2204          * DP_PAYLOAD_ALLOCATE_* registers in
2205          * drm_dp_mst_topology_mgr_set_mst(). Namely, the CableMatters USB-C
2206          * 2x DP hub. Sending a CLEAR_PAYLOAD_ID_TABLE message seems to make
2207          * things work again.
2208          */
2209         if (clear_payload_id_table) {
2210                 DRM_DEBUG_KMS("Clearing payload ID table\n");
2211                 drm_dp_send_clear_payload_id_table(mgr, mstb);
2212         }
2213
2214         drm_dp_check_and_send_link_address(mgr, mstb);
2215         drm_dp_mst_topology_put_mstb(mstb);
2216 }
2217
2218 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
2219                                  u8 *guid)
2220 {
2221         u64 salt;
2222
2223         if (memchr_inv(guid, 0, 16))
2224                 return true;
2225
2226         salt = get_jiffies_64();
2227
2228         memcpy(&guid[0], &salt, sizeof(u64));
2229         memcpy(&guid[8], &salt, sizeof(u64));
2230
2231         return false;
2232 }
2233
2234 static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
2235 {
2236         struct drm_dp_sideband_msg_req_body req;
2237
2238         req.req_type = DP_REMOTE_DPCD_READ;
2239         req.u.dpcd_read.port_number = port_num;
2240         req.u.dpcd_read.dpcd_address = offset;
2241         req.u.dpcd_read.num_bytes = num_bytes;
2242         drm_dp_encode_sideband_req(&req, msg);
2243
2244         return 0;
2245 }
2246
2247 static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
2248                                     bool up, u8 *msg, int len)
2249 {
2250         int ret;
2251         int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
2252         int tosend, total, offset;
2253         int retries = 0;
2254
2255 retry:
2256         total = len;
2257         offset = 0;
2258         do {
2259                 tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
2260
2261                 ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
2262                                         &msg[offset],
2263                                         tosend);
2264                 if (ret != tosend) {
2265                         if (ret == -EIO && retries < 5) {
2266                                 retries++;
2267                                 goto retry;
2268                         }
2269                         DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
2270
2271                         return -EIO;
2272                 }
2273                 offset += tosend;
2274                 total -= tosend;
2275         } while (total > 0);
2276         return 0;
2277 }
2278
2279 static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
2280                                   struct drm_dp_sideband_msg_tx *txmsg)
2281 {
2282         struct drm_dp_mst_branch *mstb = txmsg->dst;
2283         u8 req_type;
2284
2285         /* pick a tx slot (seqno) if the message doesn't have one yet */
2286         if (txmsg->seqno == -1) {
2287                 if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
2288                         DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
2289                         return -EAGAIN;
2290                 }
2291                 if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
2292                         txmsg->seqno = mstb->last_seqno;
2293                         mstb->last_seqno ^= 1;
2294                 } else if (mstb->tx_slots[0] == NULL)
2295                         txmsg->seqno = 0;
2296                 else
2297                         txmsg->seqno = 1;
2298                 mstb->tx_slots[txmsg->seqno] = txmsg;
2299         }
2300
2301         req_type = txmsg->msg[0] & 0x7f;
2302         if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
2303                 req_type == DP_RESOURCE_STATUS_NOTIFY)
2304                 hdr->broadcast = 1;
2305         else
2306                 hdr->broadcast = 0;
2307         hdr->path_msg = txmsg->path_msg;
2308         hdr->lct = mstb->lct;
2309         hdr->lcr = mstb->lct - 1;
2310         if (mstb->lct > 1)
2311                 memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
2312         hdr->seqno = txmsg->seqno;
2313         return 0;
2314 }
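
/*
 * Worked example for the header built above: for a destination mstb with
 * lct 3 and rad[0] == 0x23, the header gets lct = 3, lcr = 2 and a single
 * RAD byte (lct / 2 == 1) of 0x23, telling the branches along the way to
 * forward the message out of port 2 first, then port 3.
 */
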
2315 /*
2316  * process a single block of the next message in the sideband queue
2317  */
2318 static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
2319                                    struct drm_dp_sideband_msg_tx *txmsg,
2320                                    bool up)
2321 {
2322         u8 chunk[48];
2323         struct drm_dp_sideband_msg_hdr hdr;
2324         int len, space, idx, tosend;
2325         int ret;
2326
2327         memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
2328
2329         if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
2330                 txmsg->seqno = -1;
2331                 txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
2332         }
2333
2334         /* make hdr from dst mst - for replies use the existing seqno,
2335          * otherwise assign one */
2336         ret = set_hdr_from_dst_qlock(&hdr, txmsg);
2337         if (ret < 0)
2338                 return ret;
2339
2340         /* amount left to send in this message */
2341         len = txmsg->cur_len - txmsg->cur_offset;
2342
2343         /* 48 - sideband msg size - 1 byte for data CRC, x header bytes */
2344         space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);
2345
2346         tosend = min(len, space);
2347         if (len == txmsg->cur_len)
2348                 hdr.somt = 1;
2349         if (space >= len)
2350                 hdr.eomt = 1;
2351
2353         hdr.msg_len = tosend + 1;
2354         drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
2355         memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
2356         /* add crc at end */
2357         drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
2358         idx += tosend + 1;
2359
2360         ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
2361         if (unlikely(ret) && drm_debug_enabled(DRM_UT_DP)) {
2362                 struct drm_printer p = drm_debug_printer(DBG_PREFIX);
2363
2364                 drm_printf(&p, "sideband msg failed to send\n");
2365                 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2366                 return ret;
2367         }
2368
2369         txmsg->cur_offset += tosend;
2370         if (txmsg->cur_offset == txmsg->cur_len) {
2371                 txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
2372                 return 1;
2373         }
2374         return 0;
2375 }
2376
2377 static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
2378 {
2379         struct drm_dp_sideband_msg_tx *txmsg;
2380         int ret;
2381
2382         WARN_ON(!mutex_is_locked(&mgr->qlock));
2383
2384         /* construct a chunk from the first msg in the tx_msg queue */
2385         if (list_empty(&mgr->tx_msg_downq))
2386                 return;
2387
2388         txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
2389         ret = process_single_tx_qlock(mgr, txmsg, false);
2390         if (ret == 1) {
2391                 /* txmsg is sent; it should be in the slots now */
2392                 list_del(&txmsg->next);
2393         } else if (ret) {
2394                 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
2395                 list_del(&txmsg->next);
2396                 if (txmsg->seqno != -1)
2397                         txmsg->dst->tx_slots[txmsg->seqno] = NULL;
2398                 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
2399                 wake_up_all(&mgr->tx_waitq);
2400         }
2401 }
2402
2403 /* called holding qlock */
2404 static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
2405                                        struct drm_dp_sideband_msg_tx *txmsg)
2406 {
2407         int ret;
2408
2409         /* construct a chunk from the first msg in the tx_msg queue */
2410         ret = process_single_tx_qlock(mgr, txmsg, true);
2411
2412         if (ret != 1)
2413                 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
2414
2415         if (txmsg->seqno != -1) {
2416                 WARN_ON((unsigned int)txmsg->seqno >=
2417                         ARRAY_SIZE(txmsg->dst->tx_slots));
2418                 txmsg->dst->tx_slots[txmsg->seqno] = NULL;
2419         }
2420 }
2421
2422 static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
2423                                  struct drm_dp_sideband_msg_tx *txmsg)
2424 {
2425         mutex_lock(&mgr->qlock);
2426         list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
2427
2428         if (drm_debug_enabled(DRM_UT_DP)) {
2429                 struct drm_printer p = drm_debug_printer(DBG_PREFIX);
2430
2431                 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2432         }
2433
2434         if (list_is_singular(&mgr->tx_msg_downq))
2435                 process_single_down_tx_qlock(mgr);
2436         mutex_unlock(&mgr->qlock);
2437 }
2438
2439 static void
2440 drm_dp_dump_link_address(struct drm_dp_link_address_ack_reply *reply)
2441 {
2442         struct drm_dp_link_addr_reply_port *port_reply;
2443         int i;
2444
2445         for (i = 0; i < reply->nports; i++) {
2446                 port_reply = &reply->ports[i];
2447                 DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n",
2448                               i,
2449                               port_reply->input_port,
2450                               port_reply->peer_device_type,
2451                               port_reply->port_number,
2452                               port_reply->dpcd_revision,
2453                               port_reply->mcs,
2454                               port_reply->ddps,
2455                               port_reply->legacy_device_plug_status,
2456                               port_reply->num_sdp_streams,
2457                               port_reply->num_sdp_stream_sinks);
2458         }
2459 }
2460
2461 static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2462                                      struct drm_dp_mst_branch *mstb)
2463 {
2464         struct drm_dp_sideband_msg_tx *txmsg;
2465         struct drm_dp_link_address_ack_reply *reply;
2466         int i, len, ret;
2467
2468         txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2469         if (!txmsg)
2470                 return;
2471
2472         txmsg->dst = mstb;
2473         len = build_link_address(txmsg);
2474
2475         mstb->link_address_sent = true;
2476         drm_dp_queue_down_tx(mgr, txmsg);
2477
2478         /* FIXME: Actually do some real error handling here */
2479         ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2480         if (ret <= 0) {
2481                 DRM_ERROR("Sending link address failed with %d\n", ret);
2482                 goto out;
2483         }
2484         if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
2485                 DRM_ERROR("link address NAK received\n");
2486                 ret = -EIO;
2487                 goto out;
2488         }
2489
2490         reply = &txmsg->reply.u.link_addr;
2491         DRM_DEBUG_KMS("link address reply: %d\n", reply->nports);
2492         drm_dp_dump_link_address(reply);
2493
2494         drm_dp_check_mstb_guid(mstb, reply->guid);
2495
2496         for (i = 0; i < reply->nports; i++)
2497                 drm_dp_mst_handle_link_address_port(mstb, mgr->dev,
2498                                                     &reply->ports[i]);
2499
2500         drm_kms_helper_hotplug_event(mgr->dev);
2501
2502 out:
2503         if (ret <= 0)
2504                 mstb->link_address_sent = false;
2505         kfree(txmsg);
2506 }
2507
2508 static void drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
2509                                                struct drm_dp_mst_branch *mstb)
2510 {
2511         struct drm_dp_sideband_msg_tx *txmsg;
2512         int len, ret;
2513
2514         txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2515         if (!txmsg)
2516                 return;
2517
2518         txmsg->dst = mstb;
2519         len = build_clear_payload_id_table(txmsg);
2520
2521         drm_dp_queue_down_tx(mgr, txmsg);
2522
2523         ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2524         if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
2525                 DRM_DEBUG_KMS("clear payload table id nak received\n");
2526
2527         kfree(txmsg);
2528 }
2529
2530 static int
2531 drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
2532                                 struct drm_dp_mst_branch *mstb,
2533                                 struct drm_dp_mst_port *port)
2534 {
2535         struct drm_dp_enum_path_resources_ack_reply *path_res;
2536         struct drm_dp_sideband_msg_tx *txmsg;
2537         int len;
2538         int ret;
2539
2540         txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2541         if (!txmsg)
2542                 return -ENOMEM;
2543
2544         txmsg->dst = mstb;
2545         len = build_enum_path_resources(txmsg, port->port_num);
2546
2547         drm_dp_queue_down_tx(mgr, txmsg);
2548
2549         ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2550         if (ret > 0) {
2551                 path_res = &txmsg->reply.u.path_resources;
2552
2553                 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
2554                         DRM_DEBUG_KMS("enum path resources nak received\n");
2555                 } else {
2556                         if (port->port_num != path_res->port_number)
2557                                 DRM_ERROR("got incorrect port in response\n");
2558
2559                         DRM_DEBUG_KMS("enum path resources %d: %d %d\n",
2560                                       path_res->port_number,
2561                                       path_res->full_payload_bw_number,
2562                                       path_res->avail_payload_bw_number);
2563                         port->available_pbn =
2564                                 path_res->avail_payload_bw_number;
2565                 }
2566         }
2567
2568         kfree(txmsg);
2569         return 0;
2570 }
2571
2572 static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
2573 {
2574         if (!mstb->port_parent)
2575                 return NULL;
2576
2577         if (mstb->port_parent->mstb != mstb)
2578                 return mstb->port_parent;
2579
2580         return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
2581 }
2582
2583 /*
2584  * Searches upwards in the topology starting from mstb to try to find the
2585  * closest available parent of mstb that's still connected to the rest of the
2586  * topology. This can be used in order to perform operations like releasing
2587  * payloads, where the branch device which owned the payload may no longer be
2588  * around and thus would require that the payload on the last living relative
2589  * be freed instead.
2590  */
2591 static struct drm_dp_mst_branch *
2592 drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
2593                                         struct drm_dp_mst_branch *mstb,
2594                                         int *port_num)
2595 {
2596         struct drm_dp_mst_branch *rmstb = NULL;
2597         struct drm_dp_mst_port *found_port;
2598
2599         mutex_lock(&mgr->lock);
2600         if (!mgr->mst_primary)
2601                 goto out;
2602
2603         do {
2604                 found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
2605                 if (!found_port)
2606                         break;
2607
2608                 if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) {
2609                         rmstb = found_port->parent;
2610                         *port_num = found_port->port_num;
2611                 } else {
2612                         /* Search again, starting from this parent */
2613                         mstb = found_port->parent;
2614                 }
2615         } while (!rmstb);
2616 out:
2617         mutex_unlock(&mgr->lock);
2618         return rmstb;
2619 }
2620
2621 static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
2622                                    struct drm_dp_mst_port *port,
2623                                    int id,
2624                                    int pbn)
2625 {
2626         struct drm_dp_sideband_msg_tx *txmsg;
2627         struct drm_dp_mst_branch *mstb;
2628         int len, ret, port_num;
2629         u8 sinks[DRM_DP_MAX_SDP_STREAMS];
2630         int i;
2631
2632         port_num = port->port_num;
2633         mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
2634         if (!mstb) {
2635                 mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
2636                                                                port->parent,
2637                                                                &port_num);
2638
2639                 if (!mstb)
2640                         return -EINVAL;
2641         }
2642
2643         txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2644         if (!txmsg) {
2645                 ret = -ENOMEM;
2646                 goto fail_put;
2647         }
2648
2649         for (i = 0; i < port->num_sdp_streams; i++)
2650                 sinks[i] = i;
2651
2652         txmsg->dst = mstb;
2653         len = build_allocate_payload(txmsg, port_num,
2654                                      id,
2655                                      pbn, port->num_sdp_streams, sinks);
2656
2657         drm_dp_queue_down_tx(mgr, txmsg);
2658
2659         /*
2660          * FIXME: there is a small chance that between getting the last
2661          * connected mstb and sending the payload message, the last connected
2662          * mstb could also be removed from the topology. In the future, this
2663          * needs to be fixed by restarting the
2664          * drm_dp_get_last_connected_port_and_mstb() search in the event of a
2665          * timeout if the topology is still connected to the system.
2666          */
2667         ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2668         if (ret > 0) {
2669                 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
2670                         ret = -EINVAL;
2671                 else
2672                         ret = 0;
2673         }
2674         kfree(txmsg);
2675 fail_put:
2676         drm_dp_mst_topology_put_mstb(mstb);
2677         return ret;
2678 }
2679
2680 int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
2681                                  struct drm_dp_mst_port *port, bool power_up)
2682 {
2683         struct drm_dp_sideband_msg_tx *txmsg;
2684         int len, ret;
2685
2686         port = drm_dp_mst_topology_get_port_validated(mgr, port);
2687         if (!port)
2688                 return -EINVAL;
2689
2690         txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2691         if (!txmsg) {
2692                 drm_dp_mst_topology_put_port(port);
2693                 return -ENOMEM;
2694         }
2695
2696         txmsg->dst = port->parent;
2697         len = build_power_updown_phy(txmsg, port->port_num, power_up);
2698         drm_dp_queue_down_tx(mgr, txmsg);
2699
2700         ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
2701         if (ret > 0) {
2702                 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
2703                         ret = -EINVAL;
2704                 else
2705                         ret = 0;
2706         }
2707         kfree(txmsg);
2708         drm_dp_mst_topology_put_port(port);
2709
2710         return ret;
2711 }
2712 EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
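
/*
 * Sketch of typical driver usage, e.g. from a connector enable/disable path
 * (error handling elided):
 *
 *	drm_dp_send_power_updown_phy(mgr, port, true);
 *	... modeset and enable the stream ...
 *
 *	... and on the disable side ...
 *	drm_dp_send_power_updown_phy(mgr, port, false);
 */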
2713
2714 static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
2715                                        int id,
2716                                        struct drm_dp_payload *payload)
2717 {
2718         int ret;
2719
2720         ret = drm_dp_dpcd_write_payload(mgr, id, payload);
2721         if (ret < 0) {
2722                 payload->payload_state = 0;
2723                 return ret;
2724         }
2725         payload->payload_state = DP_PAYLOAD_LOCAL;
2726         return 0;
2727 }
2728
2729 static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
2730                                        struct drm_dp_mst_port *port,
2731                                        int id,
2732                                        struct drm_dp_payload *payload)
2733 {
2734         int ret;
2735         ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
2736         if (ret < 0)
2737                 return ret;
2738         payload->payload_state = DP_PAYLOAD_REMOTE;
2739         return ret;
2740 }
2741
2742 static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
2743                                         struct drm_dp_mst_port *port,
2744                                         int id,
2745                                         struct drm_dp_payload *payload)
2746 {
2747         DRM_DEBUG_KMS("\n");
2748         /* it's okay for these to fail */
2749         if (port) {
2750                 drm_dp_payload_send_msg(mgr, port, id, 0);
2751         }
2752
2753         drm_dp_dpcd_write_payload(mgr, id, payload);
2754         payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
2755         return 0;
2756 }
2757
2758 static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
2759                                         int id,
2760                                         struct drm_dp_payload *payload)
2761 {
2762         payload->payload_state = 0;
2763         return 0;
2764 }
2765
2766 /**
2767  * drm_dp_update_payload_part1() - Execute payload update part 1
2768  * @mgr: manager to use.
2769  *
2770  * This iterates over all proposed virtual channels, and tries to
2771  * allocate space in the link for them. For 0->slots transitions,
2772  * this step just writes the VCPI to the MST device. For slots->0
2773  * transitions, this writes the updated VCPIs and removes the
2774  * remote VC payloads.
2775  *
2776  * After calling this, the driver should generate ACT and payload
2777  * packets.
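 *
 * A typical commit sequence looks roughly like the sketch below; how ACT is
 * triggered and completed is driver/hardware specific:
 *
 *	drm_dp_update_payload_part1(mgr);
 *	... trigger the ACT sequence in hw ...
 *	drm_dp_check_act_status(mgr);
 *	drm_dp_update_payload_part2(mgr);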
2778  */
2779 int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
2780 {
2781         struct drm_dp_payload req_payload;
2782         struct drm_dp_mst_port *port;
2783         int i, j;
2784         int cur_slots = 1;
2785
2786         mutex_lock(&mgr->payload_lock);
2787         for (i = 0; i < mgr->max_payloads; i++) {
2788                 struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
2789                 struct drm_dp_payload *payload = &mgr->payloads[i];
2790                 bool put_port = false;
2791
                /*
                 * Solve the current payloads - compare to the hw ones
                 * - update the hw view.
                 */
2794                 req_payload.start_slot = cur_slots;
2795                 if (vcpi) {
2796                         port = container_of(vcpi, struct drm_dp_mst_port,
2797                                             vcpi);
2798
2799                         /* Validated ports don't matter if we're releasing
2800                          * VCPI
2801                          */
2802                         if (vcpi->num_slots) {
2803                                 port = drm_dp_mst_topology_get_port_validated(
2804                                     mgr, port);
2805                                 if (!port) {
2806                                         mutex_unlock(&mgr->payload_lock);
2807                                         return -EINVAL;
2808                                 }
2809                                 put_port = true;
2810                         }
2811
2812                         req_payload.num_slots = vcpi->num_slots;
2813                         req_payload.vcpi = vcpi->vcpi;
2814                 } else {
2815                         port = NULL;
2816                         req_payload.num_slots = 0;
2817                 }
2818
2819                 payload->start_slot = req_payload.start_slot;
2820                 /* work out what is required to happen with this payload */
2821                 if (payload->num_slots != req_payload.num_slots) {
2822
2823                         /* need to push an update for this payload */
2824                         if (req_payload.num_slots) {
2825                                 drm_dp_create_payload_step1(mgr, vcpi->vcpi,
2826                                                             &req_payload);
2827                                 payload->num_slots = req_payload.num_slots;
2828                                 payload->vcpi = req_payload.vcpi;
2829
2830                         } else if (payload->num_slots) {
2831                                 payload->num_slots = 0;
2832                                 drm_dp_destroy_payload_step1(mgr, port,
2833                                                              payload->vcpi,
2834                                                              payload);
2835                                 req_payload.payload_state =
2836                                         payload->payload_state;
2837                                 payload->start_slot = 0;
2838                         }
2839                         payload->payload_state = req_payload.payload_state;
2840                 }
2841                 cur_slots += req_payload.num_slots;
2842
2843                 if (put_port)
2844                         drm_dp_mst_topology_put_port(port);
2845         }
2846
2847         for (i = 0; i < mgr->max_payloads; i++) {
2848                 if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL)
2849                         continue;
2850
2851                 DRM_DEBUG_KMS("removing payload %d\n", i);
2852                 for (j = i; j < mgr->max_payloads - 1; j++) {
2853                         mgr->payloads[j] = mgr->payloads[j + 1];
2854                         mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
2855
2856                         if (mgr->proposed_vcpis[j] &&
2857                             mgr->proposed_vcpis[j]->num_slots) {
2858                                 set_bit(j + 1, &mgr->payload_mask);
2859                         } else {
2860                                 clear_bit(j + 1, &mgr->payload_mask);
2861                         }
2862                 }
2863
2864                 memset(&mgr->payloads[mgr->max_payloads - 1], 0,
2865                        sizeof(struct drm_dp_payload));
2866                 mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
2867                 clear_bit(mgr->max_payloads, &mgr->payload_mask);
2868         }
2869         mutex_unlock(&mgr->payload_lock);
2870
2871         return 0;
2872 }
2873 EXPORT_SYMBOL(drm_dp_update_payload_part1);
2874
2875 /**
2876  * drm_dp_update_payload_part2() - Execute payload update part 2
2877  * @mgr: manager to use.
2878  *
2879  * This iterates over all proposed virtual channels, and tries to
2880  * allocate space in the link for them. For 0->slots transitions,
 * this step writes the remote VC payload commands. For slots->0
 * transitions, this just resets some internal state.
2883  */
2884 int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
2885 {
2886         struct drm_dp_mst_port *port;
2887         int i;
        int ret = 0;

        mutex_lock(&mgr->payload_lock);
        for (i = 0; i < mgr->max_payloads; i++) {
2892                 if (!mgr->proposed_vcpis[i])
2893                         continue;
2894
2895                 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
2896
2897                 DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
2898                 if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
2899                         ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
2900                 } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
2901                         ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
2902                 }
2903                 if (ret) {
2904                         mutex_unlock(&mgr->payload_lock);
2905                         return ret;
2906                 }
2907         }
2908         mutex_unlock(&mgr->payload_lock);
2909         return 0;
2910 }
2911 EXPORT_SYMBOL(drm_dp_update_payload_part2);
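
/*
 * A minimal sketch of how a driver's commit path might sequence the two
 * payload-update halves around the ACT handshake. Illustrative only: the
 * function below and example_trigger_act() are hypothetical, and error
 * handling is reduced to early returns.
 *
 *      static int example_commit_payloads(struct drm_dp_mst_topology_mgr *mgr)
 *      {
 *              int ret;
 *
 *              // write the local/DPCD payload table changes
 *              ret = drm_dp_update_payload_part1(mgr);
 *              if (ret < 0)
 *                      return ret;
 *
 *              // hw-specific: make the source emit the ACT trigger
 *              example_trigger_act(mgr);
 *
 *              // wait for the sink to signal ACT handled
 *              ret = drm_dp_check_act_status(mgr);
 *              if (ret < 0)
 *                      return ret;
 *
 *              // send the remote ALLOCATE_PAYLOAD messages
 *              return drm_dp_update_payload_part2(mgr);
 *      }
 */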
2912
2913 static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
2914                                  struct drm_dp_mst_port *port,
2915                                  int offset, int size, u8 *bytes)
2916 {
        int ret = 0;
2919         struct drm_dp_sideband_msg_tx *txmsg;
2920         struct drm_dp_mst_branch *mstb;
2921
2922         mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
2923         if (!mstb)
2924                 return -EINVAL;
2925
2926         txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2927         if (!txmsg) {
2928                 ret = -ENOMEM;
2929                 goto fail_put;
2930         }
2931
        build_dpcd_read(txmsg, port->port_num, offset, size);
        txmsg->dst = mstb;
2934
2935         drm_dp_queue_down_tx(mgr, txmsg);
2936
2937         ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2938         if (ret < 0)
2939                 goto fail_free;
2940
2941         /* DPCD read should never be NACKed */
        if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
2943                 DRM_ERROR("mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n",
2944                           mstb, port->port_num, offset, size);
2945                 ret = -EIO;
2946                 goto fail_free;
2947         }
2948
2949         if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) {
2950                 ret = -EPROTO;
2951                 goto fail_free;
2952         }
2953
2954         ret = min_t(size_t, txmsg->reply.u.remote_dpcd_read_ack.num_bytes,
2955                     size);
2956         memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret);
2957
2958 fail_free:
2959         kfree(txmsg);
2960 fail_put:
2961         drm_dp_mst_topology_put_mstb(mstb);
2962
2963         return ret;
2964 }
2965
2966 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
2967                                   struct drm_dp_mst_port *port,
2968                                   int offset, int size, u8 *bytes)
2969 {
        int ret;
2972         struct drm_dp_sideband_msg_tx *txmsg;
2973         struct drm_dp_mst_branch *mstb;
2974
2975         mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
2976         if (!mstb)
2977                 return -EINVAL;
2978
2979         txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2980         if (!txmsg) {
2981                 ret = -ENOMEM;
2982                 goto fail_put;
2983         }
2984
        build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
2986         txmsg->dst = mstb;
2987
2988         drm_dp_queue_down_tx(mgr, txmsg);
2989
2990         ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2991         if (ret > 0) {
2992                 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
2993                         ret = -EIO;
2994                 else
2995                         ret = 0;
2996         }
2997         kfree(txmsg);
2998 fail_put:
2999         drm_dp_mst_topology_put_mstb(mstb);
3000         return ret;
3001 }
3002
3003 static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
3004 {
3005         struct drm_dp_sideband_msg_reply_body reply;
3006
3007         reply.reply_type = DP_SIDEBAND_REPLY_ACK;
3008         reply.req_type = req_type;
3009         drm_dp_encode_sideband_reply(&reply, msg);
3010         return 0;
3011 }
3012
3013 static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
3014                                     struct drm_dp_mst_branch *mstb,
3015                                     int req_type, int seqno, bool broadcast)
3016 {
3017         struct drm_dp_sideband_msg_tx *txmsg;
3018
3019         txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3020         if (!txmsg)
3021                 return -ENOMEM;
3022
3023         txmsg->dst = mstb;
3024         txmsg->seqno = seqno;
3025         drm_dp_encode_up_ack_reply(txmsg, req_type);
3026
3027         mutex_lock(&mgr->qlock);
3028
3029         process_single_up_tx_qlock(mgr, txmsg);
3030
3031         mutex_unlock(&mgr->qlock);
3032
3033         kfree(txmsg);
3034         return 0;
3035 }
3036
static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8 dp_link_count)
3038 {
3039         if (dp_link_bw == 0 || dp_link_count == 0)
3040                 DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
3041                               dp_link_bw, dp_link_count);
3042
3043         return dp_link_bw * dp_link_count / 2;
3044 }
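
/*
 * Worked example for the math above (values assumed, not taken from real
 * hardware): DPCD link-bw code 0x14 (5.4 Gbps per lane, HBR2) with 4 lanes
 * gives 0x14 * 4 / 2 = 40 PBN per timeslot. Cross-checking with the raw
 * numbers: 5.4 Gbps * 4 lanes * 0.8 (8b/10b) = 2160 MB/s of payload
 * bandwidth, and 2160 / (54/64 MB/s per PBN unit) / 64 slots = 40.
 */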
3045
3046 /**
3047  * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
3048  * @mgr: manager to set state for
3049  * @mst_state: true to enable MST on this connector - false to disable.
3050  *
3051  * This is called by the driver when it detects an MST capable device plugged
3052  * into a DP MST capable port, or when a DP MST capable device is unplugged.
3053  */
3054 int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
3055 {
3056         int ret = 0;
3057         int i = 0;
3058         struct drm_dp_mst_branch *mstb = NULL;
3059
3060         mutex_lock(&mgr->lock);
3061         if (mst_state == mgr->mst_state)
3062                 goto out_unlock;
3063
3064         mgr->mst_state = mst_state;
3065         /* set the device into MST mode */
3066         if (mst_state) {
3067                 WARN_ON(mgr->mst_primary);
3068
3069                 /* get dpcd info */
3070                 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
3071                 if (ret != DP_RECEIVER_CAP_SIZE) {
3072                         DRM_DEBUG_KMS("failed to read DPCD\n");
3073                         goto out_unlock;
3074                 }
3075
3076                 mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1],
3077                                                         mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
3078                 if (mgr->pbn_div == 0) {
3079                         ret = -EINVAL;
3080                         goto out_unlock;
3081                 }
3082
3083                 /* add initial branch device at LCT 1 */
3084                 mstb = drm_dp_add_mst_branch_device(1, NULL);
3085                 if (mstb == NULL) {
3086                         ret = -ENOMEM;
3087                         goto out_unlock;
3088                 }
3089                 mstb->mgr = mgr;
3090
3091                 /* give this the main reference */
3092                 mgr->mst_primary = mstb;
3093                 drm_dp_mst_topology_get_mstb(mgr->mst_primary);
3094
3095                 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3096                                                          DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
                if (ret < 0)
                        goto out_unlock;
3100
                {
                        struct drm_dp_payload reset_pay;

                        reset_pay.start_slot = 0;
                        reset_pay.num_slots = 0x3f;
                        drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
                }
3107
3108                 queue_work(system_long_wq, &mgr->work);
3109
3110                 ret = 0;
3111         } else {
3112                 /* disable MST on the device */
3113                 mstb = mgr->mst_primary;
3114                 mgr->mst_primary = NULL;
3115                 /* this can fail if the device is gone */
3116                 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
3117                 ret = 0;
3118                 mutex_lock(&mgr->payload_lock);
3119                 memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
3120                 mgr->payload_mask = 0;
3121                 set_bit(0, &mgr->payload_mask);
3122                 for (i = 0; i < mgr->max_payloads; i++) {
3123                         struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
3124
3125                         if (vcpi) {
3126                                 vcpi->vcpi = 0;
3127                                 vcpi->num_slots = 0;
3128                         }
3129                         mgr->proposed_vcpis[i] = NULL;
3130                 }
3131                 mgr->vcpi_mask = 0;
3132                 mutex_unlock(&mgr->payload_lock);
3133
3134                 mgr->payload_id_table_cleared = false;
3135         }
3136
3137 out_unlock:
3138         mutex_unlock(&mgr->lock);
3139         if (mstb)
3140                 drm_dp_mst_topology_put_mstb(mstb);
3141         return ret;
3143 }
3144 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
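
/*
 * A minimal sketch (illustrative; example_sink_is_mst() stands in for a
 * driver's own DPCD MST-capability check) of flipping MST mode from a
 * long-pulse hotplug handler:
 *
 *      if (example_sink_is_mst(mgr->aux))
 *              ret = drm_dp_mst_topology_mgr_set_mst(mgr, true);
 *      else
 *              ret = drm_dp_mst_topology_mgr_set_mst(mgr, false);
 */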
3145
3146 /**
3147  * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
3148  * @mgr: manager to suspend
3149  *
3150  * This function tells the MST device that we can't handle UP messages
3151  * anymore. This should stop it from sending any since we are suspended.
3152  */
3153 void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
3154 {
3155         mutex_lock(&mgr->lock);
3156         drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3157                            DP_MST_EN | DP_UPSTREAM_IS_SRC);
3158         mutex_unlock(&mgr->lock);
3159         flush_work(&mgr->work);
3160         flush_work(&mgr->destroy_connector_work);
3161 }
3162 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
3163
3164 /**
3165  * drm_dp_mst_topology_mgr_resume() - resume the MST manager
3166  * @mgr: manager to resume
3167  *
 * This will fetch DPCD and see if the device is still there; if it
 * is, it will rewrite the MSTM control bits and return.
 *
 * If the device fails, this returns -1 and the driver should do a
 * full MST reprobe, in case we were undocked.
3173  */
3174 int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
3175 {
3176         int ret = 0;
3177
3178         mutex_lock(&mgr->lock);
3179
3180         if (mgr->mst_primary) {
3181                 int sret;
3182                 u8 guid[16];
3183
3184                 sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
3185                 if (sret != DP_RECEIVER_CAP_SIZE) {
3186                         DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
3187                         ret = -1;
3188                         goto out_unlock;
3189                 }
3190
3191                 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3192                                          DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
3193                 if (ret < 0) {
3194                         DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
3195                         ret = -1;
3196                         goto out_unlock;
3197                 }
3198
3199                 /* Some hubs forget their guids after they resume */
3200                 sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
3201                 if (sret != 16) {
3202                         DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
3203                         ret = -1;
3204                         goto out_unlock;
3205                 }
3206                 drm_dp_check_mstb_guid(mgr->mst_primary, guid);
3207
3208                 ret = 0;
        } else {
                ret = -1;
        }
3211
3212 out_unlock:
3213         mutex_unlock(&mgr->lock);
3214         return ret;
3215 }
3216 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
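
/*
 * Sketch of a driver resume path, assuming the driver remembered that MST
 * was active before suspend (example_full_reprobe() is hypothetical):
 *
 *      if (drm_dp_mst_topology_mgr_resume(mgr) < 0) {
 *              // device vanished or changed while suspended
 *              drm_dp_mst_topology_mgr_set_mst(mgr, false);
 *              example_full_reprobe(connector);
 *      }
 */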
3217
3218 static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
3219 {
3220         int len;
3221         u8 replyblock[32];
3222         int replylen, origlen, curreply;
3223         int ret;
3224         struct drm_dp_sideband_msg_rx *msg;
3225         int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
3226         msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;
3227
3228         len = min(mgr->max_dpcd_transaction_bytes, 16);
3229         ret = drm_dp_dpcd_read(mgr->aux, basereg,
3230                                replyblock, len);
3231         if (ret != len) {
3232                 DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
3233                 return false;
3234         }
3235         ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
3236         if (!ret) {
3237                 DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
3238                 return false;
3239         }
3240         replylen = msg->curchunk_len + msg->curchunk_hdrlen;
3241
3242         origlen = replylen;
3243         replylen -= len;
3244         curreply = len;
3245         while (replylen > 0) {
3246                 len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
3247                 ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
3248                                     replyblock, len);
3249                 if (ret != len) {
3250                         DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
3251                                       len, ret);
3252                         return false;
3253                 }
3254
3255                 ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
3256                 if (!ret) {
3257                         DRM_DEBUG_KMS("failed to build sideband msg\n");
3258                         return false;
3259                 }
3260
3261                 curreply += len;
3262                 replylen -= len;
3263         }
3264         return true;
3265 }
3266
3267 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
3268 {
3269         struct drm_dp_sideband_msg_tx *txmsg;
3270         struct drm_dp_mst_branch *mstb;
3271         struct drm_dp_sideband_msg_hdr *hdr = &mgr->down_rep_recv.initial_hdr;
3272         int slot = -1;
3273
3274         if (!drm_dp_get_one_sb_msg(mgr, false))
3275                 goto clear_down_rep_recv;
3276
3277         if (!mgr->down_rep_recv.have_eomt)
3278                 return 0;
3279
3280         mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
3281         if (!mstb) {
3282                 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
3283                               hdr->lct);
3284                 goto clear_down_rep_recv;
3285         }
3286
3287         /* find the message */
3288         slot = hdr->seqno;
        mutex_lock(&mgr->qlock);
        txmsg = mstb->tx_slots[slot];
        /* the slot itself is cleared further down, once the reply is parsed */
        mutex_unlock(&mgr->qlock);
3293
3294         if (!txmsg) {
3295                 DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
3296                               mstb, hdr->seqno, hdr->lct, hdr->rad[0],
3297                               mgr->down_rep_recv.msg[0]);
3298                 goto no_msg;
3299         }
3300
3301         drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
3302
3303         if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3304                 DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
3305                               txmsg->reply.req_type,
3306                               drm_dp_mst_req_type_str(txmsg->reply.req_type),
3307                               txmsg->reply.u.nak.reason,
3308                               drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
3309                               txmsg->reply.u.nak.nak_data);
3310
        memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));

        mutex_lock(&mgr->qlock);
        txmsg->state = DRM_DP_SIDEBAND_TX_RX;
        mstb->tx_slots[slot] = NULL;
        mutex_unlock(&mgr->qlock);

        /* only drop the topology reference once we're done touching mstb */
        drm_dp_mst_topology_put_mstb(mstb);

        wake_up_all(&mgr->tx_waitq);
3320
3321         return 0;
3322
3323 no_msg:
3324         drm_dp_mst_topology_put_mstb(mstb);
3325 clear_down_rep_recv:
3326         memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3327
3328         return 0;
3329 }
3330
3331 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
3332 {
3333         struct drm_dp_sideband_msg_req_body msg;
3334         struct drm_dp_sideband_msg_hdr *hdr = &mgr->up_req_recv.initial_hdr;
3335         struct drm_dp_mst_branch *mstb = NULL;
3336         const u8 *guid;
3337         bool seqno;
3338
3339         if (!drm_dp_get_one_sb_msg(mgr, true))
3340                 goto out;
3341
3342         if (!mgr->up_req_recv.have_eomt)
3343                 return 0;
3344
3345         if (!hdr->broadcast) {
3346                 mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
3347                 if (!mstb) {
3348                         DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
3349                                       hdr->lct);
3350                         goto out;
3351                 }
3352         }
3353
3354         seqno = hdr->seqno;
3355         drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
3356
3357         if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY)
3358                 guid = msg.u.conn_stat.guid;
3359         else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY)
3360                 guid = msg.u.resource_stat.guid;
3361         else
3362                 goto out;
3363
3364         drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno,
3365                                  false);
3366
3367         if (!mstb) {
3368                 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
3369                 if (!mstb) {
3370                         DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
3371                                       hdr->lct);
3372                         goto out;
3373                 }
3374         }
3375
3376         if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
3377                 drm_dp_mst_handle_conn_stat(mstb, &msg.u.conn_stat);
3378
3379                 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
3380                               msg.u.conn_stat.port_number,
3381                               msg.u.conn_stat.legacy_device_plug_status,
3382                               msg.u.conn_stat.displayport_device_plug_status,
3383                               msg.u.conn_stat.message_capability_status,
3384                               msg.u.conn_stat.input_port,
3385                               msg.u.conn_stat.peer_device_type);
3386
3387                 drm_kms_helper_hotplug_event(mgr->dev);
3388         } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
3389                 DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n",
3390                               msg.u.resource_stat.port_number,
3391                               msg.u.resource_stat.available_pbn);
3392         }
3393
3394         drm_dp_mst_topology_put_mstb(mstb);
3395 out:
3396         memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3397         return 0;
3398 }
3399
3400 /**
3401  * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
3402  * @mgr: manager to notify irq for.
3403  * @esi: 4 bytes from SINK_COUNT_ESI
3404  * @handled: whether the hpd interrupt was consumed or not
3405  *
3406  * This should be called from the driver when it detects a short IRQ,
3407  * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
3408  * topology manager will process the sideband messages received as a result
3409  * of this.
3410  */
3411 int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
3412 {
3413         int ret = 0;
3414         int sc;
3415         *handled = false;
3416         sc = esi[0] & 0x3f;
3417
3418         if (sc != mgr->sink_count) {
3419                 mgr->sink_count = sc;
3420                 *handled = true;
3421         }
3422
3423         if (esi[1] & DP_DOWN_REP_MSG_RDY) {
3424                 ret = drm_dp_mst_handle_down_rep(mgr);
3425                 *handled = true;
3426         }
3427
3428         if (esi[1] & DP_UP_REQ_MSG_RDY) {
3429                 ret |= drm_dp_mst_handle_up_req(mgr);
3430                 *handled = true;
3431         }
3432
3433         drm_dp_mst_kick_tx(mgr);
3434         return ret;
3435 }
3436 EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
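
/*
 * Sketch of a short-pulse handler built around this function. Reading
 * DP_DPRX_ESI_LEN bytes from DP_SINK_COUNT_ESI and acking the serviced
 * vector afterwards follows existing drivers; acking only esi[1] here is a
 * simplification for illustration:
 *
 *      u8 esi[DP_DPRX_ESI_LEN];
 *      bool handled;
 *
 *      if (drm_dp_dpcd_read(mgr->aux, DP_SINK_COUNT_ESI, esi,
 *                           DP_DPRX_ESI_LEN) == DP_DPRX_ESI_LEN) {
 *              drm_dp_mst_hpd_irq(mgr, esi, &handled);
 *              if (handled)
 *                      drm_dp_dpcd_writeb(mgr->aux, DP_SINK_COUNT_ESI + 1,
 *                                         esi[1]);
 *      }
 */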
3437
3438 /**
3439  * drm_dp_mst_detect_port() - get connection status for an MST port
3440  * @connector: DRM connector for this port
3441  * @mgr: manager for this port
3442  * @port: unverified pointer to a port
3443  *
 * This returns the current connection state for a port. It validates that
 * the port pointer still exists so the caller doesn't require a reference.
3446  */
3447 enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
3448                                                  struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
3449 {
3450         enum drm_connector_status status = connector_status_disconnected;
3451
3452         /* we need to search for the port in the mgr in case it's gone */
3453         port = drm_dp_mst_topology_get_port_validated(mgr, port);
3454         if (!port)
3455                 return connector_status_disconnected;
3456
3457         if (!port->ddps)
3458                 goto out;
3459
3460         switch (port->pdt) {
3461         case DP_PEER_DEVICE_NONE:
3462         case DP_PEER_DEVICE_MST_BRANCHING:
3463                 break;
3464
3465         case DP_PEER_DEVICE_SST_SINK:
3466                 status = connector_status_connected;
                /* for logical ports - cache the EDID */
                if (port->port_num >= DP_MST_LOGICAL_PORT_0 &&
                    !port->cached_edid)
                        port->cached_edid = drm_get_edid(connector,
                                                         &port->aux.ddc);
3471                 break;
3472         case DP_PEER_DEVICE_DP_LEGACY_CONV:
3473                 if (port->ldps)
3474                         status = connector_status_connected;
3475                 break;
3476         }
3477 out:
3478         drm_dp_mst_topology_put_port(port);
3479         return status;
3480 }
3481 EXPORT_SYMBOL(drm_dp_mst_detect_port);
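
/*
 * Sketch of a &drm_connector_funcs.detect hook wrapping this helper; the
 * example_mst_connector container and to_example() cast are hypothetical:
 *
 *      static enum drm_connector_status
 *      example_mst_detect(struct drm_connector *connector, bool force)
 *      {
 *              struct example_mst_connector *c = to_example(connector);
 *
 *              return drm_dp_mst_detect_port(connector, c->mgr, c->port);
 *      }
 */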
3482
3483 /**
3484  * drm_dp_mst_port_has_audio() - Check whether port has audio capability or not
3485  * @mgr: manager for this port
3486  * @port: unverified pointer to a port.
3487  *
3488  * This returns whether the port supports audio or not.
3489  */
3490 bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
3491                                         struct drm_dp_mst_port *port)
3492 {
3493         bool ret = false;
3494
3495         port = drm_dp_mst_topology_get_port_validated(mgr, port);
3496         if (!port)
3497                 return ret;
3498         ret = port->has_audio;
3499         drm_dp_mst_topology_put_port(port);
3500         return ret;
3501 }
3502 EXPORT_SYMBOL(drm_dp_mst_port_has_audio);
3503
3504 /**
3505  * drm_dp_mst_get_edid() - get EDID for an MST port
3506  * @connector: toplevel connector to get EDID for
3507  * @mgr: manager for this port
3508  * @port: unverified pointer to a port.
3509  *
 * This returns an EDID for the port connected to a connector.
 * It validates the pointer still exists so the caller doesn't require a
 * reference.
3513  */
3514 struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
3515 {
3516         struct edid *edid = NULL;
3517
3518         /* we need to search for the port in the mgr in case it's gone */
3519         port = drm_dp_mst_topology_get_port_validated(mgr, port);
3520         if (!port)
3521                 return NULL;
3522
        if (port->cached_edid)
                edid = drm_edid_duplicate(port->cached_edid);
        else
                edid = drm_get_edid(connector, &port->aux.ddc);
3528         port->has_audio = drm_detect_monitor_audio(edid);
3529         drm_dp_mst_topology_put_port(port);
3530         return edid;
3531 }
3532 EXPORT_SYMBOL(drm_dp_mst_get_edid);
3533
3534 /**
3535  * drm_dp_find_vcpi_slots() - Find VCPI slots for this PBN value
3536  * @mgr: manager to use
3537  * @pbn: payload bandwidth to convert into slots.
3538  *
3539  * Calculate the number of VCPI slots that will be required for the given PBN
3540  * value. This function is deprecated, and should not be used in atomic
3541  * drivers.
3542  *
 * RETURNS:
 * The total slots required for this PBN value, or a negative error code.
3545  */
3546 int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
3547                            int pbn)
3548 {
3549         int num_slots;
3550
3551         num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
3552
3553         /* max. time slots - one slot for MTP header */
3554         if (num_slots > 63)
3555                 return -ENOSPC;
3556         return num_slots;
3557 }
3558 EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
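
/*
 * Worked example (assumed values): with pbn_div = 40 (HBR2 x4, see
 * drm_dp_get_vc_payload_bw() above), a stream needing 532 PBN takes
 * DIV_ROUND_UP(532, 40) = 14 timeslots, and anything above 63 * 40 =
 * 2520 PBN returns -ENOSPC since only 63 slots remain after the MTP
 * header.
 */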
3559
3560 static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
3561                             struct drm_dp_vcpi *vcpi, int pbn, int slots)
3562 {
3563         int ret;
3564
3565         /* max. time slots - one slot for MTP header */
3566         if (slots > 63)
3567                 return -ENOSPC;
3568
3569         vcpi->pbn = pbn;
3570         vcpi->aligned_pbn = slots * mgr->pbn_div;
3571         vcpi->num_slots = slots;
3572
3573         ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
3574         if (ret < 0)
3575                 return ret;
3576         return 0;
3577 }
3578
3579 /**
3580  * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state
3581  * @state: global atomic state
3582  * @mgr: MST topology manager for the port
3583  * @port: port to find vcpi slots for
3584  * @pbn: bandwidth required for the mode in PBN
3585  *
3586  * Allocates VCPI slots to @port, replacing any previous VCPI allocations it
3587  * may have had. Any atomic drivers which support MST must call this function
3588  * in their &drm_encoder_helper_funcs.atomic_check() callback to change the
3589  * current VCPI allocation for the new state, but only when
3590  * &drm_crtc_state.mode_changed or &drm_crtc_state.connectors_changed is set
3591  * to ensure compatibility with userspace applications that still use the
3592  * legacy modesetting UAPI.
3593  *
3594  * Allocations set by this function are not checked against the bandwidth
3595  * restraints of @mgr until the driver calls drm_dp_mst_atomic_check().
3596  *
 * Additionally, it is OK to call this function multiple times on the same
 * @port as needed. It is not OK, however, to call this function and
 * drm_dp_atomic_release_vcpi_slots() in the same atomic check phase.
3600  *
3601  * See also:
3602  * drm_dp_atomic_release_vcpi_slots()
3603  * drm_dp_mst_atomic_check()
3604  *
3605  * Returns:
3606  * Total slots in the atomic state assigned for this port, or a negative error
3607  * code if the port no longer exists
3608  */
3609 int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
3610                                   struct drm_dp_mst_topology_mgr *mgr,
3611                                   struct drm_dp_mst_port *port, int pbn)
3612 {
3613         struct drm_dp_mst_topology_state *topology_state;
3614         struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
3615         int prev_slots, req_slots;
3616
3617         topology_state = drm_atomic_get_mst_topology_state(state, mgr);
3618         if (IS_ERR(topology_state))
3619                 return PTR_ERR(topology_state);
3620
3621         /* Find the current allocation for this port, if any */
3622         list_for_each_entry(pos, &topology_state->vcpis, next) {
3623                 if (pos->port == port) {
3624                         vcpi = pos;
3625                         prev_slots = vcpi->vcpi;
3626
3627                         /*
3628                          * This should never happen, unless the driver tries
3629                          * releasing and allocating the same VCPI allocation,
3630                          * which is an error
3631                          */
3632                         if (WARN_ON(!prev_slots)) {
3633                                 DRM_ERROR("cannot allocate and release VCPI on [MST PORT:%p] in the same state\n",
3634                                           port);
3635                                 return -EINVAL;
3636                         }
3637
3638                         break;
3639                 }
3640         }
3641         if (!vcpi)
3642                 prev_slots = 0;
3643
3644         req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
3645
3646         DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
3647                          port->connector->base.id, port->connector->name,
3648                          port, prev_slots, req_slots);
3649
3650         /* Add the new allocation to the state */
3651         if (!vcpi) {
3652                 vcpi = kzalloc(sizeof(*vcpi), GFP_KERNEL);
3653                 if (!vcpi)
3654                         return -ENOMEM;
3655
3656                 drm_dp_mst_get_port_malloc(port);
3657                 vcpi->port = port;
3658                 list_add(&vcpi->next, &topology_state->vcpis);
3659         }
3660         vcpi->vcpi = req_slots;
3661
3662         return req_slots;
3663 }
3664 EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
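
/*
 * Sketch of the allocation side in an encoder's atomic_check(), assuming
 * the driver already derived bpp from the adjusted mode (the surrounding
 * variables are hypothetical):
 *
 *      if (crtc_state->mode_changed || crtc_state->connectors_changed) {
 *              pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
 *              slots = drm_dp_atomic_find_vcpi_slots(state, mgr, port, pbn);
 *              if (slots < 0)
 *                      return slots;
 *      }
 */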
3665
3666 /**
3667  * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
3668  * @state: global atomic state
3669  * @mgr: MST topology manager for the port
3670  * @port: The port to release the VCPI slots from
3671  *
3672  * Releases any VCPI slots that have been allocated to a port in the atomic
3673  * state. Any atomic drivers which support MST must call this function in
3674  * their &drm_connector_helper_funcs.atomic_check() callback when the
3675  * connector will no longer have VCPI allocated (e.g. because its CRTC was
3676  * removed) when it had VCPI allocated in the previous atomic state.
3677  *
3678  * It is OK to call this even if @port has been removed from the system.
 * Additionally, it is OK to call this function multiple times on the same
 * @port as needed. It is not OK, however, to call this function and
 * drm_dp_atomic_find_vcpi_slots() on the same @port in a single atomic check
 * phase.
3683  *
3684  * See also:
3685  * drm_dp_atomic_find_vcpi_slots()
3686  * drm_dp_mst_atomic_check()
3687  *
3688  * Returns:
3689  * 0 if all slots for this port were added back to
3690  * &drm_dp_mst_topology_state.avail_slots or negative error code
3691  */
3692 int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
3693                                      struct drm_dp_mst_topology_mgr *mgr,
3694                                      struct drm_dp_mst_port *port)
3695 {
3696         struct drm_dp_mst_topology_state *topology_state;
3697         struct drm_dp_vcpi_allocation *pos;
3698         bool found = false;
3699
3700         topology_state = drm_atomic_get_mst_topology_state(state, mgr);
3701         if (IS_ERR(topology_state))
3702                 return PTR_ERR(topology_state);
3703
3704         list_for_each_entry(pos, &topology_state->vcpis, next) {
3705                 if (pos->port == port) {
3706                         found = true;
3707                         break;
3708                 }
3709         }
3710         if (WARN_ON(!found)) {
3711                 DRM_ERROR("no VCPI for [MST PORT:%p] found in mst state %p\n",
3712                           port, &topology_state->base);
3713                 return -EINVAL;
3714         }
3715
3716         DRM_DEBUG_ATOMIC("[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi);
3717         if (pos->vcpi) {
3718                 drm_dp_mst_put_port_malloc(port);
3719                 pos->vcpi = 0;
3720         }
3721
3722         return 0;
3723 }
3724 EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots);
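
/*
 * Sketch of the matching release side, typically in a connector's
 * atomic_check() when the connector is losing its CRTC (the state
 * variables are hypothetical):
 *
 *      if (old_conn_state->crtc && !new_conn_state->crtc) {
 *              ret = drm_dp_atomic_release_vcpi_slots(state, mgr, port);
 *              if (ret)
 *                      return ret;
 *      }
 */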
3725
3726 /**
3727  * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
3728  * @mgr: manager for this port
3729  * @port: port to allocate a virtual channel for.
3730  * @pbn: payload bandwidth number to request
 * @slots: number of slots to allocate (e.g. from drm_dp_find_vcpi_slots()).
 *
 * RETURNS:
 * true on success, false on failure.
 */
3733 bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
3734                               struct drm_dp_mst_port *port, int pbn, int slots)
3735 {
3736         int ret;
3737
3738         port = drm_dp_mst_topology_get_port_validated(mgr, port);
3739         if (!port)
3740                 return false;
3741
        if (slots < 0) {
                drm_dp_mst_topology_put_port(port);
                return false;
        }
3744
3745         if (port->vcpi.vcpi > 0) {
3746                 DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n",
3747                               port->vcpi.vcpi, port->vcpi.pbn, pbn);
3748                 if (pbn == port->vcpi.pbn) {
3749                         drm_dp_mst_topology_put_port(port);
3750                         return true;
3751                 }
3752         }
3753
3754         ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
3755         if (ret) {
3756                 DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
3757                               DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
3758                 goto out;
3759         }
3760         DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
3761                       pbn, port->vcpi.num_slots);
3762
3763         /* Keep port allocated until its payload has been removed */
3764         drm_dp_mst_get_port_malloc(port);
3765         drm_dp_mst_topology_put_port(port);
3766         return true;
out:
        drm_dp_mst_topology_put_port(port);
        return false;
3769 }
3770 EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
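
/*
 * Sketch of the legacy (non-atomic) enable sequence built on this function,
 * with error handling trimmed and surrounding names assumed:
 *
 *      pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
 *      slots = drm_dp_find_vcpi_slots(mgr, pbn);
 *      if (slots < 0 || !drm_dp_mst_allocate_vcpi(mgr, port, pbn, slots))
 *              return -EINVAL;
 *      // ...enable the stream, then run the payload update parts above...
 */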
3771
3772 int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
3773 {
3774         int slots = 0;
3775         port = drm_dp_mst_topology_get_port_validated(mgr, port);
3776         if (!port)
3777                 return slots;
3778
3779         slots = port->vcpi.num_slots;
3780         drm_dp_mst_topology_put_port(port);
3781         return slots;
3782 }
3783 EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
3784
3785 /**
3786  * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
3787  * @mgr: manager for this port
3788  * @port: unverified pointer to a port.
3789  *
 * This just resets the number of slots for the port's VCPI for later programming.
3791  */
3792 void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
3793 {
3794         /*
3795          * A port with VCPI will remain allocated until its VCPI is
3796          * released, no verified ref needed
3797          */
3798
3799         port->vcpi.num_slots = 0;
3800 }
3801 EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
3802
3803 /**
3804  * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
3805  * @mgr: manager for this port
3806  * @port: port to deallocate vcpi for
3807  *
3808  * This can be called unconditionally, regardless of whether
3809  * drm_dp_mst_allocate_vcpi() succeeded or not.
3810  */
3811 void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
3812                                 struct drm_dp_mst_port *port)
3813 {
3814         if (!port->vcpi.vcpi)
3815                 return;
3816
3817         drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
3818         port->vcpi.num_slots = 0;
3819         port->vcpi.pbn = 0;
3820         port->vcpi.aligned_pbn = 0;
3821         port->vcpi.vcpi = 0;
3822         drm_dp_mst_put_port_malloc(port);
3823 }
3824 EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
3825
3826 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
3827                                      int id, struct drm_dp_payload *payload)
3828 {
3829         u8 payload_alloc[3], status;
3830         int ret;
3831         int retries = 0;
3832
3833         drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
3834                            DP_PAYLOAD_TABLE_UPDATED);
3835
3836         payload_alloc[0] = id;
3837         payload_alloc[1] = payload->start_slot;
3838         payload_alloc[2] = payload->num_slots;
3839
3840         ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
3841         if (ret != 3) {
3842                 DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
3843                 goto fail;
3844         }
3845
3846 retry:
3847         ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
3848         if (ret < 0) {
3849                 DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
3850                 goto fail;
3851         }
3852
3853         if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
3854                 retries++;
3855                 if (retries < 20) {
3856                         usleep_range(10000, 20000);
3857                         goto retry;
3858                 }
3859                 DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
3860                 ret = -EINVAL;
3861                 goto fail;
3862         }
3863         ret = 0;
3864 fail:
3865         return ret;
3866 }
3867
3869 /**
3870  * drm_dp_check_act_status() - Check ACT handled status.
3871  * @mgr: manager to use
3872  *
3873  * Check the payload status bits in the DPCD for ACT handled completion.
3874  */
3875 int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
3876 {
3877         u8 status;
3878         int ret;
3879         int count = 0;
3880
3881         do {
3882                 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
3883
3884                 if (ret < 0) {
3885                         DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
3886                         goto fail;
3887                 }
3888
3889                 if (status & DP_PAYLOAD_ACT_HANDLED)
3890                         break;
3891                 count++;
3892                 udelay(100);
3893
3894         } while (count < 30);
3895
3896         if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
3897                 DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
3898                 ret = -EINVAL;
3899                 goto fail;
3900         }
3901         return 0;
3902 fail:
3903         return ret;
3904 }
3905 EXPORT_SYMBOL(drm_dp_check_act_status);
3906
3907 /**
3908  * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
3909  * @clock: dot clock for the mode
3910  * @bpp: bpp for the mode.
3911  *
3912  * This uses the formula in the spec to calculate the PBN value for a mode.
3913  */
3914 int drm_dp_calc_pbn_mode(int clock, int bpp)
3915 {
        /*
         * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
         * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
         * a common multiplier to render an integer PBN for all link rate/lane
         * count combinations.
         *
         * Calculate:
         * peak_kbps *= (1006/1000)
         * peak_kbps *= (64/54)
         * peak_kbps /= 8    convert kbits to kbytes
         */
3926         return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006),
3927                                 8 * 54 * 1000 * 1000);
3928 }
3929 EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
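
/*
 * Worked example of the formula above (computed by hand for illustration):
 * a 1920x1080@60 mode with a 148500 kHz dot clock at 24 bpp gives
 * 148500 * 24 * 64 * 1006 / (8 * 54 * 1000 * 1000) = 531.17, which rounds
 * up to 532 PBN; with pbn_div = 40 (HBR2 x4) that is 14 timeslots.
 */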
3930
/* we want to kick the TX after we've ACKed the up/down IRQs. */
3932 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
3933 {
3934         queue_work(system_long_wq, &mgr->tx_work);
3935 }
3936
3937 static void drm_dp_mst_dump_mstb(struct seq_file *m,
3938                                  struct drm_dp_mst_branch *mstb)
3939 {
3940         struct drm_dp_mst_port *port;
3941         int tabs = mstb->lct;
3942         char prefix[10];
3943         int i;
3944
3945         for (i = 0; i < tabs; i++)
3946                 prefix[i] = '\t';
3947         prefix[i] = '\0';
3948
3949         seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
3950         list_for_each_entry(port, &mstb->ports, next) {
                seq_printf(m, "%sport: %d: input: %d: pdt: %d, ddps: %d ldps: %d, sdp: %d/%d, %p, conn: %p\n",
                           prefix, port->port_num, port->input, port->pdt,
                           port->ddps, port->ldps, port->num_sdp_streams,
                           port->num_sdp_stream_sinks, port, port->connector);
3952                 if (port->mstb)
3953                         drm_dp_mst_dump_mstb(m, port->mstb);
3954         }
3955 }
3956
3957 #define DP_PAYLOAD_TABLE_SIZE           64
3958
3959 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
3960                                   char *buf)
3961 {
3962         int i;
3963
3964         for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
3965                 if (drm_dp_dpcd_read(mgr->aux,
3966                                      DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
3967                                      &buf[i], 16) != 16)
3968                         return false;
3969         }
3970         return true;
3971 }
3972
3973 static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
3974                                struct drm_dp_mst_port *port, char *name,
3975                                int namelen)
3976 {
3977         struct edid *mst_edid;
3978
3979         mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
3980         drm_edid_get_monitor_name(mst_edid, name, namelen);
3981 }
3982
3983 /**
3984  * drm_dp_mst_dump_topology(): dump topology to seq file.
3985  * @m: seq_file to dump output to
3986  * @mgr: manager to dump current topology for.
3987  *
 * Helper to dump the MST topology to a seq file for debugfs.
3989  */
3990 void drm_dp_mst_dump_topology(struct seq_file *m,
3991                               struct drm_dp_mst_topology_mgr *mgr)
3992 {
3993         int i;
3994         struct drm_dp_mst_port *port;
3995
3996         mutex_lock(&mgr->lock);
3997         if (mgr->mst_primary)
3998                 drm_dp_mst_dump_mstb(m, mgr->mst_primary);
3999
        mutex_unlock(&mgr->lock);

        /* dump VCPIs */
        mutex_lock(&mgr->payload_lock);
4004         seq_printf(m, "vcpi: %lx %lx %d\n", mgr->payload_mask, mgr->vcpi_mask,
4005                 mgr->max_payloads);
4006
4007         for (i = 0; i < mgr->max_payloads; i++) {
4008                 if (mgr->proposed_vcpis[i]) {
4009                         char name[14];
4010
4011                         port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
4012                         fetch_monitor_name(mgr, port, name, sizeof(name));
                        seq_printf(m, "vcpi %d: %d %d %d sink name: %s\n", i,
                                   port->port_num, port->vcpi.vcpi,
                                   port->vcpi.num_slots,
                                   (*name != 0) ? name : "Unknown");
                } else {
                        seq_printf(m, "vcpi %d: unused\n", i);
                }
4019         }
4020         for (i = 0; i < mgr->max_payloads; i++) {
4021                 seq_printf(m, "payload %d: %d, %d, %d\n",
4022                            i,
4023                            mgr->payloads[i].payload_state,
4024                            mgr->payloads[i].start_slot,
4025                            mgr->payloads[i].num_slots);
4028         }
4029         mutex_unlock(&mgr->payload_lock);
4030
4031         mutex_lock(&mgr->lock);
4032         if (mgr->mst_primary) {
4033                 u8 buf[DP_PAYLOAD_TABLE_SIZE];
4034                 int ret;
4035
4036                 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
4037                 seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
4038                 ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
4039                 seq_printf(m, "faux/mst: %*ph\n", 2, buf);
4040                 ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
4041                 seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
4042
4043                 /* dump the standard OUI branch header */
4044                 ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
4045                 seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
4046                 for (i = 0x3; i < 0x8 && buf[i]; i++)
4047                         seq_printf(m, "%c", buf[i]);
4048                 seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
4049                            buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
4050                 if (dump_dp_payload_table(mgr, buf))
4051                         seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf);
4052         }
4053
4054         mutex_unlock(&mgr->lock);
4056 }
4057 EXPORT_SYMBOL(drm_dp_mst_dump_topology);
4058
4059 static void drm_dp_tx_work(struct work_struct *work)
4060 {
4061         struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
4062
4063         mutex_lock(&mgr->qlock);
4064         if (!list_empty(&mgr->tx_msg_downq))
4065                 process_single_down_tx_qlock(mgr);
4066         mutex_unlock(&mgr->qlock);
4067 }
4068
4069 static void drm_dp_destroy_connector_work(struct work_struct *work)
4070 {
4071         struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
4072         struct drm_dp_mst_port *port;
4073         bool send_hotplug = false;
4074         /*
4075          * Not a regular list traverse as we have to drop the destroy
4076          * connector lock before destroying the connector, to avoid AB->BA
4077          * ordering between this lock and the config mutex.
4078          */
4079         for (;;) {
4080                 mutex_lock(&mgr->destroy_connector_lock);
4081                 port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
4082                 if (!port) {
4083                         mutex_unlock(&mgr->destroy_connector_lock);
4084                         break;
4085                 }
4086                 list_del(&port->next);
4087                 mutex_unlock(&mgr->destroy_connector_lock);
4088
4089                 mgr->cbs->destroy_connector(mgr, port->connector);
4090
4091                 drm_dp_port_teardown_pdt(port, port->pdt);
4092                 port->pdt = DP_PEER_DEVICE_NONE;
4093
4094                 drm_dp_mst_put_port_malloc(port);
4095                 send_hotplug = true;
4096         }
4097         if (send_hotplug)
4098                 drm_kms_helper_hotplug_event(mgr->dev);
4099 }
4100
4101 static struct drm_private_state *
4102 drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
4103 {
4104         struct drm_dp_mst_topology_state *state, *old_state =
4105                 to_dp_mst_topology_state(obj->state);
4106         struct drm_dp_vcpi_allocation *pos, *vcpi;
4107
4108         state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
4109         if (!state)
4110                 return NULL;
4111
4112         __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
4113
4114         INIT_LIST_HEAD(&state->vcpis);
4115
4116         list_for_each_entry(pos, &old_state->vcpis, next) {
4117                 /* Prune leftover freed VCPI allocations */
4118                 if (!pos->vcpi)
4119                         continue;
4120
4121                 vcpi = kmemdup(pos, sizeof(*vcpi), GFP_KERNEL);
4122                 if (!vcpi)
4123                         goto fail;
4124
4125                 drm_dp_mst_get_port_malloc(vcpi->port);
4126                 list_add(&vcpi->next, &state->vcpis);
4127         }
4128
4129         return &state->base;
4130
4131 fail:
4132         list_for_each_entry_safe(pos, vcpi, &state->vcpis, next) {
4133                 drm_dp_mst_put_port_malloc(pos->port);
4134                 kfree(pos);
4135         }
4136         kfree(state);
4137
4138         return NULL;
4139 }
4140
4141 static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
4142                                      struct drm_private_state *state)
4143 {
4144         struct drm_dp_mst_topology_state *mst_state =
4145                 to_dp_mst_topology_state(state);
4146         struct drm_dp_vcpi_allocation *pos, *tmp;
4147
4148         list_for_each_entry_safe(pos, tmp, &mst_state->vcpis, next) {
4149                 /* We only keep references to ports with non-zero VCPIs */
4150                 if (pos->vcpi)
4151                         drm_dp_mst_put_port_malloc(pos->port);
4152                 kfree(pos);
4153         }
4154
4155         kfree(mst_state);
4156 }
4157
4158 static inline int
4159 drm_dp_mst_atomic_check_topology_state(struct drm_dp_mst_topology_mgr *mgr,
4160                                        struct drm_dp_mst_topology_state *mst_state)
4161 {
4162         struct drm_dp_vcpi_allocation *vcpi;
4163         int avail_slots = 63, payload_count = 0;
4164
4165         list_for_each_entry(vcpi, &mst_state->vcpis, next) {
                /* Releasing VCPI is always OK - even if the port is gone */
4167                 if (!vcpi->vcpi) {
4168                         DRM_DEBUG_ATOMIC("[MST PORT:%p] releases all VCPI slots\n",
4169                                          vcpi->port);
4170                         continue;
4171                 }
4172
4173                 DRM_DEBUG_ATOMIC("[MST PORT:%p] requires %d vcpi slots\n",
4174                                  vcpi->port, vcpi->vcpi);
4175
4176                 avail_slots -= vcpi->vcpi;
4177                 if (avail_slots < 0) {
4178                         DRM_DEBUG_ATOMIC("[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n",
4179                                          vcpi->port, mst_state,
4180                                          avail_slots + vcpi->vcpi);
4181                         return -ENOSPC;
4182                 }
4183
4184                 if (++payload_count > mgr->max_payloads) {
4185                         DRM_DEBUG_ATOMIC("[MST MGR:%p] state %p has too many payloads (max=%d)\n",
4186                                          mgr, mst_state, mgr->max_payloads);
4187                         return -EINVAL;
4188                 }
4189         }
4190         DRM_DEBUG_ATOMIC("[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n",
4191                          mgr, mst_state, avail_slots,
4192                          63 - avail_slots);
4193
4194         return 0;
4195 }
4196
4197 /**
4198  * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
4199  * atomic update is valid
4200  * @state: Pointer to the new &struct drm_dp_mst_topology_state
4201  *
4202  * Checks the given topology state for an atomic update to ensure that it's
4203  * valid. This includes checking whether there's enough bandwidth to support
4204  * the new VCPI allocations in the atomic update.
4205  *
4206  * Any atomic drivers supporting DP MST must make sure to call this after
4207  * checking the rest of their state in their
4208  * &drm_mode_config_funcs.atomic_check() callback.
4209  *
4210  * See also:
4211  * drm_dp_atomic_find_vcpi_slots()
4212  * drm_dp_atomic_release_vcpi_slots()
4213  *
4214  * Returns:
4215  *
4216  * 0 if the new state is valid, negative error code otherwise.
4217  */
4218 int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
4219 {
4220         struct drm_dp_mst_topology_mgr *mgr;
4221         struct drm_dp_mst_topology_state *mst_state;
4222         int i, ret = 0;
4223
4224         for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
4225                 ret = drm_dp_mst_atomic_check_topology_state(mgr, mst_state);
4226                 if (ret)
4227                         break;
4228         }
4229
4230         return ret;
4231 }
4232 EXPORT_SYMBOL(drm_dp_mst_atomic_check);
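
/*
 * Example (illustrative sketch, not part of this file): a driver's
 * &drm_mode_config_funcs.atomic_check implementation would typically run the
 * core helper checks first and validate the MST topology state last; the
 * my_driver_atomic_check() name below is hypothetical.
 *
 *	static int my_driver_atomic_check(struct drm_device *dev,
 *					  struct drm_atomic_state *state)
 *	{
 *		int ret;
 *
 *		ret = drm_atomic_helper_check(dev, state);
 *		if (ret)
 *			return ret;
 *
 *		return drm_dp_mst_atomic_check(state);
 *	}
 */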
4233
4234 const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
4235         .atomic_duplicate_state = drm_dp_mst_duplicate_state,
4236         .atomic_destroy_state = drm_dp_mst_destroy_state,
4237 };
4238 EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
4239
4240 /**
4241  * drm_atomic_get_mst_topology_state() - get MST topology state
4242  *
4243  * @state: global atomic state
4244  * @mgr: MST topology manager, also the private object in this case
4245  *
4246  * This function wraps drm_atomic_get_private_obj_state(), passing in the MST
4247  * atomic state vtable so that the private object state returned is that of an
4248  * MST topology object. drm_atomic_get_private_obj_state() expects the caller
4249  * to take care of the locking, so warn if the connection_mutex is not held.
4250  *
4251  * Returns:
4252  *
4253  * The MST topology state, or an error pointer on failure.
4254  */
4255 struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
4256                                                                     struct drm_dp_mst_topology_mgr *mgr)
4257 {
4258         return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
4259 }
4260 EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
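
/*
 * Example (illustrative sketch, not part of this file): fetching the topology
 * state from an atomic check path, assuming @state and @mgr are in scope and
 * the required locks are held:
 *
 *	struct drm_dp_mst_topology_state *mst_state;
 *
 *	mst_state = drm_atomic_get_mst_topology_state(state, mgr);
 *	if (IS_ERR(mst_state))
 *		return PTR_ERR(mst_state);
 */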
4261
4262 /**
4263  * drm_dp_mst_topology_mgr_init - initialise a topology manager
4264  * @mgr: manager struct to initialise
4265  * @dev: drm device providing this structure - for i2c adapter registration.
4266  * @aux: DP helper aux channel to talk to this device
4267  * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
4268  * @max_payloads: maximum number of payloads this GPU can source
4269  * @conn_base_id: the connector object ID the MST device is connected to.
4270  *
4271  * Returns 0 on success, or a negative error code on failure.
4272  */
4273 int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
4274                                  struct drm_device *dev, struct drm_dp_aux *aux,
4275                                  int max_dpcd_transaction_bytes,
4276                                  int max_payloads, int conn_base_id)
4277 {
4278         struct drm_dp_mst_topology_state *mst_state;
4279
4280         mutex_init(&mgr->lock);
4281         mutex_init(&mgr->qlock);
4282         mutex_init(&mgr->payload_lock);
4283         mutex_init(&mgr->destroy_connector_lock);
4284         INIT_LIST_HEAD(&mgr->tx_msg_downq);
4285         INIT_LIST_HEAD(&mgr->destroy_connector_list);
4286         INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
4287         INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
4288         INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work);
4289         init_waitqueue_head(&mgr->tx_waitq);
4290         mgr->dev = dev;
4291         mgr->aux = aux;
4292         mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
4293         mgr->max_payloads = max_payloads;
4294         mgr->conn_base_id = conn_base_id;
4295         if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
4296             max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
4297                 return -EINVAL;
4298         mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
4299         if (!mgr->payloads)
4300                 return -ENOMEM;
4301         mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
4302         if (!mgr->proposed_vcpis) {
                     /* don't leak the payload table on the error path */
                     kfree(mgr->payloads);
                     mgr->payloads = NULL;
4303                 return -ENOMEM;
             }
4304         set_bit(0, &mgr->payload_mask);
4305
4306         mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
4307         if (mst_state == NULL) {
                     kfree(mgr->proposed_vcpis);
                     mgr->proposed_vcpis = NULL;
                     kfree(mgr->payloads);
                     mgr->payloads = NULL;
4308                 return -ENOMEM;
             }
4309
4310         mst_state->mgr = mgr;
4311         INIT_LIST_HEAD(&mst_state->vcpis);
4312
4313         drm_atomic_private_obj_init(dev, &mgr->base,
4314                                     &mst_state->base,
4315                                     &drm_dp_mst_topology_state_funcs);
4316
4317         return 0;
4318 }
4319 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
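
/*
 * Example (illustrative sketch, not part of this file): a driver would
 * typically initialise one manager per MST-capable output at load time. The
 * my_output pointer is hypothetical, as are the limits (16 bytes per DPCD
 * transaction, 4 payloads), which are hardware specific:
 *
 *	ret = drm_dp_mst_topology_mgr_init(&my_output->mst_mgr, dev,
 *					   &my_output->dp_aux, 16, 4,
 *					   my_output->connector.base.id);
 *	if (ret)
 *		return ret;
 */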
4320
4321 /**
4322  * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
4323  * @mgr: manager to destroy
4324  */
4325 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
4326 {
4327         drm_dp_mst_topology_mgr_set_mst(mgr, false);
4328         flush_work(&mgr->work);
4329         flush_work(&mgr->destroy_connector_work);
4330         mutex_lock(&mgr->payload_lock);
4331         kfree(mgr->payloads);
4332         mgr->payloads = NULL;
4333         kfree(mgr->proposed_vcpis);
4334         mgr->proposed_vcpis = NULL;
4335         mutex_unlock(&mgr->payload_lock);
4336         mgr->dev = NULL;
4337         mgr->aux = NULL;
4338         drm_atomic_private_obj_fini(&mgr->base);
4339         mgr->funcs = NULL;
4340
4341         mutex_destroy(&mgr->destroy_connector_lock);
4342         mutex_destroy(&mgr->payload_lock);
4343         mutex_destroy(&mgr->qlock);
4344         mutex_destroy(&mgr->lock);
4345 }
4346 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
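
/*
 * Example (illustrative sketch, not part of this file): teardown mirrors
 * drm_dp_mst_topology_mgr_init() and normally happens at driver unload, once
 * hotplug and IRQ handling can no longer reach the manager:
 *
 *	drm_dp_mst_topology_mgr_destroy(&my_output->mst_mgr);
 */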
4347
4348 static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
4349 {
4350         int i;
4351
4352         if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)
4353                 return false;
4354
4355         for (i = 0; i < num - 1; i++) {
4356                 if (msgs[i].flags & I2C_M_RD ||
4357                     msgs[i].len > 0xff)
4358                         return false;
4359         }
4360
4361         return msgs[num - 1].flags & I2C_M_RD &&
4362                 msgs[num - 1].len <= 0xff;
4363 }
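
/*
 * For reference, a typical EDID fetch already has the shape accepted above -
 * a handful of address writes followed by exactly one read (illustrative
 * sketch; offset and buf are hypothetical locals, 0x50 is the standard EDID
 * slave address):
 *
 *	struct i2c_msg msgs[] = {
 *		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &offset },
 *		{ .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = buf },
 *	};
 */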
4364
4365 /* I2C device */
4366 static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
4367                                int num)
4368 {
4369         struct drm_dp_aux *aux = adapter->algo_data;
4370         struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
4371         struct drm_dp_mst_branch *mstb;
4372         struct drm_dp_mst_topology_mgr *mgr = port->mgr;
4373         unsigned int i;
4374         struct drm_dp_sideband_msg_req_body msg;
4375         struct drm_dp_sideband_msg_tx *txmsg = NULL;
4376         int ret;
4377
4378         mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
4379         if (!mstb)
4380                 return -EREMOTEIO;
4381
4382         if (!remote_i2c_read_ok(msgs, num)) {
4383                 DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
4384                 ret = -EIO;
4385                 goto out;
4386         }
4387
4388         memset(&msg, 0, sizeof(msg));
4389         msg.req_type = DP_REMOTE_I2C_READ;
4390         msg.u.i2c_read.num_transactions = num - 1;
4391         msg.u.i2c_read.port_number = port->port_num;
4392         for (i = 0; i < num - 1; i++) {
4393                 msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
4394                 msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
4395                 msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
4396                 msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
4397         }
4398         msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
4399         msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
4400
4401         txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
4402         if (!txmsg) {
4403                 ret = -ENOMEM;
4404                 goto out;
4405         }
4406
4407         txmsg->dst = mstb;
4408         drm_dp_encode_sideband_req(&msg, txmsg);
4409
4410         drm_dp_queue_down_tx(mgr, txmsg);
4411
4412         ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
4413         if (ret > 0) {
4415                 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
4416                         ret = -EREMOTEIO;
4417                         goto out;
4418                 }
4419                 if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
4420                         ret = -EIO;
4421                         goto out;
4422                 }
4423                 memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
4424                 ret = num;
4425         }
4426 out:
4427         kfree(txmsg);
4428         drm_dp_mst_topology_put_mstb(mstb);
4429         return ret;
4430 }
4431
4432 static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
4433 {
4434         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
4435                I2C_FUNC_SMBUS_READ_BLOCK_DATA |
4436                I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
4437                I2C_FUNC_10BIT_ADDR;
4438 }
4439
4440 static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
4441         .functionality = drm_dp_mst_i2c_functionality,
4442         .master_xfer = drm_dp_mst_i2c_xfer,
4443 };
4444
4445 /**
4446  * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
4447  * @aux: DisplayPort AUX channel
4448  *
4449  * Returns 0 on success or a negative error code on failure.
4450  */
4451 static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
4452 {
4453         aux->ddc.algo = &drm_dp_mst_i2c_algo;
4454         aux->ddc.algo_data = aux;
4455         aux->ddc.retries = 3;
4456
4457         aux->ddc.class = I2C_CLASS_DDC;
4458         aux->ddc.owner = THIS_MODULE;
4459         aux->ddc.dev.parent = aux->dev;
4460         aux->ddc.dev.of_node = aux->dev->of_node;
4461
4462         strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
4463                 sizeof(aux->ddc.name));
4464
4465         return i2c_add_adapter(&aux->ddc);
4466 }
4467
4468 /**
4469  * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
4470  * @aux: DisplayPort AUX channel
4471  */
4472 static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
4473 {
4474         i2c_del_adapter(&aux->ddc);
4475 }