/*
 * Copyright © 2014 Red Hat
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission. The copyright holders make no representations
 * about the suitability of this software for any purpose. It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/seq_file.h>

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#include <linux/stacktrace.h>
#include <linux/sort.h>
#include <linux/timekeeping.h>
#include <linux/math64.h>
#endif

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

#include "drm_crtc_helper_internal.h"
#include "drm_dp_mst_topology_internal.h"
/**
 * DOC: dp mst helper
 *
 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
 * protocol. The helpers contain a topology manager and bandwidth manager.
 * The helpers encapsulate the sending and receiving of sideband messages.
 */
struct drm_dp_pending_up_req {
	struct drm_dp_sideband_msg_hdr hdr;
	struct drm_dp_sideband_msg_req_body msg;
	struct list_head next;
};
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf);

static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id,
				     struct drm_dp_payload *payload);

static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port,
				 int offset, int size, u8 *bytes);
static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes);

static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_mst_branch *mstb);

static void
drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_mst_branch *mstb);

static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid);

static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
#define DBG_PREFIX "[dp_mst]"

#define DP_STR(x) [DP_ ## x] = #x
static const char *drm_dp_mst_req_type_str(u8 req_type)
{
	static const char * const req_type_str[] = {
		DP_STR(GET_MSG_TRANSACTION_VERSION),
		DP_STR(LINK_ADDRESS),
		DP_STR(CONNECTION_STATUS_NOTIFY),
		DP_STR(ENUM_PATH_RESOURCES),
		DP_STR(ALLOCATE_PAYLOAD),
		DP_STR(QUERY_PAYLOAD),
		DP_STR(RESOURCE_STATUS_NOTIFY),
		DP_STR(CLEAR_PAYLOAD_ID_TABLE),
		DP_STR(REMOTE_DPCD_READ),
		DP_STR(REMOTE_DPCD_WRITE),
		DP_STR(REMOTE_I2C_READ),
		DP_STR(REMOTE_I2C_WRITE),
		DP_STR(POWER_UP_PHY),
		DP_STR(POWER_DOWN_PHY),
		DP_STR(SINK_EVENT_NOTIFY),
		DP_STR(QUERY_STREAM_ENC_STATUS),
	};

	if (req_type >= ARRAY_SIZE(req_type_str) ||
	    !req_type_str[req_type])
		return "unknown";

	return req_type_str[req_type];
}

#undef DP_STR
#define DP_STR(x) [DP_NAK_ ## x] = #x

static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
{
	static const char * const nak_reason_str[] = {
		DP_STR(WRITE_FAILURE),
		DP_STR(INVALID_READ),
		DP_STR(CRC_FAILURE),
		DP_STR(BAD_PARAM),
		DP_STR(DEFER),
		DP_STR(LINK_FAILURE),
		DP_STR(NO_RESOURCES),
		DP_STR(DPCD_FAIL),
		DP_STR(I2C_NAK),
		DP_STR(ALLOCATE_FAIL),
	};

	if (nak_reason >= ARRAY_SIZE(nak_reason_str) ||
	    !nak_reason_str[nak_reason])
		return "unknown";

	return nak_reason_str[nak_reason];
}

#undef DP_STR
#define DP_STR(x) [DRM_DP_SIDEBAND_TX_ ## x] = #x

static const char *drm_dp_mst_sideband_tx_state_str(int state)
{
	static const char * const sideband_reason_str[] = {
		DP_STR(QUEUED),
		DP_STR(START_SEND),
		DP_STR(SENT),
		DP_STR(RX),
		DP_STR(TIMEOUT),
	};

	if (state >= ARRAY_SIZE(sideband_reason_str) ||
	    !sideband_reason_str[state])
		return "unknown";

	return sideband_reason_str[state];
}

#undef DP_STR
static int
drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len)
{
	int i;
	u8 unpacked_rad[16];

	for (i = 0; i < lct; i++) {
		if (i % 2)
			unpacked_rad[i] = rad[i / 2] >> 4;
		else
			unpacked_rad[i] = rad[i / 2] & BIT_MASK(4);
	}

	/* TODO: Eventually add something to printk so we can format the rad
	 * like this: 1.2.3
	 */
	return snprintf(out, len, "%*phC", lct, unpacked_rad);
}
/* sideband msg handling */
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = num_nibbles * 4;
	u8 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x10) == 0x10)
			remainder ^= 0x13;
	}

	number_of_bits = 4;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x10) != 0)
			remainder ^= 0x13;
	}

	return remainder;
}

static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = number_of_bytes * 8;
	u16 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x100) == 0x100)
			remainder ^= 0xd5;
	}

	number_of_bits = 8;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x100) != 0)
			remainder ^= 0xd5;
	}

	return remainder & 0xff;
}
static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
{
	u8 size = 3;

	size += (hdr->lct / 2);
	return size;
}

static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int *len)
{
	int idx = 0;
	int i;
	u8 crc4;

	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		buf[idx++] = hdr->rad[i];
	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
		(hdr->msg_len & 0x3f);
	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);

	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
	buf[idx - 1] |= (crc4 & 0xf);

	*len = idx;
}
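/*
 * For reference, the sideband message header layout produced by the encoder
 * above (derived directly from the code; see the DP 1.2a spec for the
 * authoritative field definitions):
 *
 *	byte 0:          LCT[3:0] << 4 | LCR[3:0]
 *	bytes 1..lct/2:  RAD, one nibble per hop, two hops packed per byte
 *	next byte:       Broadcast << 7 | Path_Msg << 6 | Msg_Body_Len[5:0]
 *	last byte:       SOMT << 7 | EOMT << 6 | Seqno << 4 | CRC4[3:0]
 */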
static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int buflen, u8 *hdrlen)
{
	u8 crc4;
	u8 len;
	int i;
	u8 idx;

	if (buf[0] == 0)
		return false;
	len = 3;
	len += ((buf[0] & 0xf0) >> 4) / 2;
	if (len > buflen)
		return false;
	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);

	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
		DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
		return false;
	}

	hdr->lct = (buf[0] & 0xf0) >> 4;
	hdr->lcr = (buf[0] & 0xf);
	idx = 1;
	for (i = 0; i < (hdr->lct / 2); i++)
		hdr->rad[i] = buf[idx++];
	hdr->broadcast = (buf[idx] >> 7) & 0x1;
	hdr->path_msg = (buf[idx] >> 6) & 0x1;
	hdr->msg_len = buf[idx] & 0x3f;
	idx++;
	hdr->somt = (buf[idx] >> 7) & 0x1;
	hdr->eomt = (buf[idx] >> 6) & 0x1;
	hdr->seqno = (buf[idx] >> 4) & 0x1;
	idx++;
	*hdrlen = idx;
	return true;
}
void
drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
			   struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	int i;
	u8 *buf = raw->msg;

	buf[idx++] = req->req_type & 0x7f;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
		idx++;
		break;
	case DP_ALLOCATE_PAYLOAD:
		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
			(req->u.allocate_payload.number_sdp_streams & 0xf);
		idx++;
		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn >> 8);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
		idx++;
		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
			idx++;
		}
		if (req->u.allocate_payload.number_sdp_streams & 1) {
			i = req->u.allocate_payload.number_sdp_streams - 1;
			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
			idx++;
		}
		break;
	case DP_QUERY_PAYLOAD:
		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
		idx++;
		break;
	case DP_REMOTE_DPCD_READ:
		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_read.num_bytes);
		idx++;
		break;

	case DP_REMOTE_DPCD_WRITE:
		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
		idx += req->u.dpcd_write.num_bytes;
		break;
	case DP_REMOTE_I2C_READ:
		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
		idx++;
		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
			idx++;
			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
			idx++;
			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
			idx += req->u.i2c_read.transactions[i].num_bytes;

			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
			idx++;
		}
		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_read.num_bytes_read);
		idx++;
		break;

	case DP_REMOTE_I2C_WRITE:
		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
		idx += req->u.i2c_write.num_bytes;
		break;
	}
	raw->cur_len = idx;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_encode_sideband_req);
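/*
 * Example (illustrative only): encoding a DP_REMOTE_DPCD_READ request body
 * by hand. Real users go through drm_dp_send_dpcd_read() further below
 * rather than calling the encoder directly.
 *
 *	struct drm_dp_sideband_msg_req_body req = {
 *		.req_type = DP_REMOTE_DPCD_READ,
 *		.u.dpcd_read = {
 *			.port_number = 1,
 *			.dpcd_address = DP_DPCD_REV,
 *			.num_bytes = 1,
 *		},
 *	};
 *	struct drm_dp_sideband_msg_tx txmsg = {};
 *
 *	drm_dp_encode_sideband_req(&req, &txmsg);
 *	(txmsg.msg now holds the encoded body, txmsg.cur_len its length)
 */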
/* Decode a sideband request we've encoded, mainly used for debugging */
int
drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
			   struct drm_dp_sideband_msg_req_body *req)
{
	const u8 *buf = raw->msg;
	int i, idx = 0;

	req->req_type = buf[idx++] & 0x7f;
	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		req->u.port_num.port_number = (buf[idx] >> 4) & 0xf;
		break;
	case DP_ALLOCATE_PAYLOAD:
		{
			struct drm_dp_allocate_payload *a =
				&req->u.allocate_payload;

			a->number_sdp_streams = buf[idx] & 0xf;
			a->port_number = (buf[idx] >> 4) & 0xf;

			WARN_ON(buf[++idx] & 0x80);
			a->vcpi = buf[idx] & 0x7f;

			a->pbn = buf[++idx] << 8;
			a->pbn |= buf[++idx];

			idx++;
			for (i = 0; i < a->number_sdp_streams; i++) {
				a->sdp_stream_sink[i] =
					(buf[idx + (i / 2)] >> ((i % 2) ? 0 : 4)) & 0xf;
			}
		}
		break;
	case DP_QUERY_PAYLOAD:
		req->u.query_payload.port_number = (buf[idx] >> 4) & 0xf;
		WARN_ON(buf[++idx] & 0x80);
		req->u.query_payload.vcpi = buf[idx] & 0x7f;
		break;
	case DP_REMOTE_DPCD_READ:
		{
			struct drm_dp_remote_dpcd_read *r = &req->u.dpcd_read;

			r->port_number = (buf[idx] >> 4) & 0xf;

			r->dpcd_address = (buf[idx] << 16) & 0xf0000;
			r->dpcd_address |= (buf[++idx] << 8) & 0xff00;
			r->dpcd_address |= buf[++idx] & 0xff;

			r->num_bytes = buf[++idx];
		}
		break;
	case DP_REMOTE_DPCD_WRITE:
		{
			struct drm_dp_remote_dpcd_write *w =
				&req->u.dpcd_write;

			w->port_number = (buf[idx] >> 4) & 0xf;

			w->dpcd_address = (buf[idx] << 16) & 0xf0000;
			w->dpcd_address |= (buf[++idx] << 8) & 0xff00;
			w->dpcd_address |= buf[++idx] & 0xff;

			w->num_bytes = buf[++idx];

			w->bytes = kmemdup(&buf[++idx], w->num_bytes,
					   GFP_KERNEL);
			if (!w->bytes)
				return -ENOMEM;
		}
		break;
	case DP_REMOTE_I2C_READ:
		{
			struct drm_dp_remote_i2c_read *r = &req->u.i2c_read;
			struct drm_dp_remote_i2c_read_tx *tx;
			bool failed = false;

			r->num_transactions = buf[idx] & 0x3;
			r->port_number = (buf[idx] >> 4) & 0xf;
			for (i = 0; i < r->num_transactions; i++) {
				tx = &r->transactions[i];

				tx->i2c_dev_id = buf[++idx] & 0x7f;
				tx->num_bytes = buf[++idx];
				tx->bytes = kmemdup(&buf[++idx],
						    tx->num_bytes,
						    GFP_KERNEL);
				if (!tx->bytes) {
					failed = true;
					break;
				}
				idx += tx->num_bytes;
				tx->no_stop_bit = (buf[idx] >> 5) & 0x1;
				tx->i2c_transaction_delay = buf[idx] & 0xf;
			}

			if (failed) {
				for (i = 0; i < r->num_transactions; i++) {
					tx = &r->transactions[i];
					kfree(tx->bytes);
				}
				return -ENOMEM;
			}

			r->read_i2c_device_id = buf[++idx] & 0x7f;
			r->num_bytes_read = buf[++idx];
		}
		break;
	case DP_REMOTE_I2C_WRITE:
		{
			struct drm_dp_remote_i2c_write *w = &req->u.i2c_write;

			w->port_number = (buf[idx] >> 4) & 0xf;
			w->write_i2c_device_id = buf[++idx] & 0x7f;
			w->num_bytes = buf[++idx];
			w->bytes = kmemdup(&buf[++idx], w->num_bytes,
					   GFP_KERNEL);
			if (!w->bytes)
				return -ENOMEM;
		}
		break;
	}

	return 0;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_decode_sideband_req);
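/*
 * Note: on success the decoder above hands ownership of any kmemdup()'d
 * buffers (dpcd_write.bytes, the i2c_read transaction bytes and
 * i2c_write.bytes) to the caller, which must kfree() them when done -
 * drm_dp_mst_dump_sideband_msg_tx() below shows the expected cleanup.
 */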
void
drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req,
				  int indent, struct drm_printer *printer)
{
	int i;

#define P(f, ...) drm_printf_indent(printer, indent, f, ##__VA_ARGS__)
	if (req->req_type == DP_LINK_ADDRESS) {
		/* No contents to print */
		P("type=%s\n", drm_dp_mst_req_type_str(req->req_type));
		return;
	}

	P("type=%s contents:\n", drm_dp_mst_req_type_str(req->req_type));
	indent++;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		P("port=%d\n", req->u.port_num.port_number);
		break;
	case DP_ALLOCATE_PAYLOAD:
		P("port=%d vcpi=%d pbn=%d sdp_streams=%d %*ph\n",
		  req->u.allocate_payload.port_number,
		  req->u.allocate_payload.vcpi, req->u.allocate_payload.pbn,
		  req->u.allocate_payload.number_sdp_streams,
		  req->u.allocate_payload.number_sdp_streams,
		  req->u.allocate_payload.sdp_stream_sink);
		break;
	case DP_QUERY_PAYLOAD:
		P("port=%d vcpi=%d\n",
		  req->u.query_payload.port_number,
		  req->u.query_payload.vcpi);
		break;
	case DP_REMOTE_DPCD_READ:
		P("port=%d dpcd_addr=%05x len=%d\n",
		  req->u.dpcd_read.port_number, req->u.dpcd_read.dpcd_address,
		  req->u.dpcd_read.num_bytes);
		break;
	case DP_REMOTE_DPCD_WRITE:
		P("port=%d addr=%05x len=%d: %*ph\n",
		  req->u.dpcd_write.port_number,
		  req->u.dpcd_write.dpcd_address,
		  req->u.dpcd_write.num_bytes, req->u.dpcd_write.num_bytes,
		  req->u.dpcd_write.bytes);
		break;
	case DP_REMOTE_I2C_READ:
		P("port=%d num_tx=%d id=%d size=%d:\n",
		  req->u.i2c_read.port_number,
		  req->u.i2c_read.num_transactions,
		  req->u.i2c_read.read_i2c_device_id,
		  req->u.i2c_read.num_bytes_read);

		indent++;
		for (i = 0; i < req->u.i2c_read.num_transactions; i++) {
			const struct drm_dp_remote_i2c_read_tx *rtx =
				&req->u.i2c_read.transactions[i];

			P("%d: id=%03d size=%03d no_stop_bit=%d tx_delay=%03d: %*ph\n",
			  i, rtx->i2c_dev_id, rtx->num_bytes,
			  rtx->no_stop_bit, rtx->i2c_transaction_delay,
			  rtx->num_bytes, rtx->bytes);
		}
		break;
	case DP_REMOTE_I2C_WRITE:
		P("port=%d id=%d size=%d: %*ph\n",
		  req->u.i2c_write.port_number,
		  req->u.i2c_write.write_i2c_device_id,
		  req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes,
		  req->u.i2c_write.bytes);
		break;
	default:
		P("???\n");
		break;
	}
#undef P
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_dump_sideband_msg_req_body);
static void
drm_dp_mst_dump_sideband_msg_tx(struct drm_printer *p,
				const struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_sideband_msg_req_body req;
	char buf[64];
	int ret;
	int i;

	drm_dp_mst_rad_to_str(txmsg->dst->rad, txmsg->dst->lct, buf,
			      sizeof(buf));
	drm_printf(p, "txmsg cur_offset=%x cur_len=%x seqno=%x state=%s path_msg=%d dst=%s\n",
		   txmsg->cur_offset, txmsg->cur_len, txmsg->seqno,
		   drm_dp_mst_sideband_tx_state_str(txmsg->state),
		   txmsg->path_msg, buf);

	ret = drm_dp_decode_sideband_req(txmsg, &req);
	if (ret) {
		drm_printf(p, "<failed to decode sideband req: %d>\n", ret);
		return;
	}
	drm_dp_dump_sideband_msg_req_body(&req, 1, p);

	switch (req.req_type) {
	case DP_REMOTE_DPCD_WRITE:
		kfree(req.u.dpcd_write.bytes);
		break;
	case DP_REMOTE_I2C_READ:
		for (i = 0; i < req.u.i2c_read.num_transactions; i++)
			kfree(req.u.i2c_read.transactions[i].bytes);
		break;
	case DP_REMOTE_I2C_WRITE:
		kfree(req.u.i2c_write.bytes);
		break;
	}
}
static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
{
	u8 crc4;

	crc4 = drm_dp_msg_data_crc4(msg, len);
	msg[len] = crc4;
}

static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
					 struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	u8 *buf = raw->msg;

	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);

	raw->cur_len = idx;
}
/* this adds a chunk of msg to the builder to get the final msg */
static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
				      u8 *replybuf, u8 replybuflen, bool hdr)
{
	int ret;
	u8 crc4;

	if (hdr) {
		u8 hdrlen;
		struct drm_dp_sideband_msg_hdr recv_hdr;

		ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
		if (ret == false) {
			print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
			return false;
		}

		/*
		 * ignore out-of-order messages or messages that are part of a
		 * failed transaction
		 */
		if (!recv_hdr.somt && !msg->have_somt)
			return false;

		/* get length contained in this portion */
		msg->curchunk_len = recv_hdr.msg_len;
		msg->curchunk_hdrlen = hdrlen;

		/* we have already gotten an somt - don't bother parsing */
		if (recv_hdr.somt && msg->have_somt)
			return false;

		if (recv_hdr.somt) {
			memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
			msg->have_somt = true;
		}
		if (recv_hdr.eomt)
			msg->have_eomt = true;

		/* copy the bytes for the remainder of this header chunk */
		msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
		memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
	} else {
		memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
		msg->curchunk_idx += replybuflen;
	}

	if (msg->curchunk_idx >= msg->curchunk_len) {
		/* do CRC */
		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
		/* copy chunk into bigger msg */
		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
		msg->curlen += msg->curchunk_len - 1;
	}
	return true;
}
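/*
 * Note on the copies above: each received chunk ends with a one-byte data
 * CRC over the chunk body, which is why only curchunk_len - 1 bytes are
 * appended to the reassembled message.
 */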
static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	int i;

	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
	idx += 16;
	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
		if (raw->msg[idx] & 0x80)
			repmsg->u.link_addr.ports[i].input_port = 1;

		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);

		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
		if (repmsg->u.link_addr.ports[i].input_port == 0)
			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		if (repmsg->u.link_addr.ports[i].input_port == 0) {
			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
			idx++;
			if (idx > raw->curlen)
				goto fail_len;
			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
			idx += 16;
			if (idx > raw->curlen)
				goto fail_len;
			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
			idx++;
		}
		if (idx > raw->curlen)
			goto fail_len;
	}

	return true;
fail_len:
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}
static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
						   struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
						      struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
	idx++;
	/* TODO check */
	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
	repmsg->u.path_resources.fec_capable = raw->msg[idx] & 0x1;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen) {
		DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n",
			      idx, raw->curlen);
		return false;
	}
	return true;
}
static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
					struct drm_dp_sideband_msg_reply_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
	msg->req_type = (raw->msg[0] & 0x7f);

	if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
		msg->u.nak.reason = raw->msg[17];
		msg->u.nak.nak_data = raw->msg[18];
		return false;
	}

	switch (msg->req_type) {
	case DP_LINK_ADDRESS:
		return drm_dp_sideband_parse_link_address(raw, msg);
	case DP_QUERY_PAYLOAD:
		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
	case DP_REMOTE_DPCD_READ:
		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
	case DP_REMOTE_DPCD_WRITE:
		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
	case DP_REMOTE_I2C_READ:
		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
	case DP_ENUM_PATH_RESOURCES:
		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
	case DP_ALLOCATE_PAYLOAD:
		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
	case DP_CLEAR_PAYLOAD_ID_TABLE:
		return true; /* since there's nothing to parse */
	default:
		DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type,
			  drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}
static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
							   struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
							 struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
				      struct drm_dp_sideband_msg_req_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->req_type = (raw->msg[0] & 0x7f);

	switch (msg->req_type) {
	case DP_CONNECTION_STATUS_NOTIFY:
		return drm_dp_sideband_parse_connection_status_notify(raw, msg);
	case DP_RESOURCE_STATUS_NOTIFY:
		return drm_dp_sideband_parse_resource_status_notify(raw, msg);
	default:
		DRM_ERROR("Got unknown request 0x%02x (%s)\n", msg->req_type,
			  drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}
1040 struct drm_dp_sideband_msg_req_body req;
1042 req.req_type = DP_REMOTE_DPCD_WRITE;
1043 req.u.dpcd_write.port_number = port_num;
1044 req.u.dpcd_write.dpcd_address = offset;
1045 req.u.dpcd_write.num_bytes = num_bytes;
1046 req.u.dpcd_write.bytes = bytes;
1047 drm_dp_encode_sideband_req(&req, msg);
1052 static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
1054 struct drm_dp_sideband_msg_req_body req;
1056 req.req_type = DP_LINK_ADDRESS;
1057 drm_dp_encode_sideband_req(&req, msg);
1061 static int build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
1063 struct drm_dp_sideband_msg_req_body req;
1065 req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
1066 drm_dp_encode_sideband_req(&req, msg);
1070 static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
1072 struct drm_dp_sideband_msg_req_body req;
1074 req.req_type = DP_ENUM_PATH_RESOURCES;
1075 req.u.port_num.port_number = port_num;
1076 drm_dp_encode_sideband_req(&req, msg);
1077 msg->path_msg = true;
1081 static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
1082 u8 vcpi, uint16_t pbn,
1083 u8 number_sdp_streams,
1084 u8 *sdp_stream_sink)
1086 struct drm_dp_sideband_msg_req_body req;
1087 memset(&req, 0, sizeof(req));
1088 req.req_type = DP_ALLOCATE_PAYLOAD;
1089 req.u.allocate_payload.port_number = port_num;
1090 req.u.allocate_payload.vcpi = vcpi;
1091 req.u.allocate_payload.pbn = pbn;
1092 req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
1093 memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
1094 number_sdp_streams);
1095 drm_dp_encode_sideband_req(&req, msg);
1096 msg->path_msg = true;
1100 static int build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
1101 int port_num, bool power_up)
1103 struct drm_dp_sideband_msg_req_body req;
1106 req.req_type = DP_POWER_UP_PHY;
1108 req.req_type = DP_POWER_DOWN_PHY;
1110 req.u.port_num.port_number = port_num;
1111 drm_dp_encode_sideband_req(&req, msg);
1112 msg->path_msg = true;
static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_vcpi *vcpi)
{
	int ret, vcpi_ret;

	mutex_lock(&mgr->payload_lock);
	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
	if (ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of payload ids %d\n", ret);
		goto out_unlock;
	}

	vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
	if (vcpi_ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
		goto out_unlock;
	}

	set_bit(ret, &mgr->payload_mask);
	set_bit(vcpi_ret, &mgr->vcpi_mask);
	vcpi->vcpi = vcpi_ret + 1;
	mgr->proposed_vcpis[ret - 1] = vcpi;
out_unlock:
	mutex_unlock(&mgr->payload_lock);
	return ret;
}

static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
				      int vcpi)
{
	int i;

	if (vcpi == 0)
		return;

	mutex_lock(&mgr->payload_lock);
	DRM_DEBUG_KMS("putting payload %d\n", vcpi);
	clear_bit(vcpi - 1, &mgr->vcpi_mask);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i] &&
		    mgr->proposed_vcpis[i]->vcpi == vcpi) {
			mgr->proposed_vcpis[i] = NULL;
			clear_bit(i + 1, &mgr->payload_mask);
		}
	}
	mutex_unlock(&mgr->payload_lock);
}
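/*
 * Illustrative walk-through of the allocator above (assuming manager init
 * reserves bit 0 of payload_mask, as the ret - 1 / i + 1 translation in
 * this pair of functions implies): on an idle manager the first call finds
 * payload bit 1 and vcpi bit 0, so the new VCPI is 0 + 1 = 1 and it is
 * stored in proposed_vcpis[1 - 1]. Payload IDs and VCPIs are therefore
 * 1-based, with 0 left to mean "unallocated".
 */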
static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_sideband_msg_tx *txmsg)
{
	unsigned int state;

	/*
	 * All updates to txmsg->state are protected by mgr->qlock, and the two
	 * cases we check here are terminal states. For those the barriers
	 * provided by the wake_up/wait_event pair are enough.
	 */
	state = READ_ONCE(txmsg->state);
	return (state == DRM_DP_SIDEBAND_TX_RX ||
		state == DRM_DP_SIDEBAND_TX_TIMEOUT);
}

static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
				    struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	int ret;

	ret = wait_event_timeout(mgr->tx_waitq,
				 check_txmsg_state(mgr, txmsg),
				 (4 * HZ));
	mutex_lock(&mstb->mgr->qlock);
	if (ret > 0) {
		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
			ret = -EIO;
			goto out;
		}
	} else {
		DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);

		/* dump some state */
		ret = -EIO;

		/* remove from q */
		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
			list_del(&txmsg->next);
		}

		if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
			mstb->tx_slots[txmsg->seqno] = NULL;
		}
	}
out:
	if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
		struct drm_printer p = drm_debug_printer(DBG_PREFIX);

		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
	}
	mutex_unlock(&mgr->qlock);

	return ret;
}
static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;

	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
	if (!mstb)
		return NULL;

	mstb->lct = lct;
	if (lct > 1)
		memcpy(mstb->rad, rad, lct / 2);
	INIT_LIST_HEAD(&mstb->ports);
	kref_init(&mstb->topology_kref);
	kref_init(&mstb->malloc_kref);
	return mstb;
}

static void drm_dp_free_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb =
		container_of(kref, struct drm_dp_mst_branch, malloc_kref);

	if (mstb->port_parent)
		drm_dp_mst_put_port_malloc(mstb->port_parent);

	kfree(mstb);
}
/**
 * DOC: Branch device and port refcounting
 *
 * Topology refcount overview
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The refcounting schemes for &struct drm_dp_mst_branch and &struct
 * drm_dp_mst_port are somewhat unusual. Both ports and branch devices have
 * two different kinds of refcounts: topology refcounts, and malloc refcounts.
 *
 * Topology refcounts are not exposed to drivers, and are handled internally
 * by the DP MST helpers. The helpers use them in order to prevent the
 * in-memory topology state from being changed in the middle of critical
 * operations like changing the internal state of payload allocations. This
 * means each branch and port will be considered to be connected to the rest
 * of the topology until its topology refcount reaches zero. Additionally,
 * for ports this means that their associated &struct drm_connector will stay
 * registered with userspace until the port's refcount reaches 0.
 *
 * Malloc refcount overview
 * ~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * Malloc references are used to keep a &struct drm_dp_mst_port or &struct
 * drm_dp_mst_branch allocated even after all of its topology references have
 * been dropped, so that the driver or MST helpers can safely access each
 * branch's last known state before it was disconnected from the topology.
 * When the malloc refcount of a port or branch reaches 0, the memory
 * allocation containing the &struct drm_dp_mst_branch or &struct
 * drm_dp_mst_port respectively will be freed.
 *
 * For &struct drm_dp_mst_branch, malloc refcounts are not currently exposed
 * to drivers. As of writing this documentation, there are no drivers that
 * have a usecase for accessing &struct drm_dp_mst_branch outside of the MST
 * helpers. Exposing this API to drivers in a race-free manner would take more
 * tweaking of the refcounting scheme, however patches are welcome provided
 * there is a legitimate driver usecase for this.
 *
 * Refcount relationships in a topology
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * Let's take a look at why the relationship between topology and malloc
 * refcounts is designed the way it is.
 *
 * .. kernel-figure:: dp-mst/topology-figure-1.dot
 *
 *    An example of topology and malloc refs in a DP MST topology with two
 *    active payloads. Topology refcount increments are indicated by solid
 *    lines, and malloc refcount increments are indicated by dashed lines.
 *    Each starts from the branch which incremented the refcount, and ends at
 *    the branch to which the refcount belongs, i.e. the arrow points the
 *    same way as the C pointers used to reference a structure.
 *
 * As you can see in the above figure, every branch increments the topology
 * refcount of its children, and increments the malloc refcount of its
 * parent. Additionally, every payload increments the malloc refcount of its
 * assigned port by 1.
 *
 * So, what would happen if MSTB #3 from the above figure was unplugged from
 * the system, but the driver hadn't yet removed payload #2 from port #3? The
 * topology would start to look like the figure below.
 *
 * .. kernel-figure:: dp-mst/topology-figure-2.dot
 *
 *    Ports and branch devices which have been released from memory are
 *    colored grey, and references which have been removed are colored red.
 *
 * Whenever a port or branch device's topology refcount reaches zero, it will
 * decrement the topology refcounts of all its children, the malloc refcount
 * of its parent, and finally its own malloc refcount. For MSTB #4 and port
 * #4, this means they both have been disconnected from the topology and freed
 * from memory. But, because payload #2 is still holding a reference to port
 * #3, port #3 is removed from the topology but its &struct drm_dp_mst_port
 * is still accessible from memory. This also means port #3 has not yet
 * decremented the malloc refcount of MSTB #3, so its &struct
 * drm_dp_mst_branch will also stay allocated in memory until port #3's
 * malloc refcount reaches 0.
 *
 * This relationship is necessary because in order to release payload #2, we
 * need to be able to figure out the last relative of port #3 that's still
 * connected to the topology. In this case, we would travel up the topology as
 * shown below.
 *
 * .. kernel-figure:: dp-mst/topology-figure-3.dot
 *
 * And finally, remove payload #2 by communicating with port #2 through
 * sideband transactions.
 */
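/*
 * A minimal sketch of what the above means for drivers (illustrative only,
 * not taken from a real driver): hold a malloc reference for as long as a
 * port pointer is cached, and drop it when the cached pointer is released.
 *
 *	struct my_mst_connector {
 *		struct drm_dp_mst_port *port;
 *	};
 *
 *	In &drm_dp_mst_topology_cbs.add_connector:
 *		my_conn->port = port;
 *		drm_dp_mst_get_port_malloc(port);
 *
 *	In the connector's destroy path:
 *		drm_dp_mst_put_port_malloc(my_conn->port);
 */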
/**
 * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
 *
 * Increments &drm_dp_mst_branch.malloc_kref. When
 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
 * will be released and @mstb may no longer be used.
 *
 * See also: drm_dp_mst_put_mstb_malloc()
 */
static void
drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	kref_get(&mstb->malloc_kref);
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
}

/**
 * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
 *
 * Decrements &drm_dp_mst_branch.malloc_kref. When
 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
 * will be released and @mstb may no longer be used.
 *
 * See also: drm_dp_mst_get_mstb_malloc()
 */
static void
drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
	kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
}
static void drm_dp_free_mst_port(struct kref *kref)
{
	struct drm_dp_mst_port *port =
		container_of(kref, struct drm_dp_mst_port, malloc_kref);

	drm_dp_mst_put_mstb_malloc(port->parent);
	kfree(port);
}

/**
 * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
 *
 * Increments &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
 * reaches 0, the memory allocation for @port will be released and @port may
 * no longer be used.
 *
 * Because @port could potentially be freed at any time by the DP MST helpers
 * if &drm_dp_mst_port.malloc_kref reaches 0, including during a call to this
 * function, drivers that wish to make use of &struct drm_dp_mst_port should
 * ensure that they grab at least one main malloc reference to their MST ports
 * in &drm_dp_mst_topology_cbs.add_connector. This callback is called before
 * there is any chance for &drm_dp_mst_port.malloc_kref to reach 0.
 *
 * See also: drm_dp_mst_put_port_malloc()
 */
void
drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
{
	kref_get(&port->malloc_kref);
	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref));
}
EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);
/**
 * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
 *
 * Decrements &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
 * reaches 0, the memory allocation for @port will be released and @port may
 * no longer be used.
 *
 * See also: drm_dp_mst_get_port_malloc()
 */
void
drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
{
	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
	kref_put(&port->malloc_kref, drm_dp_free_mst_port);
}
EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);
#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)

#define STACK_DEPTH 8

static noinline void
__topology_ref_save(struct drm_dp_mst_topology_mgr *mgr,
		    struct drm_dp_mst_topology_ref_history *history,
		    enum drm_dp_mst_topology_ref_type type)
{
	struct drm_dp_mst_topology_ref_entry *entry = NULL;
	depot_stack_handle_t backtrace;
	ulong stack_entries[STACK_DEPTH];
	uint n;
	int i;

	n = stack_trace_save(stack_entries, ARRAY_SIZE(stack_entries), 1);
	backtrace = stack_depot_save(stack_entries, n, GFP_KERNEL);
	if (!backtrace)
		return;

	/* Try to find an existing entry for this backtrace */
	for (i = 0; i < history->len; i++) {
		if (history->entries[i].backtrace == backtrace) {
			entry = &history->entries[i];
			break;
		}
	}

	/* Otherwise add one */
	if (!entry) {
		struct drm_dp_mst_topology_ref_entry *new;
		int new_len = history->len + 1;

		new = krealloc(history->entries, sizeof(*new) * new_len,
			       GFP_KERNEL);
		if (!new)
			return;

		entry = &new[history->len];
		history->len = new_len;
		history->entries = new;

		entry->backtrace = backtrace;
		entry->type = type;
		entry->count = 0;
	}
	entry->count++;
	entry->ts_nsec = ktime_get_ns();
}

static int
topology_ref_history_cmp(const void *a, const void *b)
{
	const struct drm_dp_mst_topology_ref_entry *entry_a = a, *entry_b = b;

	if (entry_a->ts_nsec > entry_b->ts_nsec)
		return 1;
	else if (entry_a->ts_nsec < entry_b->ts_nsec)
		return -1;
	else
		return 0;
}

static inline const char *
topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type)
{
	if (type == DRM_DP_MST_TOPOLOGY_REF_GET)
		return "get";
	else
		return "put";
}

static void
__dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history,
			    void *ptr, const char *type_str)
{
	struct drm_printer p = drm_debug_printer(DBG_PREFIX);
	char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	int i;

	if (!buf)
		return;

	if (!history->len)
		goto out;

	/* First, sort the list so that it goes from oldest to newest
	 * reference entry
	 */
	sort(history->entries, history->len, sizeof(*history->entries),
	     topology_ref_history_cmp, NULL);

	drm_printf(&p, "%s (%p) topology count reached 0, dumping history:\n",
		   type_str, ptr);

	for (i = 0; i < history->len; i++) {
		const struct drm_dp_mst_topology_ref_entry *entry =
			&history->entries[i];
		ulong *entries;
		uint nr_entries;
		u64 ts_nsec = entry->ts_nsec;
		u32 rem_nsec = do_div(ts_nsec, 1000000000);

		nr_entries = stack_depot_fetch(entry->backtrace, &entries);
		stack_trace_snprint(buf, PAGE_SIZE, entries, nr_entries, 4);

		drm_printf(&p, "  %d %ss (last at %5llu.%06u):\n%s",
			   entry->count,
			   topology_ref_type_to_str(entry->type),
			   ts_nsec, rem_nsec / 1000, buf);
	}

	/* Now free the history, since this is the only time we expose it */
	kfree(history->entries);
out:
	kfree(buf);
}

static __always_inline void
drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb)
{
	__dump_topology_ref_history(&mstb->topology_ref_history, mstb,
				    "MSTB");
}

static __always_inline void
drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port)
{
	__dump_topology_ref_history(&port->topology_ref_history, port,
				    "Port");
}

static __always_inline void
save_mstb_topology_ref(struct drm_dp_mst_branch *mstb,
		       enum drm_dp_mst_topology_ref_type type)
{
	__topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type);
}

static __always_inline void
save_port_topology_ref(struct drm_dp_mst_port *port,
		       enum drm_dp_mst_topology_ref_type type)
{
	__topology_ref_save(port->mgr, &port->topology_ref_history, type);
}

static inline void
topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_lock(&mgr->topology_ref_history_lock);
}

static inline void
topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_unlock(&mgr->topology_ref_history_lock);
}
#else
static inline void
topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) {}
static inline void
topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) {}
static inline void
drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) {}
static inline void
drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {}
#define save_mstb_topology_ref(mstb, type)
#define save_port_topology_ref(port, type)
#endif
static void drm_dp_destroy_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb =
		container_of(kref, struct drm_dp_mst_branch, topology_kref);
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;

	drm_dp_mst_dump_mstb_topology_history(mstb);

	INIT_LIST_HEAD(&mstb->destroy_next);

	/*
	 * This can get called under mgr->mutex, so we need to perform the
	 * actual destruction of the mstb in another worker
	 */
	mutex_lock(&mgr->delayed_destroy_lock);
	list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list);
	mutex_unlock(&mgr->delayed_destroy_lock);
	schedule_work(&mgr->delayed_destroy_work);
}
/**
 * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
 * branch device unless it's zero
 * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
 *
 * Attempts to grab a topology reference to @mstb, if it hasn't yet been
 * removed from the topology (e.g. &drm_dp_mst_branch.topology_kref has
 * reached 0). Holding a topology reference implies that a malloc reference
 * will be held to @mstb as long as the user holds the topology reference.
 *
 * Care should be taken to ensure that the user has at least one malloc
 * reference to @mstb. If you already have a topology reference to @mstb, you
 * should use drm_dp_mst_topology_get_mstb() instead.
 *
 * See also:
 * drm_dp_mst_topology_get_mstb()
 * drm_dp_mst_topology_put_mstb()
 *
 * Returns:
 * * 1: A topology reference was grabbed successfully
 * * 0: @mstb is no longer in the topology, no reference was grabbed
 */
static int __must_check
drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
{
	int ret;

	topology_ref_history_lock(mstb->mgr);
	ret = kref_get_unless_zero(&mstb->topology_kref);
	if (ret) {
		DRM_DEBUG("mstb %p (%d)\n",
			  mstb, kref_read(&mstb->topology_kref));
		save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
	}

	topology_ref_history_unlock(mstb->mgr);

	return ret;
}
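/*
 * Typical (illustrative) pattern for the topology-reference helpers in this
 * block: validate that a branch is still in the topology before touching it,
 * and balance the reference afterwards.
 *
 *	if (drm_dp_mst_topology_try_get_mstb(mstb)) {
 *		... safely operate on mstb ...
 *		drm_dp_mst_topology_put_mstb(mstb);
 *	}
 */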
/**
 * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
 * branch device
 * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
 *
 * Increments &drm_dp_mst_branch.topology_refcount without checking whether or
 * not it's already reached 0. This is only valid to use in scenarios where
 * you are already guaranteed to have at least one active topology reference
 * to @mstb. Otherwise, drm_dp_mst_topology_try_get_mstb() must be used.
 *
 * See also:
 * drm_dp_mst_topology_try_get_mstb()
 * drm_dp_mst_topology_put_mstb()
 */
static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
{
	topology_ref_history_lock(mstb->mgr);

	save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
	WARN_ON(kref_read(&mstb->topology_kref) == 0);
	kref_get(&mstb->topology_kref);
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));

	topology_ref_history_unlock(mstb->mgr);
}

/**
 * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to release the topology reference from
 *
 * Releases a topology reference from @mstb by decrementing
 * &drm_dp_mst_branch.topology_kref.
 *
 * See also:
 * drm_dp_mst_topology_try_get_mstb()
 * drm_dp_mst_topology_get_mstb()
 */
static void
drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
{
	topology_ref_history_lock(mstb->mgr);

	DRM_DEBUG("mstb %p (%d)\n",
		  mstb, kref_read(&mstb->topology_kref) - 1);
	save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT);

	topology_ref_history_unlock(mstb->mgr);
	kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
}
static void drm_dp_destroy_port(struct kref *kref)
{
	struct drm_dp_mst_port *port =
		container_of(kref, struct drm_dp_mst_port, topology_kref);
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;

	drm_dp_mst_dump_port_topology_history(port);

	/* There's nothing that needs locking to destroy an input port yet */
	if (port->input) {
		drm_dp_mst_put_port_malloc(port);
		return;
	}

	kfree(port->cached_edid);

	/*
	 * we can't destroy the connector here, as we might be holding the
	 * mode_config.mutex from an EDID retrieval
	 */
	mutex_lock(&mgr->delayed_destroy_lock);
	list_add(&port->next, &mgr->destroy_port_list);
	mutex_unlock(&mgr->delayed_destroy_lock);
	schedule_work(&mgr->delayed_destroy_work);
}
/**
 * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
 * port unless it's zero
 * @port: &struct drm_dp_mst_port to increment the topology refcount of
 *
 * Attempts to grab a topology reference to @port, if it hasn't yet been
 * removed from the topology (e.g. &drm_dp_mst_port.topology_kref has reached
 * 0). Holding a topology reference implies that a malloc reference will be
 * held to @port as long as the user holds the topology reference.
 *
 * Care should be taken to ensure that the user has at least one malloc
 * reference to @port. If you already have a topology reference to @port, you
 * should use drm_dp_mst_topology_get_port() instead.
 *
 * See also:
 * drm_dp_mst_topology_get_port()
 * drm_dp_mst_topology_put_port()
 *
 * Returns:
 * * 1: A topology reference was grabbed successfully
 * * 0: @port is no longer in the topology, no reference was grabbed
 */
static int __must_check
drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
{
	int ret;

	topology_ref_history_lock(port->mgr);
	ret = kref_get_unless_zero(&port->topology_kref);
	if (ret) {
		DRM_DEBUG("port %p (%d)\n",
			  port, kref_read(&port->topology_kref));
		save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
	}

	topology_ref_history_unlock(port->mgr);
	return ret;
}

/**
 * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
 * @port: The &struct drm_dp_mst_port to increment the topology refcount of
 *
 * Increments &drm_dp_mst_port.topology_refcount without checking whether or
 * not it's already reached 0. This is only valid to use in scenarios where
 * you are already guaranteed to have at least one active topology reference
 * to @port. Otherwise, drm_dp_mst_topology_try_get_port() must be used.
 *
 * See also:
 * drm_dp_mst_topology_try_get_port()
 * drm_dp_mst_topology_put_port()
 */
static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
{
	topology_ref_history_lock(port->mgr);

	WARN_ON(kref_read(&port->topology_kref) == 0);
	kref_get(&port->topology_kref);
	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref));
	save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);

	topology_ref_history_unlock(port->mgr);
}

/**
 * drm_dp_mst_topology_put_port() - release a topology reference to a port
 * @port: The &struct drm_dp_mst_port to release the topology reference from
 *
 * Releases a topology reference from @port by decrementing
 * &drm_dp_mst_port.topology_kref.
 *
 * See also:
 * drm_dp_mst_topology_try_get_port()
 * drm_dp_mst_topology_get_port()
 */
static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
{
	topology_ref_history_lock(port->mgr);

	DRM_DEBUG("port %p (%d)\n",
		  port, kref_read(&port->topology_kref) - 1);
	save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT);

	topology_ref_history_unlock(port->mgr);
	kref_put(&port->topology_kref, drm_dp_destroy_port);
}
static struct drm_dp_mst_branch *
drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb,
					      struct drm_dp_mst_branch *to_find)
{
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *rmstb;

	if (to_find == mstb)
		return mstb;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->mstb) {
			rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
			    port->mstb, to_find);
			if (rmstb)
				return rmstb;
		}
	}
	return NULL;
}

static struct drm_dp_mst_branch *
drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_branch *rmstb = NULL;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
		    mgr->mst_primary, mstb);

		if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb))
			rmstb = NULL;
	}
	mutex_unlock(&mgr->lock);
	return rmstb;
}

static struct drm_dp_mst_port *
drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb,
					      struct drm_dp_mst_port *to_find)
{
	struct drm_dp_mst_port *port, *mport;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port == to_find)
			return port;

		if (port->mstb) {
			mport = drm_dp_mst_topology_get_port_validated_locked(
			    port->mstb, to_find);
			if (mport)
				return mport;
		}
	}
	return NULL;
}

static struct drm_dp_mst_port *
drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_port *rport = NULL;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		rport = drm_dp_mst_topology_get_port_validated_locked(
		    mgr->mst_primary, port);

		if (rport && !drm_dp_mst_topology_try_get_port(rport))
			rport = NULL;
	}
	mutex_unlock(&mgr->lock);
	return rport;
}
static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
{
	struct drm_dp_mst_port *port;
	int ret;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->port_num == port_num) {
			ret = drm_dp_mst_topology_try_get_port(port);
			return ret ? port : NULL;
		}
	}

	return NULL;
}
/*
 * calculate a new RAD for this MST branch device
 * if parent has an LCT of 2 then it has 1 nibble of RAD,
 * if parent has an LCT of 3 then it has 2 nibbles of RAD,
 */
static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
			       u8 *rad)
{
	int parent_lct = port->parent->lct;
	int shift = 4;
	int idx = (parent_lct - 1) / 2;

	if (parent_lct > 1) {
		memcpy(rad, port->parent->rad, idx + 1);
		shift = (parent_lct % 2) ? 4 : 0;
	} else
		rad[0] = 0;

	rad[idx] |= port->port_num << shift;
	return parent_lct + 1;
}
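/*
 * Worked example (derived from the code above): for a port on the root
 * branch (LCT 1, port 2), rad[0] becomes 0x20 and the child's LCT is 2.
 * A port 5 one level further down (parent LCT 2) lands in the low nibble
 * of the same byte, giving rad[0] = 0x25 and LCT 3.
 */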
static int drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt)
{
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	struct drm_dp_mst_branch *mstb;
	u8 rad[8], lct;
	int ret = 0;

	if (port->pdt == new_pdt)
		return 0;

	/* Teardown the old pdt, if there is one */
	switch (port->pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		/*
		 * If the new PDT would also have an i2c bus, don't bother
		 * with reregistering it
		 */
		if (new_pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
		    new_pdt == DP_PEER_DEVICE_SST_SINK) {
			port->pdt = new_pdt;
			return 0;
		}

		/* remove i2c over sideband */
		drm_dp_mst_unregister_i2c_bus(&port->aux);
		break;
	case DP_PEER_DEVICE_MST_BRANCHING:
		mutex_lock(&mgr->lock);
		drm_dp_mst_topology_put_mstb(port->mstb);
		port->mstb = NULL;
		mutex_unlock(&mgr->lock);
		break;
	}

	port->pdt = new_pdt;
	switch (port->pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		/* add i2c over sideband */
		ret = drm_dp_mst_register_i2c_bus(&port->aux);
		break;

	case DP_PEER_DEVICE_MST_BRANCHING:
		lct = drm_dp_calculate_rad(port, rad);
		mstb = drm_dp_add_mst_branch_device(lct, rad);
		if (!mstb) {
			ret = -ENOMEM;
			DRM_ERROR("Failed to create MSTB for port %p", port);
			goto out;
		}

		mutex_lock(&mgr->lock);
		port->mstb = mstb;
		mstb->mgr = port->mgr;
		mstb->port_parent = port;

		/*
		 * Make sure this port's memory allocation stays
		 * around until its child MSTB releases it
		 */
		drm_dp_mst_get_port_malloc(port);
		mutex_unlock(&mgr->lock);

		/* And make sure we send a link address for this */
		ret = 1;
		break;
	}

out:
	if (ret < 0)
		port->pdt = DP_PEER_DEVICE_NONE;
	return ret;
}
/**
 * drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband
 * @aux: Fake sideband AUX CH
 * @offset: address of the (first) register to read
 * @buffer: buffer to store the register values
 * @size: number of bytes in @buffer
 *
 * Performs the same functionality for remote devices via
 * sideband messaging as drm_dp_dpcd_read() does for local
 * devices via actual AUX CH.
 *
 * Return: Number of bytes read, or negative error code on failure.
 */
ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
			     unsigned int offset, void *buffer, size_t size)
{
	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
						    aux);

	return drm_dp_send_dpcd_read(port->mgr, port,
				     offset, size, buffer);
}

/**
 * drm_dp_mst_dpcd_write() - write a series of bytes to the DPCD via sideband
 * @aux: Fake sideband AUX CH
 * @offset: address of the (first) register to write
 * @buffer: buffer containing the values to write
 * @size: number of bytes in @buffer
 *
 * Performs the same functionality for remote devices via
 * sideband messaging as drm_dp_dpcd_write() does for local
 * devices via actual AUX CH.
 *
 * Return: 0 on success, negative error code on failure.
 */
ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
			      unsigned int offset, void *buffer, size_t size)
{
	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
						    aux);

	return drm_dp_send_dpcd_write(port->mgr, port,
				      offset, size, buffer);
}
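/*
 * Illustrative use: because port->aux.is_remote is set for MST ports, the
 * generic DPCD helpers route through the two functions above transparently,
 * e.g. reading the remote sink's DPCD revision:
 *
 *	u8 rev;
 *
 *	if (drm_dp_dpcd_readb(&port->aux, DP_DPCD_REV, &rev) == 1)
 *		... rev now holds the remote DPCD revision ...
 */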
static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
{
	int ret;

	memcpy(mstb->guid, guid, 16);

	if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
		if (mstb->port_parent) {
			ret = drm_dp_send_dpcd_write(mstb->mgr,
						     mstb->port_parent,
						     DP_GUID, 16, mstb->guid);
		} else {
			ret = drm_dp_dpcd_write(mstb->mgr->aux,
						DP_GUID, mstb->guid, 16);
		}
	}
}
static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
				int pnum,
				char *proppath,
				size_t proppath_size)
{
	int i;
	char temp[8];

	snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
	for (i = 0; i < (mstb->lct - 1); i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = (mstb->rad[i / 2] >> shift) & 0xf;

		snprintf(temp, sizeof(temp), "-%d", port_num);
		strlcat(proppath, temp, proppath_size);
	}
	snprintf(temp, sizeof(temp), "-%d", pnum);
	strlcat(proppath, temp, proppath_size);
}
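/*
 * For example (following the code above), with conn_base_id 4, one hop
 * through branch port 1, and pnum 8, the resulting connector path property
 * is "mst:4-1-8".
 */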
2100 * drm_dp_mst_connector_late_register() - Late MST connector registration
2101 * @connector: The MST connector
2102 * @port: The MST port for this connector
2104 * Helper to register the remote aux device for this MST port. Drivers should
2105 * call this from their mst connector's late_register hook to enable MST aux
2108 * Return: 0 on success, negative error code on failure.
2110 int drm_dp_mst_connector_late_register(struct drm_connector *connector,
2111 struct drm_dp_mst_port *port)
2113 DRM_DEBUG_KMS("registering %s remote bus for %s\n",
2114 port->aux.name, connector->kdev->kobj.name);
2116 port->aux.dev = connector->kdev;
2117 return drm_dp_aux_register_devnode(&port->aux);
2119 EXPORT_SYMBOL(drm_dp_mst_connector_late_register);
2122 * drm_dp_mst_connector_early_unregister() - Early MST connector unregistration
2123 * @connector: The MST connector
2124 * @port: The MST port for this connector
2126 * Helper to unregister the remote aux device for this MST port, registered by
2127 * drm_dp_mst_connector_late_register(). Drivers should call this from their mst
2128 * connector's early_unregister hook.
2130 void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
2131 struct drm_dp_mst_port *port)
2133 DRM_DEBUG_KMS("unregistering %s remote bus for %s\n",
2134 port->aux.name, connector->kdev->kobj.name);
2135 drm_dp_aux_unregister_devnode(&port->aux);
2137 EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister);
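/*
 * Example (editorial sketch): wiring the two helpers above into a driver's
 * MST connector funcs; "my_mst_connector" and to_my_mst_connector() are
 * hypothetical:
 *
 *	static int my_mst_late_register(struct drm_connector *connector)
 *	{
 *		struct my_mst_connector *c = to_my_mst_connector(connector);
 *
 *		return drm_dp_mst_connector_late_register(connector, c->port);
 *	}
 *
 *	static void my_mst_early_unregister(struct drm_connector *connector)
 *	{
 *		struct my_mst_connector *c = to_my_mst_connector(connector);
 *
 *		drm_dp_mst_connector_early_unregister(connector, c->port);
 *	}
 *
 *	static const struct drm_connector_funcs my_mst_connector_funcs = {
 *		...
 *		.late_register = my_mst_late_register,
 *		.early_unregister = my_mst_early_unregister,
 *	};
 */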
2140 drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
2141 struct drm_dp_mst_port *port)
2143 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
2147 build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
2148 port->connector = mgr->cbs->add_connector(mgr, port, proppath);
2149 if (!port->connector) {
2154 if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
2155 port->pdt == DP_PEER_DEVICE_SST_SINK) &&
2156 port->port_num >= DP_MST_LOGICAL_PORT_0) {
2157 port->cached_edid = drm_get_edid(port->connector,
2159 drm_connector_set_tile_property(port->connector);
2162 mgr->cbs->register_connector(port->connector);
2166 DRM_ERROR("Failed to create connector for port %p: %d\n", port, ret);
2170 * Drop a topology reference, and unlink the port from the in-memory topology layout.
2174 drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr,
2175 struct drm_dp_mst_port *port)
2177 mutex_lock(&mgr->lock);
2178 port->parent->num_ports--;
2179 list_del(&port->next);
2180 mutex_unlock(&mgr->lock);
2181 drm_dp_mst_topology_put_port(port);
2184 static struct drm_dp_mst_port *
2185 drm_dp_mst_add_port(struct drm_device *dev,
2186 struct drm_dp_mst_topology_mgr *mgr,
2187 struct drm_dp_mst_branch *mstb, u8 port_number)
2189 struct drm_dp_mst_port *port = kzalloc(sizeof(*port), GFP_KERNEL);
2194 kref_init(&port->topology_kref);
2195 kref_init(&port->malloc_kref);
2196 port->parent = mstb;
2197 port->port_num = port_number;
2199 port->aux.name = "DPMST";
2200 port->aux.dev = dev->dev;
2201 port->aux.is_remote = true;
2204 * Make sure the memory allocation for our parent branch stays
2205 * around until our own memory allocation is released
2207 drm_dp_mst_get_mstb_malloc(mstb);
2213 drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
2214 struct drm_device *dev,
2215 struct drm_dp_link_addr_reply_port *port_msg)
2217 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
2218 struct drm_dp_mst_port *port;
2219 int old_ddps = 0, ret;
2220 u8 new_pdt = DP_PEER_DEVICE_NONE;
2221 bool created = false, send_link_addr = false, changed = false;
2223 port = drm_dp_get_port(mstb, port_msg->port_number);
2225 port = drm_dp_mst_add_port(dev, mgr, mstb,
2226 port_msg->port_number);
2231 } else if (!port->input && port_msg->input_port && port->connector) {
2232 /* Since port->connector can't be changed here, we create a
2233 * new port if input_port changes from 0 to 1
2235 drm_dp_mst_topology_unlink_port(mgr, port);
2236 drm_dp_mst_topology_put_port(port);
2237 port = drm_dp_mst_add_port(dev, mgr, mstb,
2238 port_msg->port_number);
2243 } else if (port->input && !port_msg->input_port) {
2245 } else if (port->connector) {
2246 		/* We're updating a port that's exposed to userspace, so do it under lock */
2249 drm_modeset_lock(&mgr->base.lock, NULL);
2251 old_ddps = port->ddps;
2252 changed = port->ddps != port_msg->ddps ||
2254 (port->ldps != port_msg->legacy_device_plug_status ||
2255 port->dpcd_rev != port_msg->dpcd_revision ||
2256 port->mcs != port_msg->mcs ||
2257 port->pdt != port_msg->peer_device_type ||
2258 port->num_sdp_stream_sinks !=
2259 port_msg->num_sdp_stream_sinks));
2262 port->input = port_msg->input_port;
2264 new_pdt = port_msg->peer_device_type;
2265 port->mcs = port_msg->mcs;
2266 port->ddps = port_msg->ddps;
2267 port->ldps = port_msg->legacy_device_plug_status;
2268 port->dpcd_rev = port_msg->dpcd_revision;
2269 port->num_sdp_streams = port_msg->num_sdp_streams;
2270 port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
2272 		/* manage mstb port lists with mgr lock - take a reference for this port */
2275 mutex_lock(&mgr->lock);
2276 drm_dp_mst_topology_get_port(port);
2277 list_add(&port->next, &mstb->ports);
2279 mutex_unlock(&mgr->lock);
2282 if (old_ddps != port->ddps) {
2285 drm_dp_send_enum_path_resources(mgr, mstb,
2289 port->available_pbn = 0;
2293 ret = drm_dp_port_set_pdt(port, new_pdt);
2295 send_link_addr = true;
2296 } else if (ret < 0) {
2297 DRM_ERROR("Failed to change PDT on port %p: %d\n",
2303 * If this port wasn't just created, then we're reprobing because
2304 * we're coming out of suspend. In this case, always resend the link
2305 * address if there's an MSTB on this port
2307 if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING)
2308 send_link_addr = true;
2310 if (port->connector)
2311 drm_modeset_unlock(&mgr->base.lock);
2312 else if (!port->input)
2313 drm_dp_mst_port_add_connector(mstb, port);
2315 if (send_link_addr && port->mstb) {
2316 ret = drm_dp_send_link_address(mgr, port->mstb);
2317 if (ret == 1) /* MSTB below us changed */
2323 /* put reference to this port */
2324 drm_dp_mst_topology_put_port(port);
2328 drm_dp_mst_topology_unlink_port(mgr, port);
2329 if (port->connector)
2330 drm_modeset_unlock(&mgr->base.lock);
2332 drm_dp_mst_topology_put_port(port);
2337 drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
2338 struct drm_dp_connection_status_notify *conn_stat)
2340 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
2341 struct drm_dp_mst_port *port;
2344 bool dowork = false, create_connector = false;
2346 port = drm_dp_get_port(mstb, conn_stat->port_number);
2350 if (port->connector) {
2351 if (!port->input && conn_stat->input_port) {
2353 * We can't remove a connector from an already exposed
2354 * port, so just throw the port out and make sure we
2355 			 * reprobe the link address of its parent MSTB
2357 drm_dp_mst_topology_unlink_port(mgr, port);
2358 mstb->link_address_sent = false;
2363 /* Locking is only needed if the port's exposed to userspace */
2364 drm_modeset_lock(&mgr->base.lock, NULL);
2365 } else if (port->input && !conn_stat->input_port) {
2366 create_connector = true;
2367 /* Reprobe link address so we get num_sdp_streams */
2368 mstb->link_address_sent = false;
2372 old_ddps = port->ddps;
2373 port->input = conn_stat->input_port;
2374 port->mcs = conn_stat->message_capability_status;
2375 port->ldps = conn_stat->legacy_device_plug_status;
2376 port->ddps = conn_stat->displayport_device_plug_status;
2378 if (old_ddps != port->ddps) {
2382 port->available_pbn = 0;
2386 new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
2388 ret = drm_dp_port_set_pdt(port, new_pdt);
2391 } else if (ret < 0) {
2392 DRM_ERROR("Failed to change PDT for port %p: %d\n",
2397 if (port->connector)
2398 drm_modeset_unlock(&mgr->base.lock);
2399 else if (create_connector)
2400 drm_dp_mst_port_add_connector(mstb, port);
2403 drm_dp_mst_topology_put_port(port);
2405 queue_work(system_long_wq, &mstb->mgr->work);
2408 static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
2411 struct drm_dp_mst_branch *mstb;
2412 struct drm_dp_mst_port *port;
2414 /* find the port by iterating down */
2416 mutex_lock(&mgr->lock);
2417 mstb = mgr->mst_primary;
2422 for (i = 0; i < lct - 1; i++) {
2423 int shift = (i % 2) ? 0 : 4;
2424 int port_num = (rad[i / 2] >> shift) & 0xf;
2426 list_for_each_entry(port, &mstb->ports, next) {
2427 if (port->port_num == port_num) {
2430 DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
2438 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2442 mutex_unlock(&mgr->lock);
2446 static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
2447 struct drm_dp_mst_branch *mstb,
2448 const uint8_t *guid)
2450 struct drm_dp_mst_branch *found_mstb;
2451 struct drm_dp_mst_port *port;
2453 if (memcmp(mstb->guid, guid, 16) == 0)
2457 list_for_each_entry(port, &mstb->ports, next) {
2461 found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
2470 static struct drm_dp_mst_branch *
2471 drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
2472 const uint8_t *guid)
2474 struct drm_dp_mst_branch *mstb;
2477 /* find the port by iterating down */
2478 mutex_lock(&mgr->lock);
2480 mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
2482 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2487 mutex_unlock(&mgr->lock);
2491 static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2492 struct drm_dp_mst_branch *mstb)
2494 struct drm_dp_mst_port *port;
2496 bool changed = false;
2498 if (!mstb->link_address_sent) {
2499 ret = drm_dp_send_link_address(mgr, mstb);
2506 list_for_each_entry(port, &mstb->ports, next) {
2507 struct drm_dp_mst_branch *mstb_child = NULL;
2509 if (port->input || !port->ddps)
2512 if (!port->available_pbn) {
2513 drm_modeset_lock(&mgr->base.lock, NULL);
2514 drm_dp_send_enum_path_resources(mgr, mstb, port);
2515 drm_modeset_unlock(&mgr->base.lock);
2520 mstb_child = drm_dp_mst_topology_get_mstb_validated(
2524 ret = drm_dp_check_and_send_link_address(mgr,
2526 drm_dp_mst_topology_put_mstb(mstb_child);
2537 static void drm_dp_mst_link_probe_work(struct work_struct *work)
2539 struct drm_dp_mst_topology_mgr *mgr =
2540 container_of(work, struct drm_dp_mst_topology_mgr, work);
2541 struct drm_device *dev = mgr->dev;
2542 struct drm_dp_mst_branch *mstb;
2544 bool clear_payload_id_table;
2546 mutex_lock(&mgr->probe_lock);
2548 mutex_lock(&mgr->lock);
2549 clear_payload_id_table = !mgr->payload_id_table_cleared;
2550 mgr->payload_id_table_cleared = true;
2552 mstb = mgr->mst_primary;
2554 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2558 mutex_unlock(&mgr->lock);
2560 mutex_unlock(&mgr->probe_lock);
2565 * Certain branch devices seem to incorrectly report an available_pbn
2566 * of 0 on downstream sinks, even after clearing the
2567 * DP_PAYLOAD_ALLOCATE_* registers in
2568 * drm_dp_mst_topology_mgr_set_mst(). Namely, the CableMatters USB-C
2569 * 2x DP hub. Sending a CLEAR_PAYLOAD_ID_TABLE message seems to make
2570 * things work again.
2572 if (clear_payload_id_table) {
2573 DRM_DEBUG_KMS("Clearing payload ID table\n");
2574 drm_dp_send_clear_payload_id_table(mgr, mstb);
2577 ret = drm_dp_check_and_send_link_address(mgr, mstb);
2578 drm_dp_mst_topology_put_mstb(mstb);
2580 mutex_unlock(&mgr->probe_lock);
2582 drm_kms_helper_hotplug_event(dev);
2585 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
2590 if (memchr_inv(guid, 0, 16))
2593 salt = get_jiffies_64();
2595 memcpy(&guid[0], &salt, sizeof(u64));
2596 memcpy(&guid[8], &salt, sizeof(u64));
2601 static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
2603 struct drm_dp_sideband_msg_req_body req;
2605 req.req_type = DP_REMOTE_DPCD_READ;
2606 req.u.dpcd_read.port_number = port_num;
2607 req.u.dpcd_read.dpcd_address = offset;
2608 req.u.dpcd_read.num_bytes = num_bytes;
2609 drm_dp_encode_sideband_req(&req, msg);
2614 static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
2615 bool up, u8 *msg, int len)
2618 int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
2619 int tosend, total, offset;
2626 tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
2628 ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
2631 if (ret != tosend) {
2632 if (ret == -EIO && retries < 5) {
2636 DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
2642 } while (total > 0);
2646 static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
2647 struct drm_dp_sideband_msg_tx *txmsg)
2649 struct drm_dp_mst_branch *mstb = txmsg->dst;
2652 /* both msg slots are full */
2653 if (txmsg->seqno == -1) {
2654 if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
2655 DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
2658 if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
2659 txmsg->seqno = mstb->last_seqno;
2660 mstb->last_seqno ^= 1;
2661 } else if (mstb->tx_slots[0] == NULL)
2665 mstb->tx_slots[txmsg->seqno] = txmsg;
2668 req_type = txmsg->msg[0] & 0x7f;
2669 if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
2670 req_type == DP_RESOURCE_STATUS_NOTIFY)
2674 hdr->path_msg = txmsg->path_msg;
2675 hdr->lct = mstb->lct;
2676 hdr->lcr = mstb->lct - 1;
2678 memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
2679 hdr->seqno = txmsg->seqno;
2683 * process a single block of the next message in the sideband queue
2685 static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
2686 struct drm_dp_sideband_msg_tx *txmsg,
2690 struct drm_dp_sideband_msg_hdr hdr;
2691 int len, space, idx, tosend;
2694 memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
2696 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
2698 txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
2701 	/* make hdr from dst mst - for replies use the existing seqno,
2702 	   otherwise assign one */
2703 ret = set_hdr_from_dst_qlock(&hdr, txmsg);
2707 /* amount left to send in this message */
2708 len = txmsg->cur_len - txmsg->cur_offset;
2710 /* 48 - sideband msg size - 1 byte for data CRC, x header bytes */
2711 space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);
2713 tosend = min(len, space);
2714 if (len == txmsg->cur_len)
2720 hdr.msg_len = tosend + 1;
2721 drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
2722 memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
2723 /* add crc at end */
2724 drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
2727 ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
2728 if (unlikely(ret) && drm_debug_enabled(DRM_UT_DP)) {
2729 struct drm_printer p = drm_debug_printer(DBG_PREFIX);
2731 drm_printf(&p, "sideband msg failed to send\n");
2732 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2736 txmsg->cur_offset += tosend;
2737 if (txmsg->cur_offset == txmsg->cur_len) {
2738 txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
2744 static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
2746 struct drm_dp_sideband_msg_tx *txmsg;
2749 WARN_ON(!mutex_is_locked(&mgr->qlock));
2751 /* construct a chunk from the first msg in the tx_msg queue */
2752 if (list_empty(&mgr->tx_msg_downq))
2755 txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
2756 ret = process_single_tx_qlock(mgr, txmsg, false);
2758 		/* txmsg is sent; it should be in the slots now */
2759 list_del(&txmsg->next);
2761 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
2762 list_del(&txmsg->next);
2763 if (txmsg->seqno != -1)
2764 txmsg->dst->tx_slots[txmsg->seqno] = NULL;
2765 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
2766 wake_up_all(&mgr->tx_waitq);
2770 /* called holding qlock */
2771 static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
2772 struct drm_dp_sideband_msg_tx *txmsg)
2776 /* construct a chunk from the first msg in the tx_msg queue */
2777 ret = process_single_tx_qlock(mgr, txmsg, true);
2780 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
2782 if (txmsg->seqno != -1) {
2783 		WARN_ON((unsigned int)txmsg->seqno >=
2784 ARRAY_SIZE(txmsg->dst->tx_slots));
2785 txmsg->dst->tx_slots[txmsg->seqno] = NULL;
2789 static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
2790 struct drm_dp_sideband_msg_tx *txmsg)
2792 mutex_lock(&mgr->qlock);
2793 list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
2795 if (drm_debug_enabled(DRM_UT_DP)) {
2796 struct drm_printer p = drm_debug_printer(DBG_PREFIX);
2798 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2801 if (list_is_singular(&mgr->tx_msg_downq))
2802 process_single_down_tx_qlock(mgr);
2803 mutex_unlock(&mgr->qlock);
2807 drm_dp_dump_link_address(struct drm_dp_link_address_ack_reply *reply)
2809 struct drm_dp_link_addr_reply_port *port_reply;
2812 for (i = 0; i < reply->nports; i++) {
2813 port_reply = &reply->ports[i];
2814 DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n",
2816 port_reply->input_port,
2817 port_reply->peer_device_type,
2818 port_reply->port_number,
2819 port_reply->dpcd_revision,
2822 port_reply->legacy_device_plug_status,
2823 port_reply->num_sdp_streams,
2824 port_reply->num_sdp_stream_sinks);
2828 static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2829 struct drm_dp_mst_branch *mstb)
2831 struct drm_dp_sideband_msg_tx *txmsg;
2832 struct drm_dp_link_address_ack_reply *reply;
2833 struct drm_dp_mst_port *port, *tmp;
2834 int i, len, ret, port_mask = 0;
2835 bool changed = false;
2837 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2842 len = build_link_address(txmsg);
2844 mstb->link_address_sent = true;
2845 drm_dp_queue_down_tx(mgr, txmsg);
2847 /* FIXME: Actually do some real error handling here */
2848 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2850 DRM_ERROR("Sending link address failed with %d\n", ret);
2853 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
2854 DRM_ERROR("link address NAK received\n");
2859 reply = &txmsg->reply.u.link_addr;
2860 DRM_DEBUG_KMS("link address reply: %d\n", reply->nports);
2861 drm_dp_dump_link_address(reply);
2863 drm_dp_check_mstb_guid(mstb, reply->guid);
2865 for (i = 0; i < reply->nports; i++) {
2866 port_mask |= BIT(reply->ports[i].port_number);
2867 ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev,
2875 /* Prune any ports that are currently a part of mstb in our in-memory
2876 * topology, but were not seen in this link address. Usually this
2877 * means that they were removed while the topology was out of sync,
2878 * e.g. during suspend/resume
2880 mutex_lock(&mgr->lock);
2881 list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
2882 if (port_mask & BIT(port->port_num))
2885 DRM_DEBUG_KMS("port %d was not in link address, removing\n",
2887 list_del(&port->next);
2888 drm_dp_mst_topology_put_port(port);
2891 mutex_unlock(&mgr->lock);
2895 mstb->link_address_sent = false;
2897 return ret < 0 ? ret : changed;
2900 void drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
2901 struct drm_dp_mst_branch *mstb)
2903 struct drm_dp_sideband_msg_tx *txmsg;
2906 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2911 len = build_clear_payload_id_table(txmsg);
2913 drm_dp_queue_down_tx(mgr, txmsg);
2915 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2916 if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
2917 DRM_DEBUG_KMS("clear payload table id nak received\n");
2923 drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
2924 struct drm_dp_mst_branch *mstb,
2925 struct drm_dp_mst_port *port)
2927 struct drm_dp_enum_path_resources_ack_reply *path_res;
2928 struct drm_dp_sideband_msg_tx *txmsg;
2932 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2937 len = build_enum_path_resources(txmsg, port->port_num);
2939 drm_dp_queue_down_tx(mgr, txmsg);
2941 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2943 path_res = &txmsg->reply.u.path_resources;
2945 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
2946 DRM_DEBUG_KMS("enum path resources nak received\n");
2948 if (port->port_num != path_res->port_number)
2949 DRM_ERROR("got incorrect port in response\n");
2951 DRM_DEBUG_KMS("enum path resources %d: %d %d\n",
2952 path_res->port_number,
2953 path_res->full_payload_bw_number,
2954 path_res->avail_payload_bw_number);
2955 port->available_pbn =
2956 path_res->avail_payload_bw_number;
2957 port->fec_capable = path_res->fec_capable;
2965 static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
2967 if (!mstb->port_parent)
2970 if (mstb->port_parent->mstb != mstb)
2971 return mstb->port_parent;
2973 return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
2977 * Searches upwards in the topology starting from mstb to try to find the
2978 * closest available parent of mstb that's still connected to the rest of the
2979 * topology. This can be used in order to perform operations like releasing
2980 * payloads, where the branch device which owned the payload may no longer be
2981 * around and thus would require that the payload on the last living relative be released instead.
2984 static struct drm_dp_mst_branch *
2985 drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
2986 struct drm_dp_mst_branch *mstb,
2989 struct drm_dp_mst_branch *rmstb = NULL;
2990 struct drm_dp_mst_port *found_port;
2992 mutex_lock(&mgr->lock);
2993 if (!mgr->mst_primary)
2997 found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
3001 if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) {
3002 rmstb = found_port->parent;
3003 *port_num = found_port->port_num;
3005 /* Search again, starting from this parent */
3006 mstb = found_port->parent;
3010 mutex_unlock(&mgr->lock);
3014 static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
3015 struct drm_dp_mst_port *port,
3019 struct drm_dp_sideband_msg_tx *txmsg;
3020 struct drm_dp_mst_branch *mstb;
3021 int len, ret, port_num;
3022 u8 sinks[DRM_DP_MAX_SDP_STREAMS];
3025 port_num = port->port_num;
3026 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3028 mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
3036 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3042 for (i = 0; i < port->num_sdp_streams; i++)
3046 len = build_allocate_payload(txmsg, port_num,
3048 pbn, port->num_sdp_streams, sinks);
3050 drm_dp_queue_down_tx(mgr, txmsg);
3053 * FIXME: there is a small chance that between getting the last
3054 * connected mstb and sending the payload message, the last connected
3055 * mstb could also be removed from the topology. In the future, this
3056 * needs to be fixed by restarting the
3057 * drm_dp_get_last_connected_port_and_mstb() search in the event of a
3058 * timeout if the topology is still connected to the system.
3060 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3062 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3069 drm_dp_mst_topology_put_mstb(mstb);
3073 int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
3074 struct drm_dp_mst_port *port, bool power_up)
3076 struct drm_dp_sideband_msg_tx *txmsg;
3079 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3083 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3085 drm_dp_mst_topology_put_port(port);
3089 txmsg->dst = port->parent;
3090 len = build_power_updown_phy(txmsg, port->port_num, power_up);
3091 drm_dp_queue_down_tx(mgr, txmsg);
3093 ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
3095 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3101 drm_dp_mst_topology_put_port(port);
3105 EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
3107 static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
3109 struct drm_dp_payload *payload)
3113 ret = drm_dp_dpcd_write_payload(mgr, id, payload);
3115 payload->payload_state = 0;
3118 payload->payload_state = DP_PAYLOAD_LOCAL;
3122 static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
3123 struct drm_dp_mst_port *port,
3125 struct drm_dp_payload *payload)
3128 ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
3131 payload->payload_state = DP_PAYLOAD_REMOTE;
3135 static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
3136 struct drm_dp_mst_port *port,
3138 struct drm_dp_payload *payload)
3140 DRM_DEBUG_KMS("\n");
3141 /* it's okay for these to fail */
3143 drm_dp_payload_send_msg(mgr, port, id, 0);
3146 drm_dp_dpcd_write_payload(mgr, id, payload);
3147 payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
3151 static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
3153 struct drm_dp_payload *payload)
3155 payload->payload_state = 0;
3160 * drm_dp_update_payload_part1() - Execute payload update part 1
3161 * @mgr: manager to use.
3163 * This iterates over all proposed virtual channels, and tries to
3164 * allocate space in the link for them. For 0->slots transitions,
3165 * this step just writes the VCPI to the MST device. For slots->0
3166 * transitions, this writes the updated VCPIs and removes the
3167 * remote VC payloads.
3169 * After calling this, the driver should generate ACT and payload packets.
3172 int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
3174 struct drm_dp_payload req_payload;
3175 struct drm_dp_mst_port *port;
3179 mutex_lock(&mgr->payload_lock);
3180 for (i = 0; i < mgr->max_payloads; i++) {
3181 struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
3182 struct drm_dp_payload *payload = &mgr->payloads[i];
3183 bool put_port = false;
3185 /* solve the current payloads - compare to the hw ones
3186 - update the hw view */
3187 req_payload.start_slot = cur_slots;
3189 port = container_of(vcpi, struct drm_dp_mst_port,
3192 /* Validated ports don't matter if we're releasing
3195 if (vcpi->num_slots) {
3196 port = drm_dp_mst_topology_get_port_validated(
3199 mutex_unlock(&mgr->payload_lock);
3205 req_payload.num_slots = vcpi->num_slots;
3206 req_payload.vcpi = vcpi->vcpi;
3209 req_payload.num_slots = 0;
3212 payload->start_slot = req_payload.start_slot;
3213 /* work out what is required to happen with this payload */
3214 if (payload->num_slots != req_payload.num_slots) {
3216 /* need to push an update for this payload */
3217 if (req_payload.num_slots) {
3218 drm_dp_create_payload_step1(mgr, vcpi->vcpi,
3220 payload->num_slots = req_payload.num_slots;
3221 payload->vcpi = req_payload.vcpi;
3223 } else if (payload->num_slots) {
3224 payload->num_slots = 0;
3225 drm_dp_destroy_payload_step1(mgr, port,
3228 req_payload.payload_state =
3229 payload->payload_state;
3230 payload->start_slot = 0;
3232 payload->payload_state = req_payload.payload_state;
3234 cur_slots += req_payload.num_slots;
3237 drm_dp_mst_topology_put_port(port);
3240 for (i = 0; i < mgr->max_payloads; /* do nothing */) {
3241 if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) {
3246 DRM_DEBUG_KMS("removing payload %d\n", i);
3247 for (j = i; j < mgr->max_payloads - 1; j++) {
3248 mgr->payloads[j] = mgr->payloads[j + 1];
3249 mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
3251 if (mgr->proposed_vcpis[j] &&
3252 mgr->proposed_vcpis[j]->num_slots) {
3253 set_bit(j + 1, &mgr->payload_mask);
3255 clear_bit(j + 1, &mgr->payload_mask);
3259 memset(&mgr->payloads[mgr->max_payloads - 1], 0,
3260 sizeof(struct drm_dp_payload));
3261 mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
3262 clear_bit(mgr->max_payloads, &mgr->payload_mask);
3264 mutex_unlock(&mgr->payload_lock);
3268 EXPORT_SYMBOL(drm_dp_update_payload_part1);
3271 * drm_dp_update_payload_part2() - Execute payload update part 2
3272 * @mgr: manager to use.
3274 * This iterates over all proposed virtual channels, and tries to
3275 * allocate space in the link for them. For 0->slots transitions,
3276 * this step writes the remote VC payload commands. For slots->0
3277 * this just resets some internal state.
3279 int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
3281 struct drm_dp_mst_port *port;
3284 mutex_lock(&mgr->payload_lock);
3285 for (i = 0; i < mgr->max_payloads; i++) {
3287 if (!mgr->proposed_vcpis[i])
3290 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
3292 DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
3293 if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
3294 ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
3295 } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
3296 ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
3299 mutex_unlock(&mgr->payload_lock);
3303 mutex_unlock(&mgr->payload_lock);
3306 EXPORT_SYMBOL(drm_dp_update_payload_part2);
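/*
 * Example (editorial sketch): the commit-time sequence a driver is expected
 * to follow once VCPIs have been allocated. Triggering the ACT sequence
 * ("my_hw_trigger_act()") is hardware-specific and hypothetical here:
 *
 *	drm_dp_update_payload_part1(mgr);
 *	my_hw_trigger_act(dev);
 *	if (drm_dp_check_act_status(mgr) < 0)
 *		DRM_ERROR("ACT not handled by sink\n");
 *	drm_dp_update_payload_part2(mgr);
 */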
3308 static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
3309 struct drm_dp_mst_port *port,
3310 int offset, int size, u8 *bytes)
3314 struct drm_dp_sideband_msg_tx *txmsg;
3315 struct drm_dp_mst_branch *mstb;
3317 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3321 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3327 len = build_dpcd_read(txmsg, port->port_num, offset, size);
3328 txmsg->dst = port->parent;
3330 drm_dp_queue_down_tx(mgr, txmsg);
3332 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3336 /* DPCD read should never be NACKed */
3337 	if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
3338 DRM_ERROR("mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n",
3339 mstb, port->port_num, offset, size);
3344 if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) {
3349 ret = min_t(size_t, txmsg->reply.u.remote_dpcd_read_ack.num_bytes,
3351 memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret);
3356 drm_dp_mst_topology_put_mstb(mstb);
3361 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
3362 struct drm_dp_mst_port *port,
3363 int offset, int size, u8 *bytes)
3367 struct drm_dp_sideband_msg_tx *txmsg;
3368 struct drm_dp_mst_branch *mstb;
3370 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3374 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3380 len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
3383 drm_dp_queue_down_tx(mgr, txmsg);
3385 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3387 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3394 drm_dp_mst_topology_put_mstb(mstb);
3398 static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
3400 struct drm_dp_sideband_msg_reply_body reply;
3402 reply.reply_type = DP_SIDEBAND_REPLY_ACK;
3403 reply.req_type = req_type;
3404 drm_dp_encode_sideband_reply(&reply, msg);
3408 static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
3409 struct drm_dp_mst_branch *mstb,
3410 int req_type, int seqno, bool broadcast)
3412 struct drm_dp_sideband_msg_tx *txmsg;
3414 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3419 txmsg->seqno = seqno;
3420 drm_dp_encode_up_ack_reply(txmsg, req_type);
3422 mutex_lock(&mgr->qlock);
3424 process_single_up_tx_qlock(mgr, txmsg);
3426 mutex_unlock(&mgr->qlock);
3432 static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8 dp_link_count)
3434 if (dp_link_bw == 0 || dp_link_count == 0)
3435 DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
3436 dp_link_bw, dp_link_count);
3438 return dp_link_bw * dp_link_count / 2;
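/*
 * Worked example (editorial): an HBR2 link (DPCD link-bw code 0x14, i.e. 20)
 * with 4 lanes gives 20 * 4 / 2 = 40 PBN per time slot, which is the value
 * that ends up in mgr->pbn_div below.
 */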
3442 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
3443 * @mgr: manager to set state for
3444 * @mst_state: true to enable MST on this connector - false to disable.
3446 * This is called by the driver when it detects an MST capable device plugged
3447 * into a DP MST capable port, or when a DP MST capable device is unplugged.
3449 int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
3453 struct drm_dp_mst_branch *mstb = NULL;
3455 mutex_lock(&mgr->lock);
3456 if (mst_state == mgr->mst_state)
3459 mgr->mst_state = mst_state;
3460 /* set the device into MST mode */
3462 WARN_ON(mgr->mst_primary);
3465 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
3466 if (ret != DP_RECEIVER_CAP_SIZE) {
3467 DRM_DEBUG_KMS("failed to read DPCD\n");
3471 mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1],
3472 mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
3473 if (mgr->pbn_div == 0) {
3478 /* add initial branch device at LCT 1 */
3479 mstb = drm_dp_add_mst_branch_device(1, NULL);
3486 /* give this the main reference */
3487 mgr->mst_primary = mstb;
3488 drm_dp_mst_topology_get_mstb(mgr->mst_primary);
3490 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3491 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
3497 struct drm_dp_payload reset_pay;
3498 reset_pay.start_slot = 0;
3499 reset_pay.num_slots = 0x3f;
3500 drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
3503 queue_work(system_long_wq, &mgr->work);
3507 /* disable MST on the device */
3508 mstb = mgr->mst_primary;
3509 mgr->mst_primary = NULL;
3510 /* this can fail if the device is gone */
3511 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
3513 mutex_lock(&mgr->payload_lock);
3514 memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
3515 mgr->payload_mask = 0;
3516 set_bit(0, &mgr->payload_mask);
3517 for (i = 0; i < mgr->max_payloads; i++) {
3518 struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
3522 vcpi->num_slots = 0;
3524 mgr->proposed_vcpis[i] = NULL;
3527 mutex_unlock(&mgr->payload_lock);
3529 mgr->payload_id_table_cleared = false;
3533 mutex_unlock(&mgr->lock);
3535 drm_dp_mst_topology_put_mstb(mstb);
3539 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
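/*
 * Example (editorial sketch): toggling MST from a driver's long-HPD handler
 * once sink capabilities have been probed; "my_sink_supports_mst()" is
 * hypothetical:
 *
 *	bool mst = my_sink_supports_mst(dev);
 *
 *	if (drm_dp_mst_topology_mgr_set_mst(&dev->mst_mgr, mst) < 0)
 *		DRM_DEBUG_KMS("failed to %s MST\n",
 *			      mst ? "enable" : "disable");
 */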
3542 drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb)
3544 struct drm_dp_mst_port *port;
3546 /* The link address will need to be re-sent on resume */
3547 mstb->link_address_sent = false;
3549 list_for_each_entry(port, &mstb->ports, next) {
3550 /* The PBN for each port will also need to be re-probed */
3551 port->available_pbn = 0;
3554 drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb);
3559 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
3560 * @mgr: manager to suspend
3562 * This function tells the MST device that we can't handle UP messages
3563 * anymore. This should stop it from sending any since we are suspended.
3565 void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
3567 mutex_lock(&mgr->lock);
3568 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3569 DP_MST_EN | DP_UPSTREAM_IS_SRC);
3570 mutex_unlock(&mgr->lock);
3571 flush_work(&mgr->up_req_work);
3572 flush_work(&mgr->work);
3573 flush_work(&mgr->delayed_destroy_work);
3575 mutex_lock(&mgr->lock);
3576 if (mgr->mst_state && mgr->mst_primary)
3577 drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary);
3578 mutex_unlock(&mgr->lock);
3580 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
3583 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
3584 * @mgr: manager to resume
3585 * @sync: whether or not to perform topology reprobing synchronously
3587 * This will fetch the DPCD and see if the device is still there;
3588 * if it is, it will rewrite the MSTM control bits, and return.
3590 * If the device fails this returns -1, and the driver should do
3591 * a full MST reprobe, in case we were undocked.
3593 * During system resume (where it is assumed that the driver will be calling
3594 * drm_atomic_helper_resume()) this function should be called beforehand with
3595 * @sync set to true. In contexts like runtime resume where the driver is not
3596 * expected to be calling drm_atomic_helper_resume(), this function should be
3597 * called with @sync set to false in order to avoid deadlocking.
3599 * Returns: -1 if the MST topology was removed while we were suspended, 0 otherwise.
3602 int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
3608 mutex_lock(&mgr->lock);
3609 if (!mgr->mst_primary)
3612 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd,
3613 DP_RECEIVER_CAP_SIZE);
3614 if (ret != DP_RECEIVER_CAP_SIZE) {
3615 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
3619 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3622 DP_UPSTREAM_IS_SRC);
3624 DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
3628 /* Some hubs forget their guids after they resume */
3629 ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
3631 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
3634 drm_dp_check_mstb_guid(mgr->mst_primary, guid);
3637 * For the final step of resuming the topology, we need to bring the
3638 * state of our in-memory topology back into sync with reality. So,
3639 * restart the probing process as if we're probing a new hub
3641 queue_work(system_long_wq, &mgr->work);
3642 mutex_unlock(&mgr->lock);
3645 DRM_DEBUG_KMS("Waiting for link probe work to finish re-syncing topology...\n");
3646 flush_work(&mgr->work);
3652 mutex_unlock(&mgr->lock);
3655 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
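/*
 * Example (editorial sketch): pairing the suspend/resume helpers above in a
 * driver's system PM hooks, with @sync set as recommended; everything named
 * "my_*" is hypothetical:
 *
 *	static int my_driver_suspend(struct my_device *dev)
 *	{
 *		drm_dp_mst_topology_mgr_suspend(&dev->mst_mgr);
 *		return 0;
 *	}
 *
 *	static int my_driver_resume(struct my_device *dev)
 *	{
 *		if (drm_dp_mst_topology_mgr_resume(&dev->mst_mgr, true) < 0) {
 *			drm_dp_mst_topology_mgr_set_mst(&dev->mst_mgr, false);
 *			my_full_connector_reprobe(dev);
 *		}
 *		return 0;
 *	}
 */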
3657 static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
3661 int replylen, origlen, curreply;
3663 struct drm_dp_sideband_msg_rx *msg;
3664 int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
3665 msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;
3667 len = min(mgr->max_dpcd_transaction_bytes, 16);
3668 ret = drm_dp_dpcd_read(mgr->aux, basereg,
3671 DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
3674 ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
3676 DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
3679 replylen = msg->curchunk_len + msg->curchunk_hdrlen;
3684 while (replylen > 0) {
3685 len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
3686 ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
3689 DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
3694 ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
3696 DRM_DEBUG_KMS("failed to build sideband msg\n");
3706 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
3708 struct drm_dp_sideband_msg_tx *txmsg;
3709 struct drm_dp_mst_branch *mstb;
3710 struct drm_dp_sideband_msg_hdr *hdr = &mgr->down_rep_recv.initial_hdr;
3713 if (!drm_dp_get_one_sb_msg(mgr, false))
3714 goto clear_down_rep_recv;
3716 if (!mgr->down_rep_recv.have_eomt)
3719 mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
3721 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
3723 goto clear_down_rep_recv;
3726 /* find the message */
3728 mutex_lock(&mgr->qlock);
3729 txmsg = mstb->tx_slots[slot];
3730 /* remove from slots */
3731 mutex_unlock(&mgr->qlock);
3734 DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
3735 mstb, hdr->seqno, hdr->lct, hdr->rad[0],
3736 mgr->down_rep_recv.msg[0]);
3740 drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
3742 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3743 DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
3744 txmsg->reply.req_type,
3745 drm_dp_mst_req_type_str(txmsg->reply.req_type),
3746 txmsg->reply.u.nak.reason,
3747 drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
3748 txmsg->reply.u.nak.nak_data);
3750 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3751 drm_dp_mst_topology_put_mstb(mstb);
3753 mutex_lock(&mgr->qlock);
3754 txmsg->state = DRM_DP_SIDEBAND_TX_RX;
3755 mstb->tx_slots[slot] = NULL;
3756 mutex_unlock(&mgr->qlock);
3758 wake_up_all(&mgr->tx_waitq);
3763 drm_dp_mst_topology_put_mstb(mstb);
3764 clear_down_rep_recv:
3765 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3771 drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
3772 struct drm_dp_pending_up_req *up_req)
3774 struct drm_dp_mst_branch *mstb = NULL;
3775 struct drm_dp_sideband_msg_req_body *msg = &up_req->msg;
3776 struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr;
3777 bool hotplug = false;
3779 if (hdr->broadcast) {
3780 const u8 *guid = NULL;
3782 if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY)
3783 guid = msg->u.conn_stat.guid;
3784 else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
3785 guid = msg->u.resource_stat.guid;
3787 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
3789 mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
3793 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
3798 /* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */
3799 if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
3800 drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
3804 drm_dp_mst_topology_put_mstb(mstb);
3808 static void drm_dp_mst_up_req_work(struct work_struct *work)
3810 struct drm_dp_mst_topology_mgr *mgr =
3811 container_of(work, struct drm_dp_mst_topology_mgr,
3813 struct drm_dp_pending_up_req *up_req;
3814 bool send_hotplug = false;
3816 mutex_lock(&mgr->probe_lock);
3818 mutex_lock(&mgr->up_req_lock);
3819 up_req = list_first_entry_or_null(&mgr->up_req_list,
3820 struct drm_dp_pending_up_req,
3823 list_del(&up_req->next);
3824 mutex_unlock(&mgr->up_req_lock);
3829 send_hotplug |= drm_dp_mst_process_up_req(mgr, up_req);
3832 mutex_unlock(&mgr->probe_lock);
3835 drm_kms_helper_hotplug_event(mgr->dev);
3838 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
3840 struct drm_dp_sideband_msg_hdr *hdr = &mgr->up_req_recv.initial_hdr;
3841 struct drm_dp_pending_up_req *up_req;
3844 if (!drm_dp_get_one_sb_msg(mgr, true))
3847 if (!mgr->up_req_recv.have_eomt)
3850 up_req = kzalloc(sizeof(*up_req), GFP_KERNEL);
3852 DRM_ERROR("Not enough memory to process MST up req\n");
3855 INIT_LIST_HEAD(&up_req->next);
3858 drm_dp_sideband_parse_req(&mgr->up_req_recv, &up_req->msg);
3860 if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY &&
3861 up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) {
3862 DRM_DEBUG_KMS("Received unknown up req type, ignoring: %x\n",
3863 up_req->msg.req_type);
3868 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type,
3871 if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
3872 const struct drm_dp_connection_status_notify *conn_stat =
3873 &up_req->msg.u.conn_stat;
3875 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
3876 conn_stat->port_number,
3877 conn_stat->legacy_device_plug_status,
3878 conn_stat->displayport_device_plug_status,
3879 conn_stat->message_capability_status,
3880 conn_stat->input_port,
3881 conn_stat->peer_device_type);
3882 } else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
3883 const struct drm_dp_resource_status_notify *res_stat =
3884 &up_req->msg.u.resource_stat;
3886 DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n",
3887 res_stat->port_number,
3888 res_stat->available_pbn);
3892 mutex_lock(&mgr->up_req_lock);
3893 list_add_tail(&up_req->next, &mgr->up_req_list);
3894 mutex_unlock(&mgr->up_req_lock);
3895 queue_work(system_long_wq, &mgr->up_req_work);
3898 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3903 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
3904 * @mgr: manager to notify irq for.
3905 * @esi: 4 bytes from SINK_COUNT_ESI
3906 * @handled: whether the hpd interrupt was consumed or not
3908 * This should be called from the driver when it detects a short IRQ,
3909 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
3910 * topology manager will process the sideband messages received as a result of this.
3913 int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
3920 if (sc != mgr->sink_count) {
3921 mgr->sink_count = sc;
3925 if (esi[1] & DP_DOWN_REP_MSG_RDY) {
3926 ret = drm_dp_mst_handle_down_rep(mgr);
3930 if (esi[1] & DP_UP_REQ_MSG_RDY) {
3931 ret |= drm_dp_mst_handle_up_req(mgr);
3935 drm_dp_mst_kick_tx(mgr);
3938 EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
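/*
 * Example (editorial sketch): a driver's short-HPD service loop. Reading and
 * ACKing the ESI registers stays the driver's job; the exact ACK handshake
 * below is illustrative only:
 *
 *	u8 esi[DP_DPRX_ESI_LEN] = {};
 *	bool handled = false;
 *
 *	drm_dp_dpcd_read(&dev->aux, DP_SINK_COUNT_ESI, esi, sizeof(esi));
 *	drm_dp_mst_hpd_irq(&dev->mst_mgr, esi, &handled);
 *	if (handled)
 *		drm_dp_dpcd_write(&dev->aux, DP_SINK_COUNT_ESI + 1,
 *				  &esi[1], 2);
 */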
3941 * drm_dp_mst_detect_port() - get connection status for an MST port
3942 * @connector: DRM connector for this port
3943 * @ctx: The acquisition context to use for grabbing locks
3944 * @mgr: manager for this port
3945 * @port: pointer to a port
3947 * This returns the current connection state for a port.
3950 drm_dp_mst_detect_port(struct drm_connector *connector,
3951 struct drm_modeset_acquire_ctx *ctx,
3952 struct drm_dp_mst_topology_mgr *mgr,
3953 struct drm_dp_mst_port *port)
3957 /* we need to search for the port in the mgr in case it's gone */
3958 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3960 return connector_status_disconnected;
3962 ret = drm_modeset_lock(&mgr->base.lock, ctx);
3966 ret = connector_status_disconnected;
3971 switch (port->pdt) {
3972 case DP_PEER_DEVICE_NONE:
3973 case DP_PEER_DEVICE_MST_BRANCHING:
3976 case DP_PEER_DEVICE_SST_SINK:
3977 ret = connector_status_connected;
3978 /* for logical ports - cache the EDID */
3979 if (port->port_num >= 8 && !port->cached_edid) {
3980 port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
3983 case DP_PEER_DEVICE_DP_LEGACY_CONV:
3985 ret = connector_status_connected;
3989 drm_dp_mst_topology_put_port(port);
3992 EXPORT_SYMBOL(drm_dp_mst_detect_port);
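/*
 * Example (editorial sketch): an MST connector's ->detect_ctx() hook;
 * "to_my_mst_connector()" is hypothetical:
 *
 *	static int my_mst_detect_ctx(struct drm_connector *connector,
 *				     struct drm_modeset_acquire_ctx *ctx,
 *				     bool force)
 *	{
 *		struct my_mst_connector *c = to_my_mst_connector(connector);
 *
 *		return drm_dp_mst_detect_port(connector, ctx, c->mst_mgr,
 *					      c->mst_port);
 *	}
 */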
3995 * drm_dp_mst_port_has_audio() - Check whether port has audio capability or not
3996 * @mgr: manager for this port
3997 * @port: unverified pointer to a port.
3999 * This returns whether the port supports audio or not.
4001 bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
4002 struct drm_dp_mst_port *port)
4006 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4009 ret = port->has_audio;
4010 drm_dp_mst_topology_put_port(port);
4013 EXPORT_SYMBOL(drm_dp_mst_port_has_audio);
4016 * drm_dp_mst_get_edid() - get EDID for an MST port
4017 * @connector: toplevel connector to get EDID for
4018 * @mgr: manager for this port
4019 * @port: unverified pointer to a port.
4021 * This returns an EDID for the port connected to a connector.
4022 * It validates the pointer still exists so the caller doesn't require a reference.
4025 struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
4027 struct edid *edid = NULL;
4029 /* we need to search for the port in the mgr in case it's gone */
4030 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4034 if (port->cached_edid)
4035 edid = drm_edid_duplicate(port->cached_edid);
4037 edid = drm_get_edid(connector, &port->aux.ddc);
4039 port->has_audio = drm_detect_monitor_audio(edid);
4040 drm_dp_mst_topology_put_port(port);
4043 EXPORT_SYMBOL(drm_dp_mst_get_edid);
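/*
 * Example (editorial sketch): a typical MST connector ->get_modes()
 * implementation built on the helper above; "c" is a hypothetical
 * driver-private connector:
 *
 *	struct edid *edid =
 *		drm_dp_mst_get_edid(connector, c->mst_mgr, c->mst_port);
 *	int count = 0;
 *
 *	if (edid) {
 *		drm_connector_update_edid_property(connector, edid);
 *		count = drm_add_edid_modes(connector, edid);
 *		kfree(edid);
 *	}
 *	return count;
 */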
4046 * drm_dp_find_vcpi_slots() - Find VCPI slots for this PBN value
4047 * @mgr: manager to use
4048 * @pbn: payload bandwidth to convert into slots.
4050 * Calculate the number of VCPI slots that will be required for the given PBN
4051 * value. This function is deprecated and should not be used in atomic drivers.
4055 * Returns: The total slots required for this port, or a negative error code.
4057 int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
4062 num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
4064 /* max. time slots - one slot for MTP header */
4069 EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
4071 static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
4072 struct drm_dp_vcpi *vcpi, int pbn, int slots)
4076 /* max. time slots - one slot for MTP header */
4081 vcpi->aligned_pbn = slots * mgr->pbn_div;
4082 vcpi->num_slots = slots;
4084 ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
4091 * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state
4092 * @state: global atomic state
4093 * @mgr: MST topology manager for the port
4094 * @port: port to find vcpi slots for
4095 * @pbn: bandwidth required for the mode in PBN
4097 * Allocates VCPI slots to @port, replacing any previous VCPI allocations it
4098 * may have had. Any atomic drivers which support MST must call this function
4099 * in their &drm_encoder_helper_funcs.atomic_check() callback to change the
4100 * current VCPI allocation for the new state, but only when
4101 * &drm_crtc_state.mode_changed or &drm_crtc_state.connectors_changed is set
4102 * to ensure compatibility with userspace applications that still use the
4103 * legacy modesetting UAPI.
4105 * Allocations set by this function are not checked against the bandwidth
4106 * restraints of @mgr until the driver calls drm_dp_mst_atomic_check().
4108 * Additionally, it is OK to call this function multiple times on the same
4109 * @port as needed. It is not OK, however, to call this function and
4110 * drm_dp_atomic_release_vcpi_slots() in the same atomic check phase.
4113 * drm_dp_atomic_release_vcpi_slots()
4114 * drm_dp_mst_atomic_check()
4117 * Total slots in the atomic state assigned for this port, or a negative error
4118 * code if the port no longer exists
4120 int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
4121 struct drm_dp_mst_topology_mgr *mgr,
4122 struct drm_dp_mst_port *port, int pbn)
4124 struct drm_dp_mst_topology_state *topology_state;
4125 struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
4126 int prev_slots, req_slots;
4128 topology_state = drm_atomic_get_mst_topology_state(state, mgr);
4129 if (IS_ERR(topology_state))
4130 return PTR_ERR(topology_state);
4132 /* Find the current allocation for this port, if any */
4133 list_for_each_entry(pos, &topology_state->vcpis, next) {
4134 if (pos->port == port) {
4136 prev_slots = vcpi->vcpi;
4139 * This should never happen, unless the driver tries
4140 			 * releasing and allocating the same VCPI allocation, which is an error.
4143 if (WARN_ON(!prev_slots)) {
4144 DRM_ERROR("cannot allocate and release VCPI on [MST PORT:%p] in the same state\n",
4155 req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
4157 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
4158 port->connector->base.id, port->connector->name,
4159 port, prev_slots, req_slots);
4161 /* Add the new allocation to the state */
4163 vcpi = kzalloc(sizeof(*vcpi), GFP_KERNEL);
4167 drm_dp_mst_get_port_malloc(port);
4169 list_add(&vcpi->next, &topology_state->vcpis);
4171 vcpi->vcpi = req_slots;
4175 EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
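/*
 * Example (editorial sketch): the allocation half of an encoder's
 * atomic_check(), with the PBN computed by drm_dp_calc_pbn_mode() further
 * below:
 *
 *	int pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp, false);
 *	int slots = drm_dp_atomic_find_vcpi_slots(state, mst_mgr, mst_port,
 *						  pbn);
 *
 *	if (slots < 0)
 *		return slots;
 */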
4178 * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
4179 * @state: global atomic state
4180 * @mgr: MST topology manager for the port
4181 * @port: The port to release the VCPI slots from
4183 * Releases any VCPI slots that have been allocated to a port in the atomic
4184 * state. Any atomic drivers which support MST must call this function in
4185 * their &drm_connector_helper_funcs.atomic_check() callback when the
4186 * connector will no longer have VCPI allocated (e.g. because its CRTC was
4187 * removed) when it had VCPI allocated in the previous atomic state.
4189 * It is OK to call this even if @port has been removed from the system.
4190 * Additionally, it is OK to call this function multiple times on the same
4191 * @port as needed. It is not OK, however, to call this function and
4192 * drm_dp_atomic_find_vcpi_slots() on the same @port in a single atomic check phase.
4196 * drm_dp_atomic_find_vcpi_slots()
4197 * drm_dp_mst_atomic_check()
4200 * 0 if all slots for this port were added back to
4201 * &drm_dp_mst_topology_state.avail_slots or negative error code
4203 int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
4204 struct drm_dp_mst_topology_mgr *mgr,
4205 struct drm_dp_mst_port *port)
4207 struct drm_dp_mst_topology_state *topology_state;
4208 struct drm_dp_vcpi_allocation *pos;
4211 topology_state = drm_atomic_get_mst_topology_state(state, mgr);
4212 if (IS_ERR(topology_state))
4213 return PTR_ERR(topology_state);
4215 list_for_each_entry(pos, &topology_state->vcpis, next) {
4216 if (pos->port == port) {
4221 if (WARN_ON(!found)) {
4222 DRM_ERROR("no VCPI for [MST PORT:%p] found in mst state %p\n",
4223 port, &topology_state->base);
4227 DRM_DEBUG_ATOMIC("[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi);
4229 drm_dp_mst_put_port_malloc(port);
4235 EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots);
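/*
 * Example (editorial sketch): the matching release from a connector's
 * atomic_check() when the connector is losing its CRTC:
 *
 *	if (old_conn_state->crtc && !new_conn_state->crtc)
 *		return drm_dp_atomic_release_vcpi_slots(state, mst_mgr,
 *							mst_port);
 */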
4238 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
4239 * @mgr: manager for this port
4240 * @port: port to allocate a virtual channel for.
4241 * @pbn: payload bandwidth number to request
4242 * @slots: number of slots to allocate for this PBN.
4244 bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
4245 struct drm_dp_mst_port *port, int pbn, int slots)
4249 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4256 if (port->vcpi.vcpi > 0) {
4257 DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n",
4258 port->vcpi.vcpi, port->vcpi.pbn, pbn);
4259 if (pbn == port->vcpi.pbn) {
4260 drm_dp_mst_topology_put_port(port);
4265 ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
4267 DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
4268 DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
4271 DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
4272 pbn, port->vcpi.num_slots);
4274 /* Keep port allocated until its payload has been removed */
4275 drm_dp_mst_get_port_malloc(port);
4276 drm_dp_mst_topology_put_port(port);
4281 EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
4283 int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
4286 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4290 slots = port->vcpi.num_slots;
4291 drm_dp_mst_topology_put_port(port);
4294 EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
4297 * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
4298 * @mgr: manager for this port
4299 * @port: unverified pointer to a port.
4301 * This just resets the number of slots for the port's VCPI for later programming.
4303 void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
4306 * A port with VCPI will remain allocated until its VCPI is
4307 	 * released; no verified ref needed
4310 port->vcpi.num_slots = 0;
4312 EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
4315 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
4316 * @mgr: manager for this port
4317 * @port: port to deallocate vcpi for
4319 * This can be called unconditionally, regardless of whether
4320 * drm_dp_mst_allocate_vcpi() succeeded or not.
4322 void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
4323 struct drm_dp_mst_port *port)
4325 if (!port->vcpi.vcpi)
4328 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
4329 port->vcpi.num_slots = 0;
4331 port->vcpi.aligned_pbn = 0;
4332 port->vcpi.vcpi = 0;
4333 drm_dp_mst_put_port_malloc(port);
4335 EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
4337 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
4338 int id, struct drm_dp_payload *payload)
4340 u8 payload_alloc[3], status;
4344 drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
4345 DP_PAYLOAD_TABLE_UPDATED);
4347 payload_alloc[0] = id;
4348 payload_alloc[1] = payload->start_slot;
4349 payload_alloc[2] = payload->num_slots;
4351 ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
4353 DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
4358 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
4360 DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
4364 if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
4367 usleep_range(10000, 20000);
4370 DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
4381 * drm_dp_check_act_status() - Check ACT handled status.
4382 * @mgr: manager to use
4384 * Check the payload status bits in the DPCD for ACT handled completion.
4386 int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
4393 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
4396 DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
4400 if (status & DP_PAYLOAD_ACT_HANDLED)
4405 } while (count < 30);
4407 if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
4408 DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
4416 EXPORT_SYMBOL(drm_dp_check_act_status);
4419 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
4420 * @clock: dot clock for the mode
4421 * @bpp: bpp for the mode.
4422 * @dsc: DSC mode. If true, bpp has units of 1/16 of a bit per pixel
4424 * This uses the formula in the spec to calculate the PBN value for a mode.
4426 int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc)
4429 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
4430 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
4431 * common multiplier to render an integer PBN for all link rate/lane
4432 * counts combinations
4434 * peak_kbps *= (1006/1000)
4435 * peak_kbps *= (64/54)
4436 * peak_kbps *= 8 convert to bytes
4438 * If the bpp is in units of 1/16, further divide by 16. Put this
4439 * factor in the numerator rather than the denominator to avoid
4444 return DIV_ROUND_UP_ULL(mul_u32_u32(clock * (bpp / 16), 64 * 1006),
4445 8 * 54 * 1000 * 1000);
4447 return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006),
4448 8 * 54 * 1000 * 1000);
4450 EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
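/*
 * Worked example (editorial): a 1920x1080@60 mode (148500 kHz dot clock) at
 * 24 bpp needs
 *
 *	ceil(148500 * 24 * 64 * 1006 / (8 * 54 * 1000 * 1000)) = 532 PBN,
 *
 * i.e. ceil(532 / 40) = 14 time slots on the HBR2 x4 link from the
 * pbn_div example earlier.
 */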
4452 /* we want to kick the TX after we've ACKed the up/down IRQs. */
4453 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
4455 queue_work(system_long_wq, &mgr->tx_work);

static void drm_dp_mst_dump_mstb(struct seq_file *m,
				 struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_port *port;
	int tabs = mstb->lct;
	char prefix[10];
	int i;

	/* Indent by the branch's link count to render the tree shape */
	for (i = 0; i < tabs; i++)
		prefix[i] = '\t';
	prefix[i] = '\0';

	seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
	list_for_each_entry(port, &mstb->ports, next) {
		seq_printf(m, "%sport: %d: input: %d: pdt: %d, ddps: %d ldps: %d, sdp: %d/%d, %p, conn: %p\n",
			   prefix, port->port_num, port->input, port->pdt,
			   port->ddps, port->ldps, port->num_sdp_streams,
			   port->num_sdp_stream_sinks, port, port->connector);
		if (port->mstb)
			drm_dp_mst_dump_mstb(m, port->mstb);
	}
}

#define DP_PAYLOAD_TABLE_SIZE		64

static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf)
{
	int i;

	for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
		if (drm_dp_dpcd_read(mgr->aux,
				     DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
				     &buf[i], 16) != 16)
			return false;
	}
	return true;
}

static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
			       struct drm_dp_mst_port *port, char *name,
			       int namelen)
{
	struct edid *mst_edid;

	mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
	drm_edid_get_monitor_name(mst_edid, name, namelen);
	kfree(mst_edid);
}

/**
 * drm_dp_mst_dump_topology(): dump topology to seq file.
 * @m: seq_file to dump output to
 * @mgr: manager to dump current topology for.
 *
 * Helper to dump the MST topology to a seq file for debugfs.
 */
void drm_dp_mst_dump_topology(struct seq_file *m,
			      struct drm_dp_mst_topology_mgr *mgr)
{
	int i;
	struct drm_dp_mst_port *port;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary)
		drm_dp_mst_dump_mstb(m, mgr->mst_primary);

	/* dump VCPIs */
	mutex_unlock(&mgr->lock);

	mutex_lock(&mgr->payload_lock);
	seq_printf(m, "vcpi: %lx %lx %d\n", mgr->payload_mask, mgr->vcpi_mask,
		   mgr->max_payloads);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i]) {
			char name[14];

			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
			fetch_monitor_name(mgr, port, name, sizeof(name));
			seq_printf(m, "vcpi %d: %d %d %d sink name: %s\n", i,
				   port->port_num, port->vcpi.vcpi,
				   port->vcpi.num_slots,
				   (*name != 0) ? name : "Unknown");
		} else
			seq_printf(m, "vcpi %d:unused\n", i);
	}
	for (i = 0; i < mgr->max_payloads; i++) {
		seq_printf(m, "payload %d: %d, %d, %d\n",
			   i,
			   mgr->payloads[i].payload_state,
			   mgr->payloads[i].start_slot,
			   mgr->payloads[i].num_slots);
	}
	mutex_unlock(&mgr->payload_lock);

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		u8 buf[DP_PAYLOAD_TABLE_SIZE];
		int ret;

		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
		seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
		ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
		seq_printf(m, "faux/mst: %*ph\n", 2, buf);
		ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
		seq_printf(m, "mst ctrl: %*ph\n", 1, buf);

		/* dump the standard OUI branch header */
		ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
		seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
		for (i = 0x3; i < 0x8 && buf[i]; i++)
			seq_printf(m, "%c", buf[i]);
		seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
			   buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
		if (dump_dp_payload_table(mgr, buf))
			seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf);
	}

	mutex_unlock(&mgr->lock);
}
EXPORT_SYMBOL(drm_dp_mst_dump_topology);
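
/*
 * Example (illustrative; the show callback name is hypothetical):
 * drivers usually expose this dump through a debugfs seq_file:
 *
 *	static int example_mst_topology_show(struct seq_file *m, void *data)
 *	{
 *		struct drm_dp_mst_topology_mgr *mgr = m->private;
 *
 *		drm_dp_mst_dump_topology(m, mgr);
 *		return 0;
 *	}
 */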

static void drm_dp_tx_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr =
		container_of(work, struct drm_dp_mst_topology_mgr, tx_work);

	mutex_lock(&mgr->qlock);
	if (!list_empty(&mgr->tx_msg_downq))
		process_single_down_tx_qlock(mgr);
	mutex_unlock(&mgr->qlock);
}

static inline void
drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
{
	if (port->connector)
		port->mgr->cbs->destroy_connector(port->mgr, port->connector);

	drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE);
	drm_dp_mst_put_port_malloc(port);
}

static inline void
drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	struct drm_dp_mst_port *port, *tmp;
	bool wake_tx = false;

	mutex_lock(&mgr->lock);
	list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
		list_del(&port->next);
		drm_dp_mst_topology_put_port(port);
	}
	mutex_unlock(&mgr->lock);

	/* drop any tx slots msg */
	mutex_lock(&mstb->mgr->qlock);
	if (mstb->tx_slots[0]) {
		mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		mstb->tx_slots[0] = NULL;
		wake_tx = true;
	}
	if (mstb->tx_slots[1]) {
		mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		mstb->tx_slots[1] = NULL;
		wake_tx = true;
	}
	mutex_unlock(&mstb->mgr->qlock);

	if (wake_tx)
		wake_up_all(&mstb->mgr->tx_waitq);

	drm_dp_mst_put_mstb_malloc(mstb);
}

static void drm_dp_delayed_destroy_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr =
		container_of(work, struct drm_dp_mst_topology_mgr,
			     delayed_destroy_work);
	bool send_hotplug = false, go_again;

	/*
	 * Not a regular list traverse as we have to drop the destroy
	 * connector lock before destroying the mstb/port, to avoid AB->BA
	 * ordering between this lock and the config mutex.
	 */
	do {
		go_again = false;

		for (;;) {
			struct drm_dp_mst_branch *mstb;

			mutex_lock(&mgr->delayed_destroy_lock);
			mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list,
							struct drm_dp_mst_branch,
							destroy_next);
			if (mstb)
				list_del(&mstb->destroy_next);
			mutex_unlock(&mgr->delayed_destroy_lock);

			if (!mstb)
				break;

			drm_dp_delayed_destroy_mstb(mstb);
			go_again = true;
		}

		for (;;) {
			struct drm_dp_mst_port *port;

			mutex_lock(&mgr->delayed_destroy_lock);
			port = list_first_entry_or_null(&mgr->destroy_port_list,
							struct drm_dp_mst_port,
							next);
			if (port)
				list_del(&port->next);
			mutex_unlock(&mgr->delayed_destroy_lock);

			if (!port)
				break;

			drm_dp_delayed_destroy_port(port);
			send_hotplug = true;
			go_again = true;
		}
	} while (go_again);

	if (send_hotplug)
		drm_kms_helper_hotplug_event(mgr->dev);
}

static struct drm_private_state *
drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
{
	struct drm_dp_mst_topology_state *state, *old_state =
		to_dp_mst_topology_state(obj->state);
	struct drm_dp_vcpi_allocation *pos, *vcpi;

	state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	INIT_LIST_HEAD(&state->vcpis);

	list_for_each_entry(pos, &old_state->vcpis, next) {
		/* Prune leftover freed VCPI allocations */
		if (!pos->vcpi)
			continue;

		vcpi = kmemdup(pos, sizeof(*vcpi), GFP_KERNEL);
		if (!vcpi)
			goto fail;

		drm_dp_mst_get_port_malloc(vcpi->port);
		list_add(&vcpi->next, &state->vcpis);
	}

	return &state->base;

fail:
	list_for_each_entry_safe(pos, vcpi, &state->vcpis, next) {
		drm_dp_mst_put_port_malloc(pos->port);
		kfree(pos);
	}
	kfree(state);

	return NULL;
}

static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
				     struct drm_private_state *state)
{
	struct drm_dp_mst_topology_state *mst_state =
		to_dp_mst_topology_state(state);
	struct drm_dp_vcpi_allocation *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, &mst_state->vcpis, next) {
		/* We only keep references to ports with non-zero VCPIs */
		if (pos->vcpi)
			drm_dp_mst_put_port_malloc(pos->port);
		kfree(pos);
	}

	kfree(mst_state);
}

static inline int
drm_dp_mst_atomic_check_topology_state(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_topology_state *mst_state)
{
	struct drm_dp_vcpi_allocation *vcpi;
	int avail_slots = 63, payload_count = 0;

	list_for_each_entry(vcpi, &mst_state->vcpis, next) {
		/* Releasing VCPI is always OK, even if the port is gone */
		if (!vcpi->vcpi) {
			DRM_DEBUG_ATOMIC("[MST PORT:%p] releases all VCPI slots\n",
					 vcpi->port);
			continue;
		}

		DRM_DEBUG_ATOMIC("[MST PORT:%p] requires %d vcpi slots\n",
				 vcpi->port, vcpi->vcpi);

		avail_slots -= vcpi->vcpi;
		if (avail_slots < 0) {
			DRM_DEBUG_ATOMIC("[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n",
					 vcpi->port, mst_state,
					 avail_slots + vcpi->vcpi);
			return -ENOSPC;
		}

		if (++payload_count > mgr->max_payloads) {
			DRM_DEBUG_ATOMIC("[MST MGR:%p] state %p has too many payloads (max=%d)\n",
					 mgr, mst_state, mgr->max_payloads);
			return -EINVAL;
		}
	}
	DRM_DEBUG_ATOMIC("[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n",
			 mgr, mst_state, avail_slots,
			 63 - avail_slots);

	return 0;
}

/**
 * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
 * atomic update is valid
 * @state: Pointer to the new &struct drm_dp_mst_topology_state
 *
 * Checks the given topology state for an atomic update to ensure that it's
 * valid. This includes checking whether there's enough bandwidth to support
 * the new VCPI allocations in the atomic update.
 *
 * Any atomic drivers supporting DP MST must make sure to call this after
 * checking the rest of their state in their
 * &drm_mode_config_funcs.atomic_check() callback.
 *
 * See also:
 * drm_dp_atomic_find_vcpi_slots()
 * drm_dp_atomic_release_vcpi_slots()
 *
 * Returns:
 *
 * 0 if the new state is valid, negative error code otherwise.
 */
int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
{
	struct drm_dp_mst_topology_mgr *mgr;
	struct drm_dp_mst_topology_state *mst_state;
	int i, ret = 0;

	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
		ret = drm_dp_mst_atomic_check_topology_state(mgr, mst_state);
		if (ret)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_atomic_check);
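
/*
 * Example (illustrative; the callback name is hypothetical): a driver
 * would run the MST check after the core atomic checks in its
 * &drm_mode_config_funcs.atomic_check hook:
 *
 *	static int example_atomic_check(struct drm_device *dev,
 *					struct drm_atomic_state *state)
 *	{
 *		int ret;
 *
 *		ret = drm_atomic_helper_check(dev, state);
 *		if (ret)
 *			return ret;
 *
 *		return drm_dp_mst_atomic_check(state);
 *	}
 */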

const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
	.atomic_duplicate_state = drm_dp_mst_duplicate_state,
	.atomic_destroy_state = drm_dp_mst_destroy_state,
};
EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);

/**
 * drm_atomic_get_mst_topology_state: get MST topology state
 *
 * @state: global atomic state
 * @mgr: MST topology manager, also the private object in this case
 *
 * This function wraps drm_atomic_get_private_obj_state() passing in the MST
 * atomic state vtable so that the private object state returned is that of an
 * MST topology object. Also, drm_atomic_get_private_obj_state() expects the
 * caller to take care of the locking, so we warn if the connection_mutex is
 * not held.
 *
 * Returns:
 *
 * The MST topology state or error pointer.
 */
struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
								    struct drm_dp_mst_topology_mgr *mgr)
{
	return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
}
EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
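
/*
 * Example (illustrative): callers grab the topology state from within an
 * atomic check, with the connection_mutex already held:
 *
 *	mst_state = drm_atomic_get_mst_topology_state(state, mgr);
 *	if (IS_ERR(mst_state))
 *		return PTR_ERR(mst_state);
 */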

/**
 * drm_dp_mst_topology_mgr_init - initialise a topology manager
 * @mgr: manager struct to initialise
 * @dev: device providing this structure - for i2c addition.
 * @aux: DP helper aux channel to talk to this device
 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
 * @max_payloads: maximum number of payloads this GPU can source
 * @conn_base_id: the connector object ID the MST device is connected to.
 *
 * Returns 0 on success, or a negative error code on failure
 */
int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_device *dev, struct drm_dp_aux *aux,
				 int max_dpcd_transaction_bytes,
				 int max_payloads, int conn_base_id)
{
	struct drm_dp_mst_topology_state *mst_state;

	mutex_init(&mgr->lock);
	mutex_init(&mgr->qlock);
	mutex_init(&mgr->payload_lock);
	mutex_init(&mgr->delayed_destroy_lock);
	mutex_init(&mgr->up_req_lock);
	mutex_init(&mgr->probe_lock);
#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
	mutex_init(&mgr->topology_ref_history_lock);
#endif
	INIT_LIST_HEAD(&mgr->tx_msg_downq);
	INIT_LIST_HEAD(&mgr->destroy_port_list);
	INIT_LIST_HEAD(&mgr->destroy_branch_device_list);
	INIT_LIST_HEAD(&mgr->up_req_list);
	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
	INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
	INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work);
	INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work);
	init_waitqueue_head(&mgr->tx_waitq);
	mgr->dev = dev;
	mgr->aux = aux;
	mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
	mgr->max_payloads = max_payloads;
	mgr->conn_base_id = conn_base_id;
	if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
	    max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
		return -EINVAL;
	mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
	if (!mgr->payloads)
		return -ENOMEM;
	mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
	if (!mgr->proposed_vcpis)
		return -ENOMEM;
	set_bit(0, &mgr->payload_mask);

	mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
	if (mst_state == NULL)
		return -ENOMEM;

	mst_state->mgr = mgr;
	INIT_LIST_HEAD(&mst_state->vcpis);

	drm_atomic_private_obj_init(dev, &mgr->base,
				    &mst_state->base,
				    &drm_dp_mst_topology_state_funcs);

	return 0;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
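
/*
 * Example (illustrative; the structure and field names are hypothetical):
 * a driver initialises the manager once, typically when creating the
 * connector that owns the MST-capable DP port:
 *
 *	ret = drm_dp_mst_topology_mgr_init(&conn->mst_mgr, dev, &conn->aux,
 *					   16, 4, conn->base.base.id);
 *	if (ret)
 *		return ret;
 */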

/**
 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
 * @mgr: manager to destroy
 */
void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
{
	drm_dp_mst_topology_mgr_set_mst(mgr, false);
	flush_work(&mgr->work);
	cancel_work_sync(&mgr->delayed_destroy_work);
	mutex_lock(&mgr->payload_lock);
	kfree(mgr->payloads);
	mgr->payloads = NULL;
	kfree(mgr->proposed_vcpis);
	mgr->proposed_vcpis = NULL;
	mutex_unlock(&mgr->payload_lock);
	mgr->dev = NULL;
	mgr->aux = NULL;
	drm_atomic_private_obj_fini(&mgr->base);

	mutex_destroy(&mgr->delayed_destroy_lock);
	mutex_destroy(&mgr->payload_lock);
	mutex_destroy(&mgr->qlock);
	mutex_destroy(&mgr->lock);
	mutex_destroy(&mgr->up_req_lock);
	mutex_destroy(&mgr->probe_lock);
#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
	mutex_destroy(&mgr->topology_ref_history_lock);
#endif
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);

/*
 * Only one shape of transfer is supported over sideband: up to N short
 * writes followed by exactly one read.
 */
static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
{
	int i;

	if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)
		return false;

	for (i = 0; i < num - 1; i++) {
		if (msgs[i].flags & I2C_M_RD ||
		    msgs[i].len > 0xff)
			return false;
	}

	return msgs[num - 1].flags & I2C_M_RD &&
		msgs[num - 1].len <= 0xff;
}
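
/*
 * Example (illustrative): the classic EDID fetch matches the accepted
 * shape -- address/offset writes followed by exactly one read:
 *
 *	u8 offset = 0;
 *	u8 edid[128];
 *	struct i2c_msg msgs[] = {
 *		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &offset },
 *		{ .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = edid },
 *	};
 */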

/* I2C device */
static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
			       int num)
{
	struct drm_dp_aux *aux = adapter->algo_data;
	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	unsigned int i;
	struct drm_dp_sideband_msg_req_body msg;
	struct drm_dp_sideband_msg_tx *txmsg = NULL;
	int ret;

	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
	if (!mstb)
		return -EREMOTEIO;

	if (!remote_i2c_read_ok(msgs, num)) {
		DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
		ret = -EIO;
		goto out;
	}

	memset(&msg, 0, sizeof(msg));
	msg.req_type = DP_REMOTE_I2C_READ;
	msg.u.i2c_read.num_transactions = num - 1;
	msg.u.i2c_read.port_number = port->port_num;
	for (i = 0; i < num - 1; i++) {
		msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
		msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
		msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
		msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
	}
	msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
	msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto out;
	}

	txmsg->dst = mstb;
	drm_dp_encode_sideband_req(&msg, txmsg);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
			ret = -EREMOTEIO;
			goto out;
		}
		if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
			ret = -EIO;
			goto out;
		}
		memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
		ret = num;
	}
out:
	kfree(txmsg);
	drm_dp_mst_topology_put_mstb(mstb);
	return ret;
}

static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
	       I2C_FUNC_10BIT_ADDR;
}

static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
	.functionality = drm_dp_mst_i2c_functionality,
	.master_xfer = drm_dp_mst_i2c_xfer,
};

/**
 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
 * @aux: DisplayPort AUX channel
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
{
	aux->ddc.algo = &drm_dp_mst_i2c_algo;
	aux->ddc.algo_data = aux;
	aux->ddc.retries = 3;

	aux->ddc.class = I2C_CLASS_DDC;
	aux->ddc.owner = THIS_MODULE;
	aux->ddc.dev.parent = aux->dev;
	aux->ddc.dev.of_node = aux->dev->of_node;

	strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
		sizeof(aux->ddc.name));

	return i2c_add_adapter(&aux->ddc);
}

/**
 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
 * @aux: DisplayPort AUX channel
 */
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
{
	i2c_del_adapter(&aux->ddc);
}

/**
 * drm_dp_mst_is_virtual_dpcd() - Is the given port a virtual DP Peer Device
 * @port: The port to check
 *
 * A single physical MST hub object can be represented in the topology
 * by multiple branches, with virtual ports between those branches.
 *
 * As of DP1.4, an MST hub with internal (virtual) ports must expose
 * certain DPCD registers over those ports. See sections 2.6.1.1.1
 * and 2.6.1.1.2 of the DisplayPort specification v1.4 for details.
 *
 * May acquire mgr->lock
 *
 * Returns:
 * true if the port is a virtual DP peer device, false otherwise
 */
static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_port *downstream_port;

	if (!port || port->dpcd_rev < DP_DPCD_REV_14)
		return false;

	/* Virtual DP Sink (Internal Display Panel) */
	if (port->port_num >= 8)
		return true;

	/* DP-to-HDMI Protocol Converter */
	if (port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV &&
	    !port->mcs &&
	    port->ldps)
		return true;

	/* DP-to-DP */
	mutex_lock(&port->mgr->lock);
	if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
	    port->mstb &&
	    port->mstb->num_ports == 2) {
		list_for_each_entry(downstream_port, &port->mstb->ports, next) {
			if (downstream_port->pdt == DP_PEER_DEVICE_SST_SINK &&
			    !downstream_port->input) {
				mutex_unlock(&port->mgr->lock);
				return true;
			}
		}
	}
	mutex_unlock(&port->mgr->lock);

	return false;
}

/**
 * drm_dp_mst_dsc_aux_for_port() - Find the correct aux for DSC
 * @port: The port to check. A leaf of the MST tree with an attached display.
 *
 * Depending on the situation, DSC may be enabled via the endpoint aux,
 * the immediately upstream aux, or the connector's physical aux.
 *
 * This is both the correct aux to read DSC_CAPABILITY and the
 * correct aux to write DSC_ENABLED.
 *
 * This operation can be expensive (up to four aux reads), so
 * the caller should cache the return.
 *
 * Returns:
 * NULL if DSC cannot be enabled on this port, otherwise the aux device
 */
struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_port *immediate_upstream_port;
	struct drm_dp_mst_port *fec_port;
	u8 endpoint_fec;
	u8 endpoint_dsc;

	if (!port)
		return NULL;

	if (port->parent->port_parent)
		immediate_upstream_port = port->parent->port_parent;
	else
		immediate_upstream_port = NULL;

	fec_port = immediate_upstream_port;
	while (fec_port) {
		/*
		 * Each physical link (i.e. not a virtual port) between the
		 * output and the primary device must support FEC
		 */
		if (!drm_dp_mst_is_virtual_dpcd(fec_port) &&
		    !fec_port->fec_capable)
			return NULL;

		fec_port = fec_port->parent->port_parent;
	}

	/* DP-to-DP peer device */
	if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
		u8 upstream_dsc;

		if (drm_dp_dpcd_read(&port->aux,
				     DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
			return NULL;
		if (drm_dp_dpcd_read(&port->aux,
				     DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
			return NULL;
		if (drm_dp_dpcd_read(&immediate_upstream_port->aux,
				     DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
			return NULL;

		/* Endpoint decompression with DP-to-DP peer device */
		if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
		    (endpoint_fec & DP_FEC_CAPABLE) &&
		    (upstream_dsc & 0x2) /* DSC passthrough */)
			return &port->aux;

		/* Virtual DPCD decompression with DP-to-DP peer device */
		return &immediate_upstream_port->aux;
	}

	/* Virtual DPCD decompression with DP-to-HDMI or Virtual DP Sink */
	if (drm_dp_mst_is_virtual_dpcd(port))
		return &port->aux;

	/*
	 * The check below verifies if the MST sink
	 * connected to the GPU is capable of DSC -
	 * therefore the endpoint needs to be
	 * both DSC and FEC capable.
	 */
	if (drm_dp_dpcd_read(&port->aux,
	   DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
		return NULL;
	if (drm_dp_dpcd_read(&port->aux,
	   DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
		return NULL;
	if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
	   (endpoint_fec & DP_FEC_CAPABLE))
		return &port->aux;

	return NULL;
}
EXPORT_SYMBOL(drm_dp_mst_dsc_aux_for_port);
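
/*
 * Example (illustrative): callers should cache the returned aux and use
 * it for both the capability read and the enable write:
 *
 *	struct drm_dp_aux *dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
 *
 *	if (dsc_aux)
 *		drm_dp_dpcd_writeb(dsc_aux, DP_DSC_ENABLE,
 *				   DP_DECOMPRESSION_EN);
 */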