/*
 * Copyright © 2014 Red Hat
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission. The copyright holders make no representations
 * about the suitability of this software for any purpose. It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/i2c.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fixed.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>

/**
 * DOC: dp mst helper
 *
 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
 * protocol. The helpers contain a topology manager and bandwidth manager.
 * The helpers encapsulate the sending and receiving of sideband msgs.
 */
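
/*
 * Illustrative sketch (not part of this file): the typical driver wiring for
 * these helpers. The drm_dp_mst_topology_mgr_init()/_set_mst() calls are the
 * real entry points; the surrounding "ddev" structure, values and error
 * handling are hypothetical and driver-specific.
 *
 *      // at driver load, once the AUX channel is registered:
 *      ret = drm_dp_mst_topology_mgr_init(&ddev->mst_mgr, ddev->drm,
 *                                         &ddev->aux, 16, 4, conn_base_id);
 *
 *      // on hotplug, after probing DP_MSTM_CAP in the sink's DPCD:
 *      ret = drm_dp_mst_topology_mgr_set_mst(&ddev->mst_mgr, true);
 */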
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf);
static int test_calc_pbn_mode(void);

static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id,
				     struct drm_dp_payload *payload);

static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes);

static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_mst_branch *mstb);
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid);

static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);

#define DP_STR(x) [DP_ ## x] = #x

static const char *drm_dp_mst_req_type_str(u8 req_type)
{
	static const char * const req_type_str[] = {
		DP_STR(GET_MSG_TRANSACTION_VERSION),
		DP_STR(LINK_ADDRESS),
		DP_STR(CONNECTION_STATUS_NOTIFY),
		DP_STR(ENUM_PATH_RESOURCES),
		DP_STR(ALLOCATE_PAYLOAD),
		DP_STR(QUERY_PAYLOAD),
		DP_STR(RESOURCE_STATUS_NOTIFY),
		DP_STR(CLEAR_PAYLOAD_ID_TABLE),
		DP_STR(REMOTE_DPCD_READ),
		DP_STR(REMOTE_DPCD_WRITE),
		DP_STR(REMOTE_I2C_READ),
		DP_STR(REMOTE_I2C_WRITE),
		DP_STR(POWER_UP_PHY),
		DP_STR(POWER_DOWN_PHY),
		DP_STR(SINK_EVENT_NOTIFY),
		DP_STR(QUERY_STREAM_ENC_STATUS),
	};

	if (req_type >= ARRAY_SIZE(req_type_str) ||
	    !req_type_str[req_type])
		return "unknown";

	return req_type_str[req_type];
}

#undef DP_STR
#define DP_STR(x) [DP_NAK_ ## x] = #x

static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
{
	static const char * const nak_reason_str[] = {
		DP_STR(WRITE_FAILURE),
		DP_STR(INVALID_READ),
		DP_STR(CRC_FAILURE),
		DP_STR(BAD_PARAM),
		DP_STR(DEFER),
		DP_STR(LINK_FAILURE),
		DP_STR(NO_RESOURCES),
		DP_STR(DPCD_FAIL),
		DP_STR(I2C_NAK),
		DP_STR(ALLOCATE_FAIL),
	};

	if (nak_reason >= ARRAY_SIZE(nak_reason_str) ||
	    !nak_reason_str[nak_reason])
		return "unknown";

	return nak_reason_str[nak_reason];
}

#undef DP_STR

/* sideband msg handling */
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = num_nibbles * 4;
	u8 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x10) == 0x10)
			remainder ^= 0x13;
	}

	number_of_bits = 4;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x10) != 0)
			remainder ^= 0x13;
	}

	return remainder;
}

static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = number_of_bytes * 8;
	u16 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x100) == 0x100)
			remainder ^= 0xd5;
	}

	number_of_bits = 8;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x100) != 0)
			remainder ^= 0xd5;
	}

	return remainder & 0xff;
}

static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
{
	u8 size = 3;

	size += (hdr->lct / 2);
	return size;
}

static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int *len)
{
	int idx = 0;
	int i;
	u8 crc4;

	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		buf[idx++] = hdr->rad[i];
	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
		(hdr->msg_len & 0x3f);
	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);

	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
	buf[idx - 1] |= (crc4 & 0xf);

	*len = idx;
}

static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int buflen, u8 *hdrlen)
{
	u8 crc4;
	u8 len;
	int i;
	u8 idx;

	if (buf[0] == 0)
		return false;
	len = 3;
	len += ((buf[0] & 0xf0) >> 4) / 2;
	if (len > buflen)
		return false;
	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);

	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
		DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
		return false;
	}

	hdr->lct = (buf[0] & 0xf0) >> 4;
	hdr->lcr = (buf[0] & 0xf);
	idx = 1;
	for (i = 0; i < (hdr->lct / 2); i++)
		hdr->rad[i] = buf[idx++];
	hdr->broadcast = (buf[idx] >> 7) & 0x1;
	hdr->path_msg = (buf[idx] >> 6) & 0x1;
	hdr->msg_len = buf[idx] & 0x3f;
	idx++;
	hdr->somt = (buf[idx] >> 7) & 0x1;
	hdr->eomt = (buf[idx] >> 6) & 0x1;
	hdr->seqno = (buf[idx] >> 4) & 0x1;
	idx++;
	*hdrlen = idx;
	return true;
}
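
/*
 * For reference, the sideband message header encoded/decoded above lays out
 * as follows (derived directly from the code; see the DP 1.2a spec for the
 * normative description):
 *
 *      byte 0:            LCT[7:4] | LCR[3:0]
 *      bytes 1..(LCT/2):  RAD, one nibble per hop
 *      next byte:         broadcast[7] | path_msg[6] | msg_len[5:0]
 *      last byte:         SOMT[7] | EOMT[6] | seqno[4] | crc4[3:0]
 */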
static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
				       struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	int i;
	u8 *buf = raw->msg;

	buf[idx++] = req->req_type & 0x7f;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
		idx++;
		break;
	case DP_ALLOCATE_PAYLOAD:
		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
			(req->u.allocate_payload.number_sdp_streams & 0xf);
		idx++;
		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn >> 8);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
		idx++;
		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
			idx++;
		}
		if (req->u.allocate_payload.number_sdp_streams & 1) {
			i = req->u.allocate_payload.number_sdp_streams - 1;
			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
			idx++;
		}
		break;
	case DP_QUERY_PAYLOAD:
		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
		idx++;
		break;
	case DP_REMOTE_DPCD_READ:
		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_read.num_bytes);
		idx++;
		break;

	case DP_REMOTE_DPCD_WRITE:
		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
		idx += req->u.dpcd_write.num_bytes;
		break;
	case DP_REMOTE_I2C_READ:
		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
		idx++;
		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
			idx++;
			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
			idx++;
			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
			idx += req->u.i2c_read.transactions[i].num_bytes;

			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
			idx++;
		}
		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_read.num_bytes_read);
		idx++;
		break;

	case DP_REMOTE_I2C_WRITE:
		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
		idx += req->u.i2c_write.num_bytes;
		break;

	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
		idx++;
		break;
	}
	raw->cur_len = idx;
}

static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
{
	u8 crc4;

	crc4 = drm_dp_msg_data_crc4(msg, len);
	msg[len] = crc4;
}

static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
					 struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	u8 *buf = raw->msg;

	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);

	raw->cur_len = idx;
}

/* this adds a chunk of msg to the builder to get the final msg */
static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
				      u8 *replybuf, u8 replybuflen, bool hdr)
{
	int ret;
	u8 crc4;

	if (hdr) {
		u8 hdrlen;
		struct drm_dp_sideband_msg_hdr recv_hdr;

		ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
		if (ret == false) {
			print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
			return false;
		}

		/*
		 * ignore out-of-order messages or messages that are part of a
		 * failed transaction
		 */
		if (!recv_hdr.somt && !msg->have_somt)
			return false;

		/* get length contained in this portion */
		msg->curchunk_len = recv_hdr.msg_len;
		msg->curchunk_hdrlen = hdrlen;

		/* we have already gotten a SOMT - don't bother parsing */
		if (recv_hdr.somt && msg->have_somt)
			return false;

		if (recv_hdr.somt) {
			memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
			msg->have_somt = true;
		}
		if (recv_hdr.eomt)
			msg->have_eomt = true;

		/* copy the bytes for the remainder of this header chunk */
		msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
		memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
	} else {
		memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
		msg->curchunk_idx += replybuflen;
	}

	if (msg->curchunk_idx >= msg->curchunk_len) {
		/* do CRC */
		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
		/* copy chunk into bigger msg */
		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
		msg->curlen += msg->curchunk_len - 1;
	}
	return true;
}
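
/*
 * Worked example of the chunking handled above (the numbers follow from the
 * "48 - 1 - hdrlen" arithmetic used on the transmit side): with LCT 1 the
 * header is 3 bytes, so each 48-byte chunk carries up to 44 message bytes
 * plus the data CRC. A 60-byte sideband body therefore arrives as one SOMT
 * chunk carrying 44 bytes and one EOMT chunk carrying the remaining 16, and
 * drm_dp_sideband_msg_build() is called once per chunk to reassemble them
 * into msg->msg[].
 */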
static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	int i;

	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
	idx += 16;
	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
		if (raw->msg[idx] & 0x80)
			repmsg->u.link_addr.ports[i].input_port = 1;

		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);

		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
		if (repmsg->u.link_addr.ports[i].input_port == 0)
			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		if (repmsg->u.link_addr.ports[i].input_port == 0) {
			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
			idx++;
			if (idx > raw->curlen)
				goto fail_len;
			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
			idx += 16;
			if (idx > raw->curlen)
				goto fail_len;
			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
			idx++;
		}
		if (idx > raw->curlen)
			goto fail_len;
	}

	return true;
fail_len:
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
						   struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
						      struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
	idx++;
	/* TODO check */
	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen) {
		DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n",
			      idx, raw->curlen);
		return false;
	}
	return true;
}

static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
					struct drm_dp_sideband_msg_reply_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
	msg->req_type = (raw->msg[0] & 0x7f);

	if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
		msg->u.nak.reason = raw->msg[17];
		msg->u.nak.nak_data = raw->msg[18];
		return false;
	}

	switch (msg->req_type) {
	case DP_LINK_ADDRESS:
		return drm_dp_sideband_parse_link_address(raw, msg);
	case DP_QUERY_PAYLOAD:
		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
	case DP_REMOTE_DPCD_READ:
		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
	case DP_REMOTE_DPCD_WRITE:
		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
	case DP_REMOTE_I2C_READ:
		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
	case DP_ENUM_PATH_RESOURCES:
		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
	case DP_ALLOCATE_PAYLOAD:
		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
	default:
		DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type,
			  drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}

static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
							   struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
							 struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);

	return true;
fail_len:
	DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
				      struct drm_dp_sideband_msg_req_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->req_type = (raw->msg[0] & 0x7f);

	switch (msg->req_type) {
	case DP_CONNECTION_STATUS_NOTIFY:
		return drm_dp_sideband_parse_connection_status_notify(raw, msg);
	case DP_RESOURCE_STATUS_NOTIFY:
		return drm_dp_sideband_parse_resource_status_notify(raw, msg);
	default:
		DRM_ERROR("Got unknown request 0x%02x (%s)\n", msg->req_type,
			  drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}

static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_WRITE;
	req.u.dpcd_write.port_number = port_num;
	req.u.dpcd_write.dpcd_address = offset;
	req.u.dpcd_write.num_bytes = num_bytes;
	req.u.dpcd_write.bytes = bytes;
	drm_dp_encode_sideband_req(&req, msg);

	return 0;
}

static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_LINK_ADDRESS;
	drm_dp_encode_sideband_req(&req, msg);
	return 0;
}

static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_ENUM_PATH_RESOURCES;
	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
				  u8 vcpi, uint16_t pbn,
				  u8 number_sdp_streams,
				  u8 *sdp_stream_sink)
{
	struct drm_dp_sideband_msg_req_body req;

	memset(&req, 0, sizeof(req));
	req.req_type = DP_ALLOCATE_PAYLOAD;
	req.u.allocate_payload.port_number = port_num;
	req.u.allocate_payload.vcpi = vcpi;
	req.u.allocate_payload.pbn = pbn;
	req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
	memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
	       number_sdp_streams);
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static int build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
				  int port_num, bool power_up)
{
	struct drm_dp_sideband_msg_req_body req;

	if (power_up)
		req.req_type = DP_POWER_UP_PHY;
	else
		req.req_type = DP_POWER_DOWN_PHY;

	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_vcpi *vcpi)
{
	int ret, vcpi_ret;

	mutex_lock(&mgr->payload_lock);
	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
	if (ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of payload ids %d\n", ret);
		goto out_unlock;
	}

	vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
	if (vcpi_ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
		goto out_unlock;
	}

	set_bit(ret, &mgr->payload_mask);
	set_bit(vcpi_ret, &mgr->vcpi_mask);
	vcpi->vcpi = vcpi_ret + 1;
	mgr->proposed_vcpis[ret - 1] = vcpi;
out_unlock:
	mutex_unlock(&mgr->payload_lock);
	return ret;
}

static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
				      int vcpi)
{
	int i;

	if (vcpi == 0)
		return;

	mutex_lock(&mgr->payload_lock);
	DRM_DEBUG_KMS("putting payload %d\n", vcpi);
	clear_bit(vcpi - 1, &mgr->vcpi_mask);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i])
			if (mgr->proposed_vcpis[i]->vcpi == vcpi) {
				mgr->proposed_vcpis[i] = NULL;
				clear_bit(i + 1, &mgr->payload_mask);
			}
	}
	mutex_unlock(&mgr->payload_lock);
}

static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_sideband_msg_tx *txmsg)
{
	unsigned int state;

	/*
	 * All updates to txmsg->state are protected by mgr->qlock, and the two
	 * cases we check here are terminal states. For those the barriers
	 * provided by the wake_up/wait_event pair are enough.
	 */
	state = READ_ONCE(txmsg->state);
	return (state == DRM_DP_SIDEBAND_TX_RX ||
		state == DRM_DP_SIDEBAND_TX_TIMEOUT);
}

static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
				    struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	int ret;

	ret = wait_event_timeout(mgr->tx_waitq,
				 check_txmsg_state(mgr, txmsg),
				 (4 * HZ));
	mutex_lock(&mstb->mgr->qlock);
	if (ret > 0) {
		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
			ret = -EIO;
			goto out;
		}
	} else {
		DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);

		/* dump some state */
		ret = -EIO;

		/* remove from q */
		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
			list_del(&txmsg->next);
		}

		if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
			mstb->tx_slots[txmsg->seqno] = NULL;
		}
	}
out:
	mutex_unlock(&mgr->qlock);

	return ret;
}

static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;

	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
	if (!mstb)
		return NULL;

	mstb->lct = lct;
	if (lct > 1)
		memcpy(mstb->rad, rad, lct / 2);
	INIT_LIST_HEAD(&mstb->ports);
	kref_init(&mstb->topology_kref);
	kref_init(&mstb->malloc_kref);
	return mstb;
}

static void drm_dp_free_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb =
		container_of(kref, struct drm_dp_mst_branch, malloc_kref);

	if (mstb->port_parent)
		drm_dp_mst_put_port_malloc(mstb->port_parent);

	kfree(mstb);
}

/**
 * DOC: Branch device and port refcounting
 *
 * Topology refcount overview
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The refcounting schemes for &struct drm_dp_mst_branch and &struct
 * drm_dp_mst_port are somewhat unusual. Both ports and branch devices have
 * two different kinds of refcounts: topology refcounts, and malloc refcounts.
 *
 * Topology refcounts are not exposed to drivers, and are handled internally
 * by the DP MST helpers. The helpers use them in order to prevent the
 * in-memory topology state from being changed in the middle of critical
 * operations like changing the internal state of payload allocations. This
 * means each branch and port will be considered to be connected to the rest
 * of the topology until its topology refcount reaches zero. Additionally,
 * for ports this means that their associated &struct drm_connector will stay
 * registered with userspace until the port's refcount reaches 0.
 *
 * Malloc refcount overview
 * ~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * Malloc references are used to keep a &struct drm_dp_mst_port or &struct
 * drm_dp_mst_branch allocated even after all of its topology references have
 * been dropped, so that the driver or MST helpers can safely access each
 * branch's last known state before it was disconnected from the topology.
 * When the malloc refcount of a port or branch reaches 0, the memory
 * allocation containing the &struct drm_dp_mst_branch or &struct
 * drm_dp_mst_port respectively will be freed.
 *
 * For &struct drm_dp_mst_branch, malloc refcounts are not currently exposed
 * to drivers. As of writing this documentation, there are no drivers that
 * have a usecase for accessing &struct drm_dp_mst_branch outside of the MST
 * helpers. Exposing this API to drivers in a race-free manner would take more
 * tweaking of the refcounting scheme, however patches are welcome provided
 * there is a legitimate driver usecase for this.
 *
 * Refcount relationships in a topology
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * Let's take a look at why the relationship between topology and malloc
 * refcounts is designed the way it is.
 *
 * .. kernel-figure:: dp-mst/topology-figure-1.dot
 *
 *    An example of topology and malloc refs in a DP MST topology with two
 *    active payloads. Topology refcount increments are indicated by solid
 *    lines, and malloc refcount increments are indicated by dashed lines.
 *    Each starts from the branch which incremented the refcount, and ends at
 *    the branch to which the refcount belongs, i.e. the arrow points the
 *    same way as the C pointers used to reference a structure.
 *
 * As you can see in the above figure, every branch increments the topology
 * refcount of its children, and increments the malloc refcount of its
 * parent. Additionally, every payload increments the malloc refcount of its
 * assigned port by 1.
 *
 * So, what would happen if MSTB #3 from the above figure was unplugged from
 * the system, but the driver hadn't yet removed payload #2 from port #3? The
 * topology would start to look like the figure below.
 *
 * .. kernel-figure:: dp-mst/topology-figure-2.dot
 *
 *    Ports and branch devices which have been released from memory are
 *    colored grey, and references which have been removed are colored red.
 *
 * Whenever a port or branch device's topology refcount reaches zero, it will
 * decrement the topology refcounts of all its children, the malloc refcount
 * of its parent, and finally its own malloc refcount. For MSTB #4 and port
 * #4, this means they both have been disconnected from the topology and freed
 * from memory. But, because payload #2 is still holding a reference to port
 * #3, port #3 is removed from the topology but its &struct drm_dp_mst_port
 * is still accessible from memory. This also means port #3 has not yet
 * decremented the malloc refcount of MSTB #3, so its &struct
 * drm_dp_mst_branch will also stay allocated in memory until port #3's
 * malloc refcount reaches 0.
 *
 * This relationship is necessary because in order to release payload #2, we
 * need to be able to figure out the last relative of port #3 that's still
 * connected to the topology. In this case, we would travel up the topology as
 * shown below.
 *
 * .. kernel-figure:: dp-mst/topology-figure-3.dot
 *
 * And finally, remove payload #2 by communicating with port #2 through
 * sideband transactions.
 */
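
/*
 * A minimal sketch of the pattern this scheme enables; "my_payload" and the
 * surrounding driver state are hypothetical:
 *
 *      // while the payload exists, keep the port's memory alive:
 *      drm_dp_mst_get_port_malloc(port);
 *      my_payload->port = port;
 *
 *      // ...the port may be unplugged here; my_payload->port remains valid
 *      // memory even though the port is gone from the topology...
 *
 *      // when the payload is torn down:
 *      drm_dp_mst_put_port_malloc(my_payload->port);
 *      my_payload->port = NULL;
 */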
/**
 * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
 *
 * Increments &drm_dp_mst_branch.malloc_kref. When
 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
 * will be released and @mstb may no longer be used.
 *
 * See also: drm_dp_mst_put_mstb_malloc()
 */
static void
drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	kref_get(&mstb->malloc_kref);
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
}

/**
 * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
 *
 * Decrements &drm_dp_mst_branch.malloc_kref. When
 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
 * will be released and @mstb may no longer be used.
 *
 * See also: drm_dp_mst_get_mstb_malloc()
 */
static void
drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
	kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
}

static void drm_dp_free_mst_port(struct kref *kref)
{
	struct drm_dp_mst_port *port =
		container_of(kref, struct drm_dp_mst_port, malloc_kref);

	drm_dp_mst_put_mstb_malloc(port->parent);
	kfree(port);
}

/**
 * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
 *
 * Increments &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
 * reaches 0, the memory allocation for @port will be released and @port may
 * no longer be used.
 *
 * Because @port could potentially be freed at any time by the DP MST helpers
 * if &drm_dp_mst_port.malloc_kref reaches 0, including during a call to this
 * function, drivers that wish to make use of &struct drm_dp_mst_port should
 * ensure that they grab at least one main malloc reference to their MST ports
 * in &drm_dp_mst_topology_cbs.add_connector. This callback is called before
 * there is any chance for &drm_dp_mst_port.malloc_kref to reach 0.
 *
 * See also: drm_dp_mst_put_port_malloc()
 */
void
drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
{
	kref_get(&port->malloc_kref);
	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref));
}
EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);
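
/*
 * Hypothetical example of the rule described above: a driver's
 * &drm_dp_mst_topology_cbs.add_connector implementation grabbing its main
 * malloc reference (all names other than the helper calls are made up):
 *
 *      static struct drm_connector *
 *      my_mst_add_connector(struct drm_dp_mst_topology_mgr *mgr,
 *                           struct drm_dp_mst_port *port, const char *path)
 *      {
 *              struct my_connector *conn = my_connector_create(mgr, path);
 *
 *              if (!conn)
 *                      return NULL;
 *
 *              conn->port = port;
 *              drm_dp_mst_get_port_malloc(port); // dropped in conn destroy
 *              return &conn->base;
 *      }
 */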
/**
 * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
 *
 * Decrements &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
 * reaches 0, the memory allocation for @port will be released and @port may
 * no longer be used.
 *
 * See also: drm_dp_mst_get_port_malloc()
 */
void
drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
{
	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
	kref_put(&port->malloc_kref, drm_dp_free_mst_port);
}
EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);

static void drm_dp_destroy_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb =
		container_of(kref, struct drm_dp_mst_branch, topology_kref);
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	struct drm_dp_mst_port *port, *tmp;
	bool wake_tx = false;

	mutex_lock(&mgr->lock);
	list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
		list_del(&port->next);
		drm_dp_mst_topology_put_port(port);
	}
	mutex_unlock(&mgr->lock);

	/* drop any tx slots msg */
	mutex_lock(&mstb->mgr->qlock);
	if (mstb->tx_slots[0]) {
		mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		mstb->tx_slots[0] = NULL;
		wake_tx = true;
	}
	if (mstb->tx_slots[1]) {
		mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		mstb->tx_slots[1] = NULL;
		wake_tx = true;
	}
	mutex_unlock(&mstb->mgr->qlock);

	if (wake_tx)
		wake_up_all(&mstb->mgr->tx_waitq);

	drm_dp_mst_put_mstb_malloc(mstb);
}

/**
 * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
 * branch device unless it's zero
 * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
 *
 * Attempts to grab a topology reference to @mstb, if it hasn't yet been
 * removed from the topology (e.g. &drm_dp_mst_branch.topology_kref has
 * reached 0). Holding a topology reference implies that a malloc reference
 * will be held to @mstb as long as the user holds the topology reference.
 *
 * Care should be taken to ensure that the user has at least one malloc
 * reference to @mstb. If you already have a topology reference to @mstb, you
 * should use drm_dp_mst_topology_get_mstb() instead.
 *
 * See also:
 * drm_dp_mst_topology_get_mstb()
 * drm_dp_mst_topology_put_mstb()
 *
 * Returns:
 * * 1: A topology reference was grabbed successfully
 * * 0: @mstb is no longer in the topology, no reference was grabbed
 */
static int __must_check
drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
{
	int ret = kref_get_unless_zero(&mstb->topology_kref);

	if (ret)
		DRM_DEBUG("mstb %p (%d)\n", mstb,
			  kref_read(&mstb->topology_kref));

	return ret;
}
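
/*
 * The usual shape of a try-get, sketched (this requires that the caller
 * already holds a malloc reference to the mstb, per the comment above):
 *
 *      if (drm_dp_mst_topology_try_get_mstb(mstb)) {
 *              // mstb is still part of the topology; safe to e.g. send
 *              // sideband messages to it here...
 *              drm_dp_mst_topology_put_mstb(mstb);
 *      }
 */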
/**
 * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
 * branch device
 * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
 *
 * Increments &drm_dp_mst_branch.topology_kref without checking whether or
 * not it's already reached 0. This is only valid to use in scenarios where
 * you are already guaranteed to have at least one active topology reference
 * to @mstb. Otherwise, drm_dp_mst_topology_try_get_mstb() must be used.
 *
 * See also:
 * drm_dp_mst_topology_try_get_mstb()
 * drm_dp_mst_topology_put_mstb()
 */
static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
{
	WARN_ON(kref_read(&mstb->topology_kref) == 0);
	kref_get(&mstb->topology_kref);
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
}

/**
 * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to release the topology reference from
 *
 * Releases a topology reference from @mstb by decrementing
 * &drm_dp_mst_branch.topology_kref.
 *
 * See also:
 * drm_dp_mst_topology_try_get_mstb()
 * drm_dp_mst_topology_get_mstb()
 */
static void
drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
{
	DRM_DEBUG("mstb %p (%d)\n",
		  mstb, kref_read(&mstb->topology_kref) - 1);
	kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
}

static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
{
	struct drm_dp_mst_branch *mstb;

	switch (old_pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		/* remove i2c over sideband */
		drm_dp_mst_unregister_i2c_bus(&port->aux);
		break;
	case DP_PEER_DEVICE_MST_BRANCHING:
		mstb = port->mstb;
		port->mstb = NULL;
		drm_dp_mst_topology_put_mstb(mstb);
		break;
	}
}

static void drm_dp_destroy_port(struct kref *kref)
{
	struct drm_dp_mst_port *port =
		container_of(kref, struct drm_dp_mst_port, topology_kref);
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;

	if (!port->input) {
		kfree(port->cached_edid);

		/*
		 * The only time we don't have a connector
		 * on an output port is if the connector init
		 * fails.
		 */
		if (port->connector) {
			/* we can't destroy the connector here, as
			 * we might be holding the mode_config.mutex
			 * from an EDID retrieval */

			mutex_lock(&mgr->destroy_connector_lock);
			list_add(&port->next, &mgr->destroy_connector_list);
			mutex_unlock(&mgr->destroy_connector_lock);
			schedule_work(&mgr->destroy_connector_work);
			return;
		}
		/* no need to clean up vcpi
		 * as if we have no connector we never setup a vcpi */
		drm_dp_port_teardown_pdt(port, port->pdt);
		port->pdt = DP_PEER_DEVICE_NONE;
	}
	drm_dp_mst_put_port_malloc(port);
}

/**
 * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
 * port unless it's zero
 * @port: &struct drm_dp_mst_port to increment the topology refcount of
 *
 * Attempts to grab a topology reference to @port, if it hasn't yet been
 * removed from the topology (e.g. &drm_dp_mst_port.topology_kref has reached
 * 0). Holding a topology reference implies that a malloc reference will be
 * held to @port as long as the user holds the topology reference.
 *
 * Care should be taken to ensure that the user has at least one malloc
 * reference to @port. If you already have a topology reference to @port, you
 * should use drm_dp_mst_topology_get_port() instead.
 *
 * See also:
 * drm_dp_mst_topology_get_port()
 * drm_dp_mst_topology_put_port()
 *
 * Returns:
 * * 1: A topology reference was grabbed successfully
 * * 0: @port is no longer in the topology, no reference was grabbed
 */
static int __must_check
drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
{
	int ret = kref_get_unless_zero(&port->topology_kref);

	if (ret)
		DRM_DEBUG("port %p (%d)\n", port,
			  kref_read(&port->topology_kref));

	return ret;
}

/**
 * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
 * @port: The &struct drm_dp_mst_port to increment the topology refcount of
 *
 * Increments &drm_dp_mst_port.topology_kref without checking whether or
 * not it's already reached 0. This is only valid to use in scenarios where
 * you are already guaranteed to have at least one active topology reference
 * to @port. Otherwise, drm_dp_mst_topology_try_get_port() must be used.
 *
 * See also:
 * drm_dp_mst_topology_try_get_port()
 * drm_dp_mst_topology_put_port()
 */
static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
{
	WARN_ON(kref_read(&port->topology_kref) == 0);
	kref_get(&port->topology_kref);
	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref));
}

/**
 * drm_dp_mst_topology_put_port() - release a topology reference to a port
 * @port: The &struct drm_dp_mst_port to release the topology reference from
 *
 * Releases a topology reference from @port by decrementing
 * &drm_dp_mst_port.topology_kref.
 *
 * See also:
 * drm_dp_mst_topology_try_get_port()
 * drm_dp_mst_topology_get_port()
 */
static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
{
	DRM_DEBUG("port %p (%d)\n",
		  port, kref_read(&port->topology_kref) - 1);
	kref_put(&port->topology_kref, drm_dp_destroy_port);
}

static struct drm_dp_mst_branch *
drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb,
					      struct drm_dp_mst_branch *to_find)
{
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *rmstb;

	if (to_find == mstb)
		return mstb;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->mstb) {
			rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
			    port->mstb, to_find);
			if (rmstb)
				return rmstb;
		}
	}
	return NULL;
}

static struct drm_dp_mst_branch *
drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_branch *rmstb = NULL;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
		    mgr->mst_primary, mstb);

		if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb))
			rmstb = NULL;
	}
	mutex_unlock(&mgr->lock);
	return rmstb;
}

static struct drm_dp_mst_port *
drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb,
					      struct drm_dp_mst_port *to_find)
{
	struct drm_dp_mst_port *port, *mport;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port == to_find)
			return port;

		if (port->mstb) {
			mport = drm_dp_mst_topology_get_port_validated_locked(
			    port->mstb, to_find);
			if (mport)
				return mport;
		}
	}
	return NULL;
}

static struct drm_dp_mst_port *
drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_port *rport = NULL;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		rport = drm_dp_mst_topology_get_port_validated_locked(
		    mgr->mst_primary, port);

		if (rport && !drm_dp_mst_topology_try_get_port(rport))
			rport = NULL;
	}
	mutex_unlock(&mgr->lock);
	return rport;
}

static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
{
	struct drm_dp_mst_port *port;
	int ret;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->port_num == port_num) {
			ret = drm_dp_mst_topology_try_get_port(port);
			return ret ? port : NULL;
		}
	}

	return NULL;
}

/*
 * calculate a new RAD for this MST branch device:
 * if parent has an LCT of 2 then it has 1 nibble of RAD,
 * if parent has an LCT of 3 then it has 2 nibbles of RAD,
 * and so on, one nibble per hop.
 */
static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
			       u8 *rad)
{
	int parent_lct = port->parent->lct;
	int shift = 4;
	int idx = (parent_lct - 1) / 2;

	if (parent_lct > 1) {
		memcpy(rad, port->parent->rad, idx + 1);
		shift = (parent_lct % 2) ? 4 : 0;
	} else
		rad[0] = 0;

	rad[idx] |= port->port_num << shift;
	return parent_lct + 1;
}
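
/*
 * Worked example of the math above: for a port hanging directly off the
 * primary branch (parent LCT 1), idx is 0 and shift stays 4, so rad[0]
 * becomes port_num << 4 and the new device gets LCT 2. A port of that child
 * (parent LCT 2) lands in the low nibble of rad[0] (shift 0) and gets LCT 3,
 * and so on, one nibble per hop.
 */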
/*
 * returns true if the port should trigger sending a link address request for
 * a newly created mstb
 */
static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
{
	int ret;
	u8 rad[6], lct;
	bool send_link = false;

	switch (port->pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		/* add i2c over sideband */
		ret = drm_dp_mst_register_i2c_bus(&port->aux);
		break;
	case DP_PEER_DEVICE_MST_BRANCHING:
		lct = drm_dp_calculate_rad(port, rad);

		port->mstb = drm_dp_add_mst_branch_device(lct, rad);
		if (port->mstb) {
			port->mstb->mgr = port->mgr;
			port->mstb->port_parent = port;
			/*
			 * Make sure this port's memory allocation stays
			 * around until its child MSTB releases it
			 */
			drm_dp_mst_get_port_malloc(port);

			send_link = true;
		}
		break;
	}
	return send_link;
}

static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
{
	int ret;

	memcpy(mstb->guid, guid, 16);

	if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
		if (mstb->port_parent) {
			ret = drm_dp_send_dpcd_write(
					mstb->mgr,
					mstb->port_parent,
					DP_GUID,
					16,
					mstb->guid);
		} else {
			ret = drm_dp_dpcd_write(
					mstb->mgr->aux,
					DP_GUID,
					mstb->guid,
					16);
		}
	}
}

static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
				int pnum,
				char *proppath,
				size_t proppath_size)
{
	int i;
	char temp[8];

	snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
	for (i = 0; i < (mstb->lct - 1); i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = (mstb->rad[i / 2] >> shift) & 0xf;

		snprintf(temp, sizeof(temp), "-%d", port_num);
		strlcat(proppath, temp, proppath_size);
	}
	snprintf(temp, sizeof(temp), "-%d", pnum);
	strlcat(proppath, temp, proppath_size);
}
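
/*
 * For instance (following the loop above), a port numbered 8 on a branch at
 * LCT 2 that was reached through port 1 of the primary branch, on a manager
 * with conn_base_id 30, yields the connector path property "mst:30-1-8".
 */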
static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
			    struct drm_device *dev,
			    struct drm_dp_link_addr_reply_port *port_msg)
{
	struct drm_dp_mst_port *port;
	bool ret;
	bool created = false;
	int old_pdt = 0;
	int old_ddps = 0;

	port = drm_dp_get_port(mstb, port_msg->port_number);
	if (!port) {
		port = kzalloc(sizeof(*port), GFP_KERNEL);
		if (!port)
			return;
		kref_init(&port->topology_kref);
		kref_init(&port->malloc_kref);
		port->parent = mstb;
		port->port_num = port_msg->port_number;
		port->mgr = mstb->mgr;
		port->aux.name = "DPMST";
		port->aux.dev = dev->dev;

		/*
		 * Make sure the memory allocation for our parent branch stays
		 * around until our own memory allocation is released
		 */
		drm_dp_mst_get_mstb_malloc(mstb);

		created = true;
	} else {
		old_pdt = port->pdt;
		old_ddps = port->ddps;
	}

	port->pdt = port_msg->peer_device_type;
	port->input = port_msg->input_port;
	port->mcs = port_msg->mcs;
	port->ddps = port_msg->ddps;
	port->ldps = port_msg->legacy_device_plug_status;
	port->dpcd_rev = port_msg->dpcd_revision;
	port->num_sdp_streams = port_msg->num_sdp_streams;
	port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;

	/* manage mstb port lists with mgr lock - take a reference
	   for this list */
	if (created) {
		mutex_lock(&mstb->mgr->lock);
		drm_dp_mst_topology_get_port(port);
		list_add(&port->next, &mstb->ports);
		mutex_unlock(&mstb->mgr->lock);
	}

	if (old_ddps != port->ddps) {
		if (port->ddps) {
			if (!port->input) {
				drm_dp_send_enum_path_resources(mstb->mgr,
								mstb, port);
			}
		} else {
			port->available_pbn = 0;
		}
	}

	if (old_pdt != port->pdt && !port->input) {
		drm_dp_port_teardown_pdt(port, old_pdt);

		ret = drm_dp_port_setup_pdt(port);
		if (ret)
			drm_dp_send_link_address(mstb->mgr, port->mstb);
	}

	if (created && !port->input) {
		char proppath[255];

		build_mst_prop_path(mstb, port->port_num, proppath,
				    sizeof(proppath));
		port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr,
								   port,
								   proppath);
		if (!port->connector) {
			/* remove it from the port list */
			mutex_lock(&mstb->mgr->lock);
			list_del(&port->next);
			mutex_unlock(&mstb->mgr->lock);
			/* drop port list reference */
			drm_dp_mst_topology_put_port(port);
			goto out;
		}
		if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
		     port->pdt == DP_PEER_DEVICE_SST_SINK) &&
		    port->port_num >= DP_MST_LOGICAL_PORT_0) {
			port->cached_edid = drm_get_edid(port->connector,
							 &port->aux.ddc);
			drm_connector_set_tile_property(port->connector);
		}
		(*mstb->mgr->cbs->register_connector)(port->connector);
	}

out:
	/* put reference to this port */
	drm_dp_mst_topology_put_port(port);
}

static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
			       struct drm_dp_connection_status_notify *conn_stat)
{
	struct drm_dp_mst_port *port;
	int old_pdt;
	int old_ddps;
	bool dowork = false;

	port = drm_dp_get_port(mstb, conn_stat->port_number);
	if (!port)
		return;

	old_ddps = port->ddps;
	old_pdt = port->pdt;
	port->pdt = conn_stat->peer_device_type;
	port->mcs = conn_stat->message_capability_status;
	port->ldps = conn_stat->legacy_device_plug_status;
	port->ddps = conn_stat->displayport_device_plug_status;

	if (old_ddps != port->ddps) {
		if (port->ddps) {
			dowork = true;
		} else {
			port->available_pbn = 0;
		}
	}
	if (old_pdt != port->pdt && !port->input) {
		drm_dp_port_teardown_pdt(port, old_pdt);

		if (drm_dp_port_setup_pdt(port))
			dowork = true;
	}

	drm_dp_mst_topology_put_port(port);
	if (dowork)
		queue_work(system_long_wq, &mstb->mgr->work);
}

static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
							       u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_port *port;
	int i, ret;
	/* find the port by iterating down */

	mutex_lock(&mgr->lock);
	mstb = mgr->mst_primary;

	if (!mstb)
		goto out;

	for (i = 0; i < lct - 1; i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = (rad[i / 2] >> shift) & 0xf;

		list_for_each_entry(port, &mstb->ports, next) {
			if (port->port_num == port_num) {
				mstb = port->mstb;
				if (!mstb) {
					DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
					goto out;
				}

				break;
			}
		}
	}

	ret = drm_dp_mst_topology_try_get_mstb(mstb);
	if (!ret)
		mstb = NULL;
out:
	mutex_unlock(&mgr->lock);
	return mstb;
}

static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
	struct drm_dp_mst_branch *mstb,
	uint8_t *guid)
{
	struct drm_dp_mst_branch *found_mstb;
	struct drm_dp_mst_port *port;

	if (memcmp(mstb->guid, guid, 16) == 0)
		return mstb;

	list_for_each_entry(port, &mstb->ports, next) {
		if (!port->mstb)
			continue;

		found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);

		if (found_mstb)
			return found_mstb;
	}

	return NULL;
}

static struct drm_dp_mst_branch *
drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
				     uint8_t *guid)
{
	struct drm_dp_mst_branch *mstb;
	int ret;

	/* find the port by iterating down */
	mutex_lock(&mgr->lock);

	mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
	if (mstb) {
		ret = drm_dp_mst_topology_try_get_mstb(mstb);
		if (!ret)
			mstb = NULL;
	}

	mutex_unlock(&mgr->lock);
	return mstb;
}

static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
					       struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *mstb_child;

	if (!mstb->link_address_sent)
		drm_dp_send_link_address(mgr, mstb);

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->input)
			continue;

		if (!port->ddps)
			continue;

		if (!port->available_pbn)
			drm_dp_send_enum_path_resources(mgr, mstb, port);

		if (port->mstb) {
			mstb_child = drm_dp_mst_topology_get_mstb_validated(
			    mgr, port->mstb);
			if (mstb_child) {
				drm_dp_check_and_send_link_address(mgr, mstb_child);
				drm_dp_mst_topology_put_mstb(mstb_child);
			}
		}
	}
}

static void drm_dp_mst_link_probe_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
	struct drm_dp_mst_branch *mstb;
	int ret;

	mutex_lock(&mgr->lock);
	mstb = mgr->mst_primary;
	if (mstb) {
		ret = drm_dp_mst_topology_try_get_mstb(mstb);
		if (!ret)
			mstb = NULL;
	}
	mutex_unlock(&mgr->lock);
	if (mstb) {
		drm_dp_check_and_send_link_address(mgr, mstb);
		drm_dp_mst_topology_put_mstb(mstb);
	}
}

static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid)
{
	u64 salt;

	if (memchr_inv(guid, 0, 16))
		return true;

	salt = get_jiffies_64();

	memcpy(&guid[0], &salt, sizeof(u64));
	memcpy(&guid[8], &salt, sizeof(u64));

	return false;
}

static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_READ;
	req.u.dpcd_read.port_number = port_num;
	req.u.dpcd_read.dpcd_address = offset;
	req.u.dpcd_read.num_bytes = num_bytes;
	drm_dp_encode_sideband_req(&req, msg);

	return 0;
}

static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
				    bool up, u8 *msg, int len)
{
	int ret;
	int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
	int tosend, total, offset;
	int retries = 0;

retry:
	total = len;
	offset = 0;
	do {
		tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);

		ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
					&msg[offset],
					tosend);
		if (ret != tosend) {
			if (ret == -EIO && retries < 5) {
				retries++;
				goto retry;
			}
			DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);

			return -EIO;
		}
		offset += tosend;
		total -= tosend;
	} while (total > 0);
	return 0;
}

static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
				  struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_branch *mstb = txmsg->dst;
	u8 req_type;

	/* both msg slots are full */
	if (txmsg->seqno == -1) {
		if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
			DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
			return -EAGAIN;
		}
		if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
			txmsg->seqno = mstb->last_seqno;
			mstb->last_seqno ^= 1;
		} else if (mstb->tx_slots[0] == NULL)
			txmsg->seqno = 0;
		else
			txmsg->seqno = 1;
		mstb->tx_slots[txmsg->seqno] = txmsg;
	}

	req_type = txmsg->msg[0] & 0x7f;
	if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
	    req_type == DP_RESOURCE_STATUS_NOTIFY)
		hdr->broadcast = 1;
	else
		hdr->broadcast = 0;
	hdr->path_msg = txmsg->path_msg;
	hdr->lct = mstb->lct;
	hdr->lcr = mstb->lct - 1;
	if (mstb->lct > 1)
		memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
	hdr->seqno = txmsg->seqno;
	return 0;
}

/*
 * process a single block of the next message in the sideband queue
 */
static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_sideband_msg_tx *txmsg,
				   bool up)
{
	u8 chunk[48];
	struct drm_dp_sideband_msg_hdr hdr;
	int len, space, idx, tosend;
	int ret;

	memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));

	if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
		txmsg->seqno = -1;
		txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
	}

	/* make hdr from dst mst - for replies use seqno
	   otherwise assign one */
	ret = set_hdr_from_dst_qlock(&hdr, txmsg);
	if (ret < 0)
		return ret;

	/* amount left to send in this message */
	len = txmsg->cur_len - txmsg->cur_offset;

	/* 48 - sideband msg size - 1 byte for data CRC, x header bytes */
	space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);

	tosend = min(len, space);
	if (len == txmsg->cur_len)
		hdr.somt = 1;
	if (space >= len)
		hdr.eomt = 1;

	hdr.msg_len = tosend + 1;
	drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
	memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
	/* add crc at end */
	drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
	idx += tosend + 1;

	ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
	if (ret) {
		DRM_DEBUG_KMS("sideband msg failed to send\n");
		return ret;
	}

	txmsg->cur_offset += tosend;
	if (txmsg->cur_offset == txmsg->cur_len) {
		txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
		return 1;
	}
	return 0;
}

static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	WARN_ON(!mutex_is_locked(&mgr->qlock));

	/* construct a chunk from the first msg in the tx_msg queue */
	if (list_empty(&mgr->tx_msg_downq))
		return;

	txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
	ret = process_single_tx_qlock(mgr, txmsg, false);
	if (ret == 1) {
		/* txmsg is sent it should be in the slots now */
		list_del(&txmsg->next);
	} else if (ret) {
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
		list_del(&txmsg->next);
		if (txmsg->seqno != -1)
			txmsg->dst->tx_slots[txmsg->seqno] = NULL;
		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		wake_up_all(&mgr->tx_waitq);
	}
}

/* called holding qlock */
static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_sideband_msg_tx *txmsg)
{
	int ret;

	/* construct a chunk from the first msg in the tx_msg queue */
	ret = process_single_tx_qlock(mgr, txmsg, true);

	if (ret != 1)
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);

	txmsg->dst->tx_slots[txmsg->seqno] = NULL;
}

static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_sideband_msg_tx *txmsg)
{
	mutex_lock(&mgr->qlock);
	list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
	if (list_is_singular(&mgr->tx_msg_downq))
		process_single_down_tx_qlock(mgr);
	mutex_unlock(&mgr->qlock);
}

static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_mst_branch *mstb)
{
	int len;
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return;

	txmsg->dst = mstb;
	len = build_link_address(txmsg);

	mstb->link_address_sent = true;
	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		int i;

		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
			DRM_DEBUG_KMS("link address nak received\n");
		} else {
			DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
				DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i,
					      txmsg->reply.u.link_addr.ports[i].input_port,
					      txmsg->reply.u.link_addr.ports[i].peer_device_type,
					      txmsg->reply.u.link_addr.ports[i].port_number,
					      txmsg->reply.u.link_addr.ports[i].dpcd_revision,
					      txmsg->reply.u.link_addr.ports[i].mcs,
					      txmsg->reply.u.link_addr.ports[i].ddps,
					      txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status,
					      txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
					      txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
			}

			drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);

			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
				drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
			}
			drm_kms_helper_hotplug_event(mgr->dev);
		}
	} else {
		mstb->link_address_sent = false;
		DRM_DEBUG_KMS("link address failed %d\n", ret);
	}

	kfree(txmsg);
}

static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port)
{
	int len;
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return -ENOMEM;

	txmsg->dst = mstb;
	len = build_enum_path_resources(txmsg, port->port_num);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
			DRM_DEBUG_KMS("enum path resources nak received\n");
		} else {
			if (port->port_num != txmsg->reply.u.path_resources.port_number)
				DRM_ERROR("got incorrect port in response\n");
			DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number,
				      txmsg->reply.u.path_resources.avail_payload_bw_number);
			port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number;
		}
	}

	kfree(txmsg);
	return 0;
}

static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
{
	if (!mstb->port_parent)
		return NULL;

	if (mstb->port_parent->mstb != mstb)
		return mstb->port_parent;

	return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
}

/*
 * Searches upwards in the topology starting from mstb to try to find the
 * closest available parent of mstb that's still connected to the rest of the
 * topology. This can be used in order to perform operations like releasing
 * payloads, where the branch device which owned the payload may no longer be
 * around and thus would require that the payload on the last living relative
 * be freed instead.
 */
static struct drm_dp_mst_branch *
drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_mst_branch *mstb,
					int *port_num)
{
	struct drm_dp_mst_branch *rmstb = NULL;
	struct drm_dp_mst_port *found_port;

	mutex_lock(&mgr->lock);
	if (!mgr->mst_primary)
		goto out;

	do {
		found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
		if (!found_port)
			break;

		if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) {
			rmstb = found_port->parent;
			*port_num = found_port->port_num;
		} else {
			/* Search again, starting from this parent */
			mstb = found_port->parent;
		}
	} while (!rmstb);
out:
	mutex_unlock(&mgr->lock);
	return rmstb;
}

static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_mst_port *port,
				   int id,
				   int pbn)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	struct drm_dp_mst_branch *mstb;
	int len, ret, port_num;
	u8 sinks[DRM_DP_MAX_SDP_STREAMS];
	int i;

	port_num = port->port_num;
	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
	if (!mstb) {
		mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
							       port->parent,
							       &port_num);

		if (!mstb)
			return -EINVAL;
	}

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto fail_put;
	}

	for (i = 0; i < port->num_sdp_streams; i++)
		sinks[i] = i;

	txmsg->dst = mstb;
	len = build_allocate_payload(txmsg, port_num,
				     id,
				     pbn, port->num_sdp_streams, sinks);

	drm_dp_queue_down_tx(mgr, txmsg);

	/*
	 * FIXME: there is a small chance that between getting the last
	 * connected mstb and sending the payload message, the last connected
	 * mstb could also be removed from the topology. In the future, this
	 * needs to be fixed by restarting the
	 * drm_dp_get_last_connected_port_and_mstb() search in the event of a
	 * timeout if the topology is still connected to the system.
	 */
	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
			ret = -EINVAL;
		else
			ret = 0;
	}
	kfree(txmsg);
fail_put:
	drm_dp_mst_topology_put_mstb(mstb);
	return ret;
}

2206 int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
2207 struct drm_dp_mst_port *port, bool power_up)
2209 struct drm_dp_sideband_msg_tx *txmsg;
2212 port = drm_dp_mst_topology_get_port_validated(mgr, port);
2216 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2218 drm_dp_mst_topology_put_port(port);
2222 txmsg->dst = port->parent;
2223 len = build_power_updown_phy(txmsg, port->port_num, power_up);
2224 drm_dp_queue_down_tx(mgr, txmsg);
2226 ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
2228 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
2234 drm_dp_mst_topology_put_port(port);
2238 EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
2240 static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
2242 struct drm_dp_payload *payload)
2246 ret = drm_dp_dpcd_write_payload(mgr, id, payload);
2248 payload->payload_state = 0;
2251 payload->payload_state = DP_PAYLOAD_LOCAL;
2255 static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
2256 struct drm_dp_mst_port *port,
2258 struct drm_dp_payload *payload)
2261 ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
2264 payload->payload_state = DP_PAYLOAD_REMOTE;
2268 static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
2269 struct drm_dp_mst_port *port,
2271 struct drm_dp_payload *payload)
2273 DRM_DEBUG_KMS("\n");
2274 /* it's okay for these to fail */
2276 drm_dp_payload_send_msg(mgr, port, id, 0);
2279 drm_dp_dpcd_write_payload(mgr, id, payload);
2280 payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
2284 static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
2286 struct drm_dp_payload *payload)
2288 payload->payload_state = 0;
2293 * drm_dp_update_payload_part1() - Execute payload update part 1
2294 * @mgr: manager to use.
2296 * This iterates over all proposed virtual channels, and tries to
2297 * allocate space in the link for them. For 0->slots transitions,
2298 * this step just writes the VCPI to the MST device. For slots->0
2299 * transitions, this writes the updated VCPIs and removes the
2300 * remote VC payloads.
2302  * After calling this, the driver should generate ACT and payload
2303  * packets.
2305 int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
2307 struct drm_dp_payload req_payload;
2308 struct drm_dp_mst_port *port;
2312 mutex_lock(&mgr->payload_lock);
2313 for (i = 0; i < mgr->max_payloads; i++) {
2314 struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
2315 struct drm_dp_payload *payload = &mgr->payloads[i];
2316 bool put_port = false;
2318 /* Solve the current payloads: compare them to the hw ones,
2319  * then update the hw view */
2320 req_payload.start_slot = cur_slots;
2322 port = container_of(vcpi, struct drm_dp_mst_port,
2325 /* Validated ports don't matter if we're releasing VCPI */
2328 if (vcpi->num_slots) {
2329 port = drm_dp_mst_topology_get_port_validated(
2332 mutex_unlock(&mgr->payload_lock);
2338 req_payload.num_slots = vcpi->num_slots;
2339 req_payload.vcpi = vcpi->vcpi;
2342 req_payload.num_slots = 0;
2345 payload->start_slot = req_payload.start_slot;
2346 /* work out what is required to happen with this payload */
2347 if (payload->num_slots != req_payload.num_slots) {
2349 /* need to push an update for this payload */
2350 if (req_payload.num_slots) {
2351 drm_dp_create_payload_step1(mgr, vcpi->vcpi,
2353 payload->num_slots = req_payload.num_slots;
2354 payload->vcpi = req_payload.vcpi;
2356 } else if (payload->num_slots) {
2357 payload->num_slots = 0;
2358 drm_dp_destroy_payload_step1(mgr, port,
2361 req_payload.payload_state =
2362 payload->payload_state;
2363 payload->start_slot = 0;
2365 payload->payload_state = req_payload.payload_state;
2367 cur_slots += req_payload.num_slots;
2370 drm_dp_mst_topology_put_port(port);
2373 for (i = 0; i < mgr->max_payloads; i++) {
2374 if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL)
2377 DRM_DEBUG_KMS("removing payload %d\n", i);
2378 for (j = i; j < mgr->max_payloads - 1; j++) {
2379 mgr->payloads[j] = mgr->payloads[j + 1];
2380 mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
2382 if (mgr->proposed_vcpis[j] &&
2383 mgr->proposed_vcpis[j]->num_slots) {
2384 set_bit(j + 1, &mgr->payload_mask);
2386 clear_bit(j + 1, &mgr->payload_mask);
2390 memset(&mgr->payloads[mgr->max_payloads - 1], 0,
2391 sizeof(struct drm_dp_payload));
2392 mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
2393 clear_bit(mgr->max_payloads, &mgr->payload_mask);
2395 mutex_unlock(&mgr->payload_lock);
2399 EXPORT_SYMBOL(drm_dp_update_payload_part1);
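/*
 * A minimal sketch of the legacy payload sequence built on the helpers in
 * this file, assuming VCPI has already been allocated with
 * drm_dp_mst_allocate_vcpi(). example_send_act() is a hypothetical stand-in
 * for whatever hardware-specific step triggers the ACT sequence.
 */
#if 0
static int example_enable_payloads(struct drm_dp_mst_topology_mgr *mgr)
{
	int ret;

	ret = drm_dp_update_payload_part1(mgr);
	if (ret < 0)
		return ret;

	example_send_act();	/* hw-specific: make the hw send the ACT */
	ret = drm_dp_check_act_status(mgr);
	if (ret < 0)
		return ret;

	return drm_dp_update_payload_part2(mgr);
}
#endif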
2402 * drm_dp_update_payload_part2() - Execute payload update part 2
2403 * @mgr: manager to use.
2405 * This iterates over all proposed virtual channels, and tries to
2406 * allocate space in the link for them. For 0->slots transitions,
2407 * this step writes the remote VC payload commands. For slots->0
2408 * this just resets some internal state.
2410 int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
2412 struct drm_dp_mst_port *port;
2415 mutex_lock(&mgr->payload_lock);
2416 for (i = 0; i < mgr->max_payloads; i++) {
2418 if (!mgr->proposed_vcpis[i])
2421 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
2423 DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
2424 if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
2425 ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
2426 } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
2427 ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
2430 mutex_unlock(&mgr->payload_lock);
2434 mutex_unlock(&mgr->payload_lock);
2437 EXPORT_SYMBOL(drm_dp_update_payload_part2);
2439 #if 0 /* unused as of yet */
2440 static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
2441 struct drm_dp_mst_port *port,
2442 int offset, int size)
2445 struct drm_dp_sideband_msg_tx *txmsg;
2447 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2451 	/* honour the caller's offset/size instead of a hardcoded read */
	len = build_dpcd_read(txmsg, port->port_num, offset, size);
2452 txmsg->dst = port->parent;
2454 drm_dp_queue_down_tx(mgr, txmsg);
2460 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
2461 struct drm_dp_mst_port *port,
2462 int offset, int size, u8 *bytes)
2466 struct drm_dp_sideband_msg_tx *txmsg;
2467 struct drm_dp_mst_branch *mstb;
2469 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
2473 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2479 len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
2482 drm_dp_queue_down_tx(mgr, txmsg);
2484 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2486 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
2493 drm_dp_mst_topology_put_mstb(mstb);
2497 static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
2499 struct drm_dp_sideband_msg_reply_body reply;
2501 reply.reply_type = DP_SIDEBAND_REPLY_ACK;
2502 reply.req_type = req_type;
2503 drm_dp_encode_sideband_reply(&reply, msg);
2507 static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
2508 struct drm_dp_mst_branch *mstb,
2509 int req_type, int seqno, bool broadcast)
2511 struct drm_dp_sideband_msg_tx *txmsg;
2513 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2518 txmsg->seqno = seqno;
2519 drm_dp_encode_up_ack_reply(txmsg, req_type);
2521 mutex_lock(&mgr->qlock);
2523 process_single_up_tx_qlock(mgr, txmsg);
2525 mutex_unlock(&mgr->qlock);
2531 static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
2535 switch (dp_link_bw) {
2537 DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
2538 dp_link_bw, dp_link_count);
2541 case DP_LINK_BW_1_62:
2542 *out = 3 * dp_link_count;
2544 case DP_LINK_BW_2_7:
2545 *out = 5 * dp_link_count;
2547 case DP_LINK_BW_5_4:
2548 *out = 10 * dp_link_count;
2550 case DP_LINK_BW_8_1:
2551 *out = 15 * dp_link_count;
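	/*
	 * Informal derivation of the constants above (plain arithmetic, not
	 * quoted from the spec): after 8b/10b coding a 1.62 Gb/s lane moves
	 * 162 MB/s. An MTP has 64 timeslots, so one slot on one lane carries
	 * 162/64 MB/s, and with the PBN unit defined as 54/64 MB/s that is
	 * (162/64) / (54/64) = 3 PBN. The 2.7, 5.4 and 8.1 Gb/s rates scale
	 * the same way to 5, 10 and 15 PBN per slot per lane.
	 */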
2558 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
2559 * @mgr: manager to set state for
2560 * @mst_state: true to enable MST on this connector - false to disable.
2562 * This is called by the driver when it detects an MST capable device plugged
2563 * into a DP MST capable port, or when a DP MST capable device is unplugged.
2565 int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
2568 struct drm_dp_mst_branch *mstb = NULL;
2570 mutex_lock(&mgr->lock);
2571 if (mst_state == mgr->mst_state)
2574 mgr->mst_state = mst_state;
2575 /* set the device into MST mode */
2577 WARN_ON(mgr->mst_primary);
2580 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
2581 if (ret != DP_RECEIVER_CAP_SIZE) {
2582 DRM_DEBUG_KMS("failed to read DPCD\n");
2586 if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1],
2587 mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK,
2593 /* add initial branch device at LCT 1 */
2594 mstb = drm_dp_add_mst_branch_device(1, NULL);
2601 /* give this the main reference */
2602 mgr->mst_primary = mstb;
2603 drm_dp_mst_topology_get_mstb(mgr->mst_primary);
2605 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
2606 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
2612 struct drm_dp_payload reset_pay;
2613 reset_pay.start_slot = 0;
2614 reset_pay.num_slots = 0x3f;
2615 drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
2618 queue_work(system_long_wq, &mgr->work);
2622 /* disable MST on the device */
2623 mstb = mgr->mst_primary;
2624 mgr->mst_primary = NULL;
2625 /* this can fail if the device is gone */
2626 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
2628 memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
2629 mgr->payload_mask = 0;
2630 set_bit(0, &mgr->payload_mask);
2635 mutex_unlock(&mgr->lock);
2637 drm_dp_mst_topology_put_mstb(mstb);
2641 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
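/*
 * A minimal sketch of how a driver might flip MST mode from its detect
 * path after reading DP_MSTM_CAP. struct example_dp and its members are
 * hypothetical.
 */
#if 0
static void example_detect_mst(struct example_dp *dp)
{
	u8 cap = 0;
	bool mst_capable;

	mst_capable = drm_dp_dpcd_readb(&dp->aux, DP_MSTM_CAP, &cap) == 1 &&
		      (cap & DP_MST_CAP);

	if (mst_capable != dp->is_mst) {
		dp->is_mst = mst_capable;
		drm_dp_mst_topology_mgr_set_mst(&dp->mst_mgr, mst_capable);
	}
}
#endif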
2644 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
2645 * @mgr: manager to suspend
2647 * This function tells the MST device that we can't handle UP messages
2648 * anymore. This should stop it from sending any since we are suspended.
2650 void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
2652 mutex_lock(&mgr->lock);
2653 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
2654 DP_MST_EN | DP_UPSTREAM_IS_SRC);
2655 mutex_unlock(&mgr->lock);
2656 flush_work(&mgr->work);
2657 flush_work(&mgr->destroy_connector_work);
2659 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
2662 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
2663 * @mgr: manager to resume
2665  * This will fetch the DPCD and check whether the device is still there;
2666  * if it is, it will rewrite the MSTM control bits and return.
2668  * If the device has failed, this returns -1 and the driver should do a
2669  * full MST reprobe, in case we were undocked.
2671 int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
2675 mutex_lock(&mgr->lock);
2677 if (mgr->mst_primary) {
2681 sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
2682 if (sret != DP_RECEIVER_CAP_SIZE) {
2683 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
2688 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
2689 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
2691 DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
2696 /* Some hubs forget their guids after they resume */
2697 sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
2699 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
2703 drm_dp_check_mstb_guid(mgr->mst_primary, guid);
2710 mutex_unlock(&mgr->lock);
2713 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
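/*
 * A minimal sketch of a driver resume path reacting to the -1 return
 * described above. example_reprobe_mst() is a hypothetical stand-in for
 * the driver's full reprobe.
 */
#if 0
static void example_resume_mst(struct example_dp *dp)
{
	if (drm_dp_mst_topology_mgr_resume(&dp->mst_mgr) < 0) {
		/* likely undocked during suspend - start over */
		drm_dp_mst_topology_mgr_set_mst(&dp->mst_mgr, false);
		example_reprobe_mst(dp);
	}
}
#endif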
2715 static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
2719 int replylen, origlen, curreply;
2721 struct drm_dp_sideband_msg_rx *msg;
2722 int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
2723 msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;
2725 len = min(mgr->max_dpcd_transaction_bytes, 16);
2726 ret = drm_dp_dpcd_read(mgr->aux, basereg,
2729 DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
2732 ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
2734 DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
2737 replylen = msg->curchunk_len + msg->curchunk_hdrlen;
2742 while (replylen > 0) {
2743 len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
2744 ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
2747 DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
2752 ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
2754 DRM_DEBUG_KMS("failed to build sideband msg\n");
2764 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
2768 if (!drm_dp_get_one_sb_msg(mgr, false)) {
2769 memset(&mgr->down_rep_recv, 0,
2770 sizeof(struct drm_dp_sideband_msg_rx));
2774 if (mgr->down_rep_recv.have_eomt) {
2775 struct drm_dp_sideband_msg_tx *txmsg;
2776 struct drm_dp_mst_branch *mstb;
2778 mstb = drm_dp_get_mst_branch_device(mgr,
2779 mgr->down_rep_recv.initial_hdr.lct,
2780 mgr->down_rep_recv.initial_hdr.rad);
2783 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct);
2784 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2788 /* find the message */
2789 slot = mgr->down_rep_recv.initial_hdr.seqno;
2790 mutex_lock(&mgr->qlock);
2791 txmsg = mstb->tx_slots[slot];
2792 /* remove from slots */
2793 mutex_unlock(&mgr->qlock);
2796 DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
2798 mgr->down_rep_recv.initial_hdr.seqno,
2799 mgr->down_rep_recv.initial_hdr.lct,
2800 mgr->down_rep_recv.initial_hdr.rad[0],
2801 mgr->down_rep_recv.msg[0]);
2802 drm_dp_mst_topology_put_mstb(mstb);
2803 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2807 drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
2809 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
2810 DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
2811 txmsg->reply.req_type,
2812 drm_dp_mst_req_type_str(txmsg->reply.req_type),
2813 txmsg->reply.u.nak.reason,
2814 drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
2815 txmsg->reply.u.nak.nak_data);
2817 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2818 drm_dp_mst_topology_put_mstb(mstb);
2820 mutex_lock(&mgr->qlock);
2821 txmsg->state = DRM_DP_SIDEBAND_TX_RX;
2822 mstb->tx_slots[slot] = NULL;
2823 mutex_unlock(&mgr->qlock);
2825 wake_up_all(&mgr->tx_waitq);
2830 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
2834 if (!drm_dp_get_one_sb_msg(mgr, true)) {
2835 memset(&mgr->up_req_recv, 0,
2836 sizeof(struct drm_dp_sideband_msg_rx));
2840 if (mgr->up_req_recv.have_eomt) {
2841 struct drm_dp_sideband_msg_req_body msg;
2842 struct drm_dp_mst_branch *mstb = NULL;
2845 if (!mgr->up_req_recv.initial_hdr.broadcast) {
2846 mstb = drm_dp_get_mst_branch_device(mgr,
2847 mgr->up_req_recv.initial_hdr.lct,
2848 mgr->up_req_recv.initial_hdr.rad);
2850 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
2851 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2856 seqno = mgr->up_req_recv.initial_hdr.seqno;
2857 drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
2859 if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
2860 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
2863 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid);
2866 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
2867 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2871 drm_dp_update_port(mstb, &msg.u.conn_stat);
2873 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
2874 drm_kms_helper_hotplug_event(mgr->dev);
2876 } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
2877 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
2879 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid);
2882 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
2883 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2887 DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
2891 drm_dp_mst_topology_put_mstb(mstb);
2893 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2899 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
2900 * @mgr: manager to notify irq for.
2901 * @esi: 4 bytes from SINK_COUNT_ESI
2902 * @handled: whether the hpd interrupt was consumed or not
2904 * This should be called from the driver when it detects a short IRQ,
2905 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
2906  * topology manager will process the sideband messages received as a result
2907  * of this.
2909 int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
2916 if (sc != mgr->sink_count) {
2917 mgr->sink_count = sc;
2921 if (esi[1] & DP_DOWN_REP_MSG_RDY) {
2922 ret = drm_dp_mst_handle_down_rep(mgr);
2926 if (esi[1] & DP_UP_REQ_MSG_RDY) {
2927 ret |= drm_dp_mst_handle_up_req(mgr);
2931 drm_dp_mst_kick_tx(mgr);
2934 EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
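/*
 * A minimal sketch of a short-pulse handler feeding this function,
 * assuming the driver reads the full 14-byte ESI block (the function
 * itself only looks at the first few bytes). struct example_dp is
 * hypothetical; error handling is reduced to early returns.
 */
#if 0
static void example_short_pulse(struct example_dp *dp)
{
	u8 esi[DP_DPRX_ESI_LEN] = {};
	bool handled = false;

	if (drm_dp_dpcd_read(&dp->aux, DP_SINK_COUNT_ESI, esi,
			     DP_DPRX_ESI_LEN) != DP_DPRX_ESI_LEN)
		return;

	drm_dp_mst_hpd_irq(&dp->mst_mgr, esi, &handled);
	if (handled)
		/* ack the bits we serviced so the sink can deassert the IRQ */
		drm_dp_dpcd_write(&dp->aux, DP_SINK_COUNT_ESI + 1,
				  &esi[1], 3);
}
#endif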
2937 * drm_dp_mst_detect_port() - get connection status for an MST port
2938 * @connector: DRM connector for this port
2939 * @mgr: manager for this port
2940 * @port: unverified pointer to a port
2942  * This returns the current connection state for a port. It validates that
2943  * the port pointer still exists, so the caller doesn't require a reference.
2945 enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
2946 struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
2948 enum drm_connector_status status = connector_status_disconnected;
2950 /* we need to search for the port in the mgr in case it's gone */
2951 port = drm_dp_mst_topology_get_port_validated(mgr, port);
2953 return connector_status_disconnected;
2958 switch (port->pdt) {
2959 case DP_PEER_DEVICE_NONE:
2960 case DP_PEER_DEVICE_MST_BRANCHING:
2963 case DP_PEER_DEVICE_SST_SINK:
2964 status = connector_status_connected;
2965 /* for logical ports - cache the EDID */
2966 if (port->port_num >= 8 && !port->cached_edid) {
2967 port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
2970 case DP_PEER_DEVICE_DP_LEGACY_CONV:
2972 status = connector_status_connected;
2976 drm_dp_mst_topology_put_port(port);
2979 EXPORT_SYMBOL(drm_dp_mst_detect_port);
2982 * drm_dp_mst_port_has_audio() - Check whether port has audio capability or not
2983 * @mgr: manager for this port
2984 * @port: unverified pointer to a port.
2986 * This returns whether the port supports audio or not.
2988 bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
2989 struct drm_dp_mst_port *port)
2993 port = drm_dp_mst_topology_get_port_validated(mgr, port);
2996 ret = port->has_audio;
2997 drm_dp_mst_topology_put_port(port);
3000 EXPORT_SYMBOL(drm_dp_mst_port_has_audio);
3003 * drm_dp_mst_get_edid() - get EDID for an MST port
3004 * @connector: toplevel connector to get EDID for
3005 * @mgr: manager for this port
3006 * @port: unverified pointer to a port.
3008  * This returns an EDID for the port connected to a connector.
3009  * It validates the pointer still exists so the caller doesn't require a
3010  * reference.
3012 struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
3014 struct edid *edid = NULL;
3016 /* we need to search for the port in the mgr in case it's gone */
3017 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3021 if (port->cached_edid)
3022 edid = drm_edid_duplicate(port->cached_edid);
3024 edid = drm_get_edid(connector, &port->aux.ddc);
3026 port->has_audio = drm_detect_monitor_audio(edid);
3027 drm_dp_mst_topology_put_port(port);
3030 EXPORT_SYMBOL(drm_dp_mst_get_edid);
3033 * drm_dp_find_vcpi_slots() - Find VCPI slots for this PBN value
3034 * @mgr: manager to use
3035 * @pbn: payload bandwidth to convert into slots.
3037 * Calculate the number of VCPI slots that will be required for the given PBN
3038  * value. This function is deprecated, and should not be used in atomic
3039  * drivers.
3042 * The total slots required for this port, or error.
3044 int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
3049 num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
3051 /* max. time slots - one slot for MTP header */
3056 EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
3058 static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
3059 struct drm_dp_vcpi *vcpi, int pbn, int slots)
3063 /* max. time slots - one slot for MTP header */
3068 vcpi->aligned_pbn = slots * mgr->pbn_div;
3069 vcpi->num_slots = slots;
3071 ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
3078 * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state
3079 * @state: global atomic state
3080 * @mgr: MST topology manager for the port
3081 * @port: port to find vcpi slots for
3082 * @pbn: bandwidth required for the mode in PBN
3084 * Allocates VCPI slots to @port, replacing any previous VCPI allocations it
3085 * may have had. Any atomic drivers which support MST must call this function
3086 * in their &drm_encoder_helper_funcs.atomic_check() callback to change the
3087 * current VCPI allocation for the new state, but only when
3088 * &drm_crtc_state.mode_changed or &drm_crtc_state.connectors_changed is set
3089 * to ensure compatibility with userspace applications that still use the
3090 * legacy modesetting UAPI.
3092 * Allocations set by this function are not checked against the bandwidth
3093  * constraints of @mgr until the driver calls drm_dp_mst_atomic_check().
3095 * Additionally, it is OK to call this function multiple times on the same
3096  * @port as needed. It is not OK, however, to call this function and
3097  * drm_dp_atomic_release_vcpi_slots() in the same atomic check phase.
3100 * drm_dp_atomic_release_vcpi_slots()
3101 * drm_dp_mst_atomic_check()
3104 * Total slots in the atomic state assigned for this port, or a negative error
3105 * code if the port no longer exists
3107 int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
3108 struct drm_dp_mst_topology_mgr *mgr,
3109 struct drm_dp_mst_port *port, int pbn)
3111 struct drm_dp_mst_topology_state *topology_state;
3112 struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
3113 int prev_slots, req_slots, ret;
3115 topology_state = drm_atomic_get_mst_topology_state(state, mgr);
3116 if (IS_ERR(topology_state))
3117 return PTR_ERR(topology_state);
3119 /* Find the current allocation for this port, if any */
3120 list_for_each_entry(pos, &topology_state->vcpis, next) {
3121 if (pos->port == port) {
3122 			vcpi = pos;
3123 			prev_slots = vcpi->vcpi;
3126 * This should never happen, unless the driver tries
3127  * releasing and allocating the same VCPI allocation,
3128  * which is an error.
3130 if (WARN_ON(!prev_slots)) {
3131 DRM_ERROR("cannot allocate and release VCPI on [MST PORT:%p] in the same state\n",
3142 req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
3144 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
3145 port->connector->base.id, port->connector->name,
3146 port, prev_slots, req_slots);
3148 /* Add the new allocation to the state */
3150 vcpi = kzalloc(sizeof(*vcpi), GFP_KERNEL);
3154 drm_dp_mst_get_port_malloc(port);
3156 list_add(&vcpi->next, &topology_state->vcpis);
3158 vcpi->vcpi = req_slots;
3163 EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
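/*
 * A minimal sketch of the atomic_check usage described above, assuming an
 * encoder whose connector carries pointers to its MST manager and port;
 * example_connector/to_example_connector are hypothetical, and 24 bpp is
 * an arbitrary illustrative value.
 */
#if 0
static int example_encoder_atomic_check(struct drm_encoder *encoder,
					struct drm_crtc_state *crtc_state,
					struct drm_connector_state *conn_state)
{
	struct example_connector *conn =
		to_example_connector(conn_state->connector);
	int pbn, slots;

	/* only touch VCPI when a full modeset may happen, per the doc above */
	if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
		return 0;

	pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, 24);
	slots = drm_dp_atomic_find_vcpi_slots(crtc_state->state,
					      conn->mst_mgr, conn->port, pbn);
	return slots < 0 ? slots : 0;
}
#endif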
3166 * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
3167 * @state: global atomic state
3168 * @mgr: MST topology manager for the port
3169 * @port: The port to release the VCPI slots from
3171 * Releases any VCPI slots that have been allocated to a port in the atomic
3172 * state. Any atomic drivers which support MST must call this function in
3173 * their &drm_connector_helper_funcs.atomic_check() callback when the
3174 * connector will no longer have VCPI allocated (e.g. because its CRTC was
3175 * removed) when it had VCPI allocated in the previous atomic state.
3177 * It is OK to call this even if @port has been removed from the system.
3178 * Additionally, it is OK to call this function multiple times on the same
3179  * @port as needed. It is not OK, however, to call this function and
3180  * drm_dp_atomic_find_vcpi_slots() on the same @port in a single atomic check
3181  * phase.
3184 * drm_dp_atomic_find_vcpi_slots()
3185 * drm_dp_mst_atomic_check()
3188 * 0 if all slots for this port were added back to
3189 * &drm_dp_mst_topology_state.avail_slots or negative error code
3191 int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
3192 struct drm_dp_mst_topology_mgr *mgr,
3193 struct drm_dp_mst_port *port)
3195 struct drm_dp_mst_topology_state *topology_state;
3196 struct drm_dp_vcpi_allocation *pos;
3199 topology_state = drm_atomic_get_mst_topology_state(state, mgr);
3200 if (IS_ERR(topology_state))
3201 return PTR_ERR(topology_state);
3203 list_for_each_entry(pos, &topology_state->vcpis, next) {
3204 if (pos->port == port) {
3209 if (WARN_ON(!found)) {
3210 DRM_ERROR("no VCPI for [MST PORT:%p] found in mst state %p\n",
3211 port, &topology_state->base);
3215 DRM_DEBUG_ATOMIC("[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi);
3217 drm_dp_mst_put_port_malloc(port);
3223 EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots);
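/*
 * A minimal sketch of the matching release path, assuming the same
 * hypothetical example_connector as in the allocation sketch above:
 * release the slots once the connector loses its CRTC.
 */
#if 0
static int example_connector_atomic_check(struct drm_connector *connector,
					  struct drm_connector_state *new_state)
{
	struct drm_atomic_state *state = new_state->state;
	struct drm_connector_state *old_state =
		drm_atomic_get_old_connector_state(state, connector);
	struct example_connector *conn = to_example_connector(connector);

	if (old_state->crtc && !new_state->crtc)
		return drm_dp_atomic_release_vcpi_slots(state, conn->mst_mgr,
							conn->port);
	return 0;
}
#endif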
3226 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
3227 * @mgr: manager for this port
3228 * @port: port to allocate a virtual channel for.
3229 * @pbn: payload bandwidth number to request
3230 * @slots: returned number of slots for this PBN.
3232 bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
3233 struct drm_dp_mst_port *port, int pbn, int slots)
3237 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3244 if (port->vcpi.vcpi > 0) {
3245 DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n",
3246 port->vcpi.vcpi, port->vcpi.pbn, pbn);
3247 if (pbn == port->vcpi.pbn) {
3248 drm_dp_mst_topology_put_port(port);
3253 ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
3255 DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
3256 DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
3259 DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
3260 pbn, port->vcpi.num_slots);
3262 /* Keep port allocated until its payload has been removed */
3263 drm_dp_mst_get_port_malloc(port);
3264 drm_dp_mst_topology_put_port(port);
3269 EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
3271 int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
3274 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3278 slots = port->vcpi.num_slots;
3279 drm_dp_mst_topology_put_port(port);
3282 EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
3285 * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
3286 * @mgr: manager for this port
3287 * @port: unverified pointer to a port.
3289  * This just resets the number of slots for the port's VCPI for later programming.
3291 void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
3294 * A port with VCPI will remain allocated until its VCPI is
3295 * released, no verified ref needed
3298 port->vcpi.num_slots = 0;
3300 EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
3303 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
3304 * @mgr: manager for this port
3305 * @port: port to deallocate vcpi for
3307 * This can be called unconditionally, regardless of whether
3308 * drm_dp_mst_allocate_vcpi() succeeded or not.
3310 void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
3311 struct drm_dp_mst_port *port)
3313 if (!port->vcpi.vcpi)
3316 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
3317 port->vcpi.num_slots = 0;
3319 port->vcpi.aligned_pbn = 0;
3320 port->vcpi.vcpi = 0;
3321 drm_dp_mst_put_port_malloc(port);
3323 EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
3325 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
3326 int id, struct drm_dp_payload *payload)
3328 u8 payload_alloc[3], status;
3332 drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
3333 DP_PAYLOAD_TABLE_UPDATED);
3335 payload_alloc[0] = id;
3336 payload_alloc[1] = payload->start_slot;
3337 payload_alloc[2] = payload->num_slots;
3339 ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
3341 DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
3346 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
3348 DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
3352 if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
3355 usleep_range(10000, 20000);
3358 DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
3369 * drm_dp_check_act_status() - Check ACT handled status.
3370 * @mgr: manager to use
3372 * Check the payload status bits in the DPCD for ACT handled completion.
3374 int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
3381 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
3384 DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
3388 if (status & DP_PAYLOAD_ACT_HANDLED)
3393 } while (count < 30);
3395 if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
3396 DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
3404 EXPORT_SYMBOL(drm_dp_check_act_status);
3407 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
3408 * @clock: dot clock for the mode
3409 * @bpp: bpp for the mode.
3411 * This uses the formula in the spec to calculate the PBN value for a mode.
3413 int drm_dp_calc_pbn_mode(int clock, int bpp)
3423 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
3424 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
3425 * common multiplier to render an integer PBN for all link rate/lane
3426 * counts combinations
3428 * peak_kbps *= (1006/1000)
3429 * peak_kbps *= (64/54)
3430 * peak_kbps *= 8 convert to bytes
3433 numerator = 64 * 1006;
3434 denominator = 54 * 8 * 1000 * 1000;
3436 	kbps *= numerator;
3437 	peak_kbps = drm_fixp_from_fraction(kbps, denominator);
3439 return drm_fixp2int_ceil(peak_kbps);
3441 EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
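/*
 * Worked example matching the self-test below: a 154000 kHz clock at
 * 30 bpp gives kbps = 154000 * 30 = 4620000. Scaled by 64 * 1006 and
 * divided by 54 * 8 * 1000 * 1000 that is roughly 688.6, and the
 * ceiling yields the expected 689 PBN.
 */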
3443 static int test_calc_pbn_mode(void)
3446 ret = drm_dp_calc_pbn_mode(154000, 30);
3448 DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
3449 154000, 30, 689, ret);
3452 ret = drm_dp_calc_pbn_mode(234000, 30);
3454 DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
3455 234000, 30, 1047, ret);
3458 ret = drm_dp_calc_pbn_mode(297000, 24);
3460 DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
3461 297000, 24, 1063, ret);
3467 /* we want to kick the TX after we've ACKed the up/down IRQs. */
3468 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
3470 queue_work(system_long_wq, &mgr->tx_work);
3473 static void drm_dp_mst_dump_mstb(struct seq_file *m,
3474 struct drm_dp_mst_branch *mstb)
3476 struct drm_dp_mst_port *port;
3477 int tabs = mstb->lct;
3481 for (i = 0; i < tabs; i++)
3485 seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
3486 list_for_each_entry(port, &mstb->ports, next) {
3487 		seq_printf(m, "%sport: %d: input: %d: pdt: %d, ddps: %d ldps: %d, sdp: %d/%d, %p, conn: %p\n",
			   prefix, port->port_num, port->input, port->pdt,
			   port->ddps, port->ldps, port->num_sdp_streams,
			   port->num_sdp_stream_sinks, port, port->connector);
3489 drm_dp_mst_dump_mstb(m, port->mstb);
3493 #define DP_PAYLOAD_TABLE_SIZE 64
3495 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
3500 for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
3501 if (drm_dp_dpcd_read(mgr->aux,
3502 DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
3509 static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
3510 struct drm_dp_mst_port *port, char *name,
3513 struct edid *mst_edid;
3515 mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
3516 drm_edid_get_monitor_name(mst_edid, name, namelen);
3520 * drm_dp_mst_dump_topology(): dump topology to seq file.
3521 * @m: seq_file to dump output to
3522 * @mgr: manager to dump current topology for.
3524  * Helper to dump the MST topology to a seq file for debugfs.
3526 void drm_dp_mst_dump_topology(struct seq_file *m,
3527 struct drm_dp_mst_topology_mgr *mgr)
3530 struct drm_dp_mst_port *port;
3532 mutex_lock(&mgr->lock);
3533 if (mgr->mst_primary)
3534 drm_dp_mst_dump_mstb(m, mgr->mst_primary);
3537 mutex_unlock(&mgr->lock);
3539 mutex_lock(&mgr->payload_lock);
3540 seq_printf(m, "vcpi: %lx %lx %d\n", mgr->payload_mask, mgr->vcpi_mask,
3543 for (i = 0; i < mgr->max_payloads; i++) {
3544 if (mgr->proposed_vcpis[i]) {
3547 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
3548 fetch_monitor_name(mgr, port, name, sizeof(name));
3549 seq_printf(m, "vcpi %d: %d %d %d sink name: %s\n", i,
3550 port->port_num, port->vcpi.vcpi,
3551 port->vcpi.num_slots,
3552 (*name != 0) ? name : "Unknown");
3554 seq_printf(m, "vcpi %d:unused\n", i);
3556 for (i = 0; i < mgr->max_payloads; i++) {
3557 seq_printf(m, "payload %d: %d, %d, %d\n",
3559 mgr->payloads[i].payload_state,
3560 mgr->payloads[i].start_slot,
3561 mgr->payloads[i].num_slots);
3565 mutex_unlock(&mgr->payload_lock);
3567 mutex_lock(&mgr->lock);
3568 if (mgr->mst_primary) {
3569 u8 buf[DP_PAYLOAD_TABLE_SIZE];
3572 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
3573 seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
3574 ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
3575 seq_printf(m, "faux/mst: %*ph\n", 2, buf);
3576 ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
3577 seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
3579 /* dump the standard OUI branch header */
3580 ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
3581 seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
3582 for (i = 0x3; i < 0x8 && buf[i]; i++)
3583 seq_printf(m, "%c", buf[i]);
3584 seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
3585 buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
3586 if (dump_dp_payload_table(mgr, buf))
3587 seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf);
3590 mutex_unlock(&mgr->lock);
3593 EXPORT_SYMBOL(drm_dp_mst_dump_topology);
3595 static void drm_dp_tx_work(struct work_struct *work)
3597 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
3599 mutex_lock(&mgr->qlock);
3600 if (!list_empty(&mgr->tx_msg_downq))
3601 process_single_down_tx_qlock(mgr);
3602 mutex_unlock(&mgr->qlock);
3605 static void drm_dp_destroy_connector_work(struct work_struct *work)
3607 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
3608 struct drm_dp_mst_port *port;
3609 bool send_hotplug = false;
3611 * Not a regular list traverse as we have to drop the destroy
3612 * connector lock before destroying the connector, to avoid AB->BA
3613 * ordering between this lock and the config mutex.
3616 mutex_lock(&mgr->destroy_connector_lock);
3617 port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
3619 mutex_unlock(&mgr->destroy_connector_lock);
3622 list_del(&port->next);
3623 mutex_unlock(&mgr->destroy_connector_lock);
3625 INIT_LIST_HEAD(&port->next);
3627 mgr->cbs->destroy_connector(mgr, port->connector);
3629 drm_dp_port_teardown_pdt(port, port->pdt);
3630 port->pdt = DP_PEER_DEVICE_NONE;
3632 drm_dp_mst_put_port_malloc(port);
3633 send_hotplug = true;
3636 drm_kms_helper_hotplug_event(mgr->dev);
3639 static struct drm_private_state *
3640 drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
3642 struct drm_dp_mst_topology_state *state, *old_state =
3643 to_dp_mst_topology_state(obj->state);
3644 struct drm_dp_vcpi_allocation *pos, *vcpi;
3646 state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
3650 __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
3652 INIT_LIST_HEAD(&state->vcpis);
3654 list_for_each_entry(pos, &old_state->vcpis, next) {
3655 /* Prune leftover freed VCPI allocations */
3659 vcpi = kmemdup(pos, sizeof(*vcpi), GFP_KERNEL);
3663 drm_dp_mst_get_port_malloc(vcpi->port);
3664 list_add(&vcpi->next, &state->vcpis);
3667 return &state->base;
3670 list_for_each_entry_safe(pos, vcpi, &state->vcpis, next) {
3671 drm_dp_mst_put_port_malloc(pos->port);
3679 static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
3680 struct drm_private_state *state)
3682 struct drm_dp_mst_topology_state *mst_state =
3683 to_dp_mst_topology_state(state);
3684 struct drm_dp_vcpi_allocation *pos, *tmp;
3686 list_for_each_entry_safe(pos, tmp, &mst_state->vcpis, next) {
3687 /* We only keep references to ports with non-zero VCPIs */
3689 drm_dp_mst_put_port_malloc(pos->port);
3697 drm_dp_mst_atomic_check_topology_state(struct drm_dp_mst_topology_mgr *mgr,
3698 struct drm_dp_mst_topology_state *mst_state)
3700 struct drm_dp_vcpi_allocation *vcpi;
3701 int avail_slots = 63, payload_count = 0;
3703 list_for_each_entry(vcpi, &mst_state->vcpis, next) {
3704 		/* Releasing VCPI is always OK, even if the port is gone */
3706 DRM_DEBUG_ATOMIC("[MST PORT:%p] releases all VCPI slots\n",
3711 DRM_DEBUG_ATOMIC("[MST PORT:%p] requires %d vcpi slots\n",
3712 vcpi->port, vcpi->vcpi);
3714 avail_slots -= vcpi->vcpi;
3715 if (avail_slots < 0) {
3716 DRM_DEBUG_ATOMIC("[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n",
3717 vcpi->port, mst_state,
3718 avail_slots + vcpi->vcpi);
3722 if (++payload_count > mgr->max_payloads) {
3723 DRM_DEBUG_ATOMIC("[MST MGR:%p] state %p has too many payloads (max=%d)\n",
3724 mgr, mst_state, mgr->max_payloads);
3728 DRM_DEBUG_ATOMIC("[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n",
3729 mgr, mst_state, avail_slots,
3736 * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
3737 * atomic update is valid
3738 * @state: Pointer to the new &struct drm_dp_mst_topology_state
3740 * Checks the given topology state for an atomic update to ensure that it's
3741 * valid. This includes checking whether there's enough bandwidth to support
3742 * the new VCPI allocations in the atomic update.
3744 * Any atomic drivers supporting DP MST must make sure to call this after
3745 * checking the rest of their state in their
3746 * &drm_mode_config_funcs.atomic_check() callback.
3749 * drm_dp_atomic_find_vcpi_slots()
3750 * drm_dp_atomic_release_vcpi_slots()
3754 * 0 if the new state is valid, negative error code otherwise.
3756 int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
3758 struct drm_dp_mst_topology_mgr *mgr;
3759 struct drm_dp_mst_topology_state *mst_state;
3762 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
3763 ret = drm_dp_mst_atomic_check_topology_state(mgr, mst_state);
3770 EXPORT_SYMBOL(drm_dp_mst_atomic_check);
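/*
 * A minimal sketch of where the check slots in, assuming a driver that is
 * otherwise happy with the default helpers; example_atomic_check is
 * hypothetical and would be wired into &drm_mode_config_funcs.atomic_check.
 */
#if 0
static int example_atomic_check(struct drm_device *dev,
				struct drm_atomic_state *state)
{
	int ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	return drm_dp_mst_atomic_check(state);
}
#endif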
3772 const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
3773 .atomic_duplicate_state = drm_dp_mst_duplicate_state,
3774 .atomic_destroy_state = drm_dp_mst_destroy_state,
3776 EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
3779 * drm_atomic_get_mst_topology_state: get MST topology state
3781 * @state: global atomic state
3782 * @mgr: MST topology manager, also the private object in this case
3784 * This function wraps drm_atomic_get_priv_obj_state() passing in the MST atomic
3785  * state vtable so that the private object state returned is that of an MST
3786  * topology object. Also, drm_atomic_get_private_obj_state() expects the caller
3787  * to take care of the locking, so we warn if the connection_mutex is not held.
3791 * The MST topology state or error pointer.
3793 struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
3794 struct drm_dp_mst_topology_mgr *mgr)
3796 struct drm_device *dev = mgr->dev;
3798 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
3799 return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
3801 EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
3804 * drm_dp_mst_topology_mgr_init - initialise a topology manager
3805 * @mgr: manager struct to initialise
3806 * @dev: device providing this structure - for i2c addition.
3807 * @aux: DP helper aux channel to talk to this device
3808 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
3809 * @max_payloads: maximum number of payloads this GPU can source
3810 * @conn_base_id: the connector object ID the MST device is connected to.
3812 * Return 0 for success, or negative error code on failure
3814 int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
3815 struct drm_device *dev, struct drm_dp_aux *aux,
3816 int max_dpcd_transaction_bytes,
3817 int max_payloads, int conn_base_id)
3819 struct drm_dp_mst_topology_state *mst_state;
3821 mutex_init(&mgr->lock);
3822 mutex_init(&mgr->qlock);
3823 mutex_init(&mgr->payload_lock);
3824 mutex_init(&mgr->destroy_connector_lock);
3825 INIT_LIST_HEAD(&mgr->tx_msg_downq);
3826 INIT_LIST_HEAD(&mgr->destroy_connector_list);
3827 INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
3828 INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
3829 INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work);
3830 init_waitqueue_head(&mgr->tx_waitq);
3833 mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
3834 mgr->max_payloads = max_payloads;
3835 mgr->conn_base_id = conn_base_id;
3836 if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
3837 max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
3839 mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
3842 mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
3843 if (!mgr->proposed_vcpis)
3845 set_bit(0, &mgr->payload_mask);
3846 if (test_calc_pbn_mode() < 0)
3847 DRM_ERROR("MST PBN self-test failed\n");
3849 mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
3850 if (mst_state == NULL)
3853 mst_state->mgr = mgr;
3854 INIT_LIST_HEAD(&mst_state->vcpis);
3856 drm_atomic_private_obj_init(dev, &mgr->base,
3858 &drm_dp_mst_topology_state_funcs);
3862 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
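/*
 * A minimal sketch of the init call, assuming it runs once while creating
 * the driver's DP output; the transaction-size and payload limits below
 * are illustrative values, not recommendations.
 */
#if 0
	ret = drm_dp_mst_topology_mgr_init(&dp->mst_mgr, dev, &dp->aux,
					   16 /* max DPCD transaction */,
					   4 /* payload streams */,
					   dp->connector.base.id);
	if (ret)
		return ret;
#endif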
3865 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
3866 * @mgr: manager to destroy
3868 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
3870 drm_dp_mst_topology_mgr_set_mst(mgr, false);
3871 flush_work(&mgr->work);
3872 flush_work(&mgr->destroy_connector_work);
3873 mutex_lock(&mgr->payload_lock);
3874 kfree(mgr->payloads);
3875 mgr->payloads = NULL;
3876 kfree(mgr->proposed_vcpis);
3877 mgr->proposed_vcpis = NULL;
3878 mutex_unlock(&mgr->payload_lock);
3881 drm_atomic_private_obj_fini(&mgr->base);
3884 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
3886 static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
3890 if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)
3893 for (i = 0; i < num - 1; i++) {
3894 		if (msgs[i].flags & I2C_M_RD ||
3895 		    msgs[i].len > 0xff)
3896 			return false;
3899 return msgs[num - 1].flags & I2C_M_RD &&
3900 msgs[num - 1].len <= 0xff;
3904 static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
3907 struct drm_dp_aux *aux = adapter->algo_data;
3908 struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
3909 struct drm_dp_mst_branch *mstb;
3910 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
3912 struct drm_dp_sideband_msg_req_body msg;
3913 struct drm_dp_sideband_msg_tx *txmsg = NULL;
3916 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3920 if (!remote_i2c_read_ok(msgs, num)) {
3921 DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
3926 memset(&msg, 0, sizeof(msg));
3927 msg.req_type = DP_REMOTE_I2C_READ;
3928 msg.u.i2c_read.num_transactions = num - 1;
3929 msg.u.i2c_read.port_number = port->port_num;
3930 for (i = 0; i < num - 1; i++) {
3931 msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
3932 msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
3933 msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
3934 msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
3936 msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
3937 msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
3939 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3946 drm_dp_encode_sideband_req(&msg, txmsg);
3948 drm_dp_queue_down_tx(mgr, txmsg);
3950 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3953 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
3957 if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
3961 memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
3966 drm_dp_mst_topology_put_mstb(mstb);
3970 static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
3972 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
3973 I2C_FUNC_SMBUS_READ_BLOCK_DATA |
3974 I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
3975 I2C_FUNC_10BIT_ADDR;
3978 static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
3979 .functionality = drm_dp_mst_i2c_functionality,
3980 .master_xfer = drm_dp_mst_i2c_xfer,
3984 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
3985 * @aux: DisplayPort AUX channel
3987 * Returns 0 on success or a negative error code on failure.
3989 static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
3991 aux->ddc.algo = &drm_dp_mst_i2c_algo;
3992 aux->ddc.algo_data = aux;
3993 aux->ddc.retries = 3;
3995 aux->ddc.class = I2C_CLASS_DDC;
3996 aux->ddc.owner = THIS_MODULE;
3997 aux->ddc.dev.parent = aux->dev;
3998 aux->ddc.dev.of_node = aux->dev->of_node;
4000 strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
4001 sizeof(aux->ddc.name));
4003 return i2c_add_adapter(&aux->ddc);
4007 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
4008 * @aux: DisplayPort AUX channel
4010 static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
4012 i2c_del_adapter(&aux->ddc);