drivers/gpu/drm/i915/display/intel_hdcp.c (linux.git blob, at "drm/i915/hdcp: Nuke intel_hdcp_transcoder_config()")
1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright (C) 2017 Google, Inc.
4  * Copyright (C) 2017-2019, Intel Corporation.
5  *
6  * Authors:
7  * Sean Paul <seanpaul@chromium.org>
8  * Ramalingam C <ramalingam.c@intel.com>
9  */
10
11 #include <linux/component.h>
12 #include <linux/i2c.h>
13 #include <linux/random.h>
14
15 #include <drm/drm_hdcp.h>
16 #include <drm/i915_component.h>
17
18 #include "i915_reg.h"
19 #include "intel_display_power.h"
20 #include "intel_display_types.h"
21 #include "intel_hdcp.h"
22 #include "intel_sideband.h"
23 #include "intel_connector.h"
24
25 #define KEY_LOAD_TRIES  5
26 #define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS        50
27 #define HDCP2_LC_RETRY_CNT                      3
28
29 static
30 bool intel_hdcp_is_ksv_valid(u8 *ksv)
31 {
32         int i, ones = 0;
33         /* KSV has 20 1's and 20 0's */
34         for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
35                 ones += hweight8(ksv[i]);
36         if (ones != 20)
37                 return false;
38
39         return true;
40 }
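/*
 * Worked example of the 20-ones rule (illustrative value, not a spec test
 * vector): a KSV of five 0x0f bytes has hweight8(0x0f) = 4 set bits per
 * byte, i.e. 20 ones across DRM_HDCP_KSV_LEN (5) bytes, so it would pass
 * the check above.
 */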
41
42 static
43 int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
44                                const struct intel_hdcp_shim *shim, u8 *bksv)
45 {
46         int ret, i, tries = 2;
47
48         /* HDCP spec states that we must retry the bksv if it is invalid */
49         for (i = 0; i < tries; i++) {
50                 ret = shim->read_bksv(intel_dig_port, bksv);
51                 if (ret)
52                         return ret;
53                 if (intel_hdcp_is_ksv_valid(bksv))
54                         break;
55         }
56         if (i == tries) {
57                 DRM_DEBUG_KMS("Bksv is invalid\n");
58                 return -ENODEV;
59         }
60
61         return 0;
62 }
63
64 /* Is HDCP 1.4 supported by both the platform and the sink? */
65 bool intel_hdcp_capable(struct intel_connector *connector)
66 {
67         struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
68         const struct intel_hdcp_shim *shim = connector->hdcp.shim;
69         bool capable = false;
70         u8 bksv[5];
71
72         if (!shim)
73                 return capable;
74
75         if (shim->hdcp_capable) {
76                 shim->hdcp_capable(intel_dig_port, &capable);
77         } else {
78                 if (!intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv))
79                         capable = true;
80         }
81
82         return capable;
83 }
84
85 /* Is HDCP 2.2 supported by both the platform and the sink? */
86 bool intel_hdcp2_capable(struct intel_connector *connector)
87 {
88         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
89         struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
90         struct intel_hdcp *hdcp = &connector->hdcp;
91         bool capable = false;
92
93         /* I915 support for HDCP2.2 */
94         if (!hdcp->hdcp2_supported)
95                 return false;
96
97         /* The MEI interface must be up and the HDCP component bound */
98         mutex_lock(&dev_priv->hdcp_comp_mutex);
99         if (!dev_priv->hdcp_comp_added ||  !dev_priv->hdcp_master) {
100                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
101                 return false;
102         }
103         mutex_unlock(&dev_priv->hdcp_comp_mutex);
104
105         /* Sink's capability for HDCP2.2 */
106         hdcp->shim->hdcp_2_2_capable(intel_dig_port, &capable);
107
108         return capable;
109 }
110
111 static inline
112 bool intel_hdcp_in_use(struct drm_i915_private *dev_priv,
113                        enum transcoder cpu_transcoder, enum port port)
114 {
115         return I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
116                HDCP_STATUS_ENC;
117 }
118
119 static inline
120 bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
121                         enum transcoder cpu_transcoder, enum port port)
122 {
123         return I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
124                LINK_ENCRYPTION_STATUS;
125 }
126
127 static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
128                                     const struct intel_hdcp_shim *shim)
129 {
130         int ret, read_ret;
131         bool ksv_ready;
132
133         /* Poll for ksv list ready (spec says max time allowed is 5s) */
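        /*
         * Assuming the usual i915 __wait_for() convention, the three numeric
         * arguments below are all in microseconds: a 5 s total timeout with a
         * poll interval backing off between 1 ms and 100 ms.
         */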
134         ret = __wait_for(read_ret = shim->read_ksv_ready(intel_dig_port,
135                                                          &ksv_ready),
136                          read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
137                          100 * 1000);
138         if (ret)
139                 return ret;
140         if (read_ret)
141                 return read_ret;
142         if (!ksv_ready)
143                 return -ETIMEDOUT;
144
145         return 0;
146 }
147
148 static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
149 {
150         struct i915_power_domains *power_domains = &dev_priv->power_domains;
151         struct i915_power_well *power_well;
152         enum i915_power_well_id id;
153         bool enabled = false;
154
155         /*
156          * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
157          * On all BXT+, SW can load the keys only when the PW#1 is turned on.
158          */
159         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
160                 id = HSW_DISP_PW_GLOBAL;
161         else
162                 id = SKL_DISP_PW_1;
163
164         mutex_lock(&power_domains->lock);
165
166         /* PG1 (power well #1) needs to be enabled */
167         for_each_power_well(dev_priv, power_well) {
168                 if (power_well->desc->id == id) {
169                         enabled = power_well->desc->ops->is_enabled(dev_priv,
170                                                                     power_well);
171                         break;
172                 }
173         }
174         mutex_unlock(&power_domains->lock);
175
176         /*
177          * Another requirement for HDCP key loadability is an enabled cdclk
178          * PLL. We won't land here without an active crtc, so we assume that
179          * cdclk is already on.
180          */
181
182         return enabled;
183 }
184
185 static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
186 {
187         I915_WRITE(HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
188         I915_WRITE(HDCP_KEY_STATUS, HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS |
189                    HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
190 }
191
192 static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
193 {
194         int ret;
195         u32 val;
196
197         val = I915_READ(HDCP_KEY_STATUS);
198         if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
199                 return 0;
200
201         /*
202          * On HSW and BDW the HW loads the HDCP1.4 key when the display comes
203          * out of reset. So if the key is not already loaded, it's an error state.
204          */
205         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
206                 if (!(I915_READ(HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
207                         return -ENXIO;
208
209         /*
210          * Initiate loading the HDCP key from fuses.
211          *
212          * On BXT+ platforms the HDCP key needs to be loaded by SW. Gen 9
213          * platforms other than BXT and GLK differ from the rest in the key
214          * load trigger process, so GEN9_BC uses the GT Driver Mailbox i/f.
215          */
216         if (IS_GEN9_BC(dev_priv)) {
217                 ret = sandybridge_pcode_write(dev_priv,
218                                               SKL_PCODE_LOAD_HDCP_KEYS, 1);
219                 if (ret) {
220                         DRM_ERROR("Failed to initiate HDCP key load (%d)\n",
221                                   ret);
222                         return ret;
223                 }
224         } else {
225                 I915_WRITE(HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
226         }
227
228         /* Wait for the keys to load (500us) */
229         ret = __intel_wait_for_register(&dev_priv->uncore, HDCP_KEY_STATUS,
230                                         HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
231                                         10, 1, &val);
232         if (ret)
233                 return ret;
234         else if (!(val & HDCP_KEY_LOAD_STATUS))
235                 return -ENXIO;
236
237         /* Send Aksv over to PCH display for use in authentication */
238         I915_WRITE(HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);
239
240         return 0;
241 }
242
243 /* Returns updated SHA-1 index */
244 static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
245 {
246         I915_WRITE(HDCP_SHA_TEXT, sha_text);
247         if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
248                 DRM_ERROR("Timed out waiting for SHA1 ready\n");
249                 return -ETIMEDOUT;
250         }
251         return 0;
252 }
253
254 static
255 u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv,
256                                 enum transcoder cpu_transcoder, enum port port)
257 {
258         if (INTEL_GEN(dev_priv) >= 12) {
259                 switch (cpu_transcoder) {
260                 case TRANSCODER_A:
261                         return HDCP_TRANSA_REP_PRESENT |
262                                HDCP_TRANSA_SHA1_M0;
263                 case TRANSCODER_B:
264                         return HDCP_TRANSB_REP_PRESENT |
265                                HDCP_TRANSB_SHA1_M0;
266                 case TRANSCODER_C:
267                         return HDCP_TRANSC_REP_PRESENT |
268                                HDCP_TRANSC_SHA1_M0;
269                 case TRANSCODER_D:
270                         return HDCP_TRANSD_REP_PRESENT |
271                                HDCP_TRANSD_SHA1_M0;
272                 default:
273                         DRM_ERROR("Unknown transcoder %d\n", cpu_transcoder);
274                         return -EINVAL;
275                 }
276         }
277
278         switch (port) {
279         case PORT_A:
280                 return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
281         case PORT_B:
282                 return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
283         case PORT_C:
284                 return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
285         case PORT_D:
286                 return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
287         case PORT_E:
288                 return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
289         default:
290                 DRM_ERROR("Unknown port %d\n", port);
291                 return -EINVAL;
292         }
293 }
294
295 static
296 int intel_hdcp_validate_v_prime(struct intel_connector *connector,
297                                 const struct intel_hdcp_shim *shim,
298                                 u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
299 {
300         struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
301         struct drm_i915_private *dev_priv;
302         enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
303         enum port port = intel_dig_port->base.port;
304         u32 vprime, sha_text, sha_leftovers, rep_ctl;
305         int ret, i, j, sha_idx;
306
307         dev_priv = intel_dig_port->base.base.dev->dev_private;
308
309         /* Process V' values from the receiver */
310         for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
311                 ret = shim->read_v_prime_part(intel_dig_port, i, &vprime);
312                 if (ret)
313                         return ret;
314                 I915_WRITE(HDCP_SHA_V_PRIME(i), vprime);
315         }
316
317         /*
318          * We need to write the concatenation of all device KSVs, BINFO (DP) ||
319          * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
320          * stream is written via the HDCP_SHA_TEXT register in 32-bit
321          * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
322          * index will keep track of our progress through the 64 bytes as well as
323          * helping us work the 40-bit KSVs through our 32-bit register.
324          *
325          * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
326          */
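        /*
         * For example, with no leftover bytes the first KSV fills sha_empty =
         * 4 byte lanes of the 32-bit word (big-endian, MSB first) and its
         * fifth byte becomes sha_leftovers = 1 carried into the next word.
         */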
327         sha_idx = 0;
328         sha_text = 0;
329         sha_leftovers = 0;
330         rep_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port);
331         I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
332         for (i = 0; i < num_downstream; i++) {
333                 unsigned int sha_empty;
334                 u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];
335
336                 /* Fill up the empty slots in sha_text and write it out */
337                 sha_empty = sizeof(sha_text) - sha_leftovers;
338                 for (j = 0; j < sha_empty; j++)
339                         sha_text |= ksv[j] << ((sizeof(sha_text) - j - 1) * 8);
340
341                 ret = intel_write_sha_text(dev_priv, sha_text);
342                 if (ret < 0)
343                         return ret;
344
345                 /* Programming guide writes this every 64 bytes */
346                 sha_idx += sizeof(sha_text);
347                 if (!(sha_idx % 64))
348                         I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
349
350                 /* Store the leftover bytes from the ksv in sha_text */
351                 sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
352                 sha_text = 0;
353                 for (j = 0; j < sha_leftovers; j++)
354                         sha_text |= ksv[sha_empty + j] <<
355                                         ((sizeof(sha_text) - j - 1) * 8);
356
357                 /*
358                  * If we still have room in sha_text for more data, continue.
359                  * Otherwise, write it out immediately.
360                  */
361                 if (sizeof(sha_text) > sha_leftovers)
362                         continue;
363
364                 ret = intel_write_sha_text(dev_priv, sha_text);
365                 if (ret < 0)
366                         return ret;
367                 sha_leftovers = 0;
368                 sha_text = 0;
369                 sha_idx += sizeof(sha_text);
370         }
371
372         /*
373          * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
374          * bytes are leftover from the last ksv, we might be able to fit them
375          * all in sha_text (first 2 cases), or we might need to split them up
376          * into 2 writes (last 2 cases).
377          */
378         if (sha_leftovers == 0) {
379                 /* Write 16 bits of text, 16 bits of M0 */
380                 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
381                 ret = intel_write_sha_text(dev_priv,
382                                            bstatus[0] << 8 | bstatus[1]);
383                 if (ret < 0)
384                         return ret;
385                 sha_idx += sizeof(sha_text);
386
387                 /* Write 32 bits of M0 */
388                 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
389                 ret = intel_write_sha_text(dev_priv, 0);
390                 if (ret < 0)
391                         return ret;
392                 sha_idx += sizeof(sha_text);
393
394                 /* Write 16 bits of M0 */
395                 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
396                 ret = intel_write_sha_text(dev_priv, 0);
397                 if (ret < 0)
398                         return ret;
399                 sha_idx += sizeof(sha_text);
400
401         } else if (sha_leftovers == 1) {
402                 /* Write 24 bits of text, 8 bits of M0 */
403                 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
404                 sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
405                 /* Only 24-bits of data, must be in the LSB */
406                 sha_text = (sha_text & 0xffffff00) >> 8;
407                 ret = intel_write_sha_text(dev_priv, sha_text);
408                 if (ret < 0)
409                         return ret;
410                 sha_idx += sizeof(sha_text);
411
412                 /* Write 32 bits of M0 */
413                 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
414                 ret = intel_write_sha_text(dev_priv, 0);
415                 if (ret < 0)
416                         return ret;
417                 sha_idx += sizeof(sha_text);
418
419                 /* Write 24 bits of M0 */
420                 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
421                 ret = intel_write_sha_text(dev_priv, 0);
422                 if (ret < 0)
423                         return ret;
424                 sha_idx += sizeof(sha_text);
425
426         } else if (sha_leftovers == 2) {
427                 /* Write 32 bits of text */
428                 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
429                 sha_text |= bstatus[0] << 24 | bstatus[1] << 16;
430                 ret = intel_write_sha_text(dev_priv, sha_text);
431                 if (ret < 0)
432                         return ret;
433                 sha_idx += sizeof(sha_text);
434
435                 /* Write 64 bits of M0 */
436                 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
437                 for (i = 0; i < 2; i++) {
438                         ret = intel_write_sha_text(dev_priv, 0);
439                         if (ret < 0)
440                                 return ret;
441                         sha_idx += sizeof(sha_text);
442                 }
443         } else if (sha_leftovers == 3) {
444                 /* Write 32 bits of text */
445                 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
446                 sha_text |= bstatus[0] << 24;
447                 ret = intel_write_sha_text(dev_priv, sha_text);
448                 if (ret < 0)
449                         return ret;
450                 sha_idx += sizeof(sha_text);
451
452                 /* Write 8 bits of text, 24 bits of M0 */
453                 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
454                 ret = intel_write_sha_text(dev_priv, bstatus[1]);
455                 if (ret < 0)
456                         return ret;
457                 sha_idx += sizeof(sha_text);
458
459                 /* Write 32 bits of M0 */
460                 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
461                 ret = intel_write_sha_text(dev_priv, 0);
462                 if (ret < 0)
463                         return ret;
464                 sha_idx += sizeof(sha_text);
465
466                 /* Write 8 bits of M0 */
467                 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
468                 ret = intel_write_sha_text(dev_priv, 0);
469                 if (ret < 0)
470                         return ret;
471                 sha_idx += sizeof(sha_text);
472         } else {
473                 DRM_DEBUG_KMS("Invalid number of leftovers %d\n",
474                               sha_leftovers);
475                 return -EINVAL;
476         }
477
478         I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
479         /* Fill up to 64-4 bytes with zeros (leave the last write for length) */
480         while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
481                 ret = intel_write_sha_text(dev_priv, 0);
482                 if (ret < 0)
483                         return ret;
484                 sha_idx += sizeof(sha_text);
485         }
486
487         /*
488          * Last write gets the length of the concatenation in bits. That is:
489          *  - 5 bytes per device
490          *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
491          */
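        /* e.g. for 3 downstream devices this is (3 * 5 + 10) * 8 = 200 bits. */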
492         sha_text = (num_downstream * 5 + 10) * 8;
493         ret = intel_write_sha_text(dev_priv, sha_text);
494         if (ret < 0)
495                 return ret;
496
497         /* Tell the HW we're done with the hash and wait for it to ACK */
498         I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH);
499         if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL,
500                                   HDCP_SHA1_COMPLETE, 1)) {
501                 DRM_ERROR("Timed out waiting for SHA1 complete\n");
502                 return -ETIMEDOUT;
503         }
504         if (!(I915_READ(HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
505                 DRM_DEBUG_KMS("SHA-1 mismatch, HDCP failed\n");
506                 return -ENXIO;
507         }
508
509         return 0;
510 }
511
512 /* Implements Part 2 of the HDCP authorization procedure */
513 static
514 int intel_hdcp_auth_downstream(struct intel_connector *connector)
515 {
516         struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
517         const struct intel_hdcp_shim *shim = connector->hdcp.shim;
518         struct drm_device *dev = connector->base.dev;
519         u8 bstatus[2], num_downstream, *ksv_fifo;
520         int ret, i, tries = 3;
521
522         ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
523         if (ret) {
524                 DRM_DEBUG_KMS("KSV list failed to become ready (%d)\n", ret);
525                 return ret;
526         }
527
528         ret = shim->read_bstatus(intel_dig_port, bstatus);
529         if (ret)
530                 return ret;
531
532         if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
533             DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
534                 DRM_DEBUG_KMS("Max Topology Limit Exceeded\n");
535                 return -EPERM;
536         }
537
538         /*
539          * When the repeater reports a device count of 0, the HDCP1.4 spec
540          * allows disabling HDCP encryption. That implies the repeater can't
541          * have its own display. Since no encrypted content is consumed by a
542          * repeater with 0 downstream devices, we fail the
543          * authentication.
544          */
545         num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
546         if (num_downstream == 0) {
547                 DRM_DEBUG_KMS("Repeater with zero downstream devices\n");
548                 return -EINVAL;
549         }
550
551         ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
552         if (!ksv_fifo) {
553                 DRM_DEBUG_KMS("Out of mem: ksv_fifo\n");
554                 return -ENOMEM;
555         }
556
557         ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo);
558         if (ret)
559                 goto err;
560
561         if (drm_hdcp_check_ksvs_revoked(dev, ksv_fifo, num_downstream)) {
562                 DRM_ERROR("Revoked Ksv(s) in ksv_fifo\n");
563                 ret = -EPERM;
564                 goto err;
565         }
566
567         /*
568          * When V' mismatches, the DP spec mandates re-reading
569          * V' at least twice.
570          */
571         for (i = 0; i < tries; i++) {
572                 ret = intel_hdcp_validate_v_prime(connector, shim,
573                                                   ksv_fifo, num_downstream,
574                                                   bstatus);
575                 if (!ret)
576                         break;
577         }
578
579         if (i == tries) {
580                 DRM_DEBUG_KMS("V Prime validation failed.(%d)\n", ret);
581                 goto err;
582         }
583
584         DRM_DEBUG_KMS("HDCP is enabled (%d downstream devices)\n",
585                       num_downstream);
586         ret = 0;
587 err:
588         kfree(ksv_fifo);
589         return ret;
590 }
591
592 /* Implements Part 1 of the HDCP authorization procedure */
593 static int intel_hdcp_auth(struct intel_connector *connector)
594 {
595         struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
596         struct intel_hdcp *hdcp = &connector->hdcp;
597         struct drm_device *dev = connector->base.dev;
598         const struct intel_hdcp_shim *shim = hdcp->shim;
599         struct drm_i915_private *dev_priv;
600         enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
601         enum port port;
602         unsigned long r0_prime_gen_start;
603         int ret, i, tries = 2;
604         union {
605                 u32 reg[2];
606                 u8 shim[DRM_HDCP_AN_LEN];
607         } an;
608         union {
609                 u32 reg[2];
610                 u8 shim[DRM_HDCP_KSV_LEN];
611         } bksv;
612         union {
613                 u32 reg;
614                 u8 shim[DRM_HDCP_RI_LEN];
615         } ri;
616         bool repeater_present, hdcp_capable;
617
618         dev_priv = intel_dig_port->base.base.dev->dev_private;
619
620         port = intel_dig_port->base.port;
621
622         /*
623          * Detects whether the display is HDCP capable. Although we check for
624          * valid Bksv below, the HDCP over DP spec requires that we check
625          * whether the display supports HDCP before we write An. For HDMI
626          * displays, this is not necessary.
627          */
628         if (shim->hdcp_capable) {
629                 ret = shim->hdcp_capable(intel_dig_port, &hdcp_capable);
630                 if (ret)
631                         return ret;
632                 if (!hdcp_capable) {
633                         DRM_DEBUG_KMS("Panel is not HDCP capable\n");
634                         return -EINVAL;
635                 }
636         }
637
638         /* Initialize An with 2 random values and acquire it */
639         for (i = 0; i < 2; i++)
640                 I915_WRITE(HDCP_ANINIT(dev_priv, cpu_transcoder, port),
641                            get_random_u32());
642         I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port),
643                    HDCP_CONF_CAPTURE_AN);
644
645         /* Wait for An to be acquired */
646         if (intel_de_wait_for_set(dev_priv,
647                                   HDCP_STATUS(dev_priv, cpu_transcoder, port),
648                                   HDCP_STATUS_AN_READY, 1)) {
649                 DRM_ERROR("Timed out waiting for An\n");
650                 return -ETIMEDOUT;
651         }
652
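        /*
         * An is DRM_HDCP_AN_LEN (8) bytes; read it back as two 32-bit halves
         * and hand it to the shim as a byte stream via the union above.
         */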
653         an.reg[0] = I915_READ(HDCP_ANLO(dev_priv, cpu_transcoder, port));
654         an.reg[1] = I915_READ(HDCP_ANHI(dev_priv, cpu_transcoder, port));
655         ret = shim->write_an_aksv(intel_dig_port, an.shim);
656         if (ret)
657                 return ret;
658
659         r0_prime_gen_start = jiffies;
660
661         memset(&bksv, 0, sizeof(bksv));
662
663         ret = intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv.shim);
664         if (ret < 0)
665                 return ret;
666
667         if (drm_hdcp_check_ksvs_revoked(dev, bksv.shim, 1)) {
668                 DRM_ERROR("BKSV is revoked\n");
669                 return -EPERM;
670         }
671
672         I915_WRITE(HDCP_BKSVLO(dev_priv, cpu_transcoder, port), bksv.reg[0]);
673         I915_WRITE(HDCP_BKSVHI(dev_priv, cpu_transcoder, port), bksv.reg[1]);
674
675         ret = shim->repeater_present(intel_dig_port, &repeater_present);
676         if (ret)
677                 return ret;
678         if (repeater_present)
679                 I915_WRITE(HDCP_REP_CTL,
680                            intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder,
681                                                        port));
682
683         ret = shim->toggle_signalling(intel_dig_port, true);
684         if (ret)
685                 return ret;
686
687         I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port),
688                    HDCP_CONF_AUTH_AND_ENC);
689
690         /* Wait for R0 ready */
691         if (wait_for(I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
692                      (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
693                 DRM_ERROR("Timed out waiting for R0 ready\n");
694                 return -ETIMEDOUT;
695         }
696
697         /*
698          * Wait for R0' to become available. The spec says 100ms from Aksv, but
699          * some monitors can take longer than this. We'll set the timeout at
700          * 300ms just to be sure.
701          *
702          * On DP, there's an R0_READY bit available but no such bit
703          * exists on HDMI. Since the upper-bound is the same, we'll just do
704          * the stupid thing instead of polling on one and not the other.
705          */
706         wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);
707
708         tries = 3;
709
710         /*
711          * The DP HDCP spec mandates two more attempts to read R0' in case
712          * of an R0 mismatch.
713          */
714         for (i = 0; i < tries; i++) {
715                 ri.reg = 0;
716                 ret = shim->read_ri_prime(intel_dig_port, ri.shim);
717                 if (ret)
718                         return ret;
719                 I915_WRITE(HDCP_RPRIME(dev_priv, cpu_transcoder, port), ri.reg);
720
721                 /* Wait for Ri prime match */
722                 if (!wait_for(I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
723                                                     port)) &
724                     (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
725                         break;
726         }
727
728         if (i == tries) {
729                 DRM_DEBUG_KMS("Timed out waiting for Ri prime match (%x)\n",
730                               I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
731                                                     port)));
732                 return -ETIMEDOUT;
733         }
734
735         /* Wait for encryption confirmation */
736         if (intel_de_wait_for_set(dev_priv,
737                                   HDCP_STATUS(dev_priv, cpu_transcoder, port),
738                                   HDCP_STATUS_ENC,
739                                   ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
740                 DRM_ERROR("Timed out waiting for encryption\n");
741                 return -ETIMEDOUT;
742         }
743
744         /*
745          * XXX: If we have MST-connected devices, we need to enable encryption
746          * on those as well.
747          */
748
749         if (repeater_present)
750                 return intel_hdcp_auth_downstream(connector);
751
752         DRM_DEBUG_KMS("HDCP is enabled (no repeater present)\n");
753         return 0;
754 }
755
756 static int _intel_hdcp_disable(struct intel_connector *connector)
757 {
758         struct intel_hdcp *hdcp = &connector->hdcp;
759         struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
760         struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
761         enum port port = intel_dig_port->base.port;
762         enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
763         int ret;
764
765         DRM_DEBUG_KMS("[%s:%d] HDCP is being disabled...\n",
766                       connector->base.name, connector->base.base.id);
767
768         hdcp->hdcp_encrypted = false;
769         I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port), 0);
770         if (intel_de_wait_for_clear(dev_priv,
771                                     HDCP_STATUS(dev_priv, cpu_transcoder, port),
772                                     ~0, ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
773                 DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
774                 return -ETIMEDOUT;
775         }
776
777         ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
778         if (ret) {
779                 DRM_ERROR("Failed to disable HDCP signalling\n");
780                 return ret;
781         }
782
783         DRM_DEBUG_KMS("HDCP is disabled\n");
784         return 0;
785 }
786
787 static int _intel_hdcp_enable(struct intel_connector *connector)
788 {
789         struct intel_hdcp *hdcp = &connector->hdcp;
790         struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
791         int i, ret, tries = 3;
792
793         DRM_DEBUG_KMS("[%s:%d] HDCP is being enabled...\n",
794                       connector->base.name, connector->base.base.id);
795
796         if (!hdcp_key_loadable(dev_priv)) {
797                 DRM_ERROR("HDCP key Load is not possible\n");
798                 return -ENXIO;
799         }
800
801         for (i = 0; i < KEY_LOAD_TRIES; i++) {
802                 ret = intel_hdcp_load_keys(dev_priv);
803                 if (!ret)
804                         break;
805                 intel_hdcp_clear_keys(dev_priv);
806         }
807         if (ret) {
808                 DRM_ERROR("Could not load HDCP keys, (%d)\n", ret);
809                 return ret;
810         }
811
812         /* In case of authentication failure, the HDCP spec expects reauth. */
813         for (i = 0; i < tries; i++) {
814                 ret = intel_hdcp_auth(connector);
815                 if (!ret) {
816                         hdcp->hdcp_encrypted = true;
817                         return 0;
818                 }
819
820                 DRM_DEBUG_KMS("HDCP Auth failure (%d)\n", ret);
821
822                 /* Ensure HDCP encryption and signalling are stopped. */
823                 _intel_hdcp_disable(connector);
824         }
825
826         DRM_DEBUG_KMS("HDCP authentication failed (%d tries/%d)\n", tries, ret);
827         return ret;
828 }
829
830 static inline
831 struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
832 {
833         return container_of(hdcp, struct intel_connector, hdcp);
834 }
835
836 /* Implements Part 3 of the HDCP authorization procedure */
837 static int intel_hdcp_check_link(struct intel_connector *connector)
838 {
839         struct intel_hdcp *hdcp = &connector->hdcp;
840         struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
841         struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
842         enum port port = intel_dig_port->base.port;
843         enum transcoder cpu_transcoder;
844         int ret = 0;
845
846         mutex_lock(&hdcp->mutex);
847         cpu_transcoder = hdcp->cpu_transcoder;
848
849         /* Check_link valid only when HDCP1.4 is enabled */
850         if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
851             !hdcp->hdcp_encrypted) {
852                 ret = -EINVAL;
853                 goto out;
854         }
855
856         if (WARN_ON(!intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) {
857                 DRM_ERROR("%s:%d HDCP link stopped encryption,%x\n",
858                           connector->base.name, connector->base.base.id,
859                           I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
860                                                 port)));
861                 ret = -ENXIO;
862                 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
863                 schedule_work(&hdcp->prop_work);
864                 goto out;
865         }
866
867         if (hdcp->shim->check_link(intel_dig_port)) {
868                 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
869                         hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
870                         schedule_work(&hdcp->prop_work);
871                 }
872                 goto out;
873         }
874
875         DRM_DEBUG_KMS("[%s:%d] HDCP link failed, retrying authentication\n",
876                       connector->base.name, connector->base.base.id);
877
878         ret = _intel_hdcp_disable(connector);
879         if (ret) {
880                 DRM_ERROR("Failed to disable hdcp (%d)\n", ret);
881                 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
882                 schedule_work(&hdcp->prop_work);
883                 goto out;
884         }
885
886         ret = _intel_hdcp_enable(connector);
887         if (ret) {
888                 DRM_ERROR("Failed to enable hdcp (%d)\n", ret);
889                 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
890                 schedule_work(&hdcp->prop_work);
891                 goto out;
892         }
893
894 out:
895         mutex_unlock(&hdcp->mutex);
896         return ret;
897 }
898
899 static void intel_hdcp_prop_work(struct work_struct *work)
900 {
901         struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
902                                                prop_work);
903         struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
904         struct drm_device *dev = connector->base.dev;
905
906         drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
907         mutex_lock(&hdcp->mutex);
908
909         /*
910          * This worker is only used to flip between ENABLED/DESIRED. Either of
911          * those to UNDESIRED is handled by core. If value == UNDESIRED,
912          * we're running just after hdcp has been disabled, so just exit
913          */
914         if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
915                 drm_hdcp_update_content_protection(&connector->base,
916                                                    hdcp->value);
917
918         mutex_unlock(&hdcp->mutex);
919         drm_modeset_unlock(&dev->mode_config.connection_mutex);
920 }
921
922 bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
923 {
924         /* PORT E doesn't have HDCP, and PORT F is disabled */
925         return INTEL_INFO(dev_priv)->display.has_hdcp && port < PORT_E;
926 }
927
928 static int
929 hdcp2_prepare_ake_init(struct intel_connector *connector,
930                        struct hdcp2_ake_init *ake_data)
931 {
932         struct hdcp_port_data *data = &connector->hdcp.port_data;
933         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
934         struct i915_hdcp_comp_master *comp;
935         int ret;
936
937         mutex_lock(&dev_priv->hdcp_comp_mutex);
938         comp = dev_priv->hdcp_master;
939
940         if (!comp || !comp->ops) {
941                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
942                 return -EINVAL;
943         }
944
945         ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data);
946         if (ret)
947                 DRM_DEBUG_KMS("Prepare_ake_init failed. %d\n", ret);
948         mutex_unlock(&dev_priv->hdcp_comp_mutex);
949
950         return ret;
951 }
952
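/*
 * Each of the hdcp2_* helpers below follows the same pattern as
 * hdcp2_prepare_ake_init() above: take hdcp_comp_mutex, bail out with
 * -EINVAL if the MEI component isn't bound, call the corresponding
 * component op, and drop the mutex before returning its result.
 */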
953 static int
954 hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
955                                 struct hdcp2_ake_send_cert *rx_cert,
956                                 bool *paired,
957                                 struct hdcp2_ake_no_stored_km *ek_pub_km,
958                                 size_t *msg_sz)
959 {
960         struct hdcp_port_data *data = &connector->hdcp.port_data;
961         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
962         struct i915_hdcp_comp_master *comp;
963         int ret;
964
965         mutex_lock(&dev_priv->hdcp_comp_mutex);
966         comp = dev_priv->hdcp_master;
967
968         if (!comp || !comp->ops) {
969                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
970                 return -EINVAL;
971         }
972
973         ret = comp->ops->verify_receiver_cert_prepare_km(comp->mei_dev, data,
974                                                          rx_cert, paired,
975                                                          ek_pub_km, msg_sz);
976         if (ret < 0)
977                 DRM_DEBUG_KMS("Verify rx_cert failed. %d\n", ret);
978         mutex_unlock(&dev_priv->hdcp_comp_mutex);
979
980         return ret;
981 }
982
983 static int hdcp2_verify_hprime(struct intel_connector *connector,
984                                struct hdcp2_ake_send_hprime *rx_hprime)
985 {
986         struct hdcp_port_data *data = &connector->hdcp.port_data;
987         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
988         struct i915_hdcp_comp_master *comp;
989         int ret;
990
991         mutex_lock(&dev_priv->hdcp_comp_mutex);
992         comp = dev_priv->hdcp_master;
993
994         if (!comp || !comp->ops) {
995                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
996                 return -EINVAL;
997         }
998
999         ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime);
1000         if (ret < 0)
1001                 DRM_DEBUG_KMS("Verify hprime failed. %d\n", ret);
1002         mutex_unlock(&dev_priv->hdcp_comp_mutex);
1003
1004         return ret;
1005 }
1006
1007 static int
1008 hdcp2_store_pairing_info(struct intel_connector *connector,
1009                          struct hdcp2_ake_send_pairing_info *pairing_info)
1010 {
1011         struct hdcp_port_data *data = &connector->hdcp.port_data;
1012         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1013         struct i915_hdcp_comp_master *comp;
1014         int ret;
1015
1016         mutex_lock(&dev_priv->hdcp_comp_mutex);
1017         comp = dev_priv->hdcp_master;
1018
1019         if (!comp || !comp->ops) {
1020                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1021                 return -EINVAL;
1022         }
1023
1024         ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info);
1025         if (ret < 0)
1026                 DRM_DEBUG_KMS("Store pairing info failed. %d\n", ret);
1027         mutex_unlock(&dev_priv->hdcp_comp_mutex);
1028
1029         return ret;
1030 }
1031
1032 static int
1033 hdcp2_prepare_lc_init(struct intel_connector *connector,
1034                       struct hdcp2_lc_init *lc_init)
1035 {
1036         struct hdcp_port_data *data = &connector->hdcp.port_data;
1037         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1038         struct i915_hdcp_comp_master *comp;
1039         int ret;
1040
1041         mutex_lock(&dev_priv->hdcp_comp_mutex);
1042         comp = dev_priv->hdcp_master;
1043
1044         if (!comp || !comp->ops) {
1045                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1046                 return -EINVAL;
1047         }
1048
1049         ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init);
1050         if (ret < 0)
1051                 DRM_DEBUG_KMS("Prepare lc_init failed. %d\n", ret);
1052         mutex_unlock(&dev_priv->hdcp_comp_mutex);
1053
1054         return ret;
1055 }
1056
1057 static int
1058 hdcp2_verify_lprime(struct intel_connector *connector,
1059                     struct hdcp2_lc_send_lprime *rx_lprime)
1060 {
1061         struct hdcp_port_data *data = &connector->hdcp.port_data;
1062         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1063         struct i915_hdcp_comp_master *comp;
1064         int ret;
1065
1066         mutex_lock(&dev_priv->hdcp_comp_mutex);
1067         comp = dev_priv->hdcp_master;
1068
1069         if (!comp || !comp->ops) {
1070                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1071                 return -EINVAL;
1072         }
1073
1074         ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime);
1075         if (ret < 0)
1076                 DRM_DEBUG_KMS("Verify L_Prime failed. %d\n", ret);
1077         mutex_unlock(&dev_priv->hdcp_comp_mutex);
1078
1079         return ret;
1080 }
1081
1082 static int hdcp2_prepare_skey(struct intel_connector *connector,
1083                               struct hdcp2_ske_send_eks *ske_data)
1084 {
1085         struct hdcp_port_data *data = &connector->hdcp.port_data;
1086         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1087         struct i915_hdcp_comp_master *comp;
1088         int ret;
1089
1090         mutex_lock(&dev_priv->hdcp_comp_mutex);
1091         comp = dev_priv->hdcp_master;
1092
1093         if (!comp || !comp->ops) {
1094                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1095                 return -EINVAL;
1096         }
1097
1098         ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data);
1099         if (ret < 0)
1100                 DRM_DEBUG_KMS("Get session key failed. %d\n", ret);
1101         mutex_unlock(&dev_priv->hdcp_comp_mutex);
1102
1103         return ret;
1104 }
1105
1106 static int
1107 hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
1108                                       struct hdcp2_rep_send_receiverid_list
1109                                                                 *rep_topology,
1110                                       struct hdcp2_rep_send_ack *rep_send_ack)
1111 {
1112         struct hdcp_port_data *data = &connector->hdcp.port_data;
1113         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1114         struct i915_hdcp_comp_master *comp;
1115         int ret;
1116
1117         mutex_lock(&dev_priv->hdcp_comp_mutex);
1118         comp = dev_priv->hdcp_master;
1119
1120         if (!comp || !comp->ops) {
1121                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1122                 return -EINVAL;
1123         }
1124
1125         ret = comp->ops->repeater_check_flow_prepare_ack(comp->mei_dev, data,
1126                                                          rep_topology,
1127                                                          rep_send_ack);
1128         if (ret < 0)
1129                 DRM_DEBUG_KMS("Verify rep topology failed. %d\n", ret);
1130         mutex_unlock(&dev_priv->hdcp_comp_mutex);
1131
1132         return ret;
1133 }
1134
1135 static int
1136 hdcp2_verify_mprime(struct intel_connector *connector,
1137                     struct hdcp2_rep_stream_ready *stream_ready)
1138 {
1139         struct hdcp_port_data *data = &connector->hdcp.port_data;
1140         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1141         struct i915_hdcp_comp_master *comp;
1142         int ret;
1143
1144         mutex_lock(&dev_priv->hdcp_comp_mutex);
1145         comp = dev_priv->hdcp_master;
1146
1147         if (!comp || !comp->ops) {
1148                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1149                 return -EINVAL;
1150         }
1151
1152         ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready);
1153         if (ret < 0)
1154                 DRM_DEBUG_KMS("Verify mprime failed. %d\n", ret);
1155         mutex_unlock(&dev_priv->hdcp_comp_mutex);
1156
1157         return ret;
1158 }
1159
1160 static int hdcp2_authenticate_port(struct intel_connector *connector)
1161 {
1162         struct hdcp_port_data *data = &connector->hdcp.port_data;
1163         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1164         struct i915_hdcp_comp_master *comp;
1165         int ret;
1166
1167         mutex_lock(&dev_priv->hdcp_comp_mutex);
1168         comp = dev_priv->hdcp_master;
1169
1170         if (!comp || !comp->ops) {
1171                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1172                 return -EINVAL;
1173         }
1174
1175         ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data);
1176         if (ret < 0)
1177                 DRM_DEBUG_KMS("Enable hdcp auth failed. %d\n", ret);
1178         mutex_unlock(&dev_priv->hdcp_comp_mutex);
1179
1180         return ret;
1181 }
1182
1183 static int hdcp2_close_mei_session(struct intel_connector *connector)
1184 {
1185         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1186         struct i915_hdcp_comp_master *comp;
1187         int ret;
1188
1189         mutex_lock(&dev_priv->hdcp_comp_mutex);
1190         comp = dev_priv->hdcp_master;
1191
1192         if (!comp || !comp->ops) {
1193                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1194                 return -EINVAL;
1195         }
1196
1197         ret = comp->ops->close_hdcp_session(comp->mei_dev,
1198                                              &connector->hdcp.port_data);
1199         mutex_unlock(&dev_priv->hdcp_comp_mutex);
1200
1201         return ret;
1202 }
1203
1204 static int hdcp2_deauthenticate_port(struct intel_connector *connector)
1205 {
1206         return hdcp2_close_mei_session(connector);
1207 }
1208
1209 /* Authentication flow starts from here */
1210 static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
1211 {
1212         struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
1213         struct intel_hdcp *hdcp = &connector->hdcp;
1214         struct drm_device *dev = connector->base.dev;
1215         union {
1216                 struct hdcp2_ake_init ake_init;
1217                 struct hdcp2_ake_send_cert send_cert;
1218                 struct hdcp2_ake_no_stored_km no_stored_km;
1219                 struct hdcp2_ake_send_hprime send_hprime;
1220                 struct hdcp2_ake_send_pairing_info pairing_info;
1221         } msgs;
1222         const struct intel_hdcp_shim *shim = hdcp->shim;
1223         size_t size;
1224         int ret;
1225
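        /*
         * AKE message flow handled below, in outline: write AKE_Init, read
         * AKE_Send_Cert, verify the cert and prepare km, write the
         * (no_)stored_km message, read and verify AKE_Send_H_prime, and, if
         * the receiver isn't already paired, read AKE_Send_Pairing_Info and
         * store it.
         */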
1226         /* Init for seq_num */
1227         hdcp->seq_num_v = 0;
1228         hdcp->seq_num_m = 0;
1229
1230         ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
1231         if (ret < 0)
1232                 return ret;
1233
1234         ret = shim->write_2_2_msg(intel_dig_port, &msgs.ake_init,
1235                                   sizeof(msgs.ake_init));
1236         if (ret < 0)
1237                 return ret;
1238
1239         ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_CERT,
1240                                  &msgs.send_cert, sizeof(msgs.send_cert));
1241         if (ret < 0)
1242                 return ret;
1243
1244         if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
1245                 DRM_DEBUG_KMS("cert.rx_caps don't claim HDCP2.2\n");
1246                 return -EINVAL;
1247         }
1248
1249         hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);
1250
1251         if (drm_hdcp_check_ksvs_revoked(dev, msgs.send_cert.cert_rx.receiver_id,
1252                                         1)) {
1253                 DRM_ERROR("Receiver ID is revoked\n");
1254                 return -EPERM;
1255         }
1256
1257         /*
1258          * Here msgs.no_stored_km will also hold the message for the case
1259          * where km is already stored.
1260          */
1261         ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
1262                                               &hdcp->is_paired,
1263                                               &msgs.no_stored_km, &size);
1264         if (ret < 0)
1265                 return ret;
1266
1267         ret = shim->write_2_2_msg(intel_dig_port, &msgs.no_stored_km, size);
1268         if (ret < 0)
1269                 return ret;
1270
1271         ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_HPRIME,
1272                                  &msgs.send_hprime, sizeof(msgs.send_hprime));
1273         if (ret < 0)
1274                 return ret;
1275
1276         ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
1277         if (ret < 0)
1278                 return ret;
1279
1280         if (!hdcp->is_paired) {
1281                 /* Pairing is required */
1282                 ret = shim->read_2_2_msg(intel_dig_port,
1283                                          HDCP_2_2_AKE_SEND_PAIRING_INFO,
1284                                          &msgs.pairing_info,
1285                                          sizeof(msgs.pairing_info));
1286                 if (ret < 0)
1287                         return ret;
1288
1289                 ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
1290                 if (ret < 0)
1291                         return ret;
1292                 hdcp->is_paired = true;
1293         }
1294
1295         return 0;
1296 }
1297
1298 static int hdcp2_locality_check(struct intel_connector *connector)
1299 {
1300         struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
1301         struct intel_hdcp *hdcp = &connector->hdcp;
1302         union {
1303                 struct hdcp2_lc_init lc_init;
1304                 struct hdcp2_lc_send_lprime send_lprime;
1305         } msgs;
1306         const struct intel_hdcp_shim *shim = hdcp->shim;
1307         int tries = HDCP2_LC_RETRY_CNT, ret, i;
1308
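        /*
         * The locality check (LC_Init out, L' back, then verification) is
         * retried up to HDCP2_LC_RETRY_CNT times; a failure at any step
         * simply moves on to the next attempt.
         */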
1309         for (i = 0; i < tries; i++) {
1310                 ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
1311                 if (ret < 0)
1312                         continue;
1313
1314                 ret = shim->write_2_2_msg(intel_dig_port, &msgs.lc_init,
1315                                       sizeof(msgs.lc_init));
1316                 if (ret < 0)
1317                         continue;
1318
1319                 ret = shim->read_2_2_msg(intel_dig_port,
1320                                          HDCP_2_2_LC_SEND_LPRIME,
1321                                          &msgs.send_lprime,
1322                                          sizeof(msgs.send_lprime));
1323                 if (ret < 0)
1324                         continue;
1325
1326                 ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
1327                 if (!ret)
1328                         break;
1329         }
1330
1331         return ret;
1332 }
1333
1334 static int hdcp2_session_key_exchange(struct intel_connector *connector)
1335 {
1336         struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
1337         struct intel_hdcp *hdcp = &connector->hdcp;
1338         struct hdcp2_ske_send_eks send_eks;
1339         int ret;
1340
1341         ret = hdcp2_prepare_skey(connector, &send_eks);
1342         if (ret < 0)
1343                 return ret;
1344
1345         ret = hdcp->shim->write_2_2_msg(intel_dig_port, &send_eks,
1346                                         sizeof(send_eks));
1347         if (ret < 0)
1348                 return ret;
1349
1350         return 0;
1351 }
1352
1353 static
1354 int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
1355 {
1356         struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
1357         struct intel_hdcp *hdcp = &connector->hdcp;
1358         union {
1359                 struct hdcp2_rep_stream_manage stream_manage;
1360                 struct hdcp2_rep_stream_ready stream_ready;
1361         } msgs;
1362         const struct intel_hdcp_shim *shim = hdcp->shim;
1363         int ret;
1364
1365         /* Prepare RepeaterAuth_Stream_Manage msg */
1366         msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
1367         drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);
1368
1369         /* K, the number of streams, is fixed at 1. Stored as big-endian. */
1370         msgs.stream_manage.k = cpu_to_be16(1);
1371
1372         /* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
1373         msgs.stream_manage.streams[0].stream_id = 0;
1374         msgs.stream_manage.streams[0].stream_type = hdcp->content_type;
1375
1376         /* Send it to Repeater */
1377         ret = shim->write_2_2_msg(intel_dig_port, &msgs.stream_manage,
1378                                   sizeof(msgs.stream_manage));
1379         if (ret < 0)
1380                 return ret;
1381
1382         ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_STREAM_READY,
1383                                  &msgs.stream_ready, sizeof(msgs.stream_ready));
1384         if (ret < 0)
1385                 return ret;
1386
1387         hdcp->port_data.seq_num_m = hdcp->seq_num_m;
1388         hdcp->port_data.streams[0].stream_type = hdcp->content_type;
1389
1390         ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);
1391         if (ret < 0)
1392                 return ret;
1393
1394         hdcp->seq_num_m++;
1395
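        /*
         * seq_num_m goes on the wire as a 24-bit big-endian counter (see
         * drm_hdcp_cpu_to_be24() above), so once it would exceed
         * HDCP_2_2_SEQ_NUM_MAX we bail out rather than letting it wrap.
         */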
1396         if (hdcp->seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
1397                 DRM_DEBUG_KMS("seq_num_m roll over.\n");
1398                 return -1;
1399         }
1400
1401         return 0;
1402 }
1403
1404 static
1405 int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
1406 {
1407         struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
1408         struct intel_hdcp *hdcp = &connector->hdcp;
1409         struct drm_device *dev = connector->base.dev;
1410         union {
1411                 struct hdcp2_rep_send_receiverid_list recvid_list;
1412                 struct hdcp2_rep_send_ack rep_ack;
1413         } msgs;
1414         const struct intel_hdcp_shim *shim = hdcp->shim;
1415         u32 seq_num_v, device_cnt;
1416         u8 *rx_info;
1417         int ret;
1418
1419         ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
1420                                  &msgs.recvid_list, sizeof(msgs.recvid_list));
1421         if (ret < 0)
1422                 return ret;
1423
1424         rx_info = msgs.recvid_list.rx_info;
1425
1426         if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
1427             HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
1428                 DRM_DEBUG_KMS("Topology Max Size Exceeded\n");
1429                 return -EINVAL;
1430         }
1431
1432         /* Convert seq_num_v to a DWORD and store it in a local variable */
1433         seq_num_v =
1434                 drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);
1435
1436         if (seq_num_v < hdcp->seq_num_v) {
1437                 /* seq_num_v from the repeater rolled over; reauthenticate. */
1438                 DRM_DEBUG_KMS("Seq_num_v roll over.\n");
1439                 return -EINVAL;
1440         }
1441
1442         device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
1443                       HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
1444         if (drm_hdcp_check_ksvs_revoked(dev, msgs.recvid_list.receiver_ids,
1445                                         device_cnt)) {
1446                 DRM_ERROR("Revoked receiver ID(s) found in the list\n");
1447                 return -EPERM;
1448         }
1449
1450         ret = hdcp2_verify_rep_topology_prepare_ack(connector,
1451                                                     &msgs.recvid_list,
1452                                                     &msgs.rep_ack);
1453         if (ret < 0)
1454                 return ret;
1455
1456         hdcp->seq_num_v = seq_num_v;
1457         ret = shim->write_2_2_msg(intel_dig_port, &msgs.rep_ack,
1458                                   sizeof(msgs.rep_ack));
1459         if (ret < 0)
1460                 return ret;
1461
1462         return 0;
1463 }
1464
1465 static int hdcp2_authenticate_repeater(struct intel_connector *connector)
1466 {
1467         int ret;
1468
1469         ret = hdcp2_authenticate_repeater_topology(connector);
1470         if (ret < 0)
1471                 return ret;
1472
1473         return hdcp2_propagate_stream_management_info(connector);
1474 }
1475
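     /*
      * Full HDCP2.2 sink authentication: AKE, locality check, SKE, optional
      * stream type configuration, repeater authentication when the sink is a
      * repeater, and finally port authentication.
      */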
1476 static int hdcp2_authenticate_sink(struct intel_connector *connector)
1477 {
1478         struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
1479         struct intel_hdcp *hdcp = &connector->hdcp;
1480         const struct intel_hdcp_shim *shim = hdcp->shim;
1481         int ret;
1482
1483         ret = hdcp2_authentication_key_exchange(connector);
1484         if (ret < 0) {
1485                 DRM_DEBUG_KMS("AKE Failed. Err : %d\n", ret);
1486                 return ret;
1487         }
1488
1489         ret = hdcp2_locality_check(connector);
1490         if (ret < 0) {
1491                 DRM_DEBUG_KMS("Locality Check failed. Err : %d\n", ret);
1492                 return ret;
1493         }
1494
1495         ret = hdcp2_session_key_exchange(connector);
1496         if (ret < 0) {
1497                 DRM_DEBUG_KMS("SKE Failed. Err : %d\n", ret);
1498                 return ret;
1499         }
1500
1501         if (shim->config_stream_type) {
1502                 ret = shim->config_stream_type(intel_dig_port,
1503                                                hdcp->is_repeater,
1504                                                hdcp->content_type);
1505                 if (ret < 0)
1506                         return ret;
1507         }
1508
1509         if (hdcp->is_repeater) {
1510                 ret = hdcp2_authenticate_repeater(connector);
1511                 if (ret < 0) {
1512                         DRM_DEBUG_KMS("Repeater Auth Failed. Err: %d\n", ret);
1513                         return ret;
1514                 }
1515         }
1516
1517         hdcp->port_data.streams[0].stream_type = hdcp->content_type;
1518         ret = hdcp2_authenticate_port(connector);
1519         if (ret < 0)
1520                 return ret;
1521
1522         return ret;
1523 }
1524
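     /*
      * Enable HDCP2.2 signalling when the shim requires it, request link
      * encryption once the link reports authenticated, and wait for
      * LINK_ENCRYPTION_STATUS to assert.
      */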
1525 static int hdcp2_enable_encryption(struct intel_connector *connector)
1526 {
1527         struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
1528         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1529         struct intel_hdcp *hdcp = &connector->hdcp;
1530         enum port port = connector->encoder->port;
1531         enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
1532         int ret;
1533
1534         WARN_ON(I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
1535                 LINK_ENCRYPTION_STATUS);
1536         if (hdcp->shim->toggle_signalling) {
1537                 ret = hdcp->shim->toggle_signalling(intel_dig_port, true);
1538                 if (ret) {
1539                         DRM_ERROR("Failed to enable HDCP signalling. %d\n",
1540                                   ret);
1541                         return ret;
1542                 }
1543         }
1544
1545         if (I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
1546             LINK_AUTH_STATUS) {
1547                 /* Link is Authenticated. Now set for Encryption */
1548                 I915_WRITE(HDCP2_CTL(dev_priv, cpu_transcoder, port),
1549                            I915_READ(HDCP2_CTL(dev_priv, cpu_transcoder,
1550                                                port)) |
1551                            CTL_LINK_ENCRYPTION_REQ);
1552         }
1553
1554         ret = intel_de_wait_for_set(dev_priv,
1555                                     HDCP2_STATUS(dev_priv, cpu_transcoder,
1556                                                  port),
1557                                     LINK_ENCRYPTION_STATUS,
1558                                     ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
1559
1560         return ret;
1561 }
1562
1563 static int hdcp2_disable_encryption(struct intel_connector *connector)
1564 {
1565         struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
1566         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1567         struct intel_hdcp *hdcp = &connector->hdcp;
1568         enum port port = connector->encoder->port;
1569         enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
1570         int ret;
1571
1572         WARN_ON(!(I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
1573                             LINK_ENCRYPTION_STATUS));
1574
1575         I915_WRITE(HDCP2_CTL(dev_priv, cpu_transcoder, port),
1576                    I915_READ(HDCP2_CTL(dev_priv, cpu_transcoder, port)) &
1577                    ~CTL_LINK_ENCRYPTION_REQ);
1578
1579         ret = intel_de_wait_for_clear(dev_priv,
1580                                       HDCP2_STATUS(dev_priv, cpu_transcoder,
1581                                                    port),
1582                                       LINK_ENCRYPTION_STATUS,
1583                                       ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
1584         if (ret == -ETIMEDOUT)
1585                 DRM_DEBUG_KMS("Disable encryption timed out\n");
1586
1587         if (hdcp->shim->toggle_signalling) {
1588                 ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
1589                 if (ret) {
1590                         DRM_ERROR("Failed to disable HDCP signalling. %d\n",
1591                                   ret);
1592                         return ret;
1593                 }
1594         }
1595
1596         return ret;
1597 }
1598
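     /*
      * Retry full sink authentication up to three times, tearing down the
      * firmware session between attempts, then honour the minimum delay
      * before enabling encryption.
      */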
1599 static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
1600 {
1601         int ret, i, tries = 3;
1602
1603         for (i = 0; i < tries; i++) {
1604                 ret = hdcp2_authenticate_sink(connector);
1605                 if (!ret)
1606                         break;
1607
1608                 /* Clearing the mei hdcp session */
1609                 DRM_DEBUG_KMS("HDCP2.2 Auth %d of %d Failed.(%d)\n",
1610                               i + 1, tries, ret);
1611                 if (hdcp2_deauthenticate_port(connector) < 0)
1612                         DRM_DEBUG_KMS("Port deauth failed.\n");
1613         }
1614
1615         if (i != tries) {
1616                 /*
1617                  * Ensure the required 200 ms minimum interval between Session
1618                  * Key Exchange and enabling encryption.
1619                  */
1620                 msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
1621                 ret = hdcp2_enable_encryption(connector);
1622                 if (ret < 0) {
1623                         DRM_DEBUG_KMS("Encryption Enable Failed.(%d)\n", ret);
1624                         if (hdcp2_deauthenticate_port(connector) < 0)
1625                                 DRM_DEBUG_KMS("Port deauth failed.\n");
1626                 }
1627         }
1628
1629         return ret;
1630 }
1631
1632 static int _intel_hdcp2_enable(struct intel_connector *connector)
1633 {
1634         struct intel_hdcp *hdcp = &connector->hdcp;
1635         int ret;
1636
1637         DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
1638                       connector->base.name, connector->base.base.id,
1639                       hdcp->content_type);
1640
1641         ret = hdcp2_authenticate_and_encrypt(connector);
1642         if (ret) {
1643                 DRM_DEBUG_KMS("HDCP2 Type%d  Enabling Failed. (%d)\n",
1644                               hdcp->content_type, ret);
1645                 return ret;
1646         }
1647
1648         DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is enabled. Type %d\n",
1649                       connector->base.name, connector->base.base.id,
1650                       hdcp->content_type);
1651
1652         hdcp->hdcp2_encrypted = true;
1653         return 0;
1654 }
1655
1656 static int _intel_hdcp2_disable(struct intel_connector *connector)
1657 {
1658         int ret;
1659
1660         DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being Disabled\n",
1661                       connector->base.name, connector->base.base.id);
1662
1663         ret = hdcp2_disable_encryption(connector);
1664
1665         if (hdcp2_deauthenticate_port(connector) < 0)
1666                 DRM_DEBUG_KMS("Port deauth failed.\n");
1667
1668         connector->hdcp.hdcp2_encrypted = false;
1669
1670         return ret;
1671 }
1672
1673 /* Implements the Link Integrity Check for HDCP2.2 */
1674 static int intel_hdcp2_check_link(struct intel_connector *connector)
1675 {
1676         struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
1677         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1678         struct intel_hdcp *hdcp = &connector->hdcp;
1679         enum port port = connector->encoder->port;
1680         enum transcoder cpu_transcoder;
1681         int ret = 0;
1682
1683         mutex_lock(&hdcp->mutex);
1684         cpu_transcoder = hdcp->cpu_transcoder;
1685
1686         /* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
1687         if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
1688             !hdcp->hdcp2_encrypted) {
1689                 ret = -EINVAL;
1690                 goto out;
1691         }
1692
1693         if (WARN_ON(!intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) {
1694                 DRM_ERROR("HDCP2.2 link stopped the encryption, %x\n",
1695                           I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder,
1696                                                  port)));
1697                 ret = -ENXIO;
1698                 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
1699                 schedule_work(&hdcp->prop_work);
1700                 goto out;
1701         }
1702
1703         ret = hdcp->shim->check_2_2_link(intel_dig_port);
1704         if (ret == HDCP_LINK_PROTECTED) {
1705                 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
1706                         hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
1707                         schedule_work(&hdcp->prop_work);
1708                 }
1709                 goto out;
1710         }
1711
1712         if (ret == HDCP_TOPOLOGY_CHANGE) {
1713                 if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
1714                         goto out;
1715
1716                 DRM_DEBUG_KMS("HDCP2.2 Downstream topology change\n");
1717                 ret = hdcp2_authenticate_repeater_topology(connector);
1718                 if (!ret) {
1719                         hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
1720                         schedule_work(&hdcp->prop_work);
1721                         goto out;
1722                 }
1723                 DRM_DEBUG_KMS("[%s:%d] Repeater topology auth failed.(%d)\n",
1724                               connector->base.name, connector->base.base.id,
1725                               ret);
1726         } else {
1727                 DRM_DEBUG_KMS("[%s:%d] HDCP2.2 link failed, retrying auth\n",
1728                               connector->base.name, connector->base.base.id);
1729         }
1730
1731         ret = _intel_hdcp2_disable(connector);
1732         if (ret) {
1733                 DRM_ERROR("[%s:%d] Failed to disable hdcp2.2 (%d)\n",
1734                           connector->base.name, connector->base.base.id, ret);
1735                 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
1736                 schedule_work(&hdcp->prop_work);
1737                 goto out;
1738         }
1739
1740         ret = _intel_hdcp2_enable(connector);
1741         if (ret) {
1742                 DRM_DEBUG_KMS("[%s:%d] Failed to enable hdcp2.2 (%d)\n",
1743                               connector->base.name, connector->base.base.id,
1744                               ret);
1745                 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
1746                 schedule_work(&hdcp->prop_work);
1747                 goto out;
1748         }
1749
1750 out:
1751         mutex_unlock(&hdcp->mutex);
1752         return ret;
1753 }
1754
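     /*
      * Periodic link check worker: re-arm at the HDCP2.2 or HDCP1.4 check
      * period depending on which protocol is currently protecting the link.
      */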
1755 static void intel_hdcp_check_work(struct work_struct *work)
1756 {
1757         struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
1758                                                struct intel_hdcp,
1759                                                check_work);
1760         struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
1761
1762         if (!intel_hdcp2_check_link(connector))
1763                 schedule_delayed_work(&hdcp->check_work,
1764                                       DRM_HDCP2_CHECK_PERIOD_MS);
1765         else if (!intel_hdcp_check_link(connector))
1766                 schedule_delayed_work(&hdcp->check_work,
1767                                       DRM_HDCP_CHECK_PERIOD_MS);
1768 }
1769
1770 static int i915_hdcp_component_bind(struct device *i915_kdev,
1771                                     struct device *mei_kdev, void *data)
1772 {
1773         struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
1774
1775         DRM_DEBUG("I915 HDCP comp bind\n");
1776         mutex_lock(&dev_priv->hdcp_comp_mutex);
1777         dev_priv->hdcp_master = (struct i915_hdcp_comp_master *)data;
1778         dev_priv->hdcp_master->mei_dev = mei_kdev;
1779         mutex_unlock(&dev_priv->hdcp_comp_mutex);
1780
1781         return 0;
1782 }
1783
1784 static void i915_hdcp_component_unbind(struct device *i915_kdev,
1785                                        struct device *mei_kdev, void *data)
1786 {
1787         struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
1788
1789         DRM_DEBUG("I915 HDCP comp unbind\n");
1790         mutex_lock(&dev_priv->hdcp_comp_mutex);
1791         dev_priv->hdcp_master = NULL;
1792         mutex_unlock(&dev_priv->hdcp_comp_mutex);
1793 }
1794
1795 static const struct component_ops i915_hdcp_component_ops = {
1796         .bind   = i915_hdcp_component_bind,
1797         .unbind = i915_hdcp_component_unbind,
1798 };
1799
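     /*
      * Map i915 port/transcoder enums to the ME firmware's mei_fw_ddi and
      * mei_fw_tc encodings used in hdcp_port_data.
      */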
1800 static inline
1801 enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
1802 {
1803         switch (port) {
1804         case PORT_A:
1805                 return MEI_DDI_A;
1806         case PORT_B ... PORT_F:
1807                 return (enum mei_fw_ddi)port;
1808         default:
1809                 return MEI_DDI_INVALID_PORT;
1810         }
1811 }
1812
1813 static inline
1814 enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
1815 {
1816         switch (cpu_transcoder) {
1817         case TRANSCODER_A ... TRANSCODER_D:
1818                 return (enum mei_fw_tc)(cpu_transcoder | 0x10);
1819         default: /* eDP and DSI transcoders are not HDCP capable */
1820                 return MEI_INVALID_TRANSCODER;
1821         }
1822 }
1823
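     /*
      * Fill the hdcp_port_data handed to the HDCP firmware component: DDI
      * index (pre-Gen12 only), port type, shim protocol and a single stream
      * entry matching the connector's content type.
      */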
1824 static inline int initialize_hdcp_port_data(struct intel_connector *connector,
1825                                             const struct intel_hdcp_shim *shim)
1826 {
1827         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1828         struct intel_hdcp *hdcp = &connector->hdcp;
1829         struct hdcp_port_data *data = &hdcp->port_data;
1830
1831         if (INTEL_GEN(dev_priv) < 12)
1832                 data->fw_ddi =
1833                         intel_get_mei_fw_ddi_index(connector->encoder->port);
1834         else
1835                 /*
1836                  * As per the ME FW API expectation, for Gen 12+, fw_ddi is filled
1837                  * with zero (the invalid port index).
1838                  */
1839                 data->fw_ddi = MEI_DDI_INVALID_PORT;
1840
1841         /*
1842          * The associated transcoder is set and modified at modeset time, so
1843          * fw_tc is initialized here to zero (the invalid transcoder index).
1844          * Platforms older than Gen12 keep this value.
1845          */
1846         data->fw_tc = MEI_INVALID_TRANSCODER;
1847
1848         data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
1849         data->protocol = (u8)shim->protocol;
1850
1851         data->k = 1;
1852         if (!data->streams)
1853                 data->streams = kcalloc(data->k,
1854                                         sizeof(struct hdcp2_streamid_type),
1855                                         GFP_KERNEL);
1856         if (!data->streams) {
1857                 DRM_ERROR("Out of Memory\n");
1858                 return -ENOMEM;
1859         }
1860
1861         data->streams[0].stream_id = 0;
1862         data->streams[0].stream_type = hdcp->content_type;
1863
1864         return 0;
1865 }
1866
1867 static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
1868 {
1869         if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
1870                 return false;
1871
1872         return (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) ||
1873                 IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv));
1874 }
1875
1876 void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
1877 {
1878         int ret;
1879
1880         if (!is_hdcp2_supported(dev_priv))
1881                 return;
1882
1883         mutex_lock(&dev_priv->hdcp_comp_mutex);
1884         WARN_ON(dev_priv->hdcp_comp_added);
1885
1886         dev_priv->hdcp_comp_added = true;
1887         mutex_unlock(&dev_priv->hdcp_comp_mutex);
1888         ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops,
1889                                   I915_COMPONENT_HDCP);
1890         if (ret < 0) {
1891                 DRM_DEBUG_KMS("Failed at component add(%d)\n", ret);
1892                 mutex_lock(&dev_priv->hdcp_comp_mutex);
1893                 dev_priv->hdcp_comp_added = false;
1894                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1895                 return;
1896         }
1897 }
1898
1899 static void intel_hdcp2_init(struct intel_connector *connector,
1900                              const struct intel_hdcp_shim *shim)
1901 {
1902         struct intel_hdcp *hdcp = &connector->hdcp;
1903         int ret;
1904
1905         ret = initialize_hdcp_port_data(connector, shim);
1906         if (ret) {
1907                 DRM_DEBUG_KMS("Mei hdcp data init failed\n");
1908                 return;
1909         }
1910
1911         hdcp->hdcp2_supported = true;
1912 }
1913
1914 int intel_hdcp_init(struct intel_connector *connector,
1915                     const struct intel_hdcp_shim *shim)
1916 {
1917         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1918         struct intel_hdcp *hdcp = &connector->hdcp;
1919         int ret;
1920
1921         if (!shim)
1922                 return -EINVAL;
1923
1924         if (is_hdcp2_supported(dev_priv))
1925                 intel_hdcp2_init(connector, shim);
1926
1927         ret =
1928         drm_connector_attach_content_protection_property(&connector->base,
1929                                                          hdcp->hdcp2_supported);
1930         if (ret) {
1931                 hdcp->hdcp2_supported = false;
1932                 kfree(hdcp->port_data.streams);
1933                 return ret;
1934         }
1935
1936         hdcp->shim = shim;
1937         mutex_init(&hdcp->mutex);
1938         INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
1939         INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
1940         init_waitqueue_head(&hdcp->cp_irq_queue);
1941
1942         return 0;
1943 }
1944
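     /*
      * Enable content protection on the connector: prefer HDCP2.2 when both
      * platform and sink are capable, fall back to HDCP1.4 unless Type1
      * content was requested, and schedule the periodic link check on
      * success.
      */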
1945 int intel_hdcp_enable(struct intel_connector *connector,
1946                       enum transcoder cpu_transcoder, u8 content_type)
1947 {
1948         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1949         struct intel_hdcp *hdcp = &connector->hdcp;
1950         unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
1951         int ret = -EINVAL;
1952
1953         if (!hdcp->shim)
1954                 return -ENOENT;
1955
1956         mutex_lock(&hdcp->mutex);
1957         WARN_ON(hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
1958         hdcp->content_type = content_type;
1959
1960         if (INTEL_GEN(dev_priv) >= 12) {
1961                 hdcp->cpu_transcoder = cpu_transcoder;
1962                 hdcp->port_data.fw_tc = intel_get_mei_fw_tc(cpu_transcoder);
1963         }
1964
1965         /*
1966          * HDCP2.2 is more secure than HDCP1.4, so if the setup is capable of
1967          * HDCP2.2 it is the preferred choice.
1968          */
1969         if (intel_hdcp2_capable(connector)) {
1970                 ret = _intel_hdcp2_enable(connector);
1971                 if (!ret)
1972                         check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS;
1973         }
1974
1975         /*
1976          * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
1977          * be attempted.
1978          */
1979         if (ret && intel_hdcp_capable(connector) &&
1980             hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
1981                 ret = _intel_hdcp_enable(connector);
1982         }
1983
1984         if (!ret) {
1985                 schedule_delayed_work(&hdcp->check_work, check_link_interval);
1986                 hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
1987                 schedule_work(&hdcp->prop_work);
1988         }
1989
1990         mutex_unlock(&hdcp->mutex);
1991         return ret;
1992 }
1993
1994 int intel_hdcp_disable(struct intel_connector *connector)
1995 {
1996         struct intel_hdcp *hdcp = &connector->hdcp;
1997         int ret = 0;
1998
1999         if (!hdcp->shim)
2000                 return -ENOENT;
2001
2002         mutex_lock(&hdcp->mutex);
2003
2004         if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
2005                 hdcp->value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
2006                 if (hdcp->hdcp2_encrypted)
2007                         ret = _intel_hdcp2_disable(connector);
2008                 else if (hdcp->hdcp_encrypted)
2009                         ret = _intel_hdcp_disable(connector);
2010         }
2011
2012         mutex_unlock(&hdcp->mutex);
2013         cancel_delayed_work_sync(&hdcp->check_work);
2014         return ret;
2015 }
2016
2017 void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
2018 {
2019         mutex_lock(&dev_priv->hdcp_comp_mutex);
2020         if (!dev_priv->hdcp_comp_added) {
2021                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
2022                 return;
2023         }
2024
2025         dev_priv->hdcp_comp_added = false;
2026         mutex_unlock(&dev_priv->hdcp_comp_mutex);
2027
2028         component_del(dev_priv->drm.dev, &i915_hdcp_component_ops);
2029 }
2030
2031 void intel_hdcp_cleanup(struct intel_connector *connector)
2032 {
2033         if (!connector->hdcp.shim)
2034                 return;
2035
2036         mutex_lock(&connector->hdcp.mutex);
2037         kfree(connector->hdcp.port_data.streams);
2038         mutex_unlock(&connector->hdcp.mutex);
2039 }
2040
2041 void intel_hdcp_atomic_check(struct drm_connector *connector,
2042                              struct drm_connector_state *old_state,
2043                              struct drm_connector_state *new_state)
2044 {
2045         u64 old_cp = old_state->content_protection;
2046         u64 new_cp = new_state->content_protection;
2047         struct drm_crtc_state *crtc_state;
2048
2049         if (!new_state->crtc) {
2050                 /*
2051                  * If the connector is being disabled with CP enabled, mark it
2052                  * desired so it's re-enabled when the connector is brought back.
2053                  */
2054                 if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2055                         new_state->content_protection =
2056                                 DRM_MODE_CONTENT_PROTECTION_DESIRED;
2057                 return;
2058         }
2059
2060         /*
2061          * Nothing to do if the state didn't change (or HDCP was activated since
2062          * the last commit) and the HDCP content type is unchanged.
2063          */
2064         if (old_cp == new_cp ||
2065             (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
2066              new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) {
2067                 if (old_state->hdcp_content_type ==
2068                                 new_state->hdcp_content_type)
2069                         return;
2070         }
2071
2072         crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
2073                                                    new_state->crtc);
2074         crtc_state->mode_changed = true;
2075 }
2076
2077 /* Handles the CP_IRQ raised from the DP HDCP sink */
2078 void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
2079 {
2080         struct intel_hdcp *hdcp = &connector->hdcp;
2081
2082         if (!hdcp->shim)
2083                 return;
2084
2085         atomic_inc(&connector->hdcp.cp_irq_count);
2086         wake_up_all(&connector->hdcp.cp_irq_queue);
2087
2088         schedule_delayed_work(&hdcp->check_work, 0);
2089 }