2 * Copyright © 2014 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
24 #include <drm/drm_atomic_helper.h>
25 #include <drm/drm_damage_helper.h>
26 #include <drm/drm_debugfs.h>
30 #include "intel_atomic.h"
31 #include "intel_crtc.h"
32 #include "intel_ddi.h"
34 #include "intel_display_types.h"
36 #include "intel_dp_aux.h"
37 #include "intel_frontbuffer.h"
38 #include "intel_hdmi.h"
39 #include "intel_psr.h"
40 #include "intel_psr_regs.h"
41 #include "intel_snps_phy.h"
42 #include "skl_universal_plane.h"
45 * DOC: Panel Self Refresh (PSR/SRD)
47 * Since Haswell the display controller supports Panel Self-Refresh on
48 * display panels which have a remote frame buffer (RFB) implemented
49 * according to the PSR spec in eDP 1.3. PSR allows the display to go to
50 * lower standby states when the system is idle but the display is on, as
51 * it eliminates display refresh requests to DDR memory completely as long
52 * as the frame buffer for that display is unchanged.
54 * Panel Self Refresh must be supported by both Hardware (source) and
57 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
58 * to power down the link and memory controller. For DSI panels the same idea
59 * is called "manual mode".
61 * The implementation uses the hardware-based PSR support which automatically
62 * enters/exits self-refresh mode. The hardware takes care of sending the
63 * required DP aux message and could even retrain the link (that part isn't
64 * enabled yet though). The hardware also keeps track of any frontbuffer
65 * changes to know when to exit self-refresh mode again. Unfortunately that
66 * part doesn't work too well, which is why the i915 PSR support uses the
67 * software frontbuffer tracking to make sure it doesn't miss a screen
68 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
69 * get called by the frontbuffer tracking code. Note that because of locking
70 * issues the self-refresh re-enable code is done from a work queue, which
71 * must be correctly synchronized/cancelled when shutting down the pipe.
73 * DC3CO (DC3 clock off)
75 * On top of PSR2, GEN12 adds an intermediate power-saving state that turns
76 * the clock off automatically during the PSR2 idle state.
77 * The smaller overhead of DC3co entry/exit vs. the overhead of PSR2 deep sleep
78 * entry/exit allows the HW to enter a low-power state even when page flipping
79 * periodically (for instance a 30fps video playback scenario).
81 * Every time a flip occurs PSR2 will get out of deep sleep state (if it was),
82 * so DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run after 6
83 * frames. If no other flip occurs and that work function is executed, DC3CO is
84 * disabled and PSR2 is configured to enter deep sleep, resetting again in case
86 * Front buffer modifications do not trigger DC3CO activation on purpose as it
87 * would bring a lot of complexity and most modern systems will only
92 * Description of PSR mask bits:
94 * EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl):
96 * When unmasked (nearly) all display register writes (eg. even
97 * SWF) trigger a PSR exit. Some registers are excluded from this
98 * and they have a more specific mask (described below). On icl+
99 * this bit no longer exists and is effectively always set.
101 * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+):
103 * When unmasked (nearly) all pipe/plane register writes
104 * trigger a PSR exit. Some plane registers are excluded from this
105 * and they have a more specific mask (described below).
107 * CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+):
108 * PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw):
109 * EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw):
111 * When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit.
112 * SPR_SURF/CURBASE are not included in this and instead are
113 * controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or
114 * EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw).
116 * PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw):
117 * EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw):
119 * When unmasked PSR is blocked as long as the sprite
120 * plane is enabled. skl+ with their universal planes no
121 * longer have a mask bit like this, and no plane being
122 * enabled blocks PSR.
124 * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw):
125 * EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw):
127 * When unmasked CURPOS writes trigger a PSR exit. On skl+
128 * this doesn't exist but CURPOS is included in the
129 * PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask.
131 * PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+):
132 * EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw):
134 * When unmasked PSR is blocked as long as vblank and/or vsync
135 * interrupt is unmasked in IMR *and* enabled in IER.
137 * CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+):
138 * CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw):
140 * Selects whether PSR exit generates an extra vblank before
141 * the first frame is transmitted. Also note the opposite polarity
142 * of the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank,
143 * unmasked==do not generate the extra vblank).
145 * With DC states enabled the extra vblank happens after link training,
146 * with DC states disabled it happens immediately upon PSR exit trigger.
147 * No idea as of now why there is a difference. HSW/BDW (which don't
148 * even have DMC) always generate it after link training. Go figure.
150 * Unfortunately CHICKEN_TRANS itself seems to be double buffered
151 * and thus won't latch until the first vblank. So with DC states
152 * enabled the register effectively uses the reset value during DC5
153 * exit+PSR exit sequence, and thus the bit does nothing until
154 * latched by the vblank that it was trying to prevent from being
155 * generated in the first place. So we should probably call this
156 * one a chicken/egg bit instead on skl+.
158 * In standby mode (as opposed to link-off) this makes no difference
159 * as the timing generator keeps running the whole time generating
160 * normal periodic vblanks.
162 * WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw,
163 * and doing so makes the behaviour match the skl+ reset value.
165 * CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw):
166 * CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
168 * On BDW without this bit set no vblanks whatsoever are
169 * generated after PSR exit. On HSW this has no apparent effect.
170 * WaPsrDPRSUnmaskVBlankInSRD says to set this.
172 * The rest of the bits are more self-explanatory and/or
173 * irrelevant for normal operation.
176 #define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
177 (intel_dp)->psr.source_support)
179 #define CAN_PANEL_REPLAY(intel_dp) ((intel_dp)->psr.sink_panel_replay_support && \
180 (intel_dp)->psr.source_panel_replay_support)
182 bool intel_encoder_can_psr(struct intel_encoder *encoder)
184 if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST)
185 return CAN_PSR(enc_to_intel_dp(encoder)) ||
186 CAN_PANEL_REPLAY(enc_to_intel_dp(encoder));
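/*
 * Resolve the effective PSR enable setting: a debugfs override takes
 * precedence, then the enable_psr module parameter, and with the
 * parameter at -1 the VBT default decides.
 */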
191 static bool psr_global_enabled(struct intel_dp *intel_dp)
193 struct intel_connector *connector = intel_dp->attached_connector;
194 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
196 switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
197 case I915_PSR_DEBUG_DEFAULT:
198 if (i915->display.params.enable_psr == -1)
199 return connector->panel.vbt.psr.enable;
200 return i915->display.params.enable_psr;
201 case I915_PSR_DEBUG_DISABLE:
208 static bool psr2_global_enabled(struct intel_dp *intel_dp)
210 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
212 switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
213 case I915_PSR_DEBUG_DISABLE:
214 case I915_PSR_DEBUG_FORCE_PSR1:
217 if (i915->display.params.enable_psr == 1)
223 static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
225 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
227 return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_ERROR :
228 EDP_PSR_ERROR(intel_dp->psr.transcoder);
231 static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
233 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
235 return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_POST_EXIT :
236 EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
239 static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
241 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
243 return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_PRE_ENTRY :
244 EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
247 static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
249 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
251 return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_MASK :
252 EDP_PSR_MASK(intel_dp->psr.transcoder);
255 static i915_reg_t psr_ctl_reg(struct drm_i915_private *dev_priv,
256 enum transcoder cpu_transcoder)
258 if (DISPLAY_VER(dev_priv) >= 8)
259 return EDP_PSR_CTL(cpu_transcoder);
264 static i915_reg_t psr_debug_reg(struct drm_i915_private *dev_priv,
265 enum transcoder cpu_transcoder)
267 if (DISPLAY_VER(dev_priv) >= 8)
268 return EDP_PSR_DEBUG(cpu_transcoder);
270 return HSW_SRD_DEBUG;
273 static i915_reg_t psr_perf_cnt_reg(struct drm_i915_private *dev_priv,
274 enum transcoder cpu_transcoder)
276 if (DISPLAY_VER(dev_priv) >= 8)
277 return EDP_PSR_PERF_CNT(cpu_transcoder);
279 return HSW_SRD_PERF_CNT;
282 static i915_reg_t psr_status_reg(struct drm_i915_private *dev_priv,
283 enum transcoder cpu_transcoder)
285 if (DISPLAY_VER(dev_priv) >= 8)
286 return EDP_PSR_STATUS(cpu_transcoder);
288 return HSW_SRD_STATUS;
291 static i915_reg_t psr_imr_reg(struct drm_i915_private *dev_priv,
292 enum transcoder cpu_transcoder)
294 if (DISPLAY_VER(dev_priv) >= 12)
295 return TRANS_PSR_IMR(cpu_transcoder);
300 static i915_reg_t psr_iir_reg(struct drm_i915_private *dev_priv,
301 enum transcoder cpu_transcoder)
303 if (DISPLAY_VER(dev_priv) >= 12)
304 return TRANS_PSR_IIR(cpu_transcoder);
309 static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
310 enum transcoder cpu_transcoder)
312 if (DISPLAY_VER(dev_priv) >= 8)
313 return EDP_PSR_AUX_CTL(cpu_transcoder);
315 return HSW_SRD_AUX_CTL;
318 static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
319 enum transcoder cpu_transcoder, int i)
321 if (DISPLAY_VER(dev_priv) >= 8)
322 return EDP_PSR_AUX_DATA(cpu_transcoder, i);
324 return HSW_SRD_AUX_DATA(i);
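/*
 * Unmask the PSR error interrupt unconditionally, and additionally the
 * pre-entry/post-exit debug interrupts when I915_PSR_DEBUG_IRQ is set.
 */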
327 static void psr_irq_control(struct intel_dp *intel_dp)
329 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
330 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
333 mask = psr_irq_psr_error_bit_get(intel_dp);
334 if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
335 mask |= psr_irq_post_exit_bit_get(intel_dp) |
336 psr_irq_pre_entry_bit_get(intel_dp);
338 intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
339 psr_irq_mask_get(intel_dp), ~mask);
342 static void psr_event_print(struct drm_i915_private *i915,
343 u32 val, bool psr2_enabled)
345 drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
346 if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
347 drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
348 if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
349 drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
350 if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
351 drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
352 if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
353 drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
354 if (val & PSR_EVENT_GRAPHICS_RESET)
355 drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
356 if (val & PSR_EVENT_PCH_INTERRUPT)
357 drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
358 if (val & PSR_EVENT_MEMORY_UP)
359 drm_dbg_kms(&i915->drm, "\tMemory up\n");
360 if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
361 drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
362 if (val & PSR_EVENT_WD_TIMER_EXPIRE)
363 drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
364 if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
365 drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
366 if (val & PSR_EVENT_REGISTER_UPDATE)
367 drm_dbg_kms(&i915->drm, "\tRegister updated\n");
368 if (val & PSR_EVENT_HDCP_ENABLE)
369 drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
370 if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
371 drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
372 if (val & PSR_EVENT_VBI_ENABLE)
373 drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
374 if (val & PSR_EVENT_LPSP_MODE_EXIT)
375 drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
376 if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
377 drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
380 void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
382 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
383 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
384 ktime_t time_ns = ktime_get();
386 if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
387 intel_dp->psr.last_entry_attempt = time_ns;
388 drm_dbg_kms(&dev_priv->drm,
389 "[transcoder %s] PSR entry attempt in 2 vblanks\n",
390 transcoder_name(cpu_transcoder));
393 if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
394 intel_dp->psr.last_exit = time_ns;
395 drm_dbg_kms(&dev_priv->drm,
396 "[transcoder %s] PSR exit completed\n",
397 transcoder_name(cpu_transcoder));
399 if (DISPLAY_VER(dev_priv) >= 9) {
402 val = intel_de_rmw(dev_priv, PSR_EVENT(cpu_transcoder), 0, 0);
404 psr_event_print(dev_priv, val, intel_dp->psr.psr2_enabled);
408 if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
409 drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
410 transcoder_name(cpu_transcoder));
412 intel_dp->psr.irq_aux_error = true;
415 * If this interrupt is not masked it will keep
416 * firing so fast that it prevents the scheduled
418 * Also after a PSR error, we don't want to arm PSR
419 * again, so we don't care about unmasking the interrupt
420 * or clearing irq_aux_error.
422 intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
423 0, psr_irq_psr_error_bit_get(intel_dp));
425 queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
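/* Read DP_RECEIVER_ALPM_CAP from the sink to see if ALPM is supported. */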
429 static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
433 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
436 return alpm_caps & DP_ALPM_CAP;
439 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
441 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
442 u8 val = 8; /* assume the worst if we can't read the value */
444 if (drm_dp_dpcd_readb(&intel_dp->aux,
445 DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
446 val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
448 drm_dbg_kms(&i915->drm,
449 "Unable to get sink synchronization latency, assuming 8 frames\n");
453 static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
455 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
460 /* If the sink doesn't have specific granularity requirements, set legacy ones */
461 if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) {
462 /* As PSR2 HW sends full lines, we do not care about x granularity */
468 r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &w, 2);
470 drm_dbg_kms(&i915->drm,
471 "Unable to read DP_PSR2_SU_X_GRANULARITY\n");
473 * Spec says that if the value read is 0 the default granularity should
476 if (r != 2 || w == 0)
479 r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_Y_GRANULARITY, &y, 1);
481 drm_dbg_kms(&i915->drm,
482 "Unable to read DP_PSR2_SU_Y_GRANULARITY\n");
489 intel_dp->psr.su_w_granularity = w;
490 intel_dp->psr.su_y_granularity = y;
493 static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
495 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
498 intel_dp->psr.sink_panel_replay_support = false;
499 drm_dp_dpcd_readb(&intel_dp->aux, DP_PANEL_REPLAY_CAP, &pr_dpcd);
501 if (!(pr_dpcd & DP_PANEL_REPLAY_SUPPORT)) {
502 drm_dbg_kms(&i915->drm,
503 "Panel replay is not supported by panel\n");
507 drm_dbg_kms(&i915->drm,
508 "Panel replay is supported by panel\n");
509 intel_dp->psr.sink_panel_replay_support = true;
512 static void _psr_init_dpcd(struct intel_dp *intel_dp)
514 struct drm_i915_private *i915 =
515 to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
517 drm_dbg_kms(&i915->drm, "eDP panel supports PSR version %x\n",
518 intel_dp->psr_dpcd[0]);
520 if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
521 drm_dbg_kms(&i915->drm,
522 "PSR support not currently available for this panel\n");
526 if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
527 drm_dbg_kms(&i915->drm,
528 "Panel lacks power state control, PSR cannot be enabled\n");
532 intel_dp->psr.sink_support = true;
533 intel_dp->psr.sink_sync_latency =
534 intel_dp_get_sink_sync_latency(intel_dp);
536 if (DISPLAY_VER(i915) >= 9 &&
537 intel_dp->psr_dpcd[0] >= DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
538 bool y_req = intel_dp->psr_dpcd[1] &
539 DP_PSR2_SU_Y_COORDINATE_REQUIRED;
540 bool alpm = intel_dp_get_alpm_status(intel_dp);
543 * All panels that support PSR version 03h (PSR2 +
544 * Y-coordinate) can handle Y-coordinates in VSC, but we are
545 * only sure that it is going to be used when required by the
546 * panel. This way the panel is capable of doing selective updates
547 * without an aux frame sync.
549 * To support PSR version 02h and PSR version 03h panels without the
550 * Y-coordinate requirement we would need to enable
553 intel_dp->psr.sink_psr2_support = y_req && alpm;
554 drm_dbg_kms(&i915->drm, "PSR2 %ssupported\n",
555 intel_dp->psr.sink_psr2_support ? "" : "not ");
559 void intel_psr_init_dpcd(struct intel_dp *intel_dp)
561 _panel_replay_init_dpcd(intel_dp);
563 drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
564 sizeof(intel_dp->psr_dpcd));
566 if (intel_dp->psr_dpcd[0])
567 _psr_init_dpcd(intel_dp);
569 if (intel_dp->psr.sink_psr2_support)
570 intel_dp_get_su_granularity(intel_dp);
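/*
 * HSW/BDW need the AUX message used by the PSR hardware to be programmed
 * manually: pre-pack a DP_SET_POWER=D0 native write into the AUX data
 * registers and derive the AUX control value from the normal DDI setup.
 */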
573 static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
575 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
576 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
577 u32 aux_clock_divider, aux_ctl;
578 /* write DP_SET_POWER=D0 */
579 static const u8 aux_msg[] = {
580 [0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf),
581 [1] = (DP_SET_POWER >> 8) & 0xff,
582 [2] = DP_SET_POWER & 0xff,
584 [4] = DP_SET_POWER_D0,
588 BUILD_BUG_ON(sizeof(aux_msg) > 20);
589 for (i = 0; i < sizeof(aux_msg); i += 4)
590 intel_de_write(dev_priv,
591 psr_aux_data_reg(dev_priv, cpu_transcoder, i >> 2),
592 intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));
594 aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
596 /* Start with bits set for DDI_AUX_CTL register */
597 aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
600 /* Select only valid bits for SRD_AUX_CTL */
601 aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK |
602 EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
603 EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
604 EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
606 intel_de_write(dev_priv, psr_aux_ctl_reg(dev_priv, cpu_transcoder),
610 static bool psr2_su_region_et_valid(struct intel_dp *intel_dp)
612 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
614 if (DISPLAY_VER(i915) >= 20 &&
615 intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED &&
616 !(intel_dp->psr.debug & I915_PSR_DEBUG_SU_REGION_ET_DISABLE))
622 static void intel_psr_enable_sink(struct intel_dp *intel_dp)
624 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
625 u8 dpcd_val = DP_PSR_ENABLE;
627 if (intel_dp->psr.panel_replay_enabled)
630 if (intel_dp->psr.psr2_enabled) {
631 /* Enable ALPM at sink for psr2 */
632 drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
634 DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
636 dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
637 if (psr2_su_region_et_valid(intel_dp))
638 dpcd_val |= DP_PSR_ENABLE_SU_REGION_ET;
640 if (intel_dp->psr.link_standby)
641 dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
643 if (DISPLAY_VER(dev_priv) >= 8)
644 dpcd_val |= DP_PSR_CRC_VERIFICATION;
647 if (intel_dp->psr.req_psr2_sdp_prior_scanline)
648 dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
650 if (intel_dp->psr.entry_setup_frames > 0)
651 dpcd_val |= DP_PSR_FRAME_CAPTURE;
653 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
655 drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
658 static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
660 struct intel_connector *connector = intel_dp->attached_connector;
661 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
664 if (DISPLAY_VER(dev_priv) >= 11)
665 val |= EDP_PSR_TP4_TIME_0us;
667 if (dev_priv->display.params.psr_safest_params) {
668 val |= EDP_PSR_TP1_TIME_2500us;
669 val |= EDP_PSR_TP2_TP3_TIME_2500us;
673 if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
674 val |= EDP_PSR_TP1_TIME_0us;
675 else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
676 val |= EDP_PSR_TP1_TIME_100us;
677 else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
678 val |= EDP_PSR_TP1_TIME_500us;
680 val |= EDP_PSR_TP1_TIME_2500us;
682 if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
683 val |= EDP_PSR_TP2_TP3_TIME_0us;
684 else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
685 val |= EDP_PSR_TP2_TP3_TIME_100us;
686 else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
687 val |= EDP_PSR_TP2_TP3_TIME_500us;
689 val |= EDP_PSR_TP2_TP3_TIME_2500us;
693 * "Do not skip both TP1 and TP2/TP3"
695 if (DISPLAY_VER(dev_priv) < 9 &&
696 connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
697 connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
698 val |= EDP_PSR_TP2_TP3_TIME_100us;
701 if (intel_dp_source_supports_tps3(dev_priv) &&
702 drm_dp_tps3_supported(intel_dp->dpcd))
703 val |= EDP_PSR_TP_TP1_TP3;
705 val |= EDP_PSR_TP_TP1_TP2;
710 static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
712 struct intel_connector *connector = intel_dp->attached_connector;
713 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
716 /* Let's use 6 as the minimum to cover all known cases including the
717 * off-by-one issue that HW has in some cases.
719 idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
720 idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
722 if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
728 static void hsw_activate_psr1(struct intel_dp *intel_dp)
730 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
731 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
732 u32 max_sleep_time = 0x1f;
733 u32 val = EDP_PSR_ENABLE;
735 val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
737 if (DISPLAY_VER(dev_priv) < 20)
738 val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
740 if (IS_HASWELL(dev_priv))
741 val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
743 if (intel_dp->psr.link_standby)
744 val |= EDP_PSR_LINK_STANDBY;
746 val |= intel_psr1_get_tp_time(intel_dp);
748 if (DISPLAY_VER(dev_priv) >= 8)
749 val |= EDP_PSR_CRC_ENABLE;
751 if (DISPLAY_VER(dev_priv) >= 20)
752 val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
754 intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
755 ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
758 static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
760 struct intel_connector *connector = intel_dp->attached_connector;
761 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
764 if (dev_priv->display.params.psr_safest_params)
765 return EDP_PSR2_TP2_TIME_2500us;
767 if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
768 connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
769 val |= EDP_PSR2_TP2_TIME_50us;
770 else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
771 val |= EDP_PSR2_TP2_TIME_100us;
772 else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
773 val |= EDP_PSR2_TP2_TIME_500us;
775 val |= EDP_PSR2_TP2_TIME_2500us;
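/*
 * The PSR2 block count is programmed in units of 4 lines: 8 lines when
 * both the IO and fast wake line counts are below 9, otherwise 12.
 */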
780 static int psr2_block_count_lines(struct intel_dp *intel_dp)
782 return intel_dp->psr.alpm_parameters.io_wake_lines < 9 &&
783 intel_dp->psr.alpm_parameters.fast_wake_lines < 9 ? 8 : 12;
786 static int psr2_block_count(struct intel_dp *intel_dp)
788 return psr2_block_count_lines(intel_dp) / 4;
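/*
 * Number of frames to wait before selective update entry; it must be
 * greater than both the sink sync latency and the entry setup frames.
 */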
791 static u8 frames_before_su_entry(struct intel_dp *intel_dp)
793 u8 frames_before_su_entry;
795 frames_before_su_entry = max_t(u8,
796 intel_dp->psr.sink_sync_latency + 1,
799 /* Entry setup frames must be at least 1 less than frames before SU entry */
800 if (intel_dp->psr.entry_setup_frames >= frames_before_su_entry)
801 frames_before_su_entry = intel_dp->psr.entry_setup_frames + 1;
803 return frames_before_su_entry;
806 static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
808 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
810 intel_de_rmw(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
811 0, ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME);
813 intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0,
814 TRANS_DP2_PANEL_REPLAY_ENABLE);
817 static void hsw_activate_psr2(struct intel_dp *intel_dp)
819 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
820 struct intel_psr *psr = &intel_dp->psr;
821 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
822 u32 val = EDP_PSR2_ENABLE;
825 val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
827 if (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))
828 val |= EDP_SU_TRACK_ENABLE;
830 if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) < 13)
831 val |= EDP_Y_COORDINATE_ENABLE;
833 val |= EDP_PSR2_FRAME_BEFORE_SU(frames_before_su_entry(intel_dp));
835 val |= intel_psr2_get_tp_time(intel_dp);
837 if (DISPLAY_VER(dev_priv) >= 12) {
838 if (psr2_block_count(intel_dp) > 2)
839 val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
841 val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
844 /* Wa_22012278275:adl-p */
845 if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
846 static const u8 map[] = {
857 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
858 * comments below for more information
862 tmp = map[psr->alpm_parameters.io_wake_lines -
863 TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
864 val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);
866 tmp = map[psr->alpm_parameters.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
867 val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
868 } else if (DISPLAY_VER(dev_priv) >= 12) {
869 val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(psr->alpm_parameters.io_wake_lines);
870 val |= TGL_EDP_PSR2_FAST_WAKE(psr->alpm_parameters.fast_wake_lines);
871 } else if (DISPLAY_VER(dev_priv) >= 9) {
872 val |= EDP_PSR2_IO_BUFFER_WAKE(psr->alpm_parameters.io_wake_lines);
873 val |= EDP_PSR2_FAST_WAKE(psr->alpm_parameters.fast_wake_lines);
876 if (intel_dp->psr.req_psr2_sdp_prior_scanline)
877 val |= EDP_PSR2_SU_SDP_SCANLINE;
879 if (DISPLAY_VER(dev_priv) >= 20)
880 psr_val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
882 if (intel_dp->psr.psr2_sel_fetch_enabled) {
885 tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
886 drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
887 } else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
888 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), 0);
891 if (psr2_su_region_et_valid(intel_dp))
892 val |= LNL_EDP_PSR2_SU_REGION_ET_ENABLE;
895 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
896 * recommends keeping this bit unset while PSR2 is enabled.
898 intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), psr_val);
900 intel_de_write(dev_priv, EDP_PSR2_CTL(cpu_transcoder), val);
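/*
 * PSR2 is only hooked up on a subset of transcoders: A/B on adl-p and
 * display 14+, only transcoder A on display 12/13, and the eDP
 * transcoder on display 9-11.
 */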
904 transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder)
906 if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
907 return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
908 else if (DISPLAY_VER(dev_priv) >= 12)
909 return cpu_transcoder == TRANSCODER_A;
910 else if (DISPLAY_VER(dev_priv) >= 9)
911 return cpu_transcoder == TRANSCODER_EDP;
916 static u32 intel_get_frame_time_us(const struct intel_crtc_state *crtc_state)
918 if (!crtc_state->hw.active)
921 return DIV_ROUND_UP(1000 * 1000,
922 drm_mode_vrefresh(&crtc_state->hw.adjusted_mode));
925 static void psr2_program_idle_frames(struct intel_dp *intel_dp,
928 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
929 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
931 intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
932 EDP_PSR2_IDLE_FRAMES_MASK,
933 EDP_PSR2_IDLE_FRAMES(idle_frames));
936 static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
938 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
940 psr2_program_idle_frames(intel_dp, 0);
941 intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
944 static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
946 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
948 intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
949 psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
952 static void tgl_dc3co_disable_work(struct work_struct *work)
954 struct intel_dp *intel_dp =
955 container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
957 mutex_lock(&intel_dp->psr.lock);
958 /* If delayed work is pending, it is not idle */
959 if (delayed_work_pending(&intel_dp->psr.dc3co_work))
962 tgl_psr2_disable_dc3co(intel_dp);
964 mutex_unlock(&intel_dp->psr.lock);
967 static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
969 if (!intel_dp->psr.dc3co_exitline)
972 cancel_delayed_work(&intel_dp->psr.dc3co_work);
973 /* Before PSR2 exit disallow dc3co */
974 tgl_psr2_disable_dc3co(intel_dp);
978 dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
979 struct intel_crtc_state *crtc_state)
981 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
982 enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
983 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
984 enum port port = dig_port->base.port;
986 if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
987 return pipe <= PIPE_B && port <= PORT_B;
989 return pipe == PIPE_A && port == PORT_A;
993 tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
994 struct intel_crtc_state *crtc_state)
996 const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
997 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
998 struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
1002 * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
1003 * disable DC3CO until the changed dc3co activating/deactivating sequence
1004 * is applied. B.Specs:49196
1009 * DMC's DC3CO exit mechanism has an issue with Selective Fetch
1010 * TODO: when the issue is addressed, this restriction should be removed.
1012 if (crtc_state->enable_psr2_sel_fetch)
1015 if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
1018 if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
1021 /* Wa_16011303918:adl-p */
1022 if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1026 * DC3CO Exit time 200us B.Spec 49196
1027 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
1030 intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
1032 if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
1035 crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
1038 static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
1039 struct intel_crtc_state *crtc_state)
1041 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1043 if (!dev_priv->display.params.enable_psr2_sel_fetch &&
1044 intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
1045 drm_dbg_kms(&dev_priv->drm,
1046 "PSR2 sel fetch not enabled, disabled by parameter\n");
1050 if (crtc_state->uapi.async_flip) {
1051 drm_dbg_kms(&dev_priv->drm,
1052 "PSR2 sel fetch not enabled, async flip enabled\n");
1056 if (psr2_su_region_et_valid(intel_dp))
1057 crtc_state->enable_psr2_su_region_et = true;
1059 return crtc_state->enable_psr2_sel_fetch = true;
1062 static bool psr2_granularity_check(struct intel_dp *intel_dp,
1063 struct intel_crtc_state *crtc_state)
1065 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1066 const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
1067 const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1068 const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1069 u16 y_granularity = 0;
1071 /* PSR2 HW only sends full lines so we only need to validate the width */
1072 if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
1075 if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
1078 /* HW tracking is only aligned to 4 lines */
1079 if (!crtc_state->enable_psr2_sel_fetch)
1080 return intel_dp->psr.su_y_granularity == 4;
1083 * adl_p and mtl platforms have 1 line granularity.
1084 * For other platforms with SW tracking we can adjust the y coordinates
1085 * to match the sink requirement if it is a multiple of 4.
1087 if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
1088 y_granularity = intel_dp->psr.su_y_granularity;
1089 else if (intel_dp->psr.su_y_granularity <= 2)
1091 else if ((intel_dp->psr.su_y_granularity % 4) == 0)
1092 y_granularity = intel_dp->psr.su_y_granularity;
1094 if (y_granularity == 0 || crtc_vdisplay % y_granularity)
1097 if (crtc_state->dsc.compression_enable &&
1098 vdsc_cfg->slice_height % y_granularity)
1101 crtc_state->su_y_granularity = y_granularity;
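/*
 * If the PSR2 SDP does not fit in hblank (with some margin), fall back
 * to requesting SDP transmission prior to the scanline; that is only
 * possible on display 14+ with panels supporting at least eDP 1.4b.
 */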
1105 static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
1106 struct intel_crtc_state *crtc_state)
1108 const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
1109 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1110 u32 hblank_total, hblank_ns, req_ns;
1112 hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
1113 hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
1115 /* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
1116 req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);
1118 if ((hblank_ns - req_ns) > 100)
1121 /* Not supported <13 / Wa_22012279113:adl-p */
1122 if (DISPLAY_VER(dev_priv) < 14 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
1125 crtc_state->req_psr2_sdp_prior_scanline = true;
1129 static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp,
1130 struct intel_crtc_state *crtc_state)
1132 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1133 int check_entry_lines;
1135 if (DISPLAY_VER(i915) < 20)
1138 /* ALPM Entry Check = 2 + CEILING(5us / tline) */
1139 check_entry_lines = 2 +
1140 intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, 5);
1142 if (check_entry_lines > 15)
1145 if (i915->display.params.psr_safest_params)
1146 check_entry_lines = 15;
1148 intel_dp->psr.alpm_parameters.check_entry_lines = check_entry_lines;
1153 static bool _compute_alpm_params(struct intel_dp *intel_dp,
1154 struct intel_crtc_state *crtc_state)
1156 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1157 int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
1160 if (DISPLAY_VER(i915) >= 12) {
1163 * According to Bspec it's 42us, but based on testing
1164 * it is not enough -> use 45 us.
1166 fast_wake_time = 45;
1168 /* TODO: Check how we can use ALPM_CTL fast wake extended field */
1169 max_wake_lines = 12;
1172 fast_wake_time = 32;
1176 io_wake_lines = intel_usecs_to_scanlines(
1177 &crtc_state->hw.adjusted_mode, io_wake_time);
1178 fast_wake_lines = intel_usecs_to_scanlines(
1179 &crtc_state->hw.adjusted_mode, fast_wake_time);
1181 if (io_wake_lines > max_wake_lines ||
1182 fast_wake_lines > max_wake_lines)
1185 if (!_lnl_compute_alpm_params(intel_dp, crtc_state))
1188 if (i915->display.params.psr_safest_params)
1189 io_wake_lines = fast_wake_lines = max_wake_lines;
1191 /* According to Bspec the lower limit should be set to 7 lines. */
1192 intel_dp->psr.alpm_parameters.io_wake_lines = max(io_wake_lines, 7);
1193 intel_dp->psr.alpm_parameters.fast_wake_lines = max(fast_wake_lines, 7);
1198 static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
1199 const struct drm_display_mode *adjusted_mode)
1201 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1202 int psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
1203 int entry_setup_frames = 0;
1205 if (psr_setup_time < 0) {
1206 drm_dbg_kms(&i915->drm,
1207 "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
1208 intel_dp->psr_dpcd[1]);
1212 if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
1213 adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
1214 if (DISPLAY_VER(i915) >= 20) {
1215 /* setup entry frames can be up to 3 frames */
1216 entry_setup_frames = 1;
1217 drm_dbg_kms(&i915->drm,
1218 "PSR setup entry frames %d\n",
1219 entry_setup_frames);
1221 drm_dbg_kms(&i915->drm,
1222 "PSR condition failed: PSR setup time (%d us) too long\n",
1228 return entry_setup_frames;
1231 static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
1232 struct intel_crtc_state *crtc_state)
1234 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1235 int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1236 int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1237 int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
1239 if (!intel_dp->psr.sink_psr2_support)
1242 /* JSL and EHL only support eDP 1.3 */
1243 if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
1244 drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
1248 /* Wa_16011181250 */
1249 if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
1251 drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n");
1255 if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
1256 drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n");
1260 if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
1261 drm_dbg_kms(&dev_priv->drm,
1262 "PSR2 not supported in transcoder %s\n",
1263 transcoder_name(crtc_state->cpu_transcoder));
1267 if (!psr2_global_enabled(intel_dp)) {
1268 drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
1273 * DSC and PSR2 cannot be enabled simultaneously. If a requested
1274 * resolution requires DSC to be enabled, priority is given to DSC
1277 if (crtc_state->dsc.compression_enable &&
1278 (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))) {
1279 drm_dbg_kms(&dev_priv->drm,
1280 "PSR2 cannot be enabled since DSC is enabled\n");
1284 if (crtc_state->crc_enabled) {
1285 drm_dbg_kms(&dev_priv->drm,
1286 "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
1290 if (DISPLAY_VER(dev_priv) >= 12) {
1294 } else if (DISPLAY_VER(dev_priv) >= 10) {
1298 } else if (DISPLAY_VER(dev_priv) == 9) {
1304 if (crtc_state->pipe_bpp > max_bpp) {
1305 drm_dbg_kms(&dev_priv->drm,
1306 "PSR2 not enabled, pipe bpp %d > max supported %d\n",
1307 crtc_state->pipe_bpp, max_bpp);
1311 /* Wa_16011303918:adl-p */
1312 if (crtc_state->vrr.enable &&
1313 IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
1314 drm_dbg_kms(&dev_priv->drm,
1315 "PSR2 not enabled, not compatible with HW stepping + VRR\n");
1319 if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
1320 drm_dbg_kms(&dev_priv->drm,
1321 "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n");
1325 if (!_compute_alpm_params(intel_dp, crtc_state)) {
1326 drm_dbg_kms(&dev_priv->drm,
1327 "PSR2 not enabled, Unable to use long enough wake times\n");
1331 /* Vblank >= PSR2_CTL Block Count Number maximum line count */
1332 if (crtc_state->hw.adjusted_mode.crtc_vblank_end -
1333 crtc_state->hw.adjusted_mode.crtc_vblank_start <
1334 psr2_block_count_lines(intel_dp)) {
1335 drm_dbg_kms(&dev_priv->drm,
1336 "PSR2 not enabled, too short vblank time\n");
1340 if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1341 if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
1342 !HAS_PSR_HW_TRACKING(dev_priv)) {
1343 drm_dbg_kms(&dev_priv->drm,
1344 "PSR2 not enabled, selective fetch not valid and no HW tracking available\n");
1349 if (!psr2_granularity_check(intel_dp, crtc_state)) {
1350 drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
1354 if (!crtc_state->enable_psr2_sel_fetch &&
1355 (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
1356 drm_dbg_kms(&dev_priv->drm,
1357 "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
1358 crtc_hdisplay, crtc_vdisplay,
1359 psr_max_h, psr_max_v);
1363 tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
1367 crtc_state->enable_psr2_sel_fetch = false;
1371 static bool _psr_compute_config(struct intel_dp *intel_dp,
1372 struct intel_crtc_state *crtc_state)
1374 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1375 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1376 int entry_setup_frames;
1379 * Current PSR panels don't work reliably with VRR enabled, so
1380 * if VRR is enabled, do not enable PSR.
1382 if (crtc_state->vrr.enable)
1385 if (!CAN_PSR(intel_dp))
1388 entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, adjusted_mode);
1390 if (entry_setup_frames >= 0) {
1391 intel_dp->psr.entry_setup_frames = entry_setup_frames;
1393 drm_dbg_kms(&dev_priv->drm,
1394 "PSR condition failed: PSR setup timing not met\n");
1401 void intel_psr_compute_config(struct intel_dp *intel_dp,
1402 struct intel_crtc_state *crtc_state,
1403 struct drm_connector_state *conn_state)
1405 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1406 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1408 if (!psr_global_enabled(intel_dp)) {
1409 drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
1413 if (intel_dp->psr.sink_not_reliable) {
1414 drm_dbg_kms(&dev_priv->drm,
1415 "PSR sink implementation is not reliable\n");
1419 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
1420 drm_dbg_kms(&dev_priv->drm,
1421 "PSR condition failed: Interlaced mode enabled\n");
1425 if (CAN_PANEL_REPLAY(intel_dp))
1426 crtc_state->has_panel_replay = true;
1428 crtc_state->has_psr = _psr_compute_config(intel_dp, crtc_state);
1430 if (!(crtc_state->has_panel_replay || crtc_state->has_psr))
1433 crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
1436 void intel_psr_get_config(struct intel_encoder *encoder,
1437 struct intel_crtc_state *pipe_config)
1439 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1440 struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1441 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
1442 struct intel_dp *intel_dp;
1448 intel_dp = &dig_port->dp;
1449 if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp)))
1452 mutex_lock(&intel_dp->psr.lock);
1453 if (!intel_dp->psr.enabled)
1456 if (intel_dp->psr.panel_replay_enabled) {
1457 pipe_config->has_panel_replay = true;
1460 * Not possible to read the EDP_PSR/PSR2_CTL registers as PSR gets
1461 * constantly enabled/disabled because of frontbuffer tracking and others.
1463 pipe_config->has_psr = true;
1466 pipe_config->has_psr2 = intel_dp->psr.psr2_enabled;
1467 pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1469 if (!intel_dp->psr.psr2_enabled)
1472 if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1473 val = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
1474 if (val & PSR2_MAN_TRK_CTL_ENABLE)
1475 pipe_config->enable_psr2_sel_fetch = true;
1478 if (DISPLAY_VER(dev_priv) >= 12) {
1479 val = intel_de_read(dev_priv, TRANS_EXITLINE(cpu_transcoder));
1480 pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
1483 mutex_unlock(&intel_dp->psr.lock);
1486 static void intel_psr_activate(struct intel_dp *intel_dp)
1488 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1489 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1491 drm_WARN_ON(&dev_priv->drm,
1492 transcoder_has_psr2(dev_priv, cpu_transcoder) &&
1493 intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder)) & EDP_PSR2_ENABLE);
1495 drm_WARN_ON(&dev_priv->drm,
1496 intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)) & EDP_PSR_ENABLE);
1498 drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
1500 lockdep_assert_held(&intel_dp->psr.lock);
1502 /* psr1, psr2 and panel-replay are mutually exclusive. */
1503 if (intel_dp->psr.panel_replay_enabled)
1504 dg2_activate_panel_replay(intel_dp);
1505 else if (intel_dp->psr.psr2_enabled)
1506 hsw_activate_psr2(intel_dp);
1508 hsw_activate_psr1(intel_dp);
1510 intel_dp->psr.active = true;
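/*
 * Map the PSR pipe to its LATENCY_REPORTING_REMOVED bit in
 * GEN8_CHICKEN_DCPR_1, used by the workarounds below.
 */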
1513 static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
1515 switch (intel_dp->psr.pipe) {
1517 return LATENCY_REPORTING_REMOVED_PIPE_A;
1519 return LATENCY_REPORTING_REMOVED_PIPE_B;
1521 return LATENCY_REPORTING_REMOVED_PIPE_C;
1523 return LATENCY_REPORTING_REMOVED_PIPE_D;
1525 MISSING_CASE(intel_dp->psr.pipe);
1534 static void wm_optimization_wa(struct intel_dp *intel_dp,
1535 const struct intel_crtc_state *crtc_state)
1537 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1538 bool set_wa_bit = false;
1540 /* Wa_14015648006 */
1541 if (IS_DISPLAY_VER(dev_priv, 11, 14))
1542 set_wa_bit |= crtc_state->wm_level_disabled;
1544 /* Wa_16013835468 */
1545 if (DISPLAY_VER(dev_priv) == 12)
1546 set_wa_bit |= crtc_state->hw.adjusted_mode.crtc_vblank_start !=
1547 crtc_state->hw.adjusted_mode.crtc_vdisplay;
1550 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1551 0, wa_16013835468_bit_get(intel_dp));
1553 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1554 wa_16013835468_bit_get(intel_dp), 0);
1557 static void lnl_alpm_configure(struct intel_dp *intel_dp)
1559 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1560 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1561 struct intel_psr *psr = &intel_dp->psr;
1563 if (DISPLAY_VER(dev_priv) < 20)
1566 intel_de_write(dev_priv, ALPM_CTL(cpu_transcoder),
1567 ALPM_CTL_EXTENDED_FAST_WAKE_ENABLE |
1568 ALPM_CTL_ALPM_ENTRY_CHECK(psr->alpm_parameters.check_entry_lines) |
1569 ALPM_CTL_EXTENDED_FAST_WAKE_TIME(psr->alpm_parameters.fast_wake_lines));
1572 static void intel_psr_enable_source(struct intel_dp *intel_dp,
1573 const struct intel_crtc_state *crtc_state)
1575 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1576 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1580 * Only HSW and BDW have PSR AUX registers that need to be set up.
1581 * SKL+ use hardcoded values for PSR AUX transactions.
1583 if (DISPLAY_VER(dev_priv) < 9)
1584 hsw_psr_setup_aux(intel_dp);
1587 * Per spec: avoid continuous PSR exit by masking MEMUP and HPD. Also
1588 * mask LPSP to avoid a dependency on other drivers that might block
1589 * runtime_pm. Besides preventing other hw tracking issues, now we
1590 * can rely on frontbuffer tracking.
1592 mask = EDP_PSR_DEBUG_MASK_MEMUP |
1593 EDP_PSR_DEBUG_MASK_HPD;
1596 * For some unknown reason on HSW non-ULT (or at least on
1597 * Dell Latitude E6540) external displays start to flicker
1598 * when PSR is enabled on the eDP. SR/PC6 residency is much
1599 * higher than should be possible with an external display.
1600 * As a workaround leave LPSP unmasked to prevent PSR entry
1601 * when external displays are active.
1603 if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL_ULT(dev_priv))
1604 mask |= EDP_PSR_DEBUG_MASK_LPSP;
1606 if (DISPLAY_VER(dev_priv) < 20)
1607 mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
1610 * No separate pipe reg write mask on hsw/bdw, so have to unmask all
1611 * registers in order to keep the CURSURFLIVE tricks working :(
1613 if (IS_DISPLAY_VER(dev_priv, 9, 10))
1614 mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
1616 /* allow PSR with sprite enabled */
1617 if (IS_HASWELL(dev_priv))
1618 mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
1620 intel_de_write(dev_priv, psr_debug_reg(dev_priv, cpu_transcoder), mask);
1622 psr_irq_control(intel_dp);
1625 * TODO: if future platforms support DC3CO in more than one
1626 * transcoder, EXITLINE will need to be unset when disabling PSR
1628 if (intel_dp->psr.dc3co_exitline)
1629 intel_de_rmw(dev_priv, TRANS_EXITLINE(cpu_transcoder), EXITLINE_MASK,
1630 intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
1632 if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
1633 intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
1634 intel_dp->psr.psr2_sel_fetch_enabled ?
1635 IGNORE_PSR2_HW_TRACKING : 0);
1637 lnl_alpm_configure(intel_dp);
1643 wm_optimization_wa(intel_dp, crtc_state);
1645 if (intel_dp->psr.psr2_enabled) {
1646 if (DISPLAY_VER(dev_priv) == 9)
1647 intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
1648 PSR2_VSC_ENABLE_PROG_HEADER |
1649 PSR2_ADD_VERTICAL_LINE_COUNT);
1652 * Wa_16014451276:adlp,mtl[a0,b0]
1653 * All supported adlp panels have 1-based X granularity; this may
1654 * cause issues if non-supported panels are used.
1656 if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
1657 IS_ALDERLAKE_P(dev_priv))
1658 intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
1659 0, ADLP_1_BASED_X_GRANULARITY);
1661 /* Wa_16012604467:adlp,mtl[a0,b0] */
1662 if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
1663 intel_de_rmw(dev_priv,
1664 MTL_CLKGATE_DIS_TRANS(cpu_transcoder), 0,
1665 MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
1666 else if (IS_ALDERLAKE_P(dev_priv))
1667 intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
1668 CLKGATE_DIS_MISC_DMASC_GATING_DIS);
1672 static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
1674 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1675 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1679 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
1680 * will still keep the error set even after the reset done in the
1681 * irq_preinstall and irq_uninstall hooks.
1682 * And enabling in this situation causes the screen to freeze the
1683 * first time that the PSR HW tries to activate, so let's keep PSR disabled
1684 * to avoid any rendering problems.
1686 val = intel_de_read(dev_priv, psr_iir_reg(dev_priv, cpu_transcoder));
1687 val &= psr_irq_psr_error_bit_get(intel_dp);
1689 intel_dp->psr.sink_not_reliable = true;
1690 drm_dbg_kms(&dev_priv->drm,
1691 "PSR interruption error set, not enabling PSR\n");
1698 static void intel_psr_enable_locked(struct intel_dp *intel_dp,
1699 const struct intel_crtc_state *crtc_state)
1701 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1702 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1703 enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
1706 drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
1708 intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
1709 intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay;
1710 intel_dp->psr.busy_frontbuffer_bits = 0;
1711 intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1712 intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
1713 /* DC5/DC6 requires at least 6 idle frames */
1714 val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
1715 intel_dp->psr.dc3co_exit_delay = val;
1716 intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
1717 intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
1718 intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1719 intel_dp->psr.req_psr2_sdp_prior_scanline =
1720 crtc_state->req_psr2_sdp_prior_scanline;
1722 if (!psr_interrupt_error_check(intel_dp))
1725 if (intel_dp->psr.panel_replay_enabled)
1726 drm_dbg_kms(&dev_priv->drm, "Enabling Panel Replay\n");
1728 drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
1729 intel_dp->psr.psr2_enabled ? "2" : "1");
1731 intel_snps_phy_update_psr_power_state(dev_priv, phy, true);
1732 intel_psr_enable_sink(intel_dp);
1733 intel_psr_enable_source(intel_dp, crtc_state);
1734 intel_dp->psr.enabled = true;
1735 intel_dp->psr.paused = false;
1737 intel_psr_activate(intel_dp);
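/*
 * Clear the PSR1/PSR2/panel-replay enable bit in the hardware; the
 * callers wait for the hardware to actually go idle via
 * intel_psr_wait_exit_locked().
 */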
1740 static void intel_psr_exit(struct intel_dp *intel_dp)
1742 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1743 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1746 if (!intel_dp->psr.active) {
1747 if (transcoder_has_psr2(dev_priv, cpu_transcoder)) {
1748 val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
1749 drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
1752 val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
1753 drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
1758 if (intel_dp->psr.panel_replay_enabled) {
1759 intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder),
1760 TRANS_DP2_PANEL_REPLAY_ENABLE, 0);
1761 } else if (intel_dp->psr.psr2_enabled) {
1762 tgl_disallow_dc3co_on_psr2_exit(intel_dp);
1764 val = intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
1765 EDP_PSR2_ENABLE, 0);
1767 drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
1769 val = intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
1772 drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
1774 intel_dp->psr.active = false;
1777 static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
1779 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1780 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1781 i915_reg_t psr_status;
1782 u32 psr_status_mask;
1784 if (intel_dp->psr.psr2_enabled) {
1785 psr_status = EDP_PSR2_STATUS(cpu_transcoder);
1786 psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
1788 psr_status = psr_status_reg(dev_priv, cpu_transcoder);
1789 psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
1792 /* Wait till PSR is idle */
1793 if (intel_de_wait_for_clear(dev_priv, psr_status,
1794 psr_status_mask, 2000))
1795 drm_err(&dev_priv->drm, "Timed out waiting for PSR idle state\n");
1798 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
1800 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1801 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1802 enum phy phy = intel_port_to_phy(dev_priv,
1803 dp_to_dig_port(intel_dp)->base.port);
1805 lockdep_assert_held(&intel_dp->psr.lock);
1807 if (!intel_dp->psr.enabled)
1810 if (intel_dp->psr.panel_replay_enabled)
1811 drm_dbg_kms(&dev_priv->drm, "Disabling Panel Replay\n");
1813 drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
1814 intel_dp->psr.psr2_enabled ? "2" : "1");
1816 intel_psr_exit(intel_dp);
1817 intel_psr_wait_exit_locked(intel_dp);
1823 if (DISPLAY_VER(dev_priv) >= 11)
1824 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1825 wa_16013835468_bit_get(intel_dp), 0);
1827 if (intel_dp->psr.psr2_enabled) {
1828 /* Wa_16012604467:adlp,mtl[a0,b0] */
1829 if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
1830 intel_de_rmw(dev_priv,
1831 MTL_CLKGATE_DIS_TRANS(cpu_transcoder),
1832 MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
1833 else if (IS_ALDERLAKE_P(dev_priv))
1834 intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
1835 CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
1838 intel_snps_phy_update_psr_power_state(dev_priv, phy, false);
1840 /* Disable PSR on Sink */
1841 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
1843 if (intel_dp->psr.psr2_enabled)
1844 drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
1846 intel_dp->psr.enabled = false;
1847 intel_dp->psr.panel_replay_enabled = false;
1848 intel_dp->psr.psr2_enabled = false;
1849 intel_dp->psr.psr2_sel_fetch_enabled = false;
1850 intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1854 * intel_psr_disable - Disable PSR
1855 * @intel_dp: Intel DP
1856 * @old_crtc_state: old CRTC state
1858 * This function needs to be called before disabling the pipe.
1860 void intel_psr_disable(struct intel_dp *intel_dp,
1861 const struct intel_crtc_state *old_crtc_state)
1863 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1865 if (!old_crtc_state->has_psr)
1866 return;
1868 if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
1869 return;
1871 mutex_lock(&intel_dp->psr.lock);
1873 intel_psr_disable_locked(intel_dp);
1875 mutex_unlock(&intel_dp->psr.lock);
1876 cancel_work_sync(&intel_dp->psr.work);
1877 cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
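/*
 * Editor's note: hypothetical caller sketch (not in the driver) showing the
 * required ordering: PSR is torn down via intel_psr_disable() before the
 * pipe itself is shut down, and the work items are cancelled last.
 */
#if 0
static void crtc_disable_order_sketch(struct intel_dp *intel_dp,
				      const struct intel_crtc_state *old_crtc_state)
{
	intel_psr_disable(intel_dp, old_crtc_state);	/* first: stop PSR */
	/* ... then disable planes, the transcoder and the pipe ... */
}
#endif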
1881 * intel_psr_pause - Pause PSR
1882 * @intel_dp: Intel DP
1884 * This function needs to be called after enabling PSR.
1886 void intel_psr_pause(struct intel_dp *intel_dp)
1888 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1889 struct intel_psr *psr = &intel_dp->psr;
1891 if (!CAN_PSR(intel_dp))
1892 return;
1894 mutex_lock(&psr->lock);
1896 if (!psr->enabled) {
1897 mutex_unlock(&psr->lock);
1901 /* If we ever hit this, we will need to add refcount to pause/resume */
1902 drm_WARN_ON(&dev_priv->drm, psr->paused);
1904 intel_psr_exit(intel_dp);
1905 intel_psr_wait_exit_locked(intel_dp);
1908 mutex_unlock(&psr->lock);
1910 cancel_work_sync(&psr->work);
1911 cancel_delayed_work_sync(&psr->dc3co_work);
1915 * intel_psr_resume - Resume PSR
1916 * @intel_dp: Intel DP
1918 * This function needs to be called after pausing PSR.
1920 void intel_psr_resume(struct intel_dp *intel_dp)
1922 struct intel_psr *psr = &intel_dp->psr;
1924 if (!CAN_PSR(intel_dp))
1925 return;
1927 mutex_lock(&psr->lock);
1929 if (!psr->paused)
1930 goto unlock;
1932 psr->paused = false;
1933 intel_psr_activate(intel_dp);
1935 unlock:
1936 mutex_unlock(&psr->lock);
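/*
 * Editor's note: minimal usage sketch (hypothetical, assuming PSR was
 * already enabled): bracket a PSR-sensitive operation with pause/resume.
 */
#if 0
static void psr_sensitive_op_sketch(struct intel_dp *intel_dp)
{
	intel_psr_pause(intel_dp);	/* PSR exits and stays off */
	/* ... operation that cannot tolerate self-refresh entry/exit ... */
	intel_psr_resume(intel_dp);	/* PSR is re-activated */
}
#endif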
1939 static u32 man_trk_ctl_enable_bit_get(struct drm_i915_private *dev_priv)
1941 return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ? 0 :
1942 PSR2_MAN_TRK_CTL_ENABLE;
1945 static u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv)
1947 return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1948 ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
1949 PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
1952 static u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv)
1954 return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1955 ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
1956 PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
1959 static u32 man_trk_ctl_continuos_full_frame(struct drm_i915_private *dev_priv)
1961 return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1962 ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
1963 PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
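/*
 * Editor's note: illustrative sketch, not driver code. The helpers above
 * exist because ADL-P and display 14+ moved (or dropped) these bit fields;
 * a typical "single full frame" register value would be composed like this
 * (the sketch function name is hypothetical):
 */
#if 0
static u32 man_trk_ctl_sff_value_sketch(struct drm_i915_private *dev_priv)
{
	/* the enable bit helper returns 0 on ADL-P/display 14+, where it moved */
	return man_trk_ctl_enable_bit_get(dev_priv) |
	       man_trk_ctl_partial_frame_bit_get(dev_priv) |
	       man_trk_ctl_single_full_frame_bit_get(dev_priv);
}
#endif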
1966 static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
1968 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1969 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1971 if (intel_dp->psr.psr2_sel_fetch_enabled)
1972 intel_de_write(dev_priv,
1973 PSR2_MAN_TRK_CTL(cpu_transcoder),
1974 man_trk_ctl_enable_bit_get(dev_priv) |
1975 man_trk_ctl_partial_frame_bit_get(dev_priv) |
1976 man_trk_ctl_single_full_frame_bit_get(dev_priv) |
1977 man_trk_ctl_continuos_full_frame(dev_priv));
1980 * Display WA #0884: skl+
1981 * This documented WA for bxt can be safely applied
1982 * broadly so we can force HW tracking to exit PSR
1983 * instead of disabling and re-enabling.
1984 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
1985 * but it makes more sense to write to the currently
1986 * active pipe.
1988 * This workaround does not exist for platforms with display version 10
1989 * or newer, but testing proved that it works up to display version 13;
1990 * anything newer than that will need testing.
1992 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
1995 void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
1997 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1998 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1999 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2000 struct intel_encoder *encoder;
2002 if (!crtc_state->enable_psr2_sel_fetch)
2003 return;
2005 for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
2006 crtc_state->uapi.encoder_mask) {
2007 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2009 lockdep_assert_held(&intel_dp->psr.lock);
2010 if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
2011 return;
2015 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
2016 crtc_state->psr2_man_track_ctl);
2018 if (!crtc_state->enable_psr2_su_region_et)
2019 return;
2021 intel_de_write(dev_priv, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
2022 crtc_state->pipe_srcsz_early_tpt);
2025 static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
2026 bool full_update)
2028 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2029 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2030 u32 val = man_trk_ctl_enable_bit_get(dev_priv);
2032 /* SF partial frame enable has to be set even on full update */
2033 val |= man_trk_ctl_partial_frame_bit_get(dev_priv);
2035 if (full_update) {
2036 val |= man_trk_ctl_single_full_frame_bit_get(dev_priv);
2037 val |= man_trk_ctl_continuos_full_frame(dev_priv);
2038 goto exit;
2039 }
2041 if (crtc_state->psr2_su_area.y1 == -1)
2042 goto exit;
2044 if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) {
2045 val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(crtc_state->psr2_su_area.y1);
2046 val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(crtc_state->psr2_su_area.y2 - 1);
2048 drm_WARN_ON(crtc_state->uapi.crtc->dev,
2049 crtc_state->psr2_su_area.y1 % 4 ||
2050 crtc_state->psr2_su_area.y2 % 4);
2052 val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(
2053 crtc_state->psr2_su_area.y1 / 4 + 1);
2054 val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(
2055 crtc_state->psr2_su_area.y2 / 4 + 1);
2057 exit:
2058 crtc_state->psr2_man_track_ctl = val;
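/*
 * Editor's note: worked example (values are hypothetical). For an SU area
 * of y1 = 8, y2 = 72 on a pre-ADL-P platform, both must be multiples of 4
 * and the register takes 4-line block addresses plus one:
 *   start = 8 / 4 + 1 = 3,  end = 72 / 4 + 1 = 19
 * On ADL-P/display 14+ the raw scanlines are programmed instead:
 *   start = 8,  end = 72 - 1 = 71
 */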
2061 static u32 psr2_pipe_srcsz_early_tpt_calc(struct intel_crtc_state *crtc_state,
2062 bool full_update)
2064 int width, height;
2066 if (!crtc_state->enable_psr2_su_region_et || full_update)
2067 return 0;
2069 width = drm_rect_width(&crtc_state->psr2_su_area);
2070 height = drm_rect_height(&crtc_state->psr2_su_area);
2072 return PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1);
2075 static void clip_area_update(struct drm_rect *overlap_damage_area,
2076 struct drm_rect *damage_area,
2077 struct drm_rect *pipe_src)
2079 if (!drm_rect_intersect(damage_area, pipe_src))
2080 return;
2082 if (overlap_damage_area->y1 == -1) {
2083 overlap_damage_area->y1 = damage_area->y1;
2084 overlap_damage_area->y2 = damage_area->y2;
2085 return;
2086 }
2088 if (damage_area->y1 < overlap_damage_area->y1)
2089 overlap_damage_area->y1 = damage_area->y1;
2091 if (damage_area->y2 > overlap_damage_area->y2)
2092 overlap_damage_area->y2 = damage_area->y2;
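/*
 * Editor's note: illustrative sketch (hypothetical values, not driver code)
 * of how clip_area_update() accumulates damage: starting from the "empty"
 * marker y1 == -1, the first rect initializes the span and later rects can
 * only grow it.
 */
#if 0
static void clip_area_update_usage_sketch(struct drm_rect *pipe_src)
{
	struct drm_rect su_area = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
	struct drm_rect a = { .x1 = 0, .y1 = 16, .x2 = 100, .y2 = 32 };
	struct drm_rect b = { .x1 = 0, .y1 = 8,  .x2 = 100, .y2 = 24 };

	clip_area_update(&su_area, &a, pipe_src);	/* su_area: y1 = 16, y2 = 32 */
	clip_area_update(&su_area, &b, pipe_src);	/* su_area: y1 = 8,  y2 = 32 */
}
#endif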
2095 static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state)
2097 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2098 const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
2099 int y_alignment;
2101 /* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
2102 if (crtc_state->dsc.compression_enable &&
2103 (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14))
2104 y_alignment = vdsc_cfg->slice_height;
2105 else
2106 y_alignment = crtc_state->su_y_granularity;
2108 crtc_state->psr2_su_area.y1 -= crtc_state->psr2_su_area.y1 % y_alignment;
2109 if (crtc_state->psr2_su_area.y2 % y_alignment)
2110 crtc_state->psr2_su_area.y2 = ((crtc_state->psr2_su_area.y2 /
2111 y_alignment) + 1) * y_alignment;
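/*
 * Editor's note: worked example of the alignment above (hypothetical
 * numbers). With y_alignment = 4 and an SU area of y1 = 10, y2 = 13:
 *   y1 -> 10 - (10 % 4) = 8        (rounded down to the block start)
 *   y2 -> (13 / 4 + 1) * 4 = 16    (rounded up to the next block end)
 */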
2115 * When early transport is in use we need to extend the SU area to
2116 * fully cover the cursor whenever the cursor is inside the SU area.
2118 static void
2119 intel_psr2_sel_fetch_et_alignment(struct intel_atomic_state *state,
2120 struct intel_crtc *crtc)
2122 struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2123 struct intel_plane_state *new_plane_state;
2124 struct intel_plane *plane;
2125 int i;
2127 if (!crtc_state->enable_psr2_su_region_et)
2128 return;
2130 for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
2131 struct drm_rect inter;
2133 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2134 continue;
2136 if (plane->id != PLANE_CURSOR)
2137 continue;
2139 if (!new_plane_state->uapi.visible)
2140 continue;
2142 inter = crtc_state->psr2_su_area;
2143 if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
2144 continue;
2146 clip_area_update(&crtc_state->psr2_su_area, &new_plane_state->uapi.dst,
2147 &crtc_state->pipe_src);
2152 * TODO: It's not clear how to handle planes with a negative position;
2153 * also, planes are not updated if they have a negative X
2154 * position, so for now do a full update in those cases.
2156 * Plane scaling and rotation are not supported by selective fetch, and both
2157 * properties can change without a modeset, so they need to be checked on
2158 * every atomic commit.
2160 static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
2162 if (plane_state->uapi.dst.y1 < 0 ||
2163 plane_state->uapi.dst.x1 < 0 ||
2164 plane_state->scaler_id >= 0 ||
2165 plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
2166 return false;
2168 return true;
2172 * Check for pipe properties that are not supported by selective fetch.
2174 * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
2175 * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
2176 * enabled and going to the full update path.
2178 static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
2180 if (crtc_state->scaler_state.scaler_id >= 0)
2181 return false;
2183 return true;
2186 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
2187 struct intel_crtc *crtc)
2189 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2190 struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2191 struct intel_plane_state *new_plane_state, *old_plane_state;
2192 struct intel_plane *plane;
2193 bool full_update = false;
2194 int i, ret;
2196 if (!crtc_state->enable_psr2_sel_fetch)
2197 return 0;
2199 if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
2200 full_update = true;
2201 goto skip_sel_fetch_set_loop;
2202 }
2204 crtc_state->psr2_su_area.x1 = 0;
2205 crtc_state->psr2_su_area.y1 = -1;
2206 crtc_state->psr2_su_area.x2 = INT_MAX;
2207 crtc_state->psr2_su_area.y2 = -1;
2210 * Calculate minimal selective fetch area of each plane and calculate
2211 * the pipe damaged area.
2212 * In the next loop the plane selective fetch area will actually be set
2213 * using whole pipe damaged area.
2215 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2216 new_plane_state, i) {
2217 struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
2218 .x2 = INT_MAX };
2220 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2221 continue;
2223 if (!new_plane_state->uapi.visible &&
2224 !old_plane_state->uapi.visible)
2225 continue;
2227 if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2228 full_update = true;
2229 break;
2230 }
2233 * If visibility changed or the plane moved, mark the whole plane area
2234 * as damaged, as it needs a complete redraw in both the new and old
2235 * positions.
2237 if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
2238 !drm_rect_equals(&new_plane_state->uapi.dst,
2239 &old_plane_state->uapi.dst)) {
2240 if (old_plane_state->uapi.visible) {
2241 damaged_area.y1 = old_plane_state->uapi.dst.y1;
2242 damaged_area.y2 = old_plane_state->uapi.dst.y2;
2243 clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2244 &crtc_state->pipe_src);
2247 if (new_plane_state->uapi.visible) {
2248 damaged_area.y1 = new_plane_state->uapi.dst.y1;
2249 damaged_area.y2 = new_plane_state->uapi.dst.y2;
2250 clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2251 &crtc_state->pipe_src);
2252 }
2253 continue;
2254 } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
2255 /* If alpha changed mark the whole plane area as damaged */
2256 damaged_area.y1 = new_plane_state->uapi.dst.y1;
2257 damaged_area.y2 = new_plane_state->uapi.dst.y2;
2258 clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2259 &crtc_state->pipe_src);
2260 continue;
2261 }
2263 src = drm_plane_state_src(&new_plane_state->uapi);
2264 drm_rect_fp_to_int(&src, &src);
2266 if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
2267 &new_plane_state->uapi, &damaged_area))
2268 continue;
2270 damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
2271 damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
2272 damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
2273 damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
2275 clip_area_update(&crtc_state->psr2_su_area, &damaged_area, &crtc_state->pipe_src);
2279 * TODO: For now we are just using full update in case
2280 * selective fetch area calculation fails. To optimize this we
2281 * should identify cases where this happens and fix the area
2282 * calculation for those.
2284 if (crtc_state->psr2_su_area.y1 == -1) {
2285 drm_info_once(&dev_priv->drm,
2286 "Selective fetch area calculation failed in pipe %c\n",
2287 pipe_name(crtc->pipe));
2288 full_update = true;
2289 }
2291 if (full_update)
2292 goto skip_sel_fetch_set_loop;
2294 /* Wa_14014971492 */
2295 if ((IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
2296 IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv)) &&
2297 crtc_state->splitter.enable)
2298 crtc_state->psr2_su_area.y1 = 0;
2300 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
2301 if (ret)
2302 return ret;
2305 * Adjust su area to cover cursor fully as necessary (early
2306 * transport). This needs to be done after
2307 * drm_atomic_add_affected_planes to ensure visible cursor is added into
2308 * affected planes even when cursor is not updated by itself.
2310 intel_psr2_sel_fetch_et_alignment(state, crtc);
2312 intel_psr2_sel_fetch_pipe_alignment(crtc_state);
2315 * Now that we have the pipe damaged area, check if it intersects with
2316 * each plane; if it does, set that plane's selective fetch area.
2318 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2319 new_plane_state, i) {
2320 struct drm_rect *sel_fetch_area, inter;
2321 struct intel_plane *linked = new_plane_state->planar_linked_plane;
2323 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
2324 !new_plane_state->uapi.visible)
2325 continue;
2327 inter = crtc_state->psr2_su_area;
2328 sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2329 if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) {
2330 sel_fetch_area->y1 = -1;
2331 sel_fetch_area->y2 = -1;
2333 * if plane sel fetch was previously enabled ->
2334 * disable it
2335 */
2336 if (drm_rect_height(&old_plane_state->psr2_sel_fetch_area) > 0)
2337 crtc_state->update_planes |= BIT(plane->id);
2339 continue;
2340 }
2342 if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2343 full_update = true;
2344 break;
2345 }
2347 sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2348 sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
2349 sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
2350 crtc_state->update_planes |= BIT(plane->id);
2353 * Sel_fetch_area is calculated for UV plane. Use
2354 * same area for Y plane as well.
2355 */
2356 if (linked) {
2357 struct intel_plane_state *linked_new_plane_state;
2358 struct drm_rect *linked_sel_fetch_area;
2360 linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
2361 if (IS_ERR(linked_new_plane_state))
2362 return PTR_ERR(linked_new_plane_state);
2364 linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
2365 linked_sel_fetch_area->y1 = sel_fetch_area->y1;
2366 linked_sel_fetch_area->y2 = sel_fetch_area->y2;
2367 crtc_state->update_planes |= BIT(linked->id);
2371 skip_sel_fetch_set_loop:
2372 psr2_man_trk_ctl_calc(crtc_state, full_update);
2373 crtc_state->pipe_srcsz_early_tpt =
2374 psr2_pipe_srcsz_early_tpt_calc(crtc_state, full_update);
2376 return 0;
2378 void intel_psr_pre_plane_update(struct intel_atomic_state *state,
2379 struct intel_crtc *crtc)
2381 struct drm_i915_private *i915 = to_i915(state->base.dev);
2382 const struct intel_crtc_state *old_crtc_state =
2383 intel_atomic_get_old_crtc_state(state, crtc);
2384 const struct intel_crtc_state *new_crtc_state =
2385 intel_atomic_get_new_crtc_state(state, crtc);
2386 struct intel_encoder *encoder;
2388 if (!HAS_PSR(i915))
2389 return;
2391 for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2392 old_crtc_state->uapi.encoder_mask) {
2393 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2394 struct intel_psr *psr = &intel_dp->psr;
2395 bool needs_to_disable = false;
2397 mutex_lock(&psr->lock);
2400 * Reasons to disable:
2401 * - PSR disabled in new state
2402 * - All planes will go inactive
2403 * - Changing between PSR versions
2404 * - Display WA #1136: skl, bxt
2406 needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state);
2407 needs_to_disable |= !new_crtc_state->has_psr;
2408 needs_to_disable |= !new_crtc_state->active_planes;
2409 needs_to_disable |= new_crtc_state->has_psr2 != psr->psr2_enabled;
2410 needs_to_disable |= DISPLAY_VER(i915) < 11 &&
2411 new_crtc_state->wm_level_disabled;
2413 if (psr->enabled && needs_to_disable)
2414 intel_psr_disable_locked(intel_dp);
2415 else if (psr->enabled && new_crtc_state->wm_level_disabled)
2416 /* Wa_14015648006 */
2417 wm_optimization_wa(intel_dp, new_crtc_state);
2419 mutex_unlock(&psr->lock);
2423 void intel_psr_post_plane_update(struct intel_atomic_state *state,
2424 struct intel_crtc *crtc)
2426 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2427 const struct intel_crtc_state *crtc_state =
2428 intel_atomic_get_new_crtc_state(state, crtc);
2429 struct intel_encoder *encoder;
2431 if (!(crtc_state->has_psr || crtc_state->has_panel_replay))
2432 return;
2434 for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2435 crtc_state->uapi.encoder_mask) {
2436 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2437 struct intel_psr *psr = &intel_dp->psr;
2438 bool keep_disabled = false;
2440 mutex_lock(&psr->lock);
2442 drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes);
2444 keep_disabled |= psr->sink_not_reliable;
2445 keep_disabled |= !crtc_state->active_planes;
2447 /* Display WA #1136: skl, bxt */
2448 keep_disabled |= DISPLAY_VER(dev_priv) < 11 &&
2449 crtc_state->wm_level_disabled;
2451 if (!psr->enabled && !keep_disabled)
2452 intel_psr_enable_locked(intel_dp, crtc_state);
2453 else if (psr->enabled && !crtc_state->wm_level_disabled)
2454 /* Wa_14015648006 */
2455 wm_optimization_wa(intel_dp, crtc_state);
2457 /* Force a PSR exit when enabling CRC to avoid CRC timeouts */
2458 if (crtc_state->crc_enabled && psr->enabled)
2459 psr_force_hw_tracking_exit(intel_dp);
2462 * Clear possible busy bits in case we have
2463 * invalidate -> flip -> flush sequence.
2465 intel_dp->psr.busy_frontbuffer_bits = 0;
2467 mutex_unlock(&psr->lock);
2471 static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2473 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2474 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2477 * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
2478 * As all higher states have bit 4 of the PSR2 state set, we can just wait for
2479 * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
2481 return intel_de_wait_for_clear(dev_priv,
2482 EDP_PSR2_STATUS(cpu_transcoder),
2483 EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
2486 static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2488 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2489 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2492 * From bspec: Panel Self Refresh (BDW+)
2493 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
2494 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
2495 * defensive enough to cover everything.
2497 return intel_de_wait_for_clear(dev_priv,
2498 psr_status_reg(dev_priv, cpu_transcoder),
2499 EDP_PSR_STATUS_STATE_MASK, 50);
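/*
 * Editor's note: worked example of the budget above (hypothetical panel).
 * At a 60 Hz refresh rate the worst case is roughly
 *   1000 / 60 + 6 + 1.5 ~= 24.2 ms,
 * so the 50 ms timeout leaves about a 2x safety margin.
 */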
2503 * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
2504 * @new_crtc_state: new CRTC state
2506 * This function is expected to be called from pipe_update_start() where it is
2507 * not expected to race with PSR enable or disable.
2509 void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
2511 struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
2512 struct intel_encoder *encoder;
2514 if (!new_crtc_state->has_psr)
2515 return;
2517 for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
2518 new_crtc_state->uapi.encoder_mask) {
2519 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2520 int ret;
2522 lockdep_assert_held(&intel_dp->psr.lock);
2524 if (!intel_dp->psr.enabled)
2525 continue;
2527 if (intel_dp->psr.psr2_enabled)
2528 ret = _psr2_ready_for_pipe_update_locked(intel_dp);
2529 else
2530 ret = _psr1_ready_for_pipe_update_locked(intel_dp);
2532 if (ret)
2533 drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n");
2537 static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
2539 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2540 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2541 i915_reg_t reg;
2542 u32 mask;
2543 int err;
2545 if (!intel_dp->psr.enabled)
2546 return false;
2548 if (intel_dp->psr.psr2_enabled) {
2549 reg = EDP_PSR2_STATUS(cpu_transcoder);
2550 mask = EDP_PSR2_STATUS_STATE_MASK;
2552 reg = psr_status_reg(dev_priv, cpu_transcoder);
2553 mask = EDP_PSR_STATUS_STATE_MASK;
2556 mutex_unlock(&intel_dp->psr.lock);
2558 err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
2559 if (err)
2560 drm_err(&dev_priv->drm,
2561 "Timed out waiting for PSR Idle for re-enable\n");
2563 /* After the unlocked wait, verify that PSR is still wanted! */
2564 mutex_lock(&intel_dp->psr.lock);
2565 return err == 0 && intel_dp->psr.enabled;
2568 static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
2570 struct drm_connector_list_iter conn_iter;
2571 struct drm_modeset_acquire_ctx ctx;
2572 struct drm_atomic_state *state;
2573 struct drm_connector *conn;
2574 int err = 0;
2576 state = drm_atomic_state_alloc(&dev_priv->drm);
2577 if (!state)
2578 return -ENOMEM;
2580 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2582 state->acquire_ctx = &ctx;
2583 to_intel_atomic_state(state)->internal = true;
2585 retry:
2586 drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
2587 drm_for_each_connector_iter(conn, &conn_iter) {
2588 struct drm_connector_state *conn_state;
2589 struct drm_crtc_state *crtc_state;
2591 if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
2592 continue;
2594 conn_state = drm_atomic_get_connector_state(state, conn);
2595 if (IS_ERR(conn_state)) {
2596 err = PTR_ERR(conn_state);
2597 break;
2598 }
2600 if (!conn_state->crtc)
2601 continue;
2603 crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
2604 if (IS_ERR(crtc_state)) {
2605 err = PTR_ERR(crtc_state);
2606 break;
2607 }
2609 /* Mark mode as changed to trigger a pipe->update() */
2610 crtc_state->mode_changed = true;
2612 drm_connector_list_iter_end(&conn_iter);
2615 err = drm_atomic_commit(state);
2617 if (err == -EDEADLK) {
2618 drm_atomic_state_clear(state);
2619 err = drm_modeset_backoff(&ctx);
2620 if (!err)
2621 goto retry;
2622 }
2624 drm_modeset_drop_locks(&ctx);
2625 drm_modeset_acquire_fini(&ctx);
2626 drm_atomic_state_put(state);
2628 return err;
2631 int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
2633 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2634 const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
2635 u32 old_mode;
2636 int ret;
2638 if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
2639 mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
2640 drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
2644 ret = mutex_lock_interruptible(&intel_dp->psr.lock);
2648 old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
2649 intel_dp->psr.debug = val;
2652 * Do it right away if it's already enabled, otherwise it will be done
2653 * when enabling the source.
2655 if (intel_dp->psr.enabled)
2656 psr_irq_control(intel_dp);
2658 mutex_unlock(&intel_dp->psr.lock);
2660 if (old_mode != mode)
2661 ret = intel_psr_fastset_force(dev_priv);
2663 return ret;
2666 static void intel_psr_handle_irq(struct intel_dp *intel_dp)
2668 struct intel_psr *psr = &intel_dp->psr;
2670 intel_psr_disable_locked(intel_dp);
2671 psr->sink_not_reliable = true;
2672 /* let's make sure that the sink is awake */
2673 drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
2676 static void intel_psr_work(struct work_struct *work)
2678 struct intel_dp *intel_dp =
2679 container_of(work, typeof(*intel_dp), psr.work);
2681 mutex_lock(&intel_dp->psr.lock);
2683 if (!intel_dp->psr.enabled)
2684 goto unlock;
2686 if (READ_ONCE(intel_dp->psr.irq_aux_error))
2687 intel_psr_handle_irq(intel_dp);
2690 * We have to make sure PSR is ready for re-enable,
2691 * otherwise it stays disabled until the next full enable/disable cycle.
2692 * PSR might take some time to get fully disabled
2693 * and be ready for re-enable.
2695 if (!__psr_wait_for_idle_locked(intel_dp))
2696 goto unlock;
2699 * The delayed work can race with an invalidate hence we need to
2700 * recheck. Since psr_flush first clears this and then reschedules we
2701 * won't ever miss a flush when bailing out here.
2703 if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
2704 goto unlock;
2706 intel_psr_activate(intel_dp);
2707 unlock:
2708 mutex_unlock(&intel_dp->psr.lock);
2711 static void _psr_invalidate_handle(struct intel_dp *intel_dp)
2713 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2714 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2716 if (intel_dp->psr.psr2_sel_fetch_enabled) {
2717 u32 val;
2719 if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2720 /* Send one update otherwise lag is observed in screen */
2721 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2722 return;
2723 }
2725 val = man_trk_ctl_enable_bit_get(dev_priv) |
2726 man_trk_ctl_partial_frame_bit_get(dev_priv) |
2727 man_trk_ctl_continuos_full_frame(dev_priv);
2728 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), val);
2729 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2730 intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
2731 } else {
2732 intel_psr_exit(intel_dp);
2733 }
2737 * intel_psr_invalidate - Invalidate PSR
2738 * @dev_priv: i915 device
2739 * @frontbuffer_bits: frontbuffer plane tracking bits
2740 * @origin: which operation caused the invalidate
2742 * Since the hardware frontbuffer tracking has gaps we need to integrate
2743 * with the software frontbuffer tracking. This function gets called every
2744 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
2745 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
2747 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2749 void intel_psr_invalidate(struct drm_i915_private *dev_priv,
2750 unsigned frontbuffer_bits, enum fb_op_origin origin)
2752 struct intel_encoder *encoder;
2754 if (origin == ORIGIN_FLIP)
2755 return;
2757 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2758 unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2759 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2761 mutex_lock(&intel_dp->psr.lock);
2762 if (!intel_dp->psr.enabled) {
2763 mutex_unlock(&intel_dp->psr.lock);
2764 continue;
2765 }
2767 pipe_frontbuffer_bits &=
2768 INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2769 intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
2771 if (pipe_frontbuffer_bits)
2772 _psr_invalidate_handle(intel_dp);
2774 mutex_unlock(&intel_dp->psr.lock);
2777 /*
2778 * Once we completely rely on PSR2 S/W tracking in the future,
2779 * intel_psr_flush() will invalidate and flush the PSR for ORIGIN_FLIP
2780 * events as well; therefore tgl_dc3co_flush_locked() will need to be
2781 * changed accordingly.
2782 */
2783 static void
2784 tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
2785 enum fb_op_origin origin)
2787 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2789 if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.psr2_enabled ||
2790 !intel_dp->psr.active)
2791 return;
2794 * Every frontbuffer flush/flip event modifies the delay of the delayed
2795 * work; when the delayed work finally runs, the display has been idle.
2797 if (!(frontbuffer_bits &
2798 INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
2799 return;
2801 tgl_psr2_enable_dc3co(intel_dp);
2802 mod_delayed_work(i915->unordered_wq, &intel_dp->psr.dc3co_work,
2803 intel_dp->psr.dc3co_exit_delay);
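/*
 * Editor's note: illustrative sketch (not driver code) of the idle
 * detection pattern used above: each flush re-arms the delayed work, so
 * the work only ever runs once flushes stop arriving.
 */
#if 0
static void dc3co_idle_pattern_sketch(struct drm_i915_private *i915,
				      struct delayed_work *dwork,
				      unsigned long delay)
{
	/* re-arming on every event means the callback fires only after
	 * 'delay' of inactivity, i.e. once the display has gone idle */
	mod_delayed_work(i915->unordered_wq, dwork, delay);
}
#endif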
2806 static void _psr_flush_handle(struct intel_dp *intel_dp)
2808 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2809 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2811 if (intel_dp->psr.psr2_sel_fetch_enabled) {
2812 if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2813 /* can we turn CFF off? */
2814 if (intel_dp->psr.busy_frontbuffer_bits == 0) {
2815 u32 val = man_trk_ctl_enable_bit_get(dev_priv) |
2816 man_trk_ctl_partial_frame_bit_get(dev_priv) |
2817 man_trk_ctl_single_full_frame_bit_get(dev_priv) |
2818 man_trk_ctl_continuos_full_frame(dev_priv);
2821 * Set psr2_sel_fetch_cff_enabled to false to allow selective
2822 * updates. Still keep the CFF bit enabled, as we don't have a proper
2823 * SU configuration in case an update is sent for any reason after
2824 * the SFF bit gets cleared by the HW on the next vblank.
2826 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
2827 val);
2828 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2829 intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2830 }
2831 } else {
2832 /*
2833 * continuous full frame is disabled, only a single full
2834 * frame update is needed
2835 */
2836 psr_force_hw_tracking_exit(intel_dp);
2837 }
2838 } else {
2839 psr_force_hw_tracking_exit(intel_dp);
2841 if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
2842 queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
2847 * intel_psr_flush - Flush PSR
2848 * @dev_priv: i915 device
2849 * @frontbuffer_bits: frontbuffer plane tracking bits
2850 * @origin: which operation caused the flush
2852 * Since the hardware frontbuffer tracking has gaps we need to integrate
2853 * with the software frontbuffer tracking. This function gets called every
2854 * time frontbuffer rendering has completed and flushed out to memory. PSR
2855 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
2857 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2859 void intel_psr_flush(struct drm_i915_private *dev_priv,
2860 unsigned frontbuffer_bits, enum fb_op_origin origin)
2862 struct intel_encoder *encoder;
2864 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2865 unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2866 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2868 mutex_lock(&intel_dp->psr.lock);
2869 if (!intel_dp->psr.enabled) {
2870 mutex_unlock(&intel_dp->psr.lock);
2871 continue;
2872 }
2874 pipe_frontbuffer_bits &=
2875 INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2876 intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
2879 * If the PSR is paused by an explicit intel_psr_paused() call,
2880 * we have to ensure that the PSR is not activated until
2881 * intel_psr_resume() is called.
2883 if (intel_dp->psr.paused)
2884 goto unlock;
2886 if (origin == ORIGIN_FLIP ||
2887 (origin == ORIGIN_CURSOR_UPDATE &&
2888 !intel_dp->psr.psr2_sel_fetch_enabled)) {
2889 tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
2890 goto unlock;
2891 }
2893 if (pipe_frontbuffer_bits == 0)
2894 goto unlock;
2896 /* By definition flush = invalidate + flush */
2897 _psr_flush_handle(intel_dp);
2898 unlock:
2899 mutex_unlock(&intel_dp->psr.lock);
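/*
 * Editor's note: hypothetical frontbuffer-write sequence (sketch, not
 * driver code). The frontbuffer tracking core brackets CPU rendering with
 * an invalidate/flush pair, which is what drives the PSR exit and
 * re-enable handled above.
 */
#if 0
static void frontbuffer_write_sketch(struct drm_i915_private *i915,
				     unsigned int frontbuffer_bits)
{
	intel_psr_invalidate(i915, frontbuffer_bits, ORIGIN_CPU);
	/* ... CPU writes to the frontbuffer ... */
	intel_psr_flush(i915, frontbuffer_bits, ORIGIN_CPU);
}
#endif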
2904 * intel_psr_init - Init basic PSR work and mutex.
2905 * @intel_dp: Intel DP
2907 * This function is called after initializing the connector
2908 * (connector initialization handles the connector's capabilities),
2909 * and it initializes the basic PSR state for each DP encoder.
2911 void intel_psr_init(struct intel_dp *intel_dp)
2913 struct intel_connector *connector = intel_dp->attached_connector;
2914 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2915 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2917 if (!(HAS_PSR(dev_priv) || HAS_DP20(dev_priv)))
2918 return;
2921 * HSW spec explicitly says PSR is tied to port A.
2922 * BDW+ platforms have an instance of PSR registers per transcoder, but
2923 * BDW, GEN9 and GEN11 are not validated by the HW team on any transcoder
2924 * other than the eDP one.
2925 * For now only one instance of PSR is supported on BDW, GEN9 and GEN11,
2926 * so let's keep it hardcoded to PORT_A for those.
2927 * GEN12, however, supports an instance of PSR registers per transcoder.
2929 if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
2930 drm_dbg_kms(&dev_priv->drm,
2931 "PSR condition failed: Port not supported\n");
2935 if (HAS_DP20(dev_priv) && !intel_dp_is_edp(intel_dp))
2936 intel_dp->psr.source_panel_replay_support = true;
2937 else
2938 intel_dp->psr.source_support = true;
2940 /* Disable early transport for now */
2941 intel_dp->psr.debug |= I915_PSR_DEBUG_SU_REGION_ET_DISABLE;
2943 /* Set link_standby vs. link_off defaults */
2944 if (DISPLAY_VER(dev_priv) < 12)
2945 /* For platforms up to TGL let's respect the VBT again */
2946 intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
2948 INIT_WORK(&intel_dp->psr.work, intel_psr_work);
2949 INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
2950 mutex_init(&intel_dp->psr.lock);
2953 static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
2954 u8 *status, u8 *error_status)
2956 struct drm_dp_aux *aux = &intel_dp->aux;
2957 int ret;
2958 unsigned int offset;
2960 offset = intel_dp->psr.panel_replay_enabled ?
2961 DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS : DP_PSR_STATUS;
2963 ret = drm_dp_dpcd_readb(aux, offset, status);
2964 if (ret != 1)
2965 return ret;
2967 offset = intel_dp->psr.panel_replay_enabled ?
2968 DP_PANEL_REPLAY_ERROR_STATUS : DP_PSR_ERROR_STATUS;
2970 ret = drm_dp_dpcd_readb(aux, offset, error_status);
2971 if (ret != 1)
2972 return ret;
2974 *status = *status & DP_PSR_SINK_STATE_MASK;
2976 return 0;
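/*
 * Editor's note: minimal usage sketch (hypothetical caller). The helper
 * above picks the PSR vs. Panel Replay DPCD offsets internally, so callers
 * just consume the two bytes:
 */
#if 0
static void sink_status_usage_sketch(struct intel_dp *intel_dp)
{
	u8 status, error_status;

	if (psr_get_status_and_error_status(intel_dp, &status, &error_status))
		return;	/* AUX read failed */

	/* status is already masked with DP_PSR_SINK_STATE_MASK for PSR */
}
#endif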
2979 static void psr_alpm_check(struct intel_dp *intel_dp)
2981 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2982 struct drm_dp_aux *aux = &intel_dp->aux;
2983 struct intel_psr *psr = &intel_dp->psr;
2984 u8 val;
2985 int r;
2987 if (!psr->psr2_enabled)
2988 return;
2990 r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
2991 if (r != 1) {
2992 drm_err(&dev_priv->drm, "Error reading ALPM status\n");
2993 return;
2994 }
2996 if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
2997 intel_psr_disable_locked(intel_dp);
2998 psr->sink_not_reliable = true;
2999 drm_dbg_kms(&dev_priv->drm,
3000 "ALPM lock timeout error, disabling PSR\n");
3002 /* Clearing error */
3003 drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
3007 static void psr_capability_changed_check(struct intel_dp *intel_dp)
3009 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3010 struct intel_psr *psr = &intel_dp->psr;
3011 u8 val;
3012 int r;
3014 r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
3015 if (r != 1) {
3016 drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
3017 return;
3018 }
3020 if (val & DP_PSR_CAPS_CHANGE) {
3021 intel_psr_disable_locked(intel_dp);
3022 psr->sink_not_reliable = true;
3023 drm_dbg_kms(&dev_priv->drm,
3024 "Sink PSR capability changed, disabling PSR\n");
3027 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
3031 void intel_psr_short_pulse(struct intel_dp *intel_dp)
3033 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3034 struct intel_psr *psr = &intel_dp->psr;
3035 u8 status, error_status;
3036 const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
3037 DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
3038 DP_PSR_LINK_CRC_ERROR;
3040 if (!CAN_PSR(intel_dp))
3041 return;
3043 mutex_lock(&psr->lock);
3045 if (!psr->enabled)
3046 goto exit;
3048 if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
3049 drm_err(&dev_priv->drm,
3050 "Error reading PSR status or error status\n");
3054 if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
3055 intel_psr_disable_locked(intel_dp);
3056 psr->sink_not_reliable = true;
3059 if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
3060 drm_dbg_kms(&dev_priv->drm,
3061 "PSR sink internal error, disabling PSR\n");
3062 if (error_status & DP_PSR_RFB_STORAGE_ERROR)
3063 drm_dbg_kms(&dev_priv->drm,
3064 "PSR RFB storage error, disabling PSR\n");
3065 if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
3066 drm_dbg_kms(&dev_priv->drm,
3067 "PSR VSC SDP uncorrectable error, disabling PSR\n");
3068 if (error_status & DP_PSR_LINK_CRC_ERROR)
3069 drm_dbg_kms(&dev_priv->drm,
3070 "PSR Link CRC error, disabling PSR\n");
3072 if (error_status & ~errors)
3073 drm_err(&dev_priv->drm,
3074 "PSR_ERROR_STATUS unhandled errors %x\n",
3075 error_status & ~errors);
3076 /* clear status register */
3077 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
3079 psr_alpm_check(intel_dp);
3080 psr_capability_changed_check(intel_dp);
3082 exit:
3083 mutex_unlock(&psr->lock);
3086 bool intel_psr_enabled(struct intel_dp *intel_dp)
3088 bool ret;
3090 if (!CAN_PSR(intel_dp))
3091 return false;
3093 mutex_lock(&intel_dp->psr.lock);
3094 ret = intel_dp->psr.enabled;
3095 mutex_unlock(&intel_dp->psr.lock);
3097 return ret;
3101 * intel_psr_lock - grab PSR lock
3102 * @crtc_state: the crtc state
3104 * This is initially meant to be used around CRTC updates, when
3105 * vblank-sensitive registers are updated and we need to grab the lock
3106 * before that to avoid vblank evasion.
3108 void intel_psr_lock(const struct intel_crtc_state *crtc_state)
3110 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
3111 struct intel_encoder *encoder;
3113 if (!crtc_state->has_psr)
3114 return;
3116 for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
3117 crtc_state->uapi.encoder_mask) {
3118 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3120 mutex_lock(&intel_dp->psr.lock);
3126 * intel_psr_unlock - release PSR lock
3127 * @crtc_state: the crtc state
3129 * Release the PSR lock that was held during pipe update.
3131 void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
3133 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
3134 struct intel_encoder *encoder;
3136 if (!crtc_state->has_psr)
3137 return;
3139 for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
3140 crtc_state->uapi.encoder_mask) {
3141 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3143 mutex_unlock(&intel_dp->psr.lock);
3149 psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
3151 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3152 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3153 const char *status = "unknown";
3154 u32 val, status_val;
3156 if (intel_dp->psr.psr2_enabled) {
3157 static const char * const live_status[] = {
3158 "IDLE",
3159 "CAPTURE",
3160 "CAPTURE_FS",
3161 "SLEEP",
3162 "BUFON_FW",
3163 "ML_UP",
3164 "SU_STANDBY",
3165 "FAST_SLEEP",
3166 "DEEP_SLEEP",
3167 "BUF_ON",
3168 "TG_ON"
3169 };
3170 val = intel_de_read(dev_priv, EDP_PSR2_STATUS(cpu_transcoder));
3171 status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
3172 if (status_val < ARRAY_SIZE(live_status))
3173 status = live_status[status_val];
3174 } else {
3175 static const char * const live_status[] = {
3176 "IDLE",
3177 "SRDONACK",
3178 "SRDENT",
3179 "BUFOFF",
3180 "BUFON",
3181 "AUXACK",
3182 "SRDOFFACK",
3183 "SRDENT_ON",
3184 };
3185 val = intel_de_read(dev_priv, psr_status_reg(dev_priv, cpu_transcoder));
3186 status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
3187 if (status_val < ARRAY_SIZE(live_status))
3188 status = live_status[status_val];
3191 seq_printf(m, "Source PSR/PanelReplay status: %s [0x%08x]\n", status, val);
3194 static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
3196 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3197 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3198 struct intel_psr *psr = &intel_dp->psr;
3199 intel_wakeref_t wakeref;
3200 const char *status;
3201 bool enabled;
3202 u32 val;
3204 seq_printf(m, "Sink support: PSR = %s",
3205 str_yes_no(psr->sink_support));
3207 if (psr->sink_support)
3208 seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
3209 seq_printf(m, ", Panel Replay = %s\n", str_yes_no(psr->sink_panel_replay_support));
3211 if (!(psr->sink_support || psr->sink_panel_replay_support))
3212 return 0;
3214 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
3215 mutex_lock(&psr->lock);
3217 if (psr->panel_replay_enabled)
3218 status = "Panel Replay Enabled";
3219 else if (psr->enabled)
3220 status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
3222 status = "disabled";
3223 seq_printf(m, "PSR mode: %s\n", status);
3225 if (!psr->enabled) {
3226 seq_printf(m, "PSR sink not reliable: %s\n",
3227 str_yes_no(psr->sink_not_reliable));
3229 goto unlock;
3230 }
3232 if (psr->panel_replay_enabled) {
3233 val = intel_de_read(dev_priv, TRANS_DP2_CTL(cpu_transcoder));
3234 enabled = val & TRANS_DP2_PANEL_REPLAY_ENABLE;
3235 } else if (psr->psr2_enabled) {
3236 val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
3237 enabled = val & EDP_PSR2_ENABLE;
3239 val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
3240 enabled = val & EDP_PSR_ENABLE;
3242 seq_printf(m, "Source PSR/PanelReplay ctl: %s [0x%08x]\n",
3243 str_enabled_disabled(enabled), val);
3244 psr_source_status(intel_dp, m);
3245 seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
3246 psr->busy_frontbuffer_bits);
3248 /*
3249 * SKL+ Perf counter is reset to 0 every time a DC state is entered
3250 */
3251 val = intel_de_read(dev_priv, psr_perf_cnt_reg(dev_priv, cpu_transcoder));
3252 seq_printf(m, "Performance counter: %u\n",
3253 REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));
3255 if (psr->debug & I915_PSR_DEBUG_IRQ) {
3256 seq_printf(m, "Last attempted entry at: %lld\n",
3257 psr->last_entry_attempt);
3258 seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
3261 if (psr->psr2_enabled) {
3262 u32 su_frames_val[3];
3263 int frame;
3265 /*
3266 * Reading all 3 registers beforehand to minimize crossing a
3267 * frame boundary between register reads
3268 */
3269 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
3270 val = intel_de_read(dev_priv, PSR2_SU_STATUS(cpu_transcoder, frame));
3271 su_frames_val[frame / 3] = val;
3274 seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
3276 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
3277 u32 su_blocks;
3279 su_blocks = su_frames_val[frame / 3] &
3280 PSR2_SU_STATUS_MASK(frame);
3281 su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
3282 seq_printf(m, "%d\t%d\n", frame, su_blocks);
3285 seq_printf(m, "PSR2 selective fetch: %s\n",
3286 str_enabled_disabled(psr->psr2_sel_fetch_enabled));
3289 unlock:
3290 mutex_unlock(&psr->lock);
3291 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3293 return 0;
3296 static int i915_edp_psr_status_show(struct seq_file *m, void *data)
3298 struct drm_i915_private *dev_priv = m->private;
3299 struct intel_dp *intel_dp = NULL;
3300 struct intel_encoder *encoder;
3302 if (!HAS_PSR(dev_priv))
3303 return -ENODEV;
3305 /* Find the first EDP which supports PSR */
3306 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3307 intel_dp = enc_to_intel_dp(encoder);
3308 break;
3309 }
3311 if (!intel_dp)
3312 return -ENODEV;
3314 return intel_psr_status(m, intel_dp);
3316 DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
3319 i915_edp_psr_debug_set(void *data, u64 val)
3321 struct drm_i915_private *dev_priv = data;
3322 struct intel_encoder *encoder;
3323 intel_wakeref_t wakeref;
3324 int ret = -ENODEV;
3326 if (!HAS_PSR(dev_priv))
3327 return ret;
3329 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3330 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3332 drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
3334 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
3336 // TODO: split to each transcoder's PSR debug state
3337 ret = intel_psr_debug_set(intel_dp, val);
3339 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3340 }
3342 return ret;
3346 i915_edp_psr_debug_get(void *data, u64 *val)
3348 struct drm_i915_private *dev_priv = data;
3349 struct intel_encoder *encoder;
3351 if (!HAS_PSR(dev_priv))
3352 return -ENODEV;
3354 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3355 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3357 // TODO: split to each transcoder's PSR debug state
3358 *val = READ_ONCE(intel_dp->psr.debug);
3359 return 0;
3360 }
3362 return -ENODEV;
3365 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
3366 i915_edp_psr_debug_get, i915_edp_psr_debug_set,
3367 "%llu\n");
3369 void intel_psr_debugfs_register(struct drm_i915_private *i915)
3371 struct drm_minor *minor = i915->drm.primary;
3373 debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
3374 i915, &i915_edp_psr_debug_fops);
3376 debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
3377 i915, &i915_edp_psr_status_fops);
3380 static const char *psr_mode_str(struct intel_dp *intel_dp)
3382 if (intel_dp->psr.panel_replay_enabled)
3383 return "PANEL-REPLAY";
3384 else if (intel_dp->psr.enabled)
3385 return "PSR";
3387 return "unknown";
3390 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
3392 struct intel_connector *connector = m->private;
3393 struct intel_dp *intel_dp = intel_attached_dp(connector);
3394 static const char * const sink_status[] = {
3395 "inactive",
3396 "transition to active, capture and display",
3397 "active, display from RFB",
3398 "active, capture and display on sink device timings",
3399 "transition to inactive, capture and display, timing re-sync",
3402 "sink internal error",
3404 static const char * const panel_replay_status[] = {
3405 "Sink device frame is locked to the Source device",
3406 "Sink device is coasting, using the VTotal target",
3407 "Sink device is governing the frame rate (frame rate unlock is granted)",
3408 "Sink device in the process of re-locking with the Source device",
3412 u8 status, error_status;
3415 if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) {
3416 seq_puts(m, "PSR/Panel-Replay Unsupported\n");
3420 if (connector->base.status != connector_status_connected)
3421 return -ENODEV;
3423 ret = psr_get_status_and_error_status(intel_dp, &status, &error_status);
3424 if (ret)
3425 return ret;
3428 if (intel_dp->psr.panel_replay_enabled) {
3429 idx = (status & DP_SINK_FRAME_LOCKED_MASK) >> DP_SINK_FRAME_LOCKED_SHIFT;
3430 if (idx < ARRAY_SIZE(panel_replay_status))
3431 str = panel_replay_status[idx];
3432 } else if (intel_dp->psr.enabled) {
3433 idx = status & DP_PSR_SINK_STATE_MASK;
3434 if (idx < ARRAY_SIZE(sink_status))
3435 str = sink_status[idx];
3438 seq_printf(m, "Sink %s status: 0x%x [%s]\n", psr_mode_str(intel_dp), status, str);
3440 seq_printf(m, "Sink %s error status: 0x%x", psr_mode_str(intel_dp), error_status);
3442 if (error_status & (DP_PSR_RFB_STORAGE_ERROR |
3443 DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
3444 DP_PSR_LINK_CRC_ERROR))
3445 seq_puts(m, ":\n");
3446 else
3447 seq_puts(m, "\n");
3448 if (error_status & DP_PSR_RFB_STORAGE_ERROR)
3449 seq_printf(m, "\t%s RFB storage error\n", psr_mode_str(intel_dp));
3450 if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
3451 seq_printf(m, "\t%s VSC SDP uncorrectable error\n", psr_mode_str(intel_dp));
3452 if (error_status & DP_PSR_LINK_CRC_ERROR)
3453 seq_printf(m, "\t%s Link CRC error\n", psr_mode_str(intel_dp));
3457 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
3459 static int i915_psr_status_show(struct seq_file *m, void *data)
3461 struct intel_connector *connector = m->private;
3462 struct intel_dp *intel_dp = intel_attached_dp(connector);
3464 return intel_psr_status(m, intel_dp);
3466 DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
3468 void intel_psr_connector_debugfs_add(struct intel_connector *connector)
3470 struct drm_i915_private *i915 = to_i915(connector->base.dev);
3471 struct dentry *root = connector->base.debugfs_entry;
3473 /* TODO: Add support for MST connectors as well. */
3474 if ((connector->base.connector_type != DRM_MODE_CONNECTOR_eDP &&
3475 connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) ||
3476 connector->mst_port)
3477 return;
3479 debugfs_create_file("i915_psr_sink_status", 0444, root,
3480 connector, &i915_psr_sink_status_fops);
3482 if (HAS_PSR(i915) || HAS_DP20(i915))
3483 debugfs_create_file("i915_psr_status", 0444, root,
3484 connector, &i915_psr_status_fops);