/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */

#include "dm_services.h"

#include "amdgpu.h"

#include "dc.h"

#include "core_status.h"
#include "core_types.h"
#include "hw_sequencer.h"
#include "dce/dce_hwseq.h"

#include "resource.h"
#include "dc_state.h"
#include "dc_state_priv.h"

#include "gpio_service_interface.h"
#include "clk_mgr.h"
#include "clock_source.h"
#include "dc_bios_types.h"

#include "bios_parser_interface.h"
#include "bios/bios_parser_helper.h"
#include "include/irq_service_interface.h"
#include "transform.h"
#include "dmcu.h"
#include "dpp.h"
#include "timing_generator.h"
#include "abm.h"
#include "virtual/virtual_link_encoder.h"
#include "hubp.h"

#include "link_hwss.h"
#include "link_encoder.h"
#include "link_enc_cfg.h"

#include "link.h"
#include "dm_helpers.h"
#include "mem_input.h"

#include "dc_dmub_srv.h"

#include "dsc.h"

#include "vm_helper.h"

#include "dce/dce_i2c.h"

#include "dmub/dmub_srv.h"

#include "dce/dmub_psr.h"

#include "dce/dmub_hw_lock_mgr.h"

#include "dc_trace.h"

#include "hw_sequencer_private.h"

#include "dml2/dml2_internal_types.h"

#include "dce/dmub_outbox.h"

#define CTX \
        dc->ctx

#define DC_LOGGER \
        dc->ctx->logger

static const char DC_BUILD_ID[] = "production-build";
/**
 * DOC: Overview
 *
 * DC is the OS-agnostic component of the amdgpu DC driver.
 *
 * DC maintains and validates a set of structs representing the state of the
 * driver and writes that state to AMD hardware.
 *
 * Main DC HW structs:
 *
 * struct dc - The central struct.  One per driver.  Created on driver load,
 * destroyed on driver unload.
 *
 * struct dc_context - One per driver.
 * Used as a backpointer by most other structs in dc.
 *
 * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
 * plugpoints).  Created on driver load, destroyed on driver unload.
 *
 * struct dc_sink - One per display.  Created on boot or hotplug.
 * Destroyed on shutdown or hotunplug.  A dc_link can have a local sink
 * (the display directly attached).  It may also have one or more remote
 * sinks (in the Multi-Stream Transport case).
 *
 * struct resource_pool - One per driver.  Represents the hw blocks not in the
 * main pipeline.  Not directly accessible by dm.
 *
 * Main dc state structs:
 *
 * These structs can be created and destroyed as needed.  There is a full set of
 * these structs in dc->current_state representing the currently programmed state.
 *
 * struct dc_state - The global DC state to track global state information,
 * such as bandwidth values.
 *
 * struct dc_stream_state - Represents the hw configuration for the pipeline from
 * a framebuffer to a display.  Maps one-to-one with dc_sink.
 *
 * struct dc_plane_state - Represents a framebuffer.  Each stream has at least one,
 * and may have more in the Multi-Plane Overlay case.
 *
 * struct resource_context - Represents the programmable state of everything in
 * the resource_pool.  Not directly accessible by dm.
 *
 * struct pipe_ctx - A member of struct resource_context.  Represents the
 * internal hardware pipeline components.  Each dc_plane_state has either
 * one or two (in the pipe-split case).
 */
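
/*
 * Conceptual ownership sketch derived from the overview above (an
 * illustrative aid only, not a literal memory layout):
 *
 *   struct dc
 *    +- struct dc_link[]                 (one per connector)
 *    |   +- struct dc_sink               (local, plus remote sinks for MST)
 *    +- struct resource_pool             (one per driver)
 *    +- struct dc_state *current_state
 *        +- struct dc_stream_state[]     (maps one-to-one with dc_sink)
 *        |   +- struct dc_plane_state[]  (one or more per stream)
 *        +- struct resource_context
 *            +- struct pipe_ctx[]        (one or two per plane_state)
 */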

/* Private functions */

static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
{
        if (new > *original)
                *original = new;
}

static void destroy_links(struct dc *dc)
{
        uint32_t i;

        for (i = 0; i < dc->link_count; i++) {
                if (NULL != dc->links[i])
                        dc->link_srv->destroy_link(&dc->links[i]);
        }
}

static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_links)
{
        int i;
        uint32_t count = 0;

        for (i = 0; i < num_links; i++) {
                if (links[i]->connector_signal == SIGNAL_TYPE_EDP ||
                                links[i]->is_internal_display)
                        count++;
        }

        return count;
}

static int get_seamless_boot_stream_count(struct dc_state *ctx)
{
        uint8_t i;
        uint8_t seamless_boot_stream_count = 0;

        for (i = 0; i < ctx->stream_count; i++)
                if (ctx->streams[i]->apply_seamless_boot_optimization)
                        seamless_boot_stream_count++;

        return seamless_boot_stream_count;
}

static bool create_links(
                struct dc *dc,
                uint32_t num_virtual_links)
{
        int i;
        int connectors_num;
        struct dc_bios *bios = dc->ctx->dc_bios;

        dc->link_count = 0;

        connectors_num = bios->funcs->get_connectors_number(bios);

        DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num);

        if (connectors_num > ENUM_ID_COUNT) {
                dm_error(
                        "DC: Number of connectors %d exceeds maximum of %d!\n",
                        connectors_num,
                        ENUM_ID_COUNT);
                return false;
        }

        dm_output_to_console(
                "DC: %s: connectors_num: physical:%d, virtual:%d\n",
                __func__,
                connectors_num,
                num_virtual_links);

        for (i = 0; i < connectors_num; i++) {
                struct link_init_data link_init_params = {0};
                struct dc_link *link;

                DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);

                link_init_params.ctx = dc->ctx;
                /* next BIOS object table connector */
                link_init_params.connector_index = i;
                link_init_params.link_index = dc->link_count;
                link_init_params.dc = dc;
                link = dc->link_srv->create_link(&link_init_params);

                if (link) {
                        dc->links[dc->link_count] = link;
                        link->dc = dc;
                        ++dc->link_count;
                }
        }

        DC_LOG_DC("BIOS object table - end");

        /* Create a link for each USB4 DPIA port */
        for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) {
                struct link_init_data link_init_params = {0};
                struct dc_link *link;

                link_init_params.ctx = dc->ctx;
                link_init_params.connector_index = i;
                link_init_params.link_index = dc->link_count;
                link_init_params.dc = dc;
                link_init_params.is_dpia_link = true;

                link = dc->link_srv->create_link(&link_init_params);
                if (link) {
                        dc->links[dc->link_count] = link;
                        link->dc = dc;
                        ++dc->link_count;
                }
        }

        for (i = 0; i < num_virtual_links; i++) {
                struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
                struct encoder_init_data enc_init = {0};

                if (link == NULL) {
                        BREAK_TO_DEBUGGER();
                        goto failed_alloc;
                }

                link->link_index = dc->link_count;
                dc->links[dc->link_count] = link;
                dc->link_count++;

                link->ctx = dc->ctx;
                link->dc = dc;
                link->connector_signal = SIGNAL_TYPE_VIRTUAL;
                link->link_id.type = OBJECT_TYPE_CONNECTOR;
                link->link_id.id = CONNECTOR_ID_VIRTUAL;
                link->link_id.enum_id = ENUM_ID_1;
                link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);

                if (!link->link_enc) {
                        BREAK_TO_DEBUGGER();
                        goto failed_alloc;
                }

                link->link_status.dpcd_caps = &link->dpcd_caps;

                enc_init.ctx = dc->ctx;
                enc_init.channel = CHANNEL_ID_UNKNOWN;
                enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
                enc_init.transmitter = TRANSMITTER_UNKNOWN;
                enc_init.connector = link->link_id;
                enc_init.encoder.type = OBJECT_TYPE_ENCODER;
                enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
                enc_init.encoder.enum_id = ENUM_ID_1;
                virtual_link_encoder_construct(link->link_enc, &enc_init);
        }

        dc->caps.num_of_internal_disp = get_num_of_internal_disp(dc->links, dc->link_count);

        return true;

failed_alloc:
        return false;
}

/* Create additional DIG link encoder objects if fewer than the platform
 * supports were created during link construction. This can happen if the
 * number of physical connectors is less than the number of DIGs.
 */
static bool create_link_encoders(struct dc *dc)
{
        bool res = true;
        unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
        unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
        int i;

        /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
         * link encoders and physical display endpoints and does not require
         * additional link encoder objects.
         */
        if (num_usb4_dpia == 0)
                return res;

        /* Create as many link encoder objects as the platform supports. DPIA
         * endpoints can be programmably mapped to any DIG.
         */
        if (num_dig_link_enc > dc->res_pool->dig_link_enc_count) {
                for (i = 0; i < num_dig_link_enc; i++) {
                        struct link_encoder *link_enc = dc->res_pool->link_encoders[i];

                        if (!link_enc && dc->res_pool->funcs->link_enc_create_minimal) {
                                link_enc = dc->res_pool->funcs->link_enc_create_minimal(dc->ctx,
                                                (enum engine_id)(ENGINE_ID_DIGA + i));
                                if (link_enc) {
                                        dc->res_pool->link_encoders[i] = link_enc;
                                        dc->res_pool->dig_link_enc_count++;
                                } else {
                                        res = false;
                                }
                        }
                }
        }

        return res;
}

/* Destroy any additional DIG link encoder objects created by
 * create_link_encoders().
 * NB: Must only be called after destroy_links().
 */
static void destroy_link_encoders(struct dc *dc)
{
        unsigned int num_usb4_dpia;
        unsigned int num_dig_link_enc;
        int i;

        if (!dc->res_pool)
                return;

        num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
        num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;

        /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
         * link encoders and physical display endpoints and does not require
         * additional link encoder objects.
         */
        if (num_usb4_dpia == 0)
                return;

        for (i = 0; i < num_dig_link_enc; i++) {
                struct link_encoder *link_enc = dc->res_pool->link_encoders[i];

                if (link_enc) {
                        link_enc->funcs->destroy(&link_enc);
                        dc->res_pool->link_encoders[i] = NULL;
                        dc->res_pool->dig_link_enc_count--;
                }
        }
}

static struct dc_perf_trace *dc_perf_trace_create(void)
{
        return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
}

static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
{
        kfree(*perf_trace);
        *perf_trace = NULL;
}

/**
 *  dc_stream_adjust_vmin_vmax - look up pipe context & update parts of DRR
 *  @dc:     dc reference
 *  @stream: Initial dc stream state
 *  @adjust: Updated parameters for vertical_total_min and vertical_total_max
 *
 *  Looks up the pipe context of dc_stream_state and updates the
 *  vertical_total_min and vertical_total_max of DRR (Dynamic Refresh
 *  Rate), a power-saving feature that reduces the panel refresh rate
 *  while the screen is static.
 *
 *  Return: %true if the pipe context is found and adjusted;
 *          %false if the pipe context is not found.
 */
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
                struct dc_stream_state *stream,
                struct dc_crtc_timing_adjust *adjust)
{
        int i;

        /*
         * Don't adjust DRR while there are bandwidth optimizations pending,
         * to avoid conflicting with firmware updates.
         */
        if (dc->ctx->dce_version > DCE_VERSION_MAX)
                if (dc->optimized_required || dc->wm_optimized_required)
                        return false;

        dc_exit_ips_for_hw_access(dc);

        stream->adjust.v_total_max = adjust->v_total_max;
        stream->adjust.v_total_mid = adjust->v_total_mid;
        stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
        stream->adjust.v_total_min = adjust->v_total_min;

        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

                if (pipe->stream == stream && pipe->stream_res.tg) {
                        dc->hwss.set_drr(&pipe,
                                        1,
                                        *adjust);

                        return true;
                }
        }
        return false;
}
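
/*
 * Illustrative usage sketch (not part of the driver): requesting a DRR
 * range for a stream. The stream pointer and any required locking come
 * from the DM layer, and the v_total values here are hypothetical.
 *
 *     struct dc_crtc_timing_adjust adjust = {
 *             .v_total_min = stream->timing.v_total,
 *             .v_total_max = 2 * stream->timing.v_total,
 *     };
 *
 *     if (!dc_stream_adjust_vmin_vmax(dc, stream, &adjust))
 *             DC_LOG_WARNING("no active pipe found for stream\n");
 */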

/**
 * dc_stream_get_last_used_drr_vtotal - Looks up the pipe context of
 * dc_stream_state and gets the last VTOTAL used by DRR (Dynamic Refresh Rate)
 *
 * @dc: [in] dc reference
 * @stream: [in] Initial dc stream state
 * @refresh_rate: [out] the last VTOTAL used by DRR
 *
 * Return: %true if the pipe context is found and there is an associated
 *         timing_generator for the DC;
 *         %false if the pipe context is not found or there is no
 *         timing_generator for the DC.
 */
bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
                struct dc_stream_state *stream,
                uint32_t *refresh_rate)
{
        bool status = false;

        int i = 0;

        dc_exit_ips_for_hw_access(dc);

        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

                if (pipe->stream == stream && pipe->stream_res.tg) {
                        /* Only execute if a function pointer has been defined for
                         * the DC version in question
                         */
                        if (pipe->stream_res.tg->funcs->get_last_used_drr_vtotal) {
                                pipe->stream_res.tg->funcs->get_last_used_drr_vtotal(pipe->stream_res.tg, refresh_rate);

                                status = true;

                                break;
                        }
                }
        }

        return status;
}
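
/*
 * Illustrative read-back sketch (not part of the driver): fetching the
 * VTOTAL value last applied by DRR, on hardware that supports it.
 *
 *     uint32_t vtotal;
 *
 *     if (dc_stream_get_last_used_drr_vtotal(dc, stream, &vtotal))
 *             DC_LOG_DC("last used DRR VTOTAL: %u\n", vtotal);
 */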

bool dc_stream_get_crtc_position(struct dc *dc,
                struct dc_stream_state **streams, int num_streams,
                unsigned int *v_pos, unsigned int *nom_v_pos)
{
        /* TODO: Support multiple streams */
        const struct dc_stream_state *stream = streams[0];
        int i;
        bool ret = false;
        struct crtc_position position;

        dc_exit_ips_for_hw_access(dc);

        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *pipe =
                                &dc->current_state->res_ctx.pipe_ctx[i];

                if (pipe->stream == stream && pipe->stream_res.stream_enc) {
                        dc->hwss.get_position(&pipe, 1, &position);

                        *v_pos = position.vertical_count;
                        *nom_v_pos = position.nominal_vcount;
                        ret = true;
                }
        }
        return ret;
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
static inline void
dc_stream_forward_dmub_crc_window(struct dc_dmub_srv *dmub_srv,
                struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)
{
        union dmub_rb_cmd cmd = {0};

        cmd.secure_display.roi_info.phy_id = mux_mapping->phy_output_num;
        cmd.secure_display.roi_info.otg_id = mux_mapping->otg_output_num;

        if (is_stop) {
                cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
                cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_STOP_UPDATE;
        } else {
                cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
                cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_WIN_NOTIFY;
                cmd.secure_display.roi_info.x_start = rect->x;
                cmd.secure_display.roi_info.y_start = rect->y;
                cmd.secure_display.roi_info.x_end = rect->x + rect->width;
                cmd.secure_display.roi_info.y_end = rect->y + rect->height;
        }

        dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
}

static inline void
dc_stream_forward_dmcu_crc_window(struct dmcu *dmcu,
                struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)
{
        if (is_stop)
                dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping);
        else
                dmcu->funcs->forward_crc_window(dmcu, rect, mux_mapping);
}

bool
dc_stream_forward_crc_window(struct dc_stream_state *stream,
                struct rect *rect, bool is_stop)
{
        struct dmcu *dmcu;
        struct dc_dmub_srv *dmub_srv;
        struct otg_phy_mux mux_mapping;
        struct pipe_ctx *pipe;
        int i;
        struct dc *dc = stream->ctx->dc;

        for (i = 0; i < MAX_PIPES; i++) {
                pipe = &dc->current_state->res_ctx.pipe_ctx[i];
                if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
                        break;
        }

        /* Stream not found */
        if (i == MAX_PIPES)
                return false;

        mux_mapping.phy_output_num = stream->link->link_enc_hw_inst;
        mux_mapping.otg_output_num = pipe->stream_res.tg->inst;

        dmcu = dc->res_pool->dmcu;
        dmub_srv = dc->ctx->dmub_srv;

        /* forward to dmub */
        if (dmub_srv)
                dc_stream_forward_dmub_crc_window(dmub_srv, rect, &mux_mapping, is_stop);
        /* forward to dmcu */
        else if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
                dc_stream_forward_dmcu_crc_window(dmcu, rect, &mux_mapping, is_stop);
        else
                return false;

        return true;
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dc_stream_configure_crc() - Configure CRC capture for the given stream.
 * @dc: DC Object
 * @stream: The stream to configure CRC on.
 * @crc_window: CRC window (x/y start/end) information
 * @enable: Enable CRC if true, disable otherwise.
 * @continuous: Capture CRC on every frame if true. Otherwise, only capture
 *              once.
 *
 * By default, only CRC0 is configured, and the entire frame is used to
 * calculate the CRC.
 *
 * Return: %false if the stream is not found or CRC capture is not supported;
 *         %true if the stream has been configured.
 */
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
                             struct crc_params *crc_window, bool enable, bool continuous)
{
        struct pipe_ctx *pipe;
        struct crc_params param;
        struct timing_generator *tg;

        pipe = resource_get_otg_master_for_stream(
                        &dc->current_state->res_ctx, stream);

        /* Stream not found */
        if (pipe == NULL)
                return false;

        dc_exit_ips_for_hw_access(dc);

        /* By default, capture the full frame */
        param.windowa_x_start = 0;
        param.windowa_y_start = 0;
        param.windowa_x_end = pipe->stream->timing.h_addressable;
        param.windowa_y_end = pipe->stream->timing.v_addressable;
        param.windowb_x_start = 0;
        param.windowb_y_start = 0;
        param.windowb_x_end = pipe->stream->timing.h_addressable;
        param.windowb_y_end = pipe->stream->timing.v_addressable;

        if (crc_window) {
                param.windowa_x_start = crc_window->windowa_x_start;
                param.windowa_y_start = crc_window->windowa_y_start;
                param.windowa_x_end = crc_window->windowa_x_end;
                param.windowa_y_end = crc_window->windowa_y_end;
                param.windowb_x_start = crc_window->windowb_x_start;
                param.windowb_y_start = crc_window->windowb_y_start;
                param.windowb_x_end = crc_window->windowb_x_end;
                param.windowb_y_end = crc_window->windowb_y_end;
        }

        param.dsc_mode = pipe->stream->timing.flags.DSC ? 1 : 0;
        param.odm_mode = pipe->next_odm_pipe ? 1 : 0;

        /* Default to the union of both windows */
        param.selection = UNION_WINDOW_A_B;
        param.continuous_mode = continuous;
        param.enable = enable;

        tg = pipe->stream_res.tg;

        /* Only call if supported */
        if (tg->funcs->configure_crc)
                return tg->funcs->configure_crc(tg, &param);
        DC_LOG_WARNING("CRC capture not supported.");
        return false;
}

/**
 * dc_stream_get_crc() - Get CRC values for the given stream.
 *
 * @dc: DC object.
 * @stream: The DC stream state of the stream to get CRCs from.
 * @r_cr: CRC value for the red component.
 * @g_y:  CRC value for the green component.
 * @b_cb: CRC value for the blue component.
 *
 * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
 *
 * Return:
 * %false if stream is not found, or if CRCs are not enabled;
 * %true otherwise.
 */
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
                       uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
        int i;
        struct pipe_ctx *pipe;
        struct timing_generator *tg;

        dc_exit_ips_for_hw_access(dc);

        for (i = 0; i < MAX_PIPES; i++) {
                pipe = &dc->current_state->res_ctx.pipe_ctx[i];
                if (pipe->stream == stream)
                        break;
        }
        /* Stream not found */
        if (i == MAX_PIPES)
                return false;

        tg = pipe->stream_res.tg;

        if (tg->funcs->get_crc)
                return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
        DC_LOG_WARNING("CRC capture not supported.");
        return false;
}
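
/*
 * Illustrative CRC capture sequence using the two helpers above (a sketch
 * only: frame synchronization and error handling are up to the caller, and
 * a CRC value is only meaningful once a frame has been captured). Passing
 * a NULL crc_window selects full-frame capture:
 *
 *     uint32_t r_cr, g_y, b_cb;
 *
 *     if (dc_stream_configure_crc(dc, stream, NULL, true, true)) {
 *             ... wait for at least one completed frame ...
 *             if (dc_stream_get_crc(dc, stream, &r_cr, &g_y, &b_cb))
 *                     DC_LOG_DC("CRC: %08x %08x %08x\n", r_cr, g_y, b_cb);
 *     }
 */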

void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
                enum dc_dynamic_expansion option)
{
        /* OPP FMT dyn expansion updates */
        int i;
        struct pipe_ctx *pipe_ctx;

        dc_exit_ips_for_hw_access(dc);

        for (i = 0; i < MAX_PIPES; i++) {
                if (dc->current_state->res_ctx.pipe_ctx[i].stream
                                == stream) {
                        pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
                        pipe_ctx->stream_res.opp->dyn_expansion = option;
                        pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
                                        pipe_ctx->stream_res.opp,
                                        COLOR_SPACE_YCBCR601,
                                        stream->timing.display_color_depth,
                                        stream->signal);
                }
        }
}

void dc_stream_set_dither_option(struct dc_stream_state *stream,
                enum dc_dither_option option)
{
        struct bit_depth_reduction_params params;
        struct dc_link *link = stream->link;
        struct pipe_ctx *pipes = NULL;
        int i;

        for (i = 0; i < MAX_PIPES; i++) {
                if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
                                stream) {
                        pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
                        break;
                }
        }

        if (!pipes)
                return;
        if (option > DITHER_OPTION_MAX)
                return;

        dc_exit_ips_for_hw_access(stream->ctx->dc);

        stream->dither_option = option;

        memset(&params, 0, sizeof(params));
        resource_build_bit_depth_reduction_params(stream, &params);
        stream->bit_depth_params = params;

        if (pipes->plane_res.xfm &&
            pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
                pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
                        pipes->plane_res.xfm,
                        pipes->plane_res.scl_data.lb_params.depth,
                        &stream->bit_depth_params);
        }

        pipes->stream_res.opp->funcs->
                opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
}

bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
{
        int i;
        bool ret = false;
        struct pipe_ctx *pipes;

        dc_exit_ips_for_hw_access(dc);

        for (i = 0; i < MAX_PIPES; i++) {
                if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
                        pipes = &dc->current_state->res_ctx.pipe_ctx[i];
                        dc->hwss.program_gamut_remap(pipes);
                        ret = true;
                }
        }

        return ret;
}

bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
{
        int i;
        bool ret = false;
        struct pipe_ctx *pipes;

        dc_exit_ips_for_hw_access(dc);

        for (i = 0; i < MAX_PIPES; i++) {
                if (dc->current_state->res_ctx.pipe_ctx[i].stream
                                == stream) {

                        pipes = &dc->current_state->res_ctx.pipe_ctx[i];
                        dc->hwss.program_output_csc(dc,
                                        pipes,
                                        stream->output_color_space,
                                        stream->csc_color_matrix.matrix,
                                        pipes->stream_res.opp->inst);
                        ret = true;
                }
        }

        return ret;
}

void dc_stream_set_static_screen_params(struct dc *dc,
                struct dc_stream_state **streams,
                int num_streams,
                const struct dc_static_screen_params *params)
{
        int i, j;
        struct pipe_ctx *pipes_affected[MAX_PIPES];
        int num_pipes_affected = 0;

        dc_exit_ips_for_hw_access(dc);

        for (i = 0; i < num_streams; i++) {
                struct dc_stream_state *stream = streams[i];

                for (j = 0; j < MAX_PIPES; j++) {
                        if (dc->current_state->res_ctx.pipe_ctx[j].stream
                                        == stream) {
                                pipes_affected[num_pipes_affected++] =
                                                &dc->current_state->res_ctx.pipe_ctx[j];
                        }
                }
        }

        dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);
}

static void dc_destruct(struct dc *dc)
{
        // reset link encoder assignment table on destruct
        if (dc->res_pool && dc->res_pool->funcs->link_encs_assign)
                link_enc_cfg_init(dc, dc->current_state);

        if (dc->current_state) {
                dc_state_release(dc->current_state);
                dc->current_state = NULL;
        }

        destroy_links(dc);

        destroy_link_encoders(dc);

        if (dc->clk_mgr) {
                dc_destroy_clk_mgr(dc->clk_mgr);
                dc->clk_mgr = NULL;
        }

        dc_destroy_resource_pool(dc);

        if (dc->link_srv)
                link_destroy_link_service(&dc->link_srv);

        if (dc->ctx->gpio_service)
                dal_gpio_service_destroy(&dc->ctx->gpio_service);

        if (dc->ctx->created_bios)
                dal_bios_parser_destroy(&dc->ctx->dc_bios);

        kfree(dc->ctx->logger);
        dc_perf_trace_destroy(&dc->ctx->perf_trace);

        kfree(dc->ctx);
        dc->ctx = NULL;

        kfree(dc->bw_vbios);
        dc->bw_vbios = NULL;

        kfree(dc->bw_dceip);
        dc->bw_dceip = NULL;

        kfree(dc->dcn_soc);
        dc->dcn_soc = NULL;

        kfree(dc->dcn_ip);
        dc->dcn_ip = NULL;

        kfree(dc->vm_helper);
        dc->vm_helper = NULL;
}

static bool dc_construct_ctx(struct dc *dc,
                const struct dc_init_data *init_params)
{
        struct dc_context *dc_ctx;

        dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
        if (!dc_ctx)
                return false;

        dc_ctx->cgs_device = init_params->cgs_device;
        dc_ctx->driver_context = init_params->driver;
        dc_ctx->dc = dc;
        dc_ctx->asic_id = init_params->asic_id;
        dc_ctx->dc_sink_id_count = 0;
        dc_ctx->dc_stream_id_count = 0;
        dc_ctx->dce_environment = init_params->dce_environment;
        dc_ctx->dcn_reg_offsets = init_params->dcn_reg_offsets;
        dc_ctx->nbio_reg_offsets = init_params->nbio_reg_offsets;
        dc_ctx->clk_reg_offsets = init_params->clk_reg_offsets;

        /* Create logger */
        dc_ctx->logger = kmalloc(sizeof(*dc_ctx->logger), GFP_KERNEL);

        if (!dc_ctx->logger) {
                kfree(dc_ctx);
                return false;
        }

        dc_ctx->logger->dev = adev_to_drm(init_params->driver);
        dc->dml.logger = dc_ctx->logger;

        dc_ctx->dce_version = resource_parse_asic_id(init_params->asic_id);

        dc_ctx->perf_trace = dc_perf_trace_create();
        if (!dc_ctx->perf_trace) {
                kfree(dc_ctx);
                ASSERT_CRITICAL(false);
                return false;
        }

        dc->ctx = dc_ctx;

        dc->link_srv = link_create_link_service();
        if (!dc->link_srv)
                return false;

        return true;
}

static bool dc_construct(struct dc *dc,
                const struct dc_init_data *init_params)
{
        struct dc_context *dc_ctx;
        struct bw_calcs_dceip *dc_dceip;
        struct bw_calcs_vbios *dc_vbios;
        struct dcn_soc_bounding_box *dcn_soc;
        struct dcn_ip_params *dcn_ip;

        dc->config = init_params->flags;

        // Allocate memory for the vm_helper
        dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
        if (!dc->vm_helper) {
                dm_error("%s: failed to create dc->vm_helper\n", __func__);
                goto fail;
        }

        memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));

        dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
        if (!dc_dceip) {
                dm_error("%s: failed to create dceip\n", __func__);
                goto fail;
        }

        dc->bw_dceip = dc_dceip;

        dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
        if (!dc_vbios) {
                dm_error("%s: failed to create vbios\n", __func__);
                goto fail;
        }

        dc->bw_vbios = dc_vbios;
        dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
        if (!dcn_soc) {
                dm_error("%s: failed to create dcn_soc\n", __func__);
                goto fail;
        }

        dc->dcn_soc = dcn_soc;

        dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
        if (!dcn_ip) {
                dm_error("%s: failed to create dcn_ip\n", __func__);
                goto fail;
        }

        dc->dcn_ip = dcn_ip;

        if (!dc_construct_ctx(dc, init_params)) {
                dm_error("%s: failed to create ctx\n", __func__);
                goto fail;
        }

        dc_ctx = dc->ctx;

        /* Resource should construct all asic specific resources.
         * This should be the only place where we need to parse the asic id
         */
        if (init_params->vbios_override)
                dc_ctx->dc_bios = init_params->vbios_override;
        else {
                /* Create BIOS parser */
                struct bp_init_data bp_init_data;

                bp_init_data.ctx = dc_ctx;
                bp_init_data.bios = init_params->asic_id.atombios_base_address;

                dc_ctx->dc_bios = dal_bios_parser_create(
                                &bp_init_data, dc_ctx->dce_version);

                if (!dc_ctx->dc_bios) {
                        ASSERT_CRITICAL(false);
                        goto fail;
                }

                dc_ctx->created_bios = true;
        }

        dc->vendor_signature = init_params->vendor_signature;

        /* Create GPIO service */
        dc_ctx->gpio_service = dal_gpio_service_create(
                        dc_ctx->dce_version,
                        dc_ctx->dce_environment,
                        dc_ctx);

        if (!dc_ctx->gpio_service) {
                ASSERT_CRITICAL(false);
                goto fail;
        }

        dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
        if (!dc->res_pool)
                goto fail;

        /* set i2c speed if not done by the respective dcnxxx_resource.c */
        if (dc->caps.i2c_speed_in_khz_hdcp == 0)
                dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;
        if (dc->caps.max_optimizable_video_width == 0)
                dc->caps.max_optimizable_video_width = 5120;
        dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
        if (!dc->clk_mgr)
                goto fail;
#ifdef CONFIG_DRM_AMD_DC_FP
        dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;

        if (dc->res_pool->funcs->update_bw_bounding_box) {
                DC_FP_START();
                dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
                DC_FP_END();
        }
#endif

        if (!create_links(dc, init_params->num_virtual_links))
                goto fail;

        /* Create additional DIG link encoder objects if fewer than the platform
         * supports were created during link construction.
         */
        if (!create_link_encoders(dc))
                goto fail;

        /* Creation of current_state must occur after dc->dml
         * is initialized in dc_create_resource_pool because
         * on creation it copies the contents of dc->dml
         */

        dc->current_state = dc_state_create(dc);

        if (!dc->current_state) {
                dm_error("%s: failed to create validate ctx\n", __func__);
                goto fail;
        }

        return true;

fail:
        return false;
}

static void disable_all_writeback_pipes_for_stream(
                const struct dc *dc,
                struct dc_stream_state *stream,
                struct dc_state *context)
{
        int i;

        for (i = 0; i < stream->num_wb_info; i++)
                stream->writeback_info[i].wb_enabled = false;
}

static void apply_ctx_interdependent_lock(struct dc *dc,
                                          struct dc_state *context,
                                          struct dc_stream_state *stream,
                                          bool lock)
{
        int i;

        /* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */
        if (dc->hwss.interdependent_update_lock)
                dc->hwss.interdependent_update_lock(dc, context, lock);
        else {
                for (i = 0; i < dc->res_pool->pipe_count; i++) {
                        struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
                        struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

                        // Copied conditions that were previously in dce110_apply_ctx_for_surface
                        if (stream == pipe_ctx->stream) {
                                if (resource_is_pipe_type(pipe_ctx, OPP_HEAD) &&
                                        (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
                                        dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
                        }
                }
        }
}

static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
{
        if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
                memset(&pipe_ctx->visual_confirm_color, 0, sizeof(struct tg_color));

                if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
                        get_hdr_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
                else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
                        get_surface_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
                else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
                        get_surface_tile_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
                else {
                        if (dc->ctx->dce_version < DCN_VERSION_2_0)
                                color_space_to_black_color(
                                        dc, pipe_ctx->stream->output_color_space, &(pipe_ctx->visual_confirm_color));
                }
                if (dc->ctx->dce_version >= DCN_VERSION_2_0) {
                        if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE)
                                get_mpctree_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
                        else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP)
                                get_subvp_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
                        else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH)
                                get_mclk_switch_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
                }
        }
}

static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
        int i, j;
        struct dc_state *dangling_context = dc_state_create_current_copy(dc);
        struct dc_state *current_ctx;
        struct pipe_ctx *pipe;
        struct timing_generator *tg;

        if (dangling_context == NULL)
                return;

        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct dc_stream_state *old_stream =
                                dc->current_state->res_ctx.pipe_ctx[i].stream;
                bool should_disable = true;
                bool pipe_split_change = false;

                if ((context->res_ctx.pipe_ctx[i].top_pipe) &&
                        (dc->current_state->res_ctx.pipe_ctx[i].top_pipe))
                        pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe->pipe_idx !=
                                dc->current_state->res_ctx.pipe_ctx[i].top_pipe->pipe_idx;
                else
                        pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe !=
                                dc->current_state->res_ctx.pipe_ctx[i].top_pipe;

                for (j = 0; j < context->stream_count; j++) {
                        if (old_stream == context->streams[j]) {
                                should_disable = false;
                                break;
                        }
                }
                if (!should_disable && pipe_split_change &&
                                dc->current_state->stream_count != context->stream_count)
                        should_disable = true;

                if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe &&
                                !dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe) {
                        struct pipe_ctx *old_pipe, *new_pipe;

                        old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
                        new_pipe = &context->res_ctx.pipe_ctx[i];

                        if (old_pipe->plane_state && !new_pipe->plane_state)
                                should_disable = true;
                }

                if (should_disable && old_stream) {
                        bool is_phantom = dc_state_get_stream_subvp_type(dc->current_state, old_stream) == SUBVP_PHANTOM;
                        pipe = &dc->current_state->res_ctx.pipe_ctx[i];
                        tg = pipe->stream_res.tg;
                        /* When disabling plane for a phantom pipe, we must turn on the
                         * phantom OTG so the disable programming gets the double buffer
                         * update. Otherwise the pipe will be left in a partially disabled
                         * state that can result in underflow or hang when enabling it
                         * again for different use.
                         */
                        if (is_phantom) {
                                if (tg->funcs->enable_crtc) {
                                        int main_pipe_width, main_pipe_height;
                                        struct dc_stream_state *old_paired_stream = dc_state_get_paired_subvp_stream(dc->current_state, old_stream);

                                        main_pipe_width = old_paired_stream->dst.width;
                                        main_pipe_height = old_paired_stream->dst.height;
                                        if (dc->hwss.blank_phantom)
                                                dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height);
                                        tg->funcs->enable_crtc(tg);
                                }
                        }

                        if (is_phantom)
                                dc_state_rem_all_phantom_planes_for_stream(dc, old_stream, dangling_context, true);
                        else
                                dc_state_rem_all_planes_for_stream(dc, old_stream, dangling_context);
                        disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);

                        if (pipe->stream && pipe->plane_state) {
                                set_p_state_switch_method(dc, context, pipe);
                                dc_update_visual_confirm_color(dc, context, pipe);
                        }

                        if (dc->hwss.apply_ctx_for_surface) {
                                apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
                                dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
                                apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
                                dc->hwss.post_unlock_program_front_end(dc, dangling_context);
                        }
                        if (dc->hwss.program_front_end_for_ctx) {
                                dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
                                dc->hwss.program_front_end_for_ctx(dc, dangling_context);
                                dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
                                dc->hwss.post_unlock_program_front_end(dc, dangling_context);
                        }
                        /* We need to put the phantom OTG back into its default (disabled) state or we
                         * can get corruption when transitioning from one SubVP config to a different one.
                         * The OTG is set to disable on the falling edge of VUPDATE so the plane disable
                         * will still get its double buffer update.
                         */
                        if (is_phantom) {
                                if (tg->funcs->disable_phantom_crtc)
                                        tg->funcs->disable_phantom_crtc(tg);
                        }
                }
        }

        current_ctx = dc->current_state;
        dc->current_state = dangling_context;
        dc_state_release(current_ctx);
}

static void disable_vbios_mode_if_required(
                struct dc *dc,
                struct dc_state *context)
{
        unsigned int i, j;

        /* If the timing has changed, disable the stream */
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct dc_stream_state *stream = NULL;
                struct dc_link *link = NULL;
                struct pipe_ctx *pipe = NULL;

                pipe = &context->res_ctx.pipe_ctx[i];
                stream = pipe->stream;
                if (stream == NULL)
                        continue;

                if (stream->apply_seamless_boot_optimization)
                        continue;

                // only looking for first odm pipe
                if (pipe->prev_odm_pipe)
                        continue;

                if (stream->link->local_sink &&
                        stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
                        link = stream->link;
                }

                if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
                        unsigned int enc_inst, tg_inst = 0;
                        unsigned int pix_clk_100hz;

                        enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
                        if (enc_inst != ENGINE_ID_UNKNOWN) {
                                for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
                                        if (dc->res_pool->stream_enc[j]->id == enc_inst) {
                                                tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg(
                                                        dc->res_pool->stream_enc[j]);
                                                break;
                                        }
                                }

                                dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
                                        dc->res_pool->dp_clock_source,
                                        tg_inst, &pix_clk_100hz);

                                if (link->link_status.link_active) {
                                        uint32_t requested_pix_clk_100hz =
                                                pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;

                                        if (pix_clk_100hz != requested_pix_clk_100hz) {
                                                dc->link_srv->set_dpms_off(pipe);
                                                pipe->stream->dpms_off = false;
                                        }
                                }
                        }
                }
        }
}

/**
 * wait_for_blank_complete - wait for all active OPPs to finish pending blank
 * pattern updates
 *
 * @dc: [in] dc reference
 * @context: [in] hardware context in use
 */
static void wait_for_blank_complete(struct dc *dc,
                struct dc_state *context)
{
        struct pipe_ctx *opp_head;
        struct dce_hwseq *hws = dc->hwseq;
        int i;

        if (!hws->funcs.wait_for_blank_complete)
                return;

        for (i = 0; i < MAX_PIPES; i++) {
                opp_head = &context->res_ctx.pipe_ctx[i];

                if (!resource_is_pipe_type(opp_head, OPP_HEAD) ||
                                dc_state_get_pipe_subvp_type(context, opp_head) == SUBVP_PHANTOM)
                        continue;

                hws->funcs.wait_for_blank_complete(opp_head->stream_res.opp);
        }
}

static void wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *context)
{
        struct pipe_ctx *otg_master;
        struct timing_generator *tg;
        int i;

        for (i = 0; i < MAX_PIPES; i++) {
                otg_master = &context->res_ctx.pipe_ctx[i];
                if (!resource_is_pipe_type(otg_master, OTG_MASTER) ||
                                dc_state_get_pipe_subvp_type(context, otg_master) == SUBVP_PHANTOM)
                        continue;
                tg = otg_master->stream_res.tg;
                if (tg->funcs->wait_odm_doublebuffer_pending_clear)
                        tg->funcs->wait_odm_doublebuffer_pending_clear(tg);
        }

        /* An ODM update may require reprogramming the blank pattern for each OPP */
1350         wait_for_blank_complete(dc, context);
1351 }
1352
1353 static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
1354 {
1355         int i;
1356         PERF_TRACE();
1357         for (i = 0; i < MAX_PIPES; i++) {
1358                 int count = 0;
1359                 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
1360
1361                 if (!pipe->plane_state || dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
1362                         continue;
1363
1364                 /* Timeout 100 ms */
1365                 while (count < 100000) {
1366                         /* Must set to false to start with, due to OR in update function */
1367                         pipe->plane_state->status.is_flip_pending = false;
1368                         dc->hwss.update_pending_status(pipe);
1369                         if (!pipe->plane_state->status.is_flip_pending)
1370                                 break;
1371                         udelay(1);
1372                         count++;
1373                 }
1374                 ASSERT(!pipe->plane_state->status.is_flip_pending);
1375         }
1376         PERF_TRACE();
1377 }
1378
1379 /* Public functions */
1380
1381 struct dc *dc_create(const struct dc_init_data *init_params)
1382 {
1383         struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
1384         unsigned int full_pipe_count;
1385
1386         if (!dc)
1387                 return NULL;
1388
1389         if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
1390                 if (!dc_construct_ctx(dc, init_params))
1391                         goto destruct_dc;
1392         } else {
1393                 if (!dc_construct(dc, init_params))
1394                         goto destruct_dc;
1395
1396                 full_pipe_count = dc->res_pool->pipe_count;
1397                 if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
1398                         full_pipe_count--;
1399                 dc->caps.max_streams = min(
1400                                 full_pipe_count,
1401                                 dc->res_pool->stream_enc_count);
1402
1403                 dc->caps.max_links = dc->link_count;
1404                 dc->caps.max_audios = dc->res_pool->audio_count;
1405                 dc->caps.linear_pitch_alignment = 64;
1406
1407                 dc->caps.max_dp_protocol_version = DP_VERSION_1_4;
1408
1409                 dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator;
1410
1411                 if (dc->res_pool->dmcu != NULL)
1412                         dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
1413         }
1414
1415         dc->dcn_reg_offsets = init_params->dcn_reg_offsets;
1416         dc->nbio_reg_offsets = init_params->nbio_reg_offsets;
1417         dc->clk_reg_offsets = init_params->clk_reg_offsets;
1418
1419         /* Populate versioning information */
1420         dc->versions.dc_ver = DC_VER;
1421
1422         dc->build_id = DC_BUILD_ID;
1423
1424         DC_LOG_DC("Display Core initialized\n");
1425
1428         return dc;
1429
1430 destruct_dc:
1431         dc_destruct(dc);
1432         kfree(dc);
1433         return NULL;
1434 }
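
/* Illustrative sketch (not part of the driver): a minimal create/init/destroy
 * lifecycle as a display manager might drive it. The init_data population is
 * elided; dc_create(), dc_hardware_init() and dc_destroy() are the entry
 * points defined in this file.
 *
 *	struct dc_init_data init_data = {0};
 *	struct dc *dc;
 *
 *	// fill in init_data (ctx, BIOS, register offsets, ...) first
 *	dc = dc_create(&init_data);
 *	if (!dc)
 *		return NULL;
 *	dc_hardware_init(dc);	// detect eDP, then hwss.init_hw() on real HW
 *	// ... use the core ...
 *	dc_destroy(&dc);	// frees the struct and NULLs the pointer
 */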
1435
1436 static void detect_edp_presence(struct dc *dc)
1437 {
1438         struct dc_link *edp_links[MAX_NUM_EDP];
1439         struct dc_link *edp_link = NULL;
1440         enum dc_connection_type type;
1441         int i;
1442         int edp_num;
1443
1444         dc_get_edp_links(dc, edp_links, &edp_num);
1445         if (!edp_num)
1446                 return;
1447
1448         for (i = 0; i < edp_num; i++) {
1449                 edp_link = edp_links[i];
1450                 if (dc->config.edp_not_connected) {
1451                         edp_link->edp_sink_present = false;
1452                 } else {
1453                         dc_link_detect_connection_type(edp_link, &type);
1454                         edp_link->edp_sink_present = (type != dc_connection_none);
1455                 }
1456         }
1457 }
1458
1459 void dc_hardware_init(struct dc *dc)
1460 {
1462         detect_edp_presence(dc);
1463         if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
1464                 dc->hwss.init_hw(dc);
1465 }
1466
1467 void dc_init_callbacks(struct dc *dc,
1468                 const struct dc_callback_init *init_params)
1469 {
1470         dc->ctx->cp_psp = init_params->cp_psp;
1471 }
1472
1473 void dc_deinit_callbacks(struct dc *dc)
1474 {
1475         memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
1476 }
1477
1478 void dc_destroy(struct dc **dc)
1479 {
1480         dc_destruct(*dc);
1481         kfree(*dc);
1482         *dc = NULL;
1483 }
1484
1485 static void enable_timing_multisync(
1486                 struct dc *dc,
1487                 struct dc_state *ctx)
1488 {
1489         int i, multisync_count = 0;
1490         int pipe_count = dc->res_pool->pipe_count;
1491         struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };
1492
1493         for (i = 0; i < pipe_count; i++) {
1494                 if (!ctx->res_ctx.pipe_ctx[i].stream ||
1495                                 !ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
1496                         continue;
1497                 if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
1498                         continue;
1499                 multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
1500                 multisync_count++;
1501         }
1502
1503         if (multisync_count > 0) {
1504                 dc->hwss.enable_per_frame_crtc_position_reset(
1505                         dc, multisync_count, multisync_pipes);
1506         }
1507 }
1508
1509 static void program_timing_sync(
1510                 struct dc *dc,
1511                 struct dc_state *ctx)
1512 {
1513         int i, j, k;
1514         int group_index = 0;
1515         int num_group = 0;
1516         int pipe_count = dc->res_pool->pipe_count;
1517         struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
1518
1519         for (i = 0; i < pipe_count; i++) {
1520                 if (!ctx->res_ctx.pipe_ctx[i].stream
1521                                 || ctx->res_ctx.pipe_ctx[i].top_pipe
1522                                 || ctx->res_ctx.pipe_ctx[i].prev_odm_pipe)
1523                         continue;
1524
1525                 unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
1526         }
1527
1528         for (i = 0; i < pipe_count; i++) {
1529                 int group_size = 1;
1530                 enum timing_synchronization_type sync_type = NOT_SYNCHRONIZABLE;
1531                 struct pipe_ctx *pipe_set[MAX_PIPES];
1532
1533                 if (!unsynced_pipes[i])
1534                         continue;
1535
1536                 pipe_set[0] = unsynced_pipes[i];
1537                 unsynced_pipes[i] = NULL;
1538
1539                 /* Add the TG to the set, then search the remaining TGs for ones
1540                  * with the same timing and add them all to the group.
1541                  */
1542                 for (j = i + 1; j < pipe_count; j++) {
1543                         if (!unsynced_pipes[j])
1544                                 continue;
1545                         if (sync_type != TIMING_SYNCHRONIZABLE &&
1546                                 dc->hwss.enable_vblanks_synchronization &&
1547                                 unsynced_pipes[j]->stream_res.tg->funcs->align_vblanks &&
1548                                 resource_are_vblanks_synchronizable(
1549                                         unsynced_pipes[j]->stream,
1550                                         pipe_set[0]->stream)) {
1551                                 sync_type = VBLANK_SYNCHRONIZABLE;
1552                                 pipe_set[group_size] = unsynced_pipes[j];
1553                                 unsynced_pipes[j] = NULL;
1554                                 group_size++;
1555                         } else
1556                         if (sync_type != VBLANK_SYNCHRONIZABLE &&
1557                                 resource_are_streams_timing_synchronizable(
1558                                         unsynced_pipes[j]->stream,
1559                                         pipe_set[0]->stream)) {
1560                                 sync_type = TIMING_SYNCHRONIZABLE;
1561                                 pipe_set[group_size] = unsynced_pipes[j];
1562                                 unsynced_pipes[j] = NULL;
1563                                 group_size++;
1564                         }
1565                 }
1566
1567                 /* set first unblanked pipe as master */
1568                 for (j = 0; j < group_size; j++) {
1569                         bool is_blanked;
1570
1571                         if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
1572                                 is_blanked =
1573                                         pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
1574                         else
1575                                 is_blanked =
1576                                         pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
1577                         if (!is_blanked) {
1578                                 if (j == 0)
1579                                         break;
1580
1581                                 swap(pipe_set[0], pipe_set[j]);
1582                                 break;
1583                         }
1584                 }
1585
1586                 for (k = 0; k < group_size; k++) {
1587                         struct dc_stream_status *status = dc_state_get_stream_status(ctx, pipe_set[k]->stream);
1588
1589                         status->timing_sync_info.group_id = num_group;
1590                         status->timing_sync_info.group_size = group_size;
1591                         if (k == 0)
1592                                 status->timing_sync_info.master = true;
1593                         else
1594                                 status->timing_sync_info.master = false;
1595
1596                 }
1597
1598                 /* remove any other unblanked pipes as they have already been synced */
1599                 if (dc->config.use_pipe_ctx_sync_logic) {
1600                         /* check each pipe's pipe_idx_syncd to decide which pipes to remove */
1601                         for (j = 1; j < group_size; j++) {
1602                                 if (pipe_set[j]->pipe_idx_syncd == pipe_set[0]->pipe_idx_syncd) {
1603                                         group_size--;
1604                                         pipe_set[j] = pipe_set[group_size];
1605                                         j--;
1606                                 } else
1607                                         /* link the slave pipe's syncd index to the master pipe */
1608                                         pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd;
1609                         }
1610                 } else {
1611                         /* remove any remaining unblanked pipes, as they are already synced */
1612                         for (j = j + 1; j < group_size; j++) {
1613                                 bool is_blanked;
1614
1615                                 if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
1616                                         is_blanked =
1617                                                 pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
1618                                 else
1619                                         is_blanked =
1620                                                 pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
1621                                 if (!is_blanked) {
1622                                         group_size--;
1623                                         pipe_set[j] = pipe_set[group_size];
1624                                         j--;
1625                                 }
1626                         }
1627                 }
1628
1629                 if (group_size > 1) {
1630                         if (sync_type == TIMING_SYNCHRONIZABLE) {
1631                                 dc->hwss.enable_timing_synchronization(
1632                                         dc, ctx, group_index, group_size, pipe_set);
1633                         } else
1634                                 if (sync_type == VBLANK_SYNCHRONIZABLE) {
1635                                 dc->hwss.enable_vblanks_synchronization(
1636                                         dc, group_index, group_size, pipe_set);
1637                                 }
1638                         group_index++;
1639                 }
1640                 num_group++;
1641         }
1642 }
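
/* Illustrative sketch (not part of the driver): once program_timing_sync()
 * has run, each stream's status carries its sync-group bookkeeping, e.g.:
 *
 *	struct dc_stream_status *status =
 *			dc_state_get_stream_status(ctx, stream);
 *
 *	if (status && status->timing_sync_info.group_size > 1 &&
 *			status->timing_sync_info.master)
 *		; // this stream's OTG is the timing master of its group
 */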
1643
1644 static bool streams_changed(struct dc *dc,
1645                             struct dc_stream_state *streams[],
1646                             uint8_t stream_count)
1647 {
1648         uint8_t i;
1649
1650         if (stream_count != dc->current_state->stream_count)
1651                 return true;
1652
1653         for (i = 0; i < dc->current_state->stream_count; i++) {
1654                 if (dc->current_state->streams[i] != streams[i])
1655                         return true;
1656                 if (!streams[i]->link->link_state_valid)
1657                         return true;
1658         }
1659
1660         return false;
1661 }
1662
1663 bool dc_validate_boot_timing(const struct dc *dc,
1664                                 const struct dc_sink *sink,
1665                                 struct dc_crtc_timing *crtc_timing)
1666 {
1667         struct timing_generator *tg;
1668         struct stream_encoder *se = NULL;
1669
1670         struct dc_crtc_timing hw_crtc_timing = {0};
1671
1672         struct dc_link *link = sink->link;
1673         unsigned int i, enc_inst, tg_inst = 0;
1674
1675         /* Support seamless boot on eDP displays only */
1676         if (sink->sink_signal != SIGNAL_TYPE_EDP) {
1677                 return false;
1678         }
1679
1680         if (dc->debug.force_odm_combine)
1681                 return false;
1682
1683         /* Check for enabled DIG to identify enabled display */
1684         if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
1685                 return false;
1686
1687         enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
1688
1689         if (enc_inst == ENGINE_ID_UNKNOWN)
1690                 return false;
1691
1692         for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
1693                 if (dc->res_pool->stream_enc[i]->id == enc_inst) {
1694
1695                         se = dc->res_pool->stream_enc[i];
1696
1697                         tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
1698                                 dc->res_pool->stream_enc[i]);
1699                         break;
1700                 }
1701         }
1702
1703         // stream encoder not found
1704         if (i == dc->res_pool->stream_enc_count)
1705                 return false;
1706
1707         if (tg_inst >= dc->res_pool->timing_generator_count)
1708                 return false;
1709
1710         if (tg_inst != link->link_enc->preferred_engine)
1711                 return false;
1712
1713         tg = dc->res_pool->timing_generators[tg_inst];
1714
1715         if (!tg->funcs->get_hw_timing)
1716                 return false;
1717
1718         if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
1719                 return false;
1720
1721         if (crtc_timing->h_total != hw_crtc_timing.h_total)
1722                 return false;
1723
1724         if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
1725                 return false;
1726
1727         if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
1728                 return false;
1729
1730         if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
1731                 return false;
1732
1733         if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
1734                 return false;
1735
1736         if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
1737                 return false;
1738
1739         if (crtc_timing->v_total != hw_crtc_timing.v_total)
1740                 return false;
1741
1742         if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
1743                 return false;
1744
1745         if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
1746                 return false;
1747
1748         if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
1749                 return false;
1750
1751         if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
1752                 return false;
1753
1754         if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
1755                 return false;
1756
1757         /* block DSC for now, as VBIOS does not currently support DSC timings */
1758         if (crtc_timing->flags.DSC)
1759                 return false;
1760
1761         if (dc_is_dp_signal(link->connector_signal)) {
1762                 unsigned int pix_clk_100hz;
1763                 uint32_t numOdmPipes = 1;
1764                 uint32_t id_src[4] = {0};
1765
1766                 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
1767                         dc->res_pool->dp_clock_source,
1768                         tg_inst, &pix_clk_100hz);
1769
1770                 if (tg->funcs->get_optc_source)
1771                         tg->funcs->get_optc_source(tg,
1772                                                 &numOdmPipes, &id_src[0], &id_src[1]);
1773
1774                 if (numOdmPipes == 2)
1775                         pix_clk_100hz *= 2;
1776                 if (numOdmPipes == 4)
1777                         pix_clk_100hz *= 4;
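
                /* Worked example (illustrative): with ODM combine each pipe
                 * carries a fraction of the horizontal timing, so the clock
                 * source reports the per-pipe rate. A stream whose CRTC pixel
                 * clock is 1188.00 MHz split 2:1 reads back as 594.00 MHz and
                 * is scaled by numOdmPipes == 2 before the comparison below.
                 */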
1778
1779                 // Note: In rare cases, HW pixclk may differ from crtc's pixclk
1780                 // slightly due to rounding issues in 10 kHz units.
1781                 if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
1782                         return false;
1783
1784                 if (!se->funcs->dp_get_pixel_format)
1785                         return false;
1786
1787                 if (!se->funcs->dp_get_pixel_format(
1788                         se,
1789                         &hw_crtc_timing.pixel_encoding,
1790                         &hw_crtc_timing.display_color_depth))
1791                         return false;
1792
1793                 if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
1794                         return false;
1795
1796                 if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
1797                         return false;
1798         }
1799
1800         if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
1801                 return false;
1802         }
1803
1804         if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED)
1805                 return false;
1806
1807         if (dc->link_srv->edp_is_ilr_optimization_required(link, crtc_timing)) {
1808                 DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
1809                 return false;
1810         }
1811
1812         return true;
1813 }
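
/* Illustrative sketch (simplified, assumed DM-side flow; 'stream' is a
 * hypothetical dc_stream_state): a display manager would typically call
 * dc_validate_boot_timing() to decide whether the firmware-lit eDP timing
 * can be adopted without a full modeset, e.g.:
 *
 *	if (dc_validate_boot_timing(dc, stream->sink, &stream->timing))
 *		stream->apply_seamless_boot_optimization = true;
 *	// else fall through to a normal full modeset
 */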
1814
1815 static inline bool should_update_pipe_for_stream(
1816                 struct dc_state *context,
1817                 struct pipe_ctx *pipe_ctx,
1818                 struct dc_stream_state *stream)
1819 {
1820         return (pipe_ctx->stream && pipe_ctx->stream == stream);
1821 }
1822
1823 static inline bool should_update_pipe_for_plane(
1824                 struct dc_state *context,
1825                 struct pipe_ctx *pipe_ctx,
1826                 struct dc_plane_state *plane_state)
1827 {
1828         return (pipe_ctx->plane_state == plane_state);
1829 }
1830
1831 void dc_enable_stereo(
1832         struct dc *dc,
1833         struct dc_state *context,
1834         struct dc_stream_state *streams[],
1835         uint8_t stream_count)
1836 {
1837         int i, j;
1838         struct pipe_ctx *pipe;
1839
1840         dc_exit_ips_for_hw_access(dc);
1841
1842         for (i = 0; i < MAX_PIPES; i++) {
1843                 if (context != NULL) {
1844                         pipe = &context->res_ctx.pipe_ctx[i];
1845                 } else {
1846                         context = dc->current_state;
1847                         pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1848                 }
1849
1850                 for (j = 0; pipe && j < stream_count; j++)  {
1851                         if (should_update_pipe_for_stream(context, pipe, streams[j]) &&
1852                                 dc->hwss.setup_stereo)
1853                                 dc->hwss.setup_stereo(pipe, dc);
1854                 }
1855         }
1856 }
1857
1858 void dc_trigger_sync(struct dc *dc, struct dc_state *context)
1859 {
1860         if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
1861                 dc_exit_ips_for_hw_access(dc);
1862
1863                 enable_timing_multisync(dc, context);
1864                 program_timing_sync(dc, context);
1865         }
1866 }
1867
1868 static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
1869 {
1870         int i;
1871         unsigned int stream_mask = 0;
1872
1873         for (i = 0; i < dc->res_pool->pipe_count; i++) {
1874                 if (context->res_ctx.pipe_ctx[i].stream)
1875                         stream_mask |= 1 << i;
1876         }
1877
1878         return stream_mask;
1879 }
1880
1881 void dc_z10_restore(const struct dc *dc)
1882 {
1883         if (dc->hwss.z10_restore)
1884                 dc->hwss.z10_restore(dc);
1885 }
1886
1887 void dc_z10_save_init(struct dc *dc)
1888 {
1889         if (dc->hwss.z10_save_init)
1890                 dc->hwss.z10_save_init(dc);
1891 }
1892
1893 /**
1894  * dc_commit_state_no_check - Apply context to the hardware
1895  *
1896  * @dc: DC object with the current state to be updated
1897  * @context: New state that will become the current state at the end of this function
1898  *
1899  * Applies the given context to the hardware and copies it into the current context.
1900  * It's up to the user to release the source context afterwards.
1901  *
1902  * Return: an enum dc_status result code for the operation
1903  */
1904 static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
1905 {
1906         struct dc_bios *dcb = dc->ctx->dc_bios;
1907         enum dc_status result = DC_ERROR_UNEXPECTED;
1908         struct pipe_ctx *pipe;
1909         int i, k, l;
1910         struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
1911         struct dc_state *old_state;
1912         bool subvp_prev_use = false;
1913
1914         dc_z10_restore(dc);
1915         dc_allow_idle_optimizations(dc, false);
1916
1917         for (i = 0; i < dc->res_pool->pipe_count; i++) {
1918                 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1919
1920                 /* Check old context for SubVP */
1921                 subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM);
1922                 if (subvp_prev_use)
1923                         break;
1924         }
1925
1926         for (i = 0; i < context->stream_count; i++)
1927                 dc_streams[i] =  context->streams[i];
1928
1929         if (!dcb->funcs->is_accelerated_mode(dcb)) {
1930                 disable_vbios_mode_if_required(dc, context);
1931                 dc->hwss.enable_accelerated_mode(dc, context);
1932         }
1933
1934         if (context->stream_count > get_seamless_boot_stream_count(context) ||
1935                 context->stream_count == 0)
1936                 dc->hwss.prepare_bandwidth(dc, context);
1937
1938         /* When SubVP is active, all HW programming must be done while the
1939          * SubVP lock is held.
1940          */
1941         if (dc->hwss.subvp_pipe_control_lock)
1942                 dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);
1943
1944         if (dc->hwss.update_dsc_pg)
1945                 dc->hwss.update_dsc_pg(dc, context, false);
1946
1947         disable_dangling_plane(dc, context);
1948         /* re-program planes for existing streams, in case we need to
1949          * free up plane resources for later use
1950          */
1951         if (dc->hwss.apply_ctx_for_surface) {
1952                 for (i = 0; i < context->stream_count; i++) {
1953                         if (context->streams[i]->mode_changed)
1954                                 continue;
1955                         apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
1956                         dc->hwss.apply_ctx_for_surface(
1957                                 dc, context->streams[i],
1958                                 context->stream_status[i].plane_count,
1959                                 context); /* use new pipe config in new context */
1960                         apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
1961                         dc->hwss.post_unlock_program_front_end(dc, context);
1962                 }
1963         }
1964
1965         /* Program hardware */
1966         for (i = 0; i < dc->res_pool->pipe_count; i++) {
1967                 pipe = &context->res_ctx.pipe_ctx[i];
1968                 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
1969         }
1970
1971         result = dc->hwss.apply_ctx_to_hw(dc, context);
1972
1973         if (result != DC_OK) {
1974                 /* Application of dc_state to hardware stopped. */
1975                 dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
1976                 return result;
1977         }
1978
1979         dc_trigger_sync(dc, context);
1980
1981         /* Full update should unconditionally be triggered when dc_commit_state_no_check is called */
1982         for (i = 0; i < context->stream_count; i++) {
1983                 uint32_t prev_dsc_changed = context->streams[i]->update_flags.bits.dsc_changed;
1984
1985                 context->streams[i]->update_flags.raw = 0xFFFFFFFF;
1986                 context->streams[i]->update_flags.bits.dsc_changed = prev_dsc_changed;
1987         }
1988
1989         /* Program all planes within new context*/
1990         if (dc->hwss.program_front_end_for_ctx) {
1991                 dc->hwss.interdependent_update_lock(dc, context, true);
1992                 dc->hwss.program_front_end_for_ctx(dc, context);
1993                 dc->hwss.interdependent_update_lock(dc, context, false);
1994                 dc->hwss.post_unlock_program_front_end(dc, context);
1995         }
1996
1997         if (dc->hwss.commit_subvp_config)
1998                 dc->hwss.commit_subvp_config(dc, context);
1999         if (dc->hwss.subvp_pipe_control_lock)
2000                 dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use);
2001
2002         for (i = 0; i < context->stream_count; i++) {
2003                 const struct dc_link *link = context->streams[i]->link;
2004
2005                 if (!context->streams[i]->mode_changed)
2006                         continue;
2007
2008                 if (dc->hwss.apply_ctx_for_surface) {
2009                         apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
2010                         dc->hwss.apply_ctx_for_surface(
2011                                         dc, context->streams[i],
2012                                         context->stream_status[i].plane_count,
2013                                         context);
2014                         apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
2015                         dc->hwss.post_unlock_program_front_end(dc, context);
2016                 }
2017
2018                 /*
2019                  * enable stereo
2020                  * TODO rework dc_enable_stereo call to work with validation sets?
2021                  */
2022                 for (k = 0; k < MAX_PIPES; k++) {
2023                         pipe = &context->res_ctx.pipe_ctx[k];
2024
2025                         for (l = 0 ; pipe && l < context->stream_count; l++)  {
2026                                 if (context->streams[l] &&
2027                                         context->streams[l] == pipe->stream &&
2028                                         dc->hwss.setup_stereo)
2029                                         dc->hwss.setup_stereo(pipe, dc);
2030                         }
2031                 }
2032
2033                 CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",
2034                                 context->streams[i]->timing.h_addressable,
2035                                 context->streams[i]->timing.v_addressable,
2036                                 context->streams[i]->timing.h_total,
2037                                 context->streams[i]->timing.v_total,
2038                                 context->streams[i]->timing.pix_clk_100hz / 10);
2039         }
2040
2041         dc_enable_stereo(dc, context, dc_streams, context->stream_count);
2042
2043         if (context->stream_count > get_seamless_boot_stream_count(context) ||
2044                 context->stream_count == 0) {
2045                 /* Must wait for no flips to be pending before optimizing bandwidth */
2046                 wait_for_no_pipes_pending(dc, context);
2047                 /*
2048                  * The optimized dispclk depends on the ODM setup, so wait for any
2049                  * pending ODM update to complete before optimizing bandwidth.
2050                  */
2051                 wait_for_odm_update_pending_complete(dc, context);
2052                 /* pplib is notified if disp_num changed */
2053                 dc->hwss.optimize_bandwidth(dc, context);
2054                 /* Need to do OTG sync again, as the OTG could be out of sync due to
2055                  * the OTG workaround applied during the clock update.
2056                  */
2057                 dc_trigger_sync(dc, context);
2058         }
2059
2060         if (dc->hwss.update_dsc_pg)
2061                 dc->hwss.update_dsc_pg(dc, context, true);
2062
2063         if (dc->ctx->dce_version >= DCE_VERSION_MAX)
2064                 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
2065         else
2066                 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
2067
2068         context->stream_mask = get_stream_mask(dc, context);
2069
2070         if (context->stream_mask != dc->current_state->stream_mask)
2071                 dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask);
2072
2073         for (i = 0; i < context->stream_count; i++)
2074                 context->streams[i]->mode_changed = false;
2075
2076         /* Clear update flags that were set earlier to avoid redundant programming */
2077         for (i = 0; i < context->stream_count; i++) {
2078                 context->streams[i]->update_flags.raw = 0x0;
2079         }
2080
2081         old_state = dc->current_state;
2082         dc->current_state = context;
2083
2084         dc_state_release(old_state);
2085
2086         dc_state_retain(dc->current_state);
2087
2088         return result;
2089 }
2090
2091 static bool commit_minimal_transition_state_legacy(struct dc *dc,
2092                 struct dc_state *transition_base_context);
2093
2094 /**
2095  * dc_commit_streams - Commit current stream state
2096  *
2097  * @dc: DC object with the commit state to be configured in the hardware
2098  * @streams: Array of stream states
2099  * @stream_count: Total number of streams
2100  *
2101  * Function responsible for committing stream changes to the hardware.
2102  *
2103  * Return:
2104  * Return DC_OK if everything works as expected; otherwise, return a dc_status
2105  * code.
2106  */
2107 enum dc_status dc_commit_streams(struct dc *dc,
2108                                  struct dc_stream_state *streams[],
2109                                  uint8_t stream_count)
2110 {
2111         int i, j;
2112         struct dc_state *context;
2113         enum dc_status res = DC_OK;
2114         struct dc_validation_set set[MAX_STREAMS] = {0};
2115         struct pipe_ctx *pipe;
2116         bool handle_exit_odm2to1 = false;
2117
2118         if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW)
2119                 return res;
2120
2121         if (!streams_changed(dc, streams, stream_count))
2122                 return res;
2123
2124         dc_exit_ips_for_hw_access(dc);
2125
2126         DC_LOG_DC("%s: %d streams\n", __func__, stream_count);
2127
2128         for (i = 0; i < stream_count; i++) {
2129                 struct dc_stream_state *stream = streams[i];
2130                 struct dc_stream_status *status = dc_stream_get_status(stream);
2131
2132                 dc_stream_log(dc, stream);
2133
2134                 set[i].stream = stream;
2135
2136                 if (status) {
2137                         set[i].plane_count = status->plane_count;
2138                         for (j = 0; j < status->plane_count; j++)
2139                                 set[i].plane_states[j] = status->plane_states[j];
2140                 }
2141         }
2142
2143         /* ODM Combine 2:1 power optimization is only applied in the single stream
2144          * scenario; it uses more pipes than needed to reduce power consumption.
2145          * We need to switch off this feature to make room for new streams.
2146          */
2147         if (stream_count > dc->current_state->stream_count &&
2148                         dc->current_state->stream_count == 1) {
2149                 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2150                         pipe = &dc->current_state->res_ctx.pipe_ctx[i];
2151                         if (pipe->next_odm_pipe)
2152                                 handle_exit_odm2to1 = true;
2153                 }
2154         }
2155
2156         if (handle_exit_odm2to1)
2157                 res = commit_minimal_transition_state_legacy(dc, dc->current_state);
2158
2159         context = dc_state_create_current_copy(dc);
2160         if (!context)
2161                 goto context_alloc_fail;
2162
2163         res = dc_validate_with_context(dc, set, stream_count, context, false);
2164         if (res != DC_OK) {
2165                 BREAK_TO_DEBUGGER();
2166                 goto fail;
2167         }
2168
2169         res = dc_commit_state_no_check(dc, context);
2170
2171         for (i = 0; i < stream_count; i++) {
2172                 for (j = 0; j < context->stream_count; j++) {
2173                         if (streams[i]->stream_id == context->streams[j]->stream_id)
2174                                 streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst;
2175
2176                         if (dc_is_embedded_signal(streams[i]->signal)) {
2177                                 struct dc_stream_status *status = dc_state_get_stream_status(context, streams[i]);
2178
2179                                 if (dc->hwss.is_abm_supported)
2180                                         status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, streams[i]);
2181                                 else
2182                                         status->is_abm_supported = true;
2183                         }
2184                 }
2185         }
2186
2187 fail:
2188         dc_state_release(context);
2189
2190 context_alloc_fail:
2191
2192         DC_LOG_DC("%s Finished.\n", __func__);
2193
2194         return res;
2195 }
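
/* Illustrative sketch (assumed, simplified caller): a DM commit path hands
 * the full set of enabled streams to dc_commit_streams() in one call, e.g.:
 *
 *	struct dc_stream_state *streams[MAX_STREAMS];
 *	uint8_t count = 0;
 *
 *	// gather every enabled stream for this commit into streams[]
 *	if (dc_commit_streams(dc, streams, count) != DC_OK)
 *		; // handle/propagate the failure
 */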
2196
2197 bool dc_acquire_release_mpc_3dlut(
2198                 struct dc *dc, bool acquire,
2199                 struct dc_stream_state *stream,
2200                 struct dc_3dlut **lut,
2201                 struct dc_transfer_func **shaper)
2202 {
2203         int pipe_idx;
2204         bool ret = false;
2205         bool found_pipe_idx = false;
2206         const struct resource_pool *pool = dc->res_pool;
2207         struct resource_context *res_ctx = &dc->current_state->res_ctx;
2208         int mpcc_id = 0;
2209
2210         if (pool && res_ctx) {
2211                 if (acquire) {
2212                         /* find the pipe index for the given stream */
2213                         for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) {
2214                                 if (res_ctx->pipe_ctx[pipe_idx].stream == stream) {
2215                                         found_pipe_idx = true;
2216                                         mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst;
2217                                         break;
2218                                 }
2219                         }
2220                 } else
2221                         found_pipe_idx = true; /* pipe_idx is not required for release */
2222
2223                 if (found_pipe_idx) {
2224                         if (acquire && pool->funcs->acquire_post_bldn_3dlut)
2225                                 ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper);
2226                         else if (!acquire && pool->funcs->release_post_bldn_3dlut)
2227                                 ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper);
2228                 }
2229         }
2230         return ret;
2231 }
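
/* Illustrative sketch: acquire and release share this entry point, selected
 * by the 'acquire' flag; the lut/shaper pointers handed out on acquire are
 * the ones that must be passed back on release, e.g.:
 *
 *	struct dc_3dlut *lut = NULL;
 *	struct dc_transfer_func *shaper = NULL;
 *
 *	if (dc_acquire_release_mpc_3dlut(dc, true, stream, &lut, &shaper)) {
 *		// ... program the post-blend 3D LUT and shaper ...
 *		dc_acquire_release_mpc_3dlut(dc, false, stream, &lut, &shaper);
 *	}
 */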
2232
2233 static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
2234 {
2235         int i;
2236         struct pipe_ctx *pipe;
2237
2238         for (i = 0; i < MAX_PIPES; i++) {
2239                 pipe = &context->res_ctx.pipe_ctx[i];
2240
2241                 // Don't check flip pending on phantom pipes
2242                 if (!pipe->plane_state || (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM))
2243                         continue;
2244
2245                 /* Must be set to false to start with, due to the OR in the update function */
2246                 pipe->plane_state->status.is_flip_pending = false;
2247                 dc->hwss.update_pending_status(pipe);
2248                 if (pipe->plane_state->status.is_flip_pending)
2249                         return true;
2250         }
2251         return false;
2252 }
2253
2254 /* Perform updates here which need to be deferred until the next vupdate.
2255  *
2256  * E.g. the blend LUT, 3D LUT, and shaper LUT bypass registers are double
2257  * buffered, but forcing LUT memory into the shutdown state is immediate.
2258  * This causes single-frame corruption as the LUT gets disabled mid-frame,
2259  * unless shutdown is deferred until after entering bypass.
2260  */
2261 static void process_deferred_updates(struct dc *dc)
2262 {
2263         int i = 0;
2264
2265         if (dc->debug.enable_mem_low_power.bits.cm) {
2266                 ASSERT(dc->dcn_ip->max_num_dpp);
2267                 for (i = 0; i < dc->dcn_ip->max_num_dpp; i++)
2268                         if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update)
2269                                 dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]);
2270         }
2271 }
2272
2273 void dc_post_update_surfaces_to_stream(struct dc *dc)
2274 {
2275         int i;
2276         struct dc_state *context = dc->current_state;
2277
2278         if ((!dc->optimized_required) || get_seamless_boot_stream_count(context) > 0)
2279                 return;
2280
2281         post_surface_trace(dc);
2282
2283         /*
2284          * Only relevant for DCN behavior where we can guarantee the optimization
2285          * is safe to apply - retain the legacy behavior for DCE.
2286          */
2287
2288         if (dc->ctx->dce_version < DCE_VERSION_MAX)
2289                 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
2290         else {
2291                 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
2292
2293                 if (is_flip_pending_in_pipes(dc, context))
2294                         return;
2295
2296                 for (i = 0; i < dc->res_pool->pipe_count; i++)
2297                         if (context->res_ctx.pipe_ctx[i].stream == NULL ||
2298                                         context->res_ctx.pipe_ctx[i].plane_state == NULL) {
2299                                 context->res_ctx.pipe_ctx[i].pipe_idx = i;
2300                                 dc->hwss.disable_plane(dc, context, &context->res_ctx.pipe_ctx[i]);
2301                         }
2302
2303                 process_deferred_updates(dc);
2304
2305                 dc->hwss.optimize_bandwidth(dc, context);
2306
2307                 if (dc->hwss.update_dsc_pg)
2308                         dc->hwss.update_dsc_pg(dc, context, true);
2309         }
2310
2311         dc->optimized_required = false;
2312         dc->wm_optimized_required = false;
2313 }
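
/* Illustrative sketch (assumed DM usage): this is the "optimize" half of a
 * two-phase update; it only does work once an earlier type check has set
 * dc->optimized_required and no flip is still pending, e.g.:
 *
 *	// 1) dc_check_update_surfaces_for_stream() may set optimized_required
 *	// 2) commit the surface/stream updates and wait for them to take effect
 *	dc_post_update_surfaces_to_stream(dc);	// 3) lower clocks/bandwidth
 */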
2314
2315 bool dc_set_generic_gpio_for_stereo(bool enable,
2316                 struct gpio_service *gpio_service)
2317 {
2318         enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR;
2319         struct gpio_pin_info pin_info;
2320         struct gpio *generic;
2321         struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config),
2322                            GFP_KERNEL);
2323
2324         if (!config)
2325                 return false;
2326         pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0);
2327
2328         if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) {
2329                 kfree(config);
2330                 return false;
2331         } else {
2332                 generic = dal_gpio_service_create_generic_mux(
2333                         gpio_service,
2334                         pin_info.offset,
2335                         pin_info.mask);
2336         }
2337
2338         if (!generic) {
2339                 kfree(config);
2340                 return false;
2341         }
2342
2343         gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT);
2344
2345         config->enable_output_from_mux = enable;
2346         config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC;
2347
2348         if (gpio_result == GPIO_RESULT_OK)
2349                 gpio_result = dal_mux_setup_config(generic, config);
2350
2351         if (gpio_result == GPIO_RESULT_OK) {
2352                 dal_gpio_close(generic);
2353                 dal_gpio_destroy_generic_mux(&generic);
2354                 kfree(config);
2355                 return true;
2356         } else {
2357                 dal_gpio_close(generic);
2358                 dal_gpio_destroy_generic_mux(&generic);
2359                 kfree(config);
2360                 return false;
2361         }
2362 }
2363
2364 static bool is_surface_in_context(
2365                 const struct dc_state *context,
2366                 const struct dc_plane_state *plane_state)
2367 {
2368         int j;
2369
2370         for (j = 0; j < MAX_PIPES; j++) {
2371                 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2372
2373                 if (plane_state == pipe_ctx->plane_state) {
2374                         return true;
2375                 }
2376         }
2377
2378         return false;
2379 }
2380
2381 static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
2382 {
2383         union surface_update_flags *update_flags = &u->surface->update_flags;
2384         enum surface_update_type update_type = UPDATE_TYPE_FAST;
2385
2386         if (!u->plane_info)
2387                 return UPDATE_TYPE_FAST;
2388
2389         if (u->plane_info->color_space != u->surface->color_space) {
2390                 update_flags->bits.color_space_change = 1;
2391                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2392         }
2393
2394         if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
2395                 update_flags->bits.horizontal_mirror_change = 1;
2396                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2397         }
2398
2399         if (u->plane_info->rotation != u->surface->rotation) {
2400                 update_flags->bits.rotation_change = 1;
2401                 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2402         }
2403
2404         if (u->plane_info->format != u->surface->format) {
2405                 update_flags->bits.pixel_format_change = 1;
2406                 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2407         }
2408
2409         if (u->plane_info->stereo_format != u->surface->stereo_format) {
2410                 update_flags->bits.stereo_format_change = 1;
2411                 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2412         }
2413
2414         if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
2415                 update_flags->bits.per_pixel_alpha_change = 1;
2416                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2417         }
2418
2419         if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
2420                 update_flags->bits.global_alpha_change = 1;
2421                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2422         }
2423
2424         if (u->plane_info->dcc.enable != u->surface->dcc.enable
2425                         || u->plane_info->dcc.dcc_ind_blk != u->surface->dcc.dcc_ind_blk
2426                         || u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
2427                 /* During DCC on/off, stutter period is calculated before
2428                  * DCC has fully transitioned. This results in incorrect
2429                  * stutter period calculation. Triggering a full update will
2430                  * recalculate stutter period.
2431                  */
2432                 update_flags->bits.dcc_change = 1;
2433                 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2434         }
2435
2436         if (resource_pixel_format_to_bpp(u->plane_info->format) !=
2437                         resource_pixel_format_to_bpp(u->surface->format)) {
2438                 /* a different number of bytes per element requires a full
2439                  * bandwidth and DML recalculation
2440                  */
2441                 update_flags->bits.bpp_change = 1;
2442                 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2443         }
2444
2445         if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
2446                         || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
2447                 update_flags->bits.plane_size_change = 1;
2448                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2449         }
2450
2452         if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
2453                         sizeof(union dc_tiling_info)) != 0) {
2454                 update_flags->bits.swizzle_change = 1;
2455                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2456
2457                 /* TODO: the checks below are HW dependent; we should add a hook
2458                  * to the DCE/DCN resource and validate there.
2459                  */
2460                 if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
2461                         /* a swizzled mode requires the RQ to be set up properly,
2462                          * so DML must run to calculate the RQ settings
2463                          */
2464                         update_flags->bits.bandwidth_change = 1;
2465                         elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2466                 }
2467         }
2468
2469         /* This should be UPDATE_TYPE_FAST if nothing has changed. */
2470         return update_type;
2471 }
2472
2473 static enum surface_update_type get_scaling_info_update_type(
2474                 const struct dc *dc,
2475                 const struct dc_surface_update *u)
2476 {
2477         union surface_update_flags *update_flags = &u->surface->update_flags;
2478
2479         if (!u->scaling_info)
2480                 return UPDATE_TYPE_FAST;
2481
2482         if (u->scaling_info->dst_rect.width != u->surface->dst_rect.width
2483                         || u->scaling_info->dst_rect.height != u->surface->dst_rect.height
2484                         || u->scaling_info->scaling_quality.integer_scaling !=
2485                                 u->surface->scaling_quality.integer_scaling
2486                         ) {
2487                 update_flags->bits.scaling_change = 1;
2488
2489                 if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
2490                         || u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
2491                                 && (u->scaling_info->dst_rect.width < u->surface->src_rect.width
2492                                         || u->scaling_info->dst_rect.height < u->surface->src_rect.height))
2493                         /* Making dst rect smaller requires a bandwidth change */
2494                         update_flags->bits.bandwidth_change = 1;
2495         }
2496
2497         if (u->scaling_info->src_rect.width != u->surface->src_rect.width
2498                 || u->scaling_info->src_rect.height != u->surface->src_rect.height) {
2499
2500                 update_flags->bits.scaling_change = 1;
2501                 if (u->scaling_info->src_rect.width > u->surface->src_rect.width
2502                                 || u->scaling_info->src_rect.height > u->surface->src_rect.height)
2503                         /* Making src rect bigger requires a bandwidth change */
2504                         update_flags->bits.clock_change = 1;
2505         }
2506
2507         if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width &&
2508                 (u->scaling_info->clip_rect.width > u->surface->clip_rect.width ||
2509                  u->scaling_info->clip_rect.height > u->surface->clip_rect.height))
2510                  /* Changing clip size of a large surface may result in MPC slice count change */
2511                 update_flags->bits.bandwidth_change = 1;
2512
2513         if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width ||
2514                         u->scaling_info->clip_rect.height != u->surface->clip_rect.height)
2515                 update_flags->bits.clip_size_change = 1;
2516
2517         if (u->scaling_info->src_rect.x != u->surface->src_rect.x
2518                         || u->scaling_info->src_rect.y != u->surface->src_rect.y
2519                         || u->scaling_info->clip_rect.x != u->surface->clip_rect.x
2520                         || u->scaling_info->clip_rect.y != u->surface->clip_rect.y
2521                         || u->scaling_info->dst_rect.x != u->surface->dst_rect.x
2522                         || u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
2523                 update_flags->bits.position_change = 1;
2524
2525         if (update_flags->bits.clock_change
2526                         || update_flags->bits.bandwidth_change
2527                         || update_flags->bits.scaling_change)
2528                 return UPDATE_TYPE_FULL;
2529
2530         if (update_flags->bits.position_change ||
2531                         update_flags->bits.clip_size_change)
2532                 return UPDATE_TYPE_MED;
2533
2534         return UPDATE_TYPE_FAST;
2535 }
2536
2537 static enum surface_update_type det_surface_update(const struct dc *dc,
2538                 const struct dc_surface_update *u)
2539 {
2540         const struct dc_state *context = dc->current_state;
2541         enum surface_update_type type;
2542         enum surface_update_type overall_type = UPDATE_TYPE_FAST;
2543         union surface_update_flags *update_flags = &u->surface->update_flags;
2544
2545         if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
2546                 update_flags->raw = 0xFFFFFFFF;
2547                 return UPDATE_TYPE_FULL;
2548         }
2549
2550         update_flags->raw = 0; // Reset all flags
2551
2552         type = get_plane_info_update_type(u);
2553         elevate_update_type(&overall_type, type);
2554
2555         type = get_scaling_info_update_type(dc, u);
2556         elevate_update_type(&overall_type, type);
2557
2558         if (u->flip_addr) {
2559                 update_flags->bits.addr_update = 1;
2560                 if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) {
2561                         update_flags->bits.tmz_changed = 1;
2562                         elevate_update_type(&overall_type, UPDATE_TYPE_FULL);
2563                 }
2564         }
2565         if (u->in_transfer_func)
2566                 update_flags->bits.in_transfer_func_change = 1;
2567
2568         if (u->input_csc_color_matrix)
2569                 update_flags->bits.input_csc_change = 1;
2570
2571         if (u->coeff_reduction_factor)
2572                 update_flags->bits.coeff_reduction_change = 1;
2573
2574         if (u->gamut_remap_matrix)
2575                 update_flags->bits.gamut_remap_change = 1;
2576
2577         if (u->blend_tf)
2578                 update_flags->bits.gamma_change = 1;
2579
2580         if (u->gamma) {
2581                 enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;
2582
2583                 if (u->plane_info)
2584                         format = u->plane_info->format;
2585                 else if (u->surface)
2586                         format = u->surface->format;
2587
2588                 if (dce_use_lut(format))
2589                         update_flags->bits.gamma_change = 1;
2590         }
2591
2592         if (u->lut3d_func || u->func_shaper)
2593                 update_flags->bits.lut_3d = 1;
2594
2595         if (u->hdr_mult.value)
2596                 if (u->hdr_mult.value != u->surface->hdr_mult.value) {
2597                         update_flags->bits.hdr_mult = 1;
2598                         elevate_update_type(&overall_type, UPDATE_TYPE_MED);
2599                 }
2600
2601         if (update_flags->bits.in_transfer_func_change) {
2602                 type = UPDATE_TYPE_MED;
2603                 elevate_update_type(&overall_type, type);
2604         }
2605
2606         if (update_flags->bits.lut_3d) {
2607                 type = UPDATE_TYPE_FULL;
2608                 elevate_update_type(&overall_type, type);
2609         }
2610
2611         if (dc->debug.enable_legacy_fast_update &&
2612                         (update_flags->bits.gamma_change ||
2613                         update_flags->bits.gamut_remap_change ||
2614                         update_flags->bits.input_csc_change ||
2615                         update_flags->bits.coeff_reduction_change)) {
2616                 type = UPDATE_TYPE_FULL;
2617                 elevate_update_type(&overall_type, type);
2618         }
2619         return overall_type;
2620 }
2621
2622 static enum surface_update_type check_update_surfaces_for_stream(
2623                 struct dc *dc,
2624                 struct dc_surface_update *updates,
2625                 int surface_count,
2626                 struct dc_stream_update *stream_update,
2627                 const struct dc_stream_status *stream_status)
2628 {
2629         int i;
2630         enum surface_update_type overall_type = UPDATE_TYPE_FAST;
2631
2632         if (dc->idle_optimizations_allowed)
2633                 overall_type = UPDATE_TYPE_FULL;
2634
2635         if (stream_status == NULL || stream_status->plane_count != surface_count)
2636                 overall_type = UPDATE_TYPE_FULL;
2637
2638         if (stream_update && stream_update->pending_test_pattern) {
2639                 overall_type = UPDATE_TYPE_FULL;
2640         }
2641
2642         /* some stream updates require passive update */
2643         if (stream_update) {
2644                 union stream_update_flags *su_flags = &stream_update->stream->update_flags;
2645
2646                 if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
2647                         (stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
2648                         stream_update->integer_scaling_update)
2649                         su_flags->bits.scaling = 1;
2650
2651                 if (dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func)
2652                         su_flags->bits.out_tf = 1;
2653
2654                 if (stream_update->abm_level)
2655                         su_flags->bits.abm_level = 1;
2656
2657                 if (stream_update->dpms_off)
2658                         su_flags->bits.dpms_off = 1;
2659
2660                 if (stream_update->gamut_remap)
2661                         su_flags->bits.gamut_remap = 1;
2662
2663                 if (stream_update->wb_update)
2664                         su_flags->bits.wb_update = 1;
2665
2666                 if (stream_update->dsc_config)
2667                         su_flags->bits.dsc_changed = 1;
2668
2669                 if (stream_update->mst_bw_update)
2670                         su_flags->bits.mst_bw = 1;
2671
2672                 if (stream_update->stream && stream_update->stream->freesync_on_desktop &&
2673                         (stream_update->vrr_infopacket || stream_update->allow_freesync ||
2674                                 stream_update->vrr_active_variable || stream_update->vrr_active_fixed))
2675                         su_flags->bits.fams_changed = 1;
2676
2677                 if (su_flags->raw != 0)
2678                         overall_type = UPDATE_TYPE_FULL;
2679
2680                 if (stream_update->output_csc_transform || stream_update->output_color_space)
2681                         su_flags->bits.out_csc = 1;
2682
2683                 /* Output transfer function changes do not require bandwidth recalculation,
2684                  * so don't trigger a full update
2685                  */
2686                 if (!dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func)
2687                         su_flags->bits.out_tf = 1;
2688         }
2689
2690         for (i = 0 ; i < surface_count; i++) {
2691                 enum surface_update_type type =
2692                                 det_surface_update(dc, &updates[i]);
2693
2694                 elevate_update_type(&overall_type, type);
2695         }
2696
2697         return overall_type;
2698 }
2699
2700 /*
2701  * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
2702  *
2703  * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
2704  */
2705 enum surface_update_type dc_check_update_surfaces_for_stream(
2706                 struct dc *dc,
2707                 struct dc_surface_update *updates,
2708                 int surface_count,
2709                 struct dc_stream_update *stream_update,
2710                 const struct dc_stream_status *stream_status)
2711 {
2712         int i;
2713         enum surface_update_type type;
2714
2715         if (stream_update)
2716                 stream_update->stream->update_flags.raw = 0;
2717         for (i = 0; i < surface_count; i++)
2718                 updates[i].surface->update_flags.raw = 0;
2719
2720         type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
2721         if (type == UPDATE_TYPE_FULL) {
2722                 if (stream_update) {
2723                         uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
2724                         stream_update->stream->update_flags.raw = 0xFFFFFFFF;
2725                         stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
2726                 }
2727                 for (i = 0; i < surface_count; i++)
2728                         updates[i].surface->update_flags.raw = 0xFFFFFFFF;
2729         }
2730
2731         if (type == UPDATE_TYPE_FAST) {
2732                 // If there's an available clock comparator, we use that.
2733                 if (dc->clk_mgr->funcs->are_clock_states_equal) {
2734                         if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
2735                                 dc->optimized_required = true;
2736                 // Else we fall back to memcmp.
2737                 } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
2738                         dc->optimized_required = true;
2739                 }
2740
2741                 dc->optimized_required |= dc->wm_optimized_required;
2742         }
2743
2744         return type;
2745 }
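/* Illustrative call sequence (a sketch of a typical caller, not lifted from a
 * specific DM):
 *
 *	enum surface_update_type type;
 *
 *	type = dc_check_update_surfaces_for_stream(dc, srf_updates,
 *			surface_count, stream_update, stream_status);
 *	if (type == UPDATE_TYPE_FULL) {
 *		// all stream/surface update_flags are now set; a new dc_state
 *		// must be built and validated before programming (see
 *		// update_planes_and_stream_state() below)
 *	}
 *
 * Note that this call clears and then repopulates the stream and surface
 * update_flags as a side effect.
 */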
2746
2747 static struct dc_stream_status *stream_get_status(
2748         struct dc_state *ctx,
2749         struct dc_stream_state *stream)
2750 {
2751         uint8_t i;
2752
2753         for (i = 0; i < ctx->stream_count; i++) {
2754                 if (stream == ctx->streams[i]) {
2755                         return &ctx->stream_status[i];
2756                 }
2757         }
2758
2759         return NULL;
2760 }
2761
2762 static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
2763
2764 static void copy_surface_update_to_plane(
2765                 struct dc_plane_state *surface,
2766                 struct dc_surface_update *srf_update)
2767 {
2768         if (srf_update->flip_addr) {
2769                 surface->address = srf_update->flip_addr->address;
2770                 surface->flip_immediate =
2771                         srf_update->flip_addr->flip_immediate;
2772                 surface->time.time_elapsed_in_us[surface->time.index] =
2773                         srf_update->flip_addr->flip_timestamp_in_us -
2774                                 surface->time.prev_update_time_in_us;
2775                 surface->time.prev_update_time_in_us =
2776                         srf_update->flip_addr->flip_timestamp_in_us;
2777                 surface->time.index++;
2778                 if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
2779                         surface->time.index = 0;
2780
2781                 surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips;
2782         }
2783
2784         if (srf_update->scaling_info) {
2785                 surface->scaling_quality =
2786                                 srf_update->scaling_info->scaling_quality;
2787                 surface->dst_rect =
2788                                 srf_update->scaling_info->dst_rect;
2789                 surface->src_rect =
2790                                 srf_update->scaling_info->src_rect;
2791                 surface->clip_rect =
2792                                 srf_update->scaling_info->clip_rect;
2793         }
2794
2795         if (srf_update->plane_info) {
2796                 surface->color_space =
2797                                 srf_update->plane_info->color_space;
2798                 surface->format =
2799                                 srf_update->plane_info->format;
2800                 surface->plane_size =
2801                                 srf_update->plane_info->plane_size;
2802                 surface->rotation =
2803                                 srf_update->plane_info->rotation;
2804                 surface->horizontal_mirror =
2805                                 srf_update->plane_info->horizontal_mirror;
2806                 surface->stereo_format =
2807                                 srf_update->plane_info->stereo_format;
2808                 surface->tiling_info =
2809                                 srf_update->plane_info->tiling_info;
2810                 surface->visible =
2811                                 srf_update->plane_info->visible;
2812                 surface->per_pixel_alpha =
2813                                 srf_update->plane_info->per_pixel_alpha;
2814                 surface->global_alpha =
2815                                 srf_update->plane_info->global_alpha;
2816                 surface->global_alpha_value =
2817                                 srf_update->plane_info->global_alpha_value;
2818                 surface->dcc =
2819                                 srf_update->plane_info->dcc;
2820                 surface->layer_index =
2821                                 srf_update->plane_info->layer_index;
2822         }
2823
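        /* For the deep-copied members below, copy only when the update does
         * not already point at the surface's own structure; this skips
         * redundant self-copies when DM passes the surface's data back in.
         */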
2824         if (srf_update->gamma &&
2825                         (surface->gamma_correction !=
2826                                         srf_update->gamma)) {
2827                 memcpy(&surface->gamma_correction->entries,
2828                         &srf_update->gamma->entries,
2829                         sizeof(struct dc_gamma_entries));
2830                 surface->gamma_correction->is_identity =
2831                         srf_update->gamma->is_identity;
2832                 surface->gamma_correction->num_entries =
2833                         srf_update->gamma->num_entries;
2834                 surface->gamma_correction->type =
2835                         srf_update->gamma->type;
2836         }
2837
2838         if (srf_update->in_transfer_func &&
2839                         (surface->in_transfer_func !=
2840                                 srf_update->in_transfer_func)) {
2841                 surface->in_transfer_func->sdr_ref_white_level =
2842                         srf_update->in_transfer_func->sdr_ref_white_level;
2843                 surface->in_transfer_func->tf =
2844                         srf_update->in_transfer_func->tf;
2845                 surface->in_transfer_func->type =
2846                         srf_update->in_transfer_func->type;
2847                 memcpy(&surface->in_transfer_func->tf_pts,
2848                         &srf_update->in_transfer_func->tf_pts,
2849                         sizeof(struct dc_transfer_func_distributed_points));
2850         }
2851
2852         if (srf_update->func_shaper &&
2853                         (surface->in_shaper_func !=
2854                         srf_update->func_shaper))
2855                 memcpy(surface->in_shaper_func, srf_update->func_shaper,
2856                 sizeof(*surface->in_shaper_func));
2857
2858         if (srf_update->lut3d_func &&
2859                         (surface->lut3d_func !=
2860                         srf_update->lut3d_func))
2861                 memcpy(surface->lut3d_func, srf_update->lut3d_func,
2862                 sizeof(*surface->lut3d_func));
2863
2864         if (srf_update->hdr_mult.value)
2865                 surface->hdr_mult =
2866                                 srf_update->hdr_mult;
2867
2868         if (srf_update->blend_tf &&
2869                         (surface->blend_tf !=
2870                         srf_update->blend_tf))
2871                 memcpy(surface->blend_tf, srf_update->blend_tf,
2872                 sizeof(*surface->blend_tf));
2873
2874         if (srf_update->input_csc_color_matrix)
2875                 surface->input_csc_color_matrix =
2876                         *srf_update->input_csc_color_matrix;
2877
2878         if (srf_update->coeff_reduction_factor)
2879                 surface->coeff_reduction_factor =
2880                         *srf_update->coeff_reduction_factor;
2881
2882         if (srf_update->gamut_remap_matrix)
2883                 surface->gamut_remap_matrix =
2884                         *srf_update->gamut_remap_matrix;
2885 }
2886
2887 static void copy_stream_update_to_stream(struct dc *dc,
2888                                          struct dc_state *context,
2889                                          struct dc_stream_state *stream,
2890                                          struct dc_stream_update *update)
2891 {
2892         struct dc_context *dc_ctx = dc->ctx;
2893
2894         if (update == NULL || stream == NULL)
2895                 return;
2896
2897         if (update->src.height && update->src.width)
2898                 stream->src = update->src;
2899
2900         if (update->dst.height && update->dst.width)
2901                 stream->dst = update->dst;
2902
2903         if (update->out_transfer_func &&
2904             stream->out_transfer_func != update->out_transfer_func) {
2905                 stream->out_transfer_func->sdr_ref_white_level =
2906                         update->out_transfer_func->sdr_ref_white_level;
2907                 stream->out_transfer_func->tf = update->out_transfer_func->tf;
2908                 stream->out_transfer_func->type =
2909                         update->out_transfer_func->type;
2910                 memcpy(&stream->out_transfer_func->tf_pts,
2911                        &update->out_transfer_func->tf_pts,
2912                        sizeof(struct dc_transfer_func_distributed_points));
2913         }
2914
2915         if (update->hdr_static_metadata)
2916                 stream->hdr_static_metadata = *update->hdr_static_metadata;
2917
2918         if (update->abm_level)
2919                 stream->abm_level = *update->abm_level;
2920
2921         if (update->periodic_interrupt)
2922                 stream->periodic_interrupt = *update->periodic_interrupt;
2923
2924         if (update->gamut_remap)
2925                 stream->gamut_remap_matrix = *update->gamut_remap;
2926
2927         /* Note: updating this after mode set is currently not a use case;
2928          * however, if it arises, OCSC would need to be reprogrammed at a
2929          * minimum.
2930          */
2931         if (update->output_color_space)
2932                 stream->output_color_space = *update->output_color_space;
2933
2934         if (update->output_csc_transform)
2935                 stream->csc_color_matrix = *update->output_csc_transform;
2936
2937         if (update->vrr_infopacket)
2938                 stream->vrr_infopacket = *update->vrr_infopacket;
2939
2940         if (update->allow_freesync)
2941                 stream->allow_freesync = *update->allow_freesync;
2942
2943         if (update->vrr_active_variable)
2944                 stream->vrr_active_variable = *update->vrr_active_variable;
2945
2946         if (update->vrr_active_fixed)
2947                 stream->vrr_active_fixed = *update->vrr_active_fixed;
2948
2949         if (update->crtc_timing_adjust)
2950                 stream->adjust = *update->crtc_timing_adjust;
2951
2952         if (update->dpms_off)
2953                 stream->dpms_off = *update->dpms_off;
2954
2955         if (update->hfvsif_infopacket)
2956                 stream->hfvsif_infopacket = *update->hfvsif_infopacket;
2957
2958         if (update->vtem_infopacket)
2959                 stream->vtem_infopacket = *update->vtem_infopacket;
2960
2961         if (update->vsc_infopacket)
2962                 stream->vsc_infopacket = *update->vsc_infopacket;
2963
2964         if (update->vsp_infopacket)
2965                 stream->vsp_infopacket = *update->vsp_infopacket;
2966
2967         if (update->adaptive_sync_infopacket)
2968                 stream->adaptive_sync_infopacket = *update->adaptive_sync_infopacket;
2969
2970         if (update->dither_option)
2971                 stream->dither_option = *update->dither_option;
2972
2973         if (update->pending_test_pattern)
2974                 stream->test_pattern = *update->pending_test_pattern;
2975         /* update current stream with writeback info */
2976         if (update->wb_update) {
2977                 int i;
2978
2979                 stream->num_wb_info = update->wb_update->num_wb_info;
2980                 ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
2981                 for (i = 0; i < stream->num_wb_info; i++)
2982                         stream->writeback_info[i] =
2983                                 update->wb_update->writeback_info[i];
2984         }
2985         if (update->dsc_config) {
2986                 struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
2987                 uint32_t old_dsc_enabled = stream->timing.flags.DSC;
2988                 uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
2989                                        update->dsc_config->num_slices_v != 0);
2990
2991                 /* Use a temporary context for validating the new DSC config */
2992                 struct dc_state *dsc_validate_context = dc_state_create_copy(dc->current_state);
2993
2994                 if (dsc_validate_context) {
2995                         stream->timing.dsc_cfg = *update->dsc_config;
2996                         stream->timing.flags.DSC = enable_dsc;
2997                         if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
2998                                 stream->timing.dsc_cfg = old_dsc_cfg;
2999                                 stream->timing.flags.DSC = old_dsc_enabled;
3000                                 update->dsc_config = NULL;
3001                         }
3002
3003                         dc_state_release(dsc_validate_context);
3004                 } else {
3005                         DC_ERROR("Failed to allocate new validate context for DSC change\n");
3006                         update->dsc_config = NULL;
3007                 }
3008         }
3009 }
3010
3011 static void backup_planes_and_stream_state(
3012                 struct dc_scratch_space *scratch,
3013                 struct dc_stream_state *stream)
3014 {
3015         int i;
3016         struct dc_stream_status *status = dc_stream_get_status(stream);
3017
3018         if (!status)
3019                 return;
3020
3021         for (i = 0; i < status->plane_count; i++) {
3022                 scratch->plane_states[i] = *status->plane_states[i];
3023                 scratch->gamma_correction[i] = *status->plane_states[i]->gamma_correction;
3024                 scratch->in_transfer_func[i] = *status->plane_states[i]->in_transfer_func;
3025                 scratch->lut3d_func[i] = *status->plane_states[i]->lut3d_func;
3026                 scratch->in_shaper_func[i] = *status->plane_states[i]->in_shaper_func;
3027                 scratch->blend_tf[i] = *status->plane_states[i]->blend_tf;
3028         }
3029         scratch->stream_state = *stream;
3030         if (stream->out_transfer_func)
3031                 scratch->out_transfer_func = *stream->out_transfer_func;
3032 }
3033
3034 static void restore_planes_and_stream_state(
3035                 struct dc_scratch_space *scratch,
3036                 struct dc_stream_state *stream)
3037 {
3038         int i;
3039         struct dc_stream_status *status = dc_stream_get_status(stream);
3040
3041         if (!status)
3042                 return;
3043
3044         for (i = 0; i < status->plane_count; i++) {
3045                 *status->plane_states[i] = scratch->plane_states[i];
3046                 *status->plane_states[i]->gamma_correction = scratch->gamma_correction[i];
3047                 *status->plane_states[i]->in_transfer_func = scratch->in_transfer_func[i];
3048                 *status->plane_states[i]->lut3d_func = scratch->lut3d_func[i];
3049                 *status->plane_states[i]->in_shaper_func = scratch->in_shaper_func[i];
3050                 *status->plane_states[i]->blend_tf = scratch->blend_tf[i];
3051         }
3052         *stream = scratch->stream_state;
3053         if (stream->out_transfer_func)
3054                 *stream->out_transfer_func = scratch->out_transfer_func;
3055 }
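/* The pair above brackets a trial update. A minimal usage sketch (assumed
 * caller shape; see update_planes_and_stream_state() for the real one):
 *
 *	backup_planes_and_stream_state(&context->scratch, stream);
 *	// ...apply surface/stream updates and attempt to commit...
 *	if (commit_failed)
 *		restore_planes_and_stream_state(&context->scratch, stream);
 */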
3056
3057 static bool update_planes_and_stream_state(struct dc *dc,
3058                 struct dc_surface_update *srf_updates, int surface_count,
3059                 struct dc_stream_state *stream,
3060                 struct dc_stream_update *stream_update,
3061                 enum surface_update_type *new_update_type,
3062                 struct dc_state **new_context)
3063 {
3064         struct dc_state *context;
3065         int i, j;
3066         enum surface_update_type update_type;
3067         const struct dc_stream_status *stream_status;
3068         struct dc_context *dc_ctx = dc->ctx;
3069
3070         stream_status = dc_stream_get_status(stream);
3071
3072         if (!stream_status) {
3073                 if (surface_count) /* Only an error condition if surf_count is non-zero */
3074                         ASSERT(false);
3075
3076                 return false; /* Cannot commit surface to stream that is not committed */
3077         }
3078
3079         context = dc->current_state;
3080         backup_planes_and_stream_state(&dc->current_state->scratch, stream);
3081         update_type = dc_check_update_surfaces_for_stream(
3082                         dc, srf_updates, surface_count, stream_update, stream_status);
3083
3084         /* update current stream with the new updates */
3085         copy_stream_update_to_stream(dc, context, stream, stream_update);
3086
3087         /* do not perform surface update if surface has invalid dimensions
3088          * (all zero) and no scaling_info is provided
3089          */
3090         if (surface_count > 0) {
3091                 for (i = 0; i < surface_count; i++) {
3092                         if ((srf_updates[i].surface->src_rect.width == 0 ||
3093                                  srf_updates[i].surface->src_rect.height == 0 ||
3094                                  srf_updates[i].surface->dst_rect.width == 0 ||
3095                                  srf_updates[i].surface->dst_rect.height == 0) &&
3096                                 (!srf_updates[i].scaling_info ||
3097                                   srf_updates[i].scaling_info->src_rect.width == 0 ||
3098                                   srf_updates[i].scaling_info->src_rect.height == 0 ||
3099                                   srf_updates[i].scaling_info->dst_rect.width == 0 ||
3100                                   srf_updates[i].scaling_info->dst_rect.height == 0)) {
3101                                 DC_ERROR("Invalid src/dst rects in surface update!\n");
3102                                 return false;
3103                         }
3104                 }
3105         }
3106
3107         if (update_type >= update_surface_trace_level)
3108                 update_surface_trace(dc, srf_updates, surface_count);
3109
3110         for (i = 0; i < surface_count; i++)
3111                 copy_surface_update_to_plane(srf_updates[i].surface, &srf_updates[i]);
3112
3113         if (update_type >= UPDATE_TYPE_FULL) {
3114                 struct dc_plane_state *new_planes[MAX_SURFACES] = {0};
3115
3116                 for (i = 0; i < surface_count; i++)
3117                         new_planes[i] = srf_updates[i].surface;
3118
3119                 /* build the new context as a copy of the current state */
3120                 context = dc_state_create_copy(dc->current_state);
3121                 if (context == NULL) {
3122                         DC_ERROR("Failed to allocate new validate context!\n");
3123                         return false;
3124                 }
3125
3126                 /* For each full update, remove all existing phantom pipes first.
3127                  * This ensures that we have enough pipes for newly added MPO planes.
3128                  */
3129                 dc_state_remove_phantom_streams_and_planes(dc, context);
3130                 dc_state_release_phantom_streams_and_planes(dc, context);
3131
3132                 /* remove old surfaces from context */
3133                 if (!dc_state_rem_all_planes_for_stream(dc, stream, context)) {
3134
3135                         BREAK_TO_DEBUGGER();
3136                         goto fail;
3137                 }
3138
3139                 /* add surface to context */
3140                 if (!dc_state_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
3141
3142                         BREAK_TO_DEBUGGER();
3143                         goto fail;
3144                 }
3145         }
3146
3147         /* save update parameters into surface */
3148         for (i = 0; i < surface_count; i++) {
3149                 struct dc_plane_state *surface = srf_updates[i].surface;
3150
3151                 if (update_type >= UPDATE_TYPE_MED) {
3152                         for (j = 0; j < dc->res_pool->pipe_count; j++) {
3153                                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3154
3155                                 if (pipe_ctx->plane_state != surface)
3156                                         continue;
3157
3158                                 resource_build_scaling_params(pipe_ctx);
3159                         }
3160                 }
3161         }
3162
3163         if (update_type == UPDATE_TYPE_FULL) {
3164                 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
3165                         BREAK_TO_DEBUGGER();
3166                         goto fail;
3167                 }
3168
3169                 for (i = 0; i < context->stream_count; i++) {
3170                         struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(&context->res_ctx,
3171                                         context->streams[i]);
3172
3173                         if (otg_master && otg_master->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE)
3174                                 resource_build_test_pattern_params(&context->res_ctx, otg_master);
3175                 }
3176         }
3177
3178         *new_context = context;
3179         *new_update_type = update_type;
3180         backup_planes_and_stream_state(&context->scratch, stream);
3181
3182         return true;
3183
3184 fail:
3185         dc_state_release(context);
3186
3187         return false;
3188
3189 }
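/* Summary of the full-update path above: copy the current state, strip any
 * phantom (SubVP) streams and planes, swap in the updated plane list for the
 * stream, then re-validate bandwidth on the copy before returning it through
 * *new_context.
 */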
3190
3191 static void commit_planes_do_stream_update(struct dc *dc,
3192                 struct dc_stream_state *stream,
3193                 struct dc_stream_update *stream_update,
3194                 enum surface_update_type update_type,
3195                 struct dc_state *context)
3196 {
3197         int j;
3198
3199         // Stream updates
3200         for (j = 0; j < dc->res_pool->pipe_count; j++) {
3201                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3202
3203                 if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && pipe_ctx->stream == stream) {
3204
3205                         if (stream_update->periodic_interrupt && dc->hwss.setup_periodic_interrupt)
3206                                 dc->hwss.setup_periodic_interrupt(dc, pipe_ctx);
3207
3208                         if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
3209                                         stream_update->vrr_infopacket ||
3210                                         stream_update->vsc_infopacket ||
3211                                         stream_update->vsp_infopacket ||
3212                                         stream_update->hfvsif_infopacket ||
3213                                         stream_update->adaptive_sync_infopacket ||
3214                                         stream_update->vtem_infopacket) {
3215                                 resource_build_info_frame(pipe_ctx);
3216                                 dc->hwss.update_info_frame(pipe_ctx);
3217
3218                                 if (dc_is_dp_signal(pipe_ctx->stream->signal))
3219                                         dc->link_srv->dp_trace_source_sequence(
3220                                                         pipe_ctx->stream->link,
3221                                                         DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
3222                         }
3223
3224                         if (stream_update->hdr_static_metadata &&
3225                                         stream->use_dynamic_meta &&
3226                                         dc->hwss.set_dmdata_attributes &&
3227                                         pipe_ctx->stream->dmdata_address.quad_part != 0)
3228                                 dc->hwss.set_dmdata_attributes(pipe_ctx);
3229
3230                         if (stream_update->gamut_remap)
3231                                 dc_stream_set_gamut_remap(dc, stream);
3232
3233                         if (stream_update->output_csc_transform)
3234                                 dc_stream_program_csc_matrix(dc, stream);
3235
3236                         if (stream_update->dither_option) {
3237                                 struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
3238                                 resource_build_bit_depth_reduction_params(pipe_ctx->stream,
3239                                                                         &pipe_ctx->stream->bit_depth_params);
3240                                 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
3241                                                 &stream->bit_depth_params,
3242                                                 &stream->clamping);
3243                                 while (odm_pipe) {
3244                                         odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
3245                                                         &stream->bit_depth_params,
3246                                                         &stream->clamping);
3247                                         odm_pipe = odm_pipe->next_odm_pipe;
3248                                 }
3249                         }
3250
3251
3252                         /* Full FE update */
3253                         if (update_type == UPDATE_TYPE_FAST)
3254                                 continue;
3255
3256                         if (stream_update->dsc_config)
3257                                 dc->link_srv->update_dsc_config(pipe_ctx);
3258
3259                         if (stream_update->mst_bw_update) {
3260                                 if (stream_update->mst_bw_update->is_increase)
3261                                         dc->link_srv->increase_mst_payload(pipe_ctx,
3262                                                         stream_update->mst_bw_update->mst_stream_bw);
3263                                 else
3264                                         dc->link_srv->reduce_mst_payload(pipe_ctx,
3265                                                         stream_update->mst_bw_update->mst_stream_bw);
3266                         }
3267
3268                         if (stream_update->pending_test_pattern) {
3269                                 dc_link_dp_set_test_pattern(stream->link,
3270                                         stream->test_pattern.type,
3271                                         stream->test_pattern.color_space,
3272                                         stream->test_pattern.p_link_settings,
3273                                         stream->test_pattern.p_custom_pattern,
3274                                         stream->test_pattern.cust_pattern_size);
3275                         }
3276
3277                         if (stream_update->dpms_off) {
3278                                 if (*stream_update->dpms_off) {
3279                                         dc->link_srv->set_dpms_off(pipe_ctx);
3280                                         /* for dpms, keep acquired resources*/
3281                                         if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
3282                                                 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
3283
3284                                         dc->optimized_required = true;
3285
3286                                 } else {
3287                                         if (get_seamless_boot_stream_count(context) == 0)
3288                                                 dc->hwss.prepare_bandwidth(dc, dc->current_state);
3289                                         dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx);
3290                                 }
3291                         } else if (pipe_ctx->stream->link->wa_flags.blank_stream_on_ocs_change && stream_update->output_color_space
3292                                         && !stream->dpms_off && dc_is_dp_signal(pipe_ctx->stream->signal)) {
3293                                 /*
3294                                  * Workaround for firmware issue in some receivers where they don't pick up
3295                                  * correct output color space unless DP link is disabled/re-enabled
3296                                  */
3297                                 dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx);
3298                         }
3299
3300                         if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
3301                                 bool should_program_abm = true;
3302
3303                                 // if otg funcs are defined, check if blanked before programming
3304                                 if (pipe_ctx->stream_res.tg->funcs->is_blanked)
3305                                         if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
3306                                                 should_program_abm = false;
3307
3308                                 if (should_program_abm) {
3309                                         if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
3310                                                 dc->hwss.set_abm_immediate_disable(pipe_ctx);
3311                                         } else {
3312                                                 pipe_ctx->stream_res.abm->funcs->set_abm_level(
3313                                                         pipe_ctx->stream_res.abm, stream->abm_level);
3314                                         }
3315                                 }
3316                         }
3317                 }
3318         }
3319 }
3320
3321 static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_state *stream)
3322 {
3323         if ((stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1
3324                         || stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
3325                         && stream->ctx->dce_version >= DCN_VERSION_3_1)
3326                 return true;
3327
3328         if (stream->link->replay_settings.config.replay_supported)
3329                 return true;
3330
3331         if (stream->ctx->dce_version >= DCN_VERSION_3_5 && stream->abm_level)
3332                 return true;
3333
3334         return false;
3335 }
3336
3337 void dc_dmub_update_dirty_rect(struct dc *dc,
3338                                int surface_count,
3339                                struct dc_stream_state *stream,
3340                                struct dc_surface_update *srf_updates,
3341                                struct dc_state *context)
3342 {
3343         union dmub_rb_cmd cmd;
3344         struct dmub_cmd_update_dirty_rect_data *update_dirty_rect;
3345         unsigned int i, j;
3346         unsigned int panel_inst = 0;
3347
3348         if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream))
3349                 return;
3350
3351         if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst))
3352                 return;
3353
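        /* Build the DMUB command: payload_bytes counts everything in the
         * command struct that follows the common header.
         */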
3354         memset(&cmd, 0x0, sizeof(cmd));
3355         cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT;
3356         cmd.update_dirty_rect.header.sub_type = 0;
3357         cmd.update_dirty_rect.header.payload_bytes =
3358                 sizeof(cmd.update_dirty_rect) -
3359                 sizeof(cmd.update_dirty_rect.header);
3360         update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data;
3361         for (i = 0; i < surface_count; i++) {
3362                 struct dc_plane_state *plane_state = srf_updates[i].surface;
3363                 const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr;
3364
3365                 if (!srf_updates[i].surface || !flip_addr)
3366                         continue;
3367                 /* Do not send in immediate flip mode */
3368                 if (srf_updates[i].surface->flip_immediate)
3369                         continue;
3370
3371                 update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count;
3372                 memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects,
3373                                 sizeof(flip_addr->dirty_rects));
3374                 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3375                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3376
3377                         if (pipe_ctx->stream != stream)
3378                                 continue;
3379                         if (pipe_ctx->plane_state != plane_state)
3380                                 continue;
3381
3382                         update_dirty_rect->panel_inst = panel_inst;
3383                         update_dirty_rect->pipe_idx = j;
3384                         dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
3385                 }
3386         }
3387 }
3388
3389 static void build_dmub_update_dirty_rect(
3390                 struct dc *dc,
3391                 int surface_count,
3392                 struct dc_stream_state *stream,
3393                 struct dc_surface_update *srf_updates,
3394                 struct dc_state *context,
3395                 struct dc_dmub_cmd dc_dmub_cmd[],
3396                 unsigned int *dmub_cmd_count)
3397 {
3398         union dmub_rb_cmd cmd;
3399         struct dmub_cmd_update_dirty_rect_data *update_dirty_rect;
3400         unsigned int i, j;
3401         unsigned int panel_inst = 0;
3402
3403         if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream))
3404                 return;
3405
3406         if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst))
3407                 return;
3408
3409         memset(&cmd, 0x0, sizeof(cmd));
3410         cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT;
3411         cmd.update_dirty_rect.header.sub_type = 0;
3412         cmd.update_dirty_rect.header.payload_bytes =
3413                 sizeof(cmd.update_dirty_rect) -
3414                 sizeof(cmd.update_dirty_rect.header);
3415         update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data;
3416         for (i = 0; i < surface_count; i++) {
3417                 struct dc_plane_state *plane_state = srf_updates[i].surface;
3418                 const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr;
3419
3420                 if (!srf_updates[i].surface || !flip_addr)
3421                         continue;
3422                 /* Do not send in immediate flip mode */
3423                 if (srf_updates[i].surface->flip_immediate)
3424                         continue;
3425                 update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
3426                 update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count;
3427                 memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects,
3428                                 sizeof(flip_addr->dirty_rects));
3429                 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3430                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3431
3432                         if (pipe_ctx->stream != stream)
3433                                 continue;
3434                         if (pipe_ctx->plane_state != plane_state)
3435                                 continue;
3436                         update_dirty_rect->panel_inst = panel_inst;
3437                         update_dirty_rect->pipe_idx = j;
3438                         dc_dmub_cmd[*dmub_cmd_count].dmub_cmd = cmd;
3439                         dc_dmub_cmd[*dmub_cmd_count].wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
3440                         (*dmub_cmd_count)++;
3441                 }
3442         }
3443 }
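/* Unlike dc_dmub_update_dirty_rect() above, which sends each command to DMUB
 * immediately with DM_DMUB_WAIT_TYPE_NO_WAIT, this variant only queues the
 * commands into dc_dmub_cmd[] so they can be sent later under the OTG lock.
 */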
3444
3445
3446 /**
3447  * build_dmub_cmd_list() - Build an array of DMCUB commands to be sent to DMCUB
3448  *
3449  * @dc: Current DC state
3450  * @srf_updates: Array of surface updates
3451  * @surface_count: Number of surfaces that have an update
3452  * @stream: Corresponding stream to be updated in the current flip
3453  * @context: New DC state to be programmed
3454  *
3455  * @dc_dmub_cmd: Array of DMCUB commands to be sent to DMCUB
3456  * @dmub_cmd_count: Count indicating the number of DMCUB commands in dc_dmub_cmd array
3457  *
3458  * This function builds the array of DMCUB commands ahead of time so that
3459  * they can all be sent while the OTG lock is acquired.
3460  *
3461  * Return: void
3462  */
3463 static void build_dmub_cmd_list(struct dc *dc,
3464                 struct dc_surface_update *srf_updates,
3465                 int surface_count,
3466                 struct dc_stream_state *stream,
3467                 struct dc_state *context,
3468                 struct dc_dmub_cmd dc_dmub_cmd[],
3469                 unsigned int *dmub_cmd_count)
3470 {
3471         // Initialize cmd count to 0
3472         *dmub_cmd_count = 0;
3473         build_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context, dc_dmub_cmd, dmub_cmd_count);
3474 }
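/* At present only dirty-rect commands are queued; the resulting list is
 * consumed by hwss_build_fast_sequence() and hwss_execute_sequence() in
 * commit_planes_for_stream_fast() below.
 */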
3475
3476 static void commit_planes_for_stream_fast(struct dc *dc,
3477                 struct dc_surface_update *srf_updates,
3478                 int surface_count,
3479                 struct dc_stream_state *stream,
3480                 struct dc_stream_update *stream_update,
3481                 enum surface_update_type update_type,
3482                 struct dc_state *context)
3483 {
3484         int i, j;
3485         struct pipe_ctx *top_pipe_to_program = NULL;
3486         struct dc_stream_status *stream_status = NULL;
3487         dc_exit_ips_for_hw_access(dc);
3488
3489         dc_z10_restore(dc);
3490
3491         top_pipe_to_program = resource_get_otg_master_for_stream(
3492                         &context->res_ctx,
3493                         stream);
3494
3495         if (!top_pipe_to_program)
3496                 return;
3497
3498         for (i = 0; i < dc->res_pool->pipe_count; i++) {
3499                 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
3500
3501                 if (pipe->stream && pipe->plane_state) {
3502                         set_p_state_switch_method(dc, context, pipe);
3503
3504                         if (dc->debug.visual_confirm)
3505                                 dc_update_visual_confirm_color(dc, context, pipe);
3506                 }
3507         }
3508
3509         for (i = 0; i < surface_count; i++) {
3510                 struct dc_plane_state *plane_state = srf_updates[i].surface;
3511                 /* set logical flag for lock/unlock use */
3512                 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3513                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3514
3515                         if (!pipe_ctx->plane_state)
3516                                 continue;
3517                         if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3518                                 continue;
3519                         pipe_ctx->plane_state->triplebuffer_flips = false;
3520                         if (update_type == UPDATE_TYPE_FAST &&
3521                             dc->hwss.program_triplebuffer &&
3522                             !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
3523                                 /* triple buffer for VUpdate only */
3524                                 pipe_ctx->plane_state->triplebuffer_flips = true;
3525                         }
3526                 }
3527         }
3528
3529         stream_status = dc_state_get_stream_status(context, stream);
3530
3531         build_dmub_cmd_list(dc,
3532                         srf_updates,
3533                         surface_count,
3534                         stream,
3535                         context,
3536                         context->dc_dmub_cmd,
3537                         &(context->dmub_cmd_count));
3538         hwss_build_fast_sequence(dc,
3539                         context->dc_dmub_cmd,
3540                         context->dmub_cmd_count,
3541                         context->block_sequence,
3542                         &(context->block_sequence_steps),
3543                         top_pipe_to_program,
3544                         stream_status);
3545         hwss_execute_sequence(dc,
3546                         context->block_sequence,
3547                         context->block_sequence_steps);
3548         /* Clear update flags so next flip doesn't have redundant programming
3549          * (if there's no stream update, the update flags are not cleared).
3550          * Surface updates are cleared unconditionally at the beginning of each flip,
3551          * so no need to clear here.
3552          */
3553         if (top_pipe_to_program->stream)
3554                 top_pipe_to_program->stream->update_flags.raw = 0;
3555 }
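/* Fast-path commit: no global pipe locking or bandwidth revalidation; the
 * flip is programmed through the pre-built DMUB command list and block
 * sequence rather than the full path in commit_planes_for_stream() below.
 */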
3556
3557 static void wait_for_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_context)
3558 {
3559         /*
3560          * This function calls HWSS to wait for any potentially double buffered
3561          * operations to complete. It should be invoked as a preamble prior
3562          * to full update programming, before asserting any HW locks.
3563          */
3564         int pipe_idx;
3565         int opp_inst;
3566         int opp_count = dc->res_pool->res_cap->num_opp;
3567         struct hubp *hubp;
3568         int mpcc_inst;
3569         const struct pipe_ctx *pipe_ctx;
3570
3571         for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) {
3572                 pipe_ctx = &dc_context->res_ctx.pipe_ctx[pipe_idx];
3573
3574                 if (!pipe_ctx->stream)
3575                         continue;
3576
3577                 if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear)
3578                         pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg);
3579
3580                 hubp = pipe_ctx->plane_res.hubp;
3581                 if (!hubp)
3582                         continue;
3583
3584                 mpcc_inst = hubp->inst;
3585                 // MPCC inst is equal to pipe index in practice
3586                 for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
3587                         if ((dc->res_pool->opps[opp_inst] != NULL) &&
3588                                 (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst])) {
3589                                 dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
3590                                 dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
3591                                 break;
3592                         }
3593                 }
3594         }
3595         wait_for_odm_update_pending_complete(dc, dc_context);
3596 }
3597
3598 static void commit_planes_for_stream(struct dc *dc,
3599                 struct dc_surface_update *srf_updates,
3600                 int surface_count,
3601                 struct dc_stream_state *stream,
3602                 struct dc_stream_update *stream_update,
3603                 enum surface_update_type update_type,
3604                 struct dc_state *context)
3605 {
3606         int i, j;
3607         struct pipe_ctx *top_pipe_to_program = NULL;
3608         bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
3609         bool subvp_prev_use = false;
3610         bool subvp_curr_use = false;
3611         uint8_t current_stream_mask = 0;
3612
3613         // Once we apply the new SubVP context to hardware it won't be in
3614         // dc->current_state anymore, so we have to cache the previous SubVP
3615         // usage before we apply the new context.
3616         subvp_prev_use = false;
3617         dc_exit_ips_for_hw_access(dc);
3618
3619         dc_z10_restore(dc);
3620         if (update_type == UPDATE_TYPE_FULL)
3621                 wait_for_outstanding_hw_updates(dc, context);
3622
3623         for (i = 0; i < dc->res_pool->pipe_count; i++) {
3624                 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
3625
3626                 if (pipe->stream && pipe->plane_state) {
3627                         set_p_state_switch_method(dc, context, pipe);
3628
3629                         if (dc->debug.visual_confirm)
3630                                 dc_update_visual_confirm_color(dc, context, pipe);
3631                 }
3632         }
3633
3634         if (update_type == UPDATE_TYPE_FULL) {
3635                 dc_allow_idle_optimizations(dc, false);
3636
3637                 if (get_seamless_boot_stream_count(context) == 0)
3638                         dc->hwss.prepare_bandwidth(dc, context);
3639
3640                 if (dc->hwss.update_dsc_pg)
3641                         dc->hwss.update_dsc_pg(dc, context, false);
3642
3643                 context_clock_trace(dc, context);
3644         }
3645
3646         top_pipe_to_program = resource_get_otg_master_for_stream(
3647                                 &context->res_ctx,
3648                                 stream);
3649         ASSERT(top_pipe_to_program != NULL);
3650         for (i = 0; i < dc->res_pool->pipe_count; i++) {
3651                 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
3652
3653                 // Check old context for SubVP
3654                 subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM);
3655                 if (subvp_prev_use)
3656                         break;
3657         }
3658
3659         for (i = 0; i < dc->res_pool->pipe_count; i++) {
3660                 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
3661
3662                 if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
3663                         subvp_curr_use = true;
3664                         break;
3665                 }
3666         }
3667
3668         if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
3669                 struct pipe_ctx *mpcc_pipe;
3670                 struct pipe_ctx *odm_pipe;
3671
3672                 for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
3673                         for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
3674                                 odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
3675         }
3676
3677         if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
3678                 if (top_pipe_to_program &&
3679                         top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
3680                         if (should_use_dmub_lock(stream->link)) {
3681                                 union dmub_hw_lock_flags hw_locks = { 0 };
3682                                 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
3683
3684                                 hw_locks.bits.lock_dig = 1;
3685                                 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
3686
3687                                 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
3688                                                         true,
3689                                                         &hw_locks,
3690                                                         &inst_flags);
3691                         } else
3692                                 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
3693                                                 top_pipe_to_program->stream_res.tg);
3694                 }
3695
3696         if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3697                 if (dc->hwss.subvp_pipe_control_lock)
3698                         dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use);
3699                 dc->hwss.interdependent_update_lock(dc, context, true);
3700
3701         } else {
3702                 if (dc->hwss.subvp_pipe_control_lock)
3703                         dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
3704                 /* Lock the top pipe while updating plane addrs, since freesync requires
3705                  * plane addr update event triggers to be synchronized.
3706                  * top_pipe_to_program is expected to never be NULL.
3707                  */
3708                 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
3709         }
3710
3711         dc_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context);
3712
3713         // Stream updates
3714         if (stream_update)
3715                 commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
3716
3717         if (surface_count == 0) {
3718                 /*
3719                  * In case of turning off the screen, there is no need to program the
3720                  * front end a second time; just return after programming blank.
3721                  */
3722                 if (dc->hwss.apply_ctx_for_surface)
3723                         dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
3724                 if (dc->hwss.program_front_end_for_ctx)
3725                         dc->hwss.program_front_end_for_ctx(dc, context);
3726
3727                 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3728                         dc->hwss.interdependent_update_lock(dc, context, false);
3729                 } else {
3730                         dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
3731                 }
3732                 dc->hwss.post_unlock_program_front_end(dc, context);
3733
3734                 if (update_type != UPDATE_TYPE_FAST)
3735                         if (dc->hwss.commit_subvp_config)
3736                                 dc->hwss.commit_subvp_config(dc, context);
3737
3738                 /* Since phantom pipe programming is moved to post_unlock_program_front_end,
3739                  * move the SubVP lock to after the phantom pipes have been set up.
3740                  */
3741                 if (dc->hwss.subvp_pipe_control_lock)
3742                         dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes,
3743                                                          NULL, subvp_prev_use);
3744                 return;
3745         }
3746
3747         if (update_type != UPDATE_TYPE_FAST) {
3748                 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3749                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3750
3751                         if ((dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP ||
3752                                 dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH) &&
3753                                 pipe_ctx->stream && pipe_ctx->plane_state) {
3754                                 /* Only update visual confirm for SUBVP and Mclk switching here.
3755                                  * The bar appears on all pipes, so we need to update the bar on all displays,
3756                                  * so the information doesn't get stale.
3757                                  */
3758                                 dc->hwss.update_visual_confirm_color(dc, pipe_ctx,
3759                                                 pipe_ctx->plane_res.hubp->inst);
3760                         }
3761                 }
3762         }
3763
3764         for (i = 0; i < surface_count; i++) {
3765                 struct dc_plane_state *plane_state = srf_updates[i].surface;
3766                 /* set logical flag for lock/unlock use */
3767                 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3768                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3769                         if (!pipe_ctx->plane_state)
3770                                 continue;
3771                         if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3772                                 continue;
3773                         pipe_ctx->plane_state->triplebuffer_flips = false;
3774                         if (update_type == UPDATE_TYPE_FAST &&
3775                                 dc->hwss.program_triplebuffer != NULL &&
3776                                 !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
3777                                 /* triple buffer for VUpdate only */
3778                                 pipe_ctx->plane_state->triplebuffer_flips = true;
3779                         }
3780                 }
3781                 if (update_type == UPDATE_TYPE_FULL) {
3782                         /* force vsync flip when reconfiguring pipes to prevent underflow */
3783                         plane_state->flip_immediate = false;
3784                 }
3785         }
3786
3787         // Update Type FULL, Surface updates
3788         for (j = 0; j < dc->res_pool->pipe_count; j++) {
3789                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3790
3791                 if (!pipe_ctx->top_pipe &&
3792                         !pipe_ctx->prev_odm_pipe &&
3793                         should_update_pipe_for_stream(context, pipe_ctx, stream)) {
3794                         struct dc_stream_status *stream_status = NULL;
3795
3796                         if (!pipe_ctx->plane_state)
3797                                 continue;
3798
3799                         /* Full FE update */
3800                         if (update_type == UPDATE_TYPE_FAST)
3801                                 continue;
3802
3803                         ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
3804
3805                         if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
3806                                 /* turn off triple buffer for full update */
3807                                 dc->hwss.program_triplebuffer(
3808                                         dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
3809                         }
3810                         stream_status =
3811                                 stream_get_status(context, pipe_ctx->stream);
3812
3813                         if (dc->hwss.apply_ctx_for_surface)
3814                                 dc->hwss.apply_ctx_for_surface(
3815                                         dc, pipe_ctx->stream, stream_status->plane_count, context);
3816                 }
3817         }
3818         if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
3819                 dc->hwss.program_front_end_for_ctx(dc, context);
3820                 if (dc->debug.validate_dml_output) {
3821                         for (i = 0; i < dc->res_pool->pipe_count; i++) {
3822                                 struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i];
3823                                 if (cur_pipe->stream == NULL)
3824                                         continue;
3825
3826                                 cur_pipe->plane_res.hubp->funcs->validate_dml_output(
3827                                                 cur_pipe->plane_res.hubp, dc->ctx,
3828                                                 &context->res_ctx.pipe_ctx[i].rq_regs,
3829                                                 &context->res_ctx.pipe_ctx[i].dlg_regs,
3830                                                 &context->res_ctx.pipe_ctx[i].ttu_regs);
3831                         }
3832                 }
3833         }
3834
3835         // Update Type FAST, Surface updates
3836         if (update_type == UPDATE_TYPE_FAST) {
3837                 if (dc->hwss.set_flip_control_gsl)
3838                         for (i = 0; i < surface_count; i++) {
3839                                 struct dc_plane_state *plane_state = srf_updates[i].surface;
3840
3841                                 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3842                                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3843
3844                                         if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
3845                                                 continue;
3846
3847                                         if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3848                                                 continue;
3849
3850                                         // GSL has to be used for flip immediate
3851                                         dc->hwss.set_flip_control_gsl(pipe_ctx,
3852                                                         pipe_ctx->plane_state->flip_immediate);
3853                                 }
3854                         }
3855
3856                 /* Perform requested Updates */
3857                 for (i = 0; i < surface_count; i++) {
3858                         struct dc_plane_state *plane_state = srf_updates[i].surface;
3859
3860                         for (j = 0; j < dc->res_pool->pipe_count; j++) {
3861                                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3862
3863                                 if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
3864                                         continue;
3865
3866                                 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3867                                         continue;
3868
3869                                 /* program triple buffer after lock based on flip type */
3870                                 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
3871                                         /* only enable triple buffer for fast updates */
3872                                         dc->hwss.program_triplebuffer(
3873                                                 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
3874                                 }
3875                                 if (pipe_ctx->plane_state->update_flags.bits.addr_update)
3876                                         dc->hwss.update_plane_addr(dc, pipe_ctx);
3877                         }
3878                 }
3879         }
3880
3881         if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3882                 dc->hwss.interdependent_update_lock(dc, context, false);
3883         } else {
3884                 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
3885         }
3886
3887         if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
3888                 if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
3889                         top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3890                                 top_pipe_to_program->stream_res.tg,
3891                                 CRTC_STATE_VACTIVE);
3892                         top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3893                                 top_pipe_to_program->stream_res.tg,
3894                                 CRTC_STATE_VBLANK);
3895                         top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3896                                 top_pipe_to_program->stream_res.tg,
3897                                 CRTC_STATE_VACTIVE);
3898
3899                         if (should_use_dmub_lock(stream->link)) {
3900                                 union dmub_hw_lock_flags hw_locks = { 0 };
3901                                 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
3902
3903                                 hw_locks.bits.lock_dig = 1;
3904                                 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
3905
3906                                 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
3907                                                         false,
3908                                                         &hw_locks,
3909                                                         &inst_flags);
3910                         } else
3911                                 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
3912                                         top_pipe_to_program->stream_res.tg);
3913                 }
3914
3915         if (subvp_curr_use) {
3916                 /* If enabling subvp or transitioning from subvp->subvp, enable the
3917                  * phantom streams before we program front end for the phantom pipes.
3918                  */
3919                 if (update_type != UPDATE_TYPE_FAST) {
3920                         if (dc->hwss.enable_phantom_streams)
3921                                 dc->hwss.enable_phantom_streams(dc, context);
3922                 }
3923         }
3924
3925         if (update_type != UPDATE_TYPE_FAST)
3926                 dc->hwss.post_unlock_program_front_end(dc, context);
3927
3928         if (subvp_prev_use && !subvp_curr_use) {
3929                 /* If disabling subvp, disable phantom streams after front end
3930                  * programming has completed (we turn on phantom OTG in order
3931                  * to complete the plane disable for phantom pipes).
3932                  */
3933
3934                 if (dc->hwss.disable_phantom_streams)
3935                         dc->hwss.disable_phantom_streams(dc, context);
3936         }
3937
3938         if (update_type != UPDATE_TYPE_FAST)
3939                 if (dc->hwss.commit_subvp_config)
3940                         dc->hwss.commit_subvp_config(dc, context);
3941         /* Since phantom pipe programming is moved to post_unlock_program_front_end,
3942          * move the SubVP lock to after the phantom pipes have been setup
3943          * move the SubVP lock to after the phantom pipes have been set up
3944         if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3945                 if (dc->hwss.subvp_pipe_control_lock)
3946                         dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
3947         } else {
3948                 if (dc->hwss.subvp_pipe_control_lock)
3949                         dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
3950         }
3951
3952         // Fire manual trigger only when bottom plane is flipped
3953         for (j = 0; j < dc->res_pool->pipe_count; j++) {
3954                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3955
3956                 if (!pipe_ctx->plane_state)
3957                         continue;
3958
3959                 if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe ||
3960                                 !pipe_ctx->stream || !should_update_pipe_for_stream(context, pipe_ctx, stream) ||
3961                                 !pipe_ctx->plane_state->update_flags.bits.addr_update ||
3962                                 pipe_ctx->plane_state->skip_manual_trigger)
3963                         continue;
3964
3965                 if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
3966                         pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
3967         }
3968
3969         current_stream_mask = get_stream_mask(dc, context);
3970         if (current_stream_mask != context->stream_mask) {
3971                 context->stream_mask = current_stream_mask;
3972                 dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, current_stream_mask);
3973         }
3974 }
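
/*
 * Illustration only (not driver code): one plausible model of the stream
 * bitmask consumed at the end of commit_planes_for_stream() above -- one bit
 * per active stream slot -- cached so the DMUB notification is only issued
 * when the mask actually changes. The real mask is built by get_stream_mask().
 */
static uint32_t example_build_stream_mask(unsigned int stream_count)
{
	uint32_t mask = 0;
	unsigned int i;

	for (i = 0; i < stream_count; i++)
		mask |= 1u << i;	/* bit i set => stream slot i is active */
	return mask;
}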
3975
3976 /**
3977  * could_mpcc_tree_change_for_active_pipes - Check if an OPP associated with MPCC might change
3978  *
3979  * @dc: Used to get the current state status
3980  * @stream: Target stream whose attached planes may need to be removed
3981  * @srf_updates: Array of surface updates
3982  * @surface_count: Number of surface updates
3983  * @is_plane_addition: [out] Set to true if this is a plane addition case
3984  *
3985  * DCN32x and newer support a feature named Dynamic ODM which can conflict
3986  * with MPO when both are used simultaneously in some specific configurations
3987  * (e.g., 4k@144). This function checks if the incoming context requires an
3988  * intermediate transition state, with forced pipe splitting and ODM
3989  * disabled, to work around this hardware limitation and prevent the edge
3990  * case. If the OPP associated with an MPCC might change due to plane
3991  * additions, this function returns true.
3992  *
3993  * Return:
3994  * True if the OPP and MPCC might change, otherwise false.
3995  */
3996 static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
3997                 struct dc_stream_state *stream,
3998                 struct dc_surface_update *srf_updates,
3999                 int surface_count,
4000                 bool *is_plane_addition)
4001 {
4002
4003         struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
4004         bool force_minimal_pipe_splitting = false;
4005         bool subvp_active = false;
4006         uint32_t i;
4007
4008         *is_plane_addition = false;
4009
4010         if (cur_stream_status &&
4011                         dc->current_state->stream_count > 0 &&
4012                         dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) {
4013                 /* determine if minimal transition is required due to MPC */
4014                 if (surface_count > 0) {
4015                         if (cur_stream_status->plane_count > surface_count) {
4016                                 force_minimal_pipe_splitting = true;
4017                         } else if (cur_stream_status->plane_count < surface_count) {
4018                                 force_minimal_pipe_splitting = true;
4019                                 *is_plane_addition = true;
4020                         }
4021                 }
4022         }
4023
4024         if (cur_stream_status &&
4025                         dc->current_state->stream_count == 1 &&
4026                         dc->debug.enable_single_display_2to1_odm_policy) {
4027                 /* determine if minimal transition is required due to dynamic ODM */
4028                 if (surface_count > 0) {
4029                         if (cur_stream_status->plane_count > 2 && cur_stream_status->plane_count > surface_count) {
4030                                 force_minimal_pipe_splitting = true;
4031                         } else if (surface_count > 2 && cur_stream_status->plane_count < surface_count) {
4032                                 force_minimal_pipe_splitting = true;
4033                                 *is_plane_addition = true;
4034                         }
4035                 }
4036         }
4037
4038         for (i = 0; i < dc->res_pool->pipe_count; i++) {
4039                 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
4040
4041                 if (dc_state_get_pipe_subvp_type(dc->current_state, pipe) != SUBVP_NONE) {
4042                         subvp_active = true;
4043                         break;
4044                 }
4045         }
4046
4047         /* For SubVP when adding or removing planes we need to add a minimal transition
4048          * (even when disabling all planes). Whenever disabling a phantom pipe, we
4049          * must use the minimal transition path to disable the pipe correctly.
4050          *
4051          * We want to use the minimal transition whenever subvp is active, not only if
4052          * a plane is being added / removed from a subvp stream (an MPO plane can be
4053          * added to the DRR pipe of a SubVP + DRR config, in which case we still want
4054          * to run through a minimal transition to disable subvp).
4055          */
4056         if (cur_stream_status && subvp_active) {
4057                 /* determine if minimal transition is required due to SubVP */
4058                 if (cur_stream_status->plane_count > surface_count) {
4059                         force_minimal_pipe_splitting = true;
4060                 } else if (cur_stream_status->plane_count < surface_count) {
4061                         force_minimal_pipe_splitting = true;
4062                         *is_plane_addition = true;
4063                 }
4064         }
4065
4066         return force_minimal_pipe_splitting;
4067 }
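
/*
 * Minimal sketch (simplified signature, illustration only) of the plane-count
 * comparison shared by the MPC and SubVP checks above: shrinking the plane
 * count forces a minimal transition, growing it forces one and also flags a
 * plane addition, and an unchanged count forces nothing. The dynamic ODM
 * check additionally requires more than two planes before it triggers.
 */
static bool plane_count_change_forces_minimal_transition(int cur_plane_count,
		int new_surface_count, bool *is_plane_addition)
{
	*is_plane_addition = new_surface_count > cur_plane_count;
	return new_surface_count != cur_plane_count;
}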
4068
4069 struct pipe_split_policy_backup {
4070         bool dynamic_odm_policy;
4071         bool subvp_policy;
4072         enum pipe_split_policy mpc_policy;
4073 };
4074
4075 static void release_minimal_transition_state(struct dc *dc,
4076                 struct dc_state *context, struct pipe_split_policy_backup *policy)
4077 {
4078         dc_state_release(context);
4079         /* restore previous pipe split and odm policy */
4080         if (!dc->config.is_vmin_only_asic)
4081                 dc->debug.pipe_split_policy = policy->mpc_policy;
4082         dc->debug.enable_single_display_2to1_odm_policy = policy->dynamic_odm_policy;
4083         dc->debug.force_disable_subvp = policy->subvp_policy;
4084 }
4085
4086 static struct dc_state *create_minimal_transition_state(struct dc *dc,
4087                 struct dc_state *base_context, struct pipe_split_policy_backup *policy)
4088 {
4089         struct dc_state *minimal_transition_context = NULL;
4090         unsigned int i, j;
4091
4092         if (!dc->config.is_vmin_only_asic) {
4093                 policy->mpc_policy = dc->debug.pipe_split_policy;
4094                 dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
4095         }
4096         policy->dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy;
4097         dc->debug.enable_single_display_2to1_odm_policy = false;
4098         policy->subvp_policy = dc->debug.force_disable_subvp;
4099         dc->debug.force_disable_subvp = true;
4100
4101         minimal_transition_context = dc_state_create_copy(base_context);
4102         if (!minimal_transition_context)
4103                 return NULL;
4104
4105         /* commit minimal state */
4106         if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false)) {
4107                 for (i = 0; i < minimal_transition_context->stream_count; i++) {
4108                         struct dc_stream_status *stream_status = &minimal_transition_context->stream_status[i];
4109
4110                         for (j = 0; j < stream_status->plane_count; j++) {
4111                                 struct dc_plane_state *plane_state = stream_status->plane_states[j];
4112
4113                                 /* force vsync flip when reconfiguring pipes to prevent underflow
4114                                  * and corruption
4115                                  */
4116                                 plane_state->flip_immediate = false;
4117                         }
4118                 }
4119         } else {
4120                 /* this should never happen */
4121                 release_minimal_transition_state(dc, minimal_transition_context, policy);
4122                 BREAK_TO_DEBUGGER();
4123                 minimal_transition_context = NULL;
4124         }
4125         return minimal_transition_context;
4126 }
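
/*
 * Sketch of the intended call pattern for the two helpers above, mirroring
 * commit_minimal_transition_state_legacy() below: the policy backup filled in
 * by create_minimal_transition_state() must be handed back to
 * release_minimal_transition_state() so the debug pipe-split/ODM/SubVP
 * policies are restored on every path.
 */
static enum dc_status commit_minimal_transition_sketch(struct dc *dc,
		struct dc_state *base_context)
{
	struct pipe_split_policy_backup policy;
	struct dc_state *mts;
	enum dc_status ret = DC_ERROR_UNEXPECTED;

	mts = create_minimal_transition_state(dc, base_context, &policy);
	if (mts) {
		ret = dc_commit_state_no_check(dc, mts);	/* commit the low-pipe-usage state */
		release_minimal_transition_state(dc, mts, &policy);
	}
	return ret;
}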
4127
4128
4129 /**
4130  * commit_minimal_transition_state - Commit a minimal state based on current or new context
4131  *
4132  * @dc: DC structure, used to get the current state
4133  * @context: New context
4134  * @stream: Stream getting the update for the flip
4135  *
4136  * The function takes in the current state and the new state and determines a
4137  * minimal transition state as the intermediate step that could make the
4138  * transition between the two seamless. If one is found, it commits the minimal
4139  * transition state, updates the current state to it, and returns true; otherwise it returns false.
4140  *
4141  * Return:
4142  * Return True if the minimal transition succeeded, false otherwise
4143  */
4144 static bool commit_minimal_transition_state(struct dc *dc,
4145                 struct dc_state *context,
4146                 struct dc_stream_state *stream)
4147 {
4148         bool success = false;
4149         struct dc_state *minimal_transition_context;
4150         struct pipe_split_policy_backup policy;
4151
4152         /* commit based on new context */
4153         minimal_transition_context = create_minimal_transition_state(dc,
4154                         context, &policy);
4155         if (minimal_transition_context) {
4156                 if (dc->hwss.is_pipe_topology_transition_seamless(
4157                                         dc, dc->current_state, minimal_transition_context) &&
4158                         dc->hwss.is_pipe_topology_transition_seamless(
4159                                         dc, minimal_transition_context, context)) {
4160                         DC_LOG_DC("%s base = new state\n", __func__);
4161
4162                         success = dc_commit_state_no_check(dc, minimal_transition_context) == DC_OK;
4163                 }
4164                 release_minimal_transition_state(dc, minimal_transition_context, &policy);
4165         }
4166
4167         if (!success) {
4168                 /* commit based on current context */
4169                 restore_planes_and_stream_state(&dc->current_state->scratch, stream);
4170                 minimal_transition_context = create_minimal_transition_state(dc,
4171                                 dc->current_state, &policy);
4172                 if (minimal_transition_context) {
4173                         if (dc->hwss.is_pipe_topology_transition_seamless(
4174                                         dc, dc->current_state, minimal_transition_context) &&
4175                                 dc->hwss.is_pipe_topology_transition_seamless(
4176                                                 dc, minimal_transition_context, context)) {
4177                                 DC_LOG_DC("%s base = current state\n", __func__);
4178                                 success = dc_commit_state_no_check(dc, minimal_transition_context) == DC_OK;
4179                         }
4180                         release_minimal_transition_state(dc, minimal_transition_context, &policy);
4181                 }
4182                 restore_planes_and_stream_state(&context->scratch, stream);
4183         }
4184
4185         ASSERT(success);
4186         return success;
4187 }
4188
4189 /**
4190  * commit_minimal_transition_state_legacy - Create a transition pipe split state
4191  *
4192  * @dc: Used to get the current state status
4193  * @transition_base_context: New transition state
4194  *
4195  * In some specific configurations, such as pipe split on multi-display with
4196  * MPO and/or Dynamic ODM, removing a plane may cause unsupported pipe
4197  * programming when moving to new planes. To mitigate those types of problems,
4198  * this function adds a transition state that minimizes pipe usage before
4199  * programming the new configuration. When adding a new plane, the current
4200  * state requires the fewest pipes, so it is applied without splitting. When
4201  * removing a plane, the new state requires the fewest pipes, so it is applied
4202  * without splitting.
4203  *
4204  * Return:
4205  * Return false if something is wrong in the transition state.
4206  */
4207 static bool commit_minimal_transition_state_legacy(struct dc *dc,
4208                 struct dc_state *transition_base_context)
4209 {
4210         struct dc_state *transition_context;
4211         struct pipe_split_policy_backup policy;
4212         enum dc_status ret = DC_ERROR_UNEXPECTED;
4213         unsigned int i, j;
4214         unsigned int pipe_in_use = 0;
4215         bool subvp_in_use = false;
4216         bool odm_in_use = false;
4217
4218         /* check current pipes in use */
4219         for (i = 0; i < dc->res_pool->pipe_count; i++) {
4220                 struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i];
4221
4222                 if (pipe->plane_state)
4223                         pipe_in_use++;
4224         }
4225
4226         /* If SubVP is enabled and we are adding or removing planes from any main subvp
4227          * pipe, we must use the minimal transition.
4228          */
4229         for (i = 0; i < dc->res_pool->pipe_count; i++) {
4230                 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
4231
4232                 if (pipe->stream && dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) {
4233                         subvp_in_use = true;
4234                         break;
4235                 }
4236         }
4237
4238         /* If ODM is enabled and we are adding or removing planes from any ODM
4239          * pipe, we must use the minimal transition.
4240          */
4241         for (i = 0; i < dc->res_pool->pipe_count; i++) {
4242                 struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i];
4243
4244                 if (resource_is_pipe_type(pipe, OTG_MASTER)) {
4245                         odm_in_use = resource_get_odm_slice_count(pipe) > 1;
4246                         break;
4247                 }
4248         }
4249
4250         /* When the OS adds a new surface while all pipes are already in use for ODM
4251          * combine or MPC split, we need commit_minimal_transition_state to
4252          * transition safely. After the OS exits MPO and goes back to using ODM and
4253          * MPC split across all pipes, we need to call it again. Otherwise return true to skip.
4254          *
4255          * This reduces the scenarios that use dc_commit_state_no_check at flip time,
4256          * especially entering/exiting MPO while DCN still has enough resources.
4257          */
4258         if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use && !odm_in_use)
4259                 return true;
4260
4261         DC_LOG_DC("%s base = %s state, reason = %s\n", __func__,
4262                         dc->current_state == transition_base_context ? "current" : "new",
4263                         subvp_in_use ? "Subvp In Use" :
4264                         odm_in_use ? "ODM in Use" :
4265                         dc->debug.pipe_split_policy != MPC_SPLIT_AVOID ? "MPC in Use" :
4266                         "Unknown");
4267
4268         transition_context = create_minimal_transition_state(dc,
4269                         transition_base_context, &policy);
4270         if (transition_context) {
4271                 ret = dc_commit_state_no_check(dc, transition_context);
4272                 release_minimal_transition_state(dc, transition_context, &policy);
4273         }
4274
4275         if (ret != DC_OK) {
4276                 /* this should never happen */
4277                 BREAK_TO_DEBUGGER();
4278                 return false;
4279         }
4280
4281         /* force full surface update */
4282         for (i = 0; i < dc->current_state->stream_count; i++) {
4283                 for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
4284                         dc->current_state->stream_status[i].plane_states[j]->update_flags.raw = 0xFFFFFFFF;
4285                 }
4286         }
4287
4288         return true;
4289 }
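
/*
 * Illustration only: the early-out in the function above reduces to this
 * predicate -- a minimal transition is needed when every pipe is already in
 * use, or when SubVP phantom pipes or multi-slice ODM are active; otherwise
 * the transition is skipped and reported as a success.
 */
static bool minimal_transition_needed_sketch(unsigned int pipes_in_use,
		unsigned int pipe_count, bool subvp_in_use, bool odm_in_use)
{
	return pipes_in_use == pipe_count || subvp_in_use || odm_in_use;
}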
4290
4291 /**
4292  * update_seamless_boot_flags() - Helper function for updating seamless boot flags
4293  *
4294  * @dc: Current DC state
4295  * @context: New DC state to be programmed
4296  * @surface_count: Number of surfaces that have been updated
4297  * @stream: Corresponding stream to be updated in the current flip
4298  *
4299  * Updating seamless boot flags does not need to be part of the commit sequence. This
4300  * helper function will update the seamless boot flags on each flip (if required)
4301  * outside of the HW commit sequence (fast or slow).
4302  *
4303  * Return: void
4304  */
4305 static void update_seamless_boot_flags(struct dc *dc,
4306                 struct dc_state *context,
4307                 int surface_count,
4308                 struct dc_stream_state *stream)
4309 {
4310         if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
4311                 /* The seamless boot optimization flag keeps clocks and watermarks high
4312                  * until the first flip. After the first flip, optimization is required to lower
4313                  * bandwidth. Important to note that it is expected UEFI will
4314                  * only light up a single display on POST, therefore we only expect
4315                  * one stream with seamless boot flag set.
4316                  */
4317                 if (stream->apply_seamless_boot_optimization) {
4318                         stream->apply_seamless_boot_optimization = false;
4319
4320                         if (get_seamless_boot_stream_count(context) == 0)
4321                                 dc->optimized_required = true;
4322                 }
4323         }
4324 }
4325
4326 static void populate_fast_updates(struct dc_fast_update *fast_update,
4327                 struct dc_surface_update *srf_updates,
4328                 int surface_count,
4329                 struct dc_stream_update *stream_update)
4330 {
4331         int i = 0;
4332
4333         if (stream_update) {
4334                 fast_update[0].out_transfer_func = stream_update->out_transfer_func;
4335                 fast_update[0].output_csc_transform = stream_update->output_csc_transform;
4336         }
4337
4338         for (i = 0; i < surface_count; i++) {
4339                 fast_update[i].flip_addr = srf_updates[i].flip_addr;
4340                 fast_update[i].gamma = srf_updates[i].gamma;
4341                 fast_update[i].gamut_remap_matrix = srf_updates[i].gamut_remap_matrix;
4342                 fast_update[i].input_csc_color_matrix = srf_updates[i].input_csc_color_matrix;
4343                 fast_update[i].coeff_reduction_factor = srf_updates[i].coeff_reduction_factor;
4344         }
4345 }
4346
4347 static bool fast_updates_exist(struct dc_fast_update *fast_update, int surface_count)
4348 {
4349         int i;
4350
4351         if (fast_update[0].out_transfer_func ||
4352                 fast_update[0].output_csc_transform)
4353                 return true;
4354
4355         for (i = 0; i < surface_count; i++) {
4356                 if (fast_update[i].flip_addr ||
4357                                 fast_update[i].gamma ||
4358                                 fast_update[i].gamut_remap_matrix ||
4359                                 fast_update[i].input_csc_color_matrix ||
4360                                 fast_update[i].coeff_reduction_factor)
4361                         return true;
4362         }
4363
4364         return false;
4365 }
4366
4367 static bool full_update_required(struct dc *dc,
4368                 struct dc_surface_update *srf_updates,
4369                 int surface_count,
4370                 struct dc_stream_update *stream_update,
4371                 struct dc_stream_state *stream)
4372 {
4373
4374         int i;
4375         struct dc_stream_status *stream_status;
4376         const struct dc_state *context = dc->current_state;
4377
4378         for (i = 0; i < surface_count; i++) {
4379                 if (srf_updates &&
4380                                 (srf_updates[i].plane_info ||
4381                                 srf_updates[i].scaling_info ||
4382                                 (srf_updates[i].hdr_mult.value &&
4383                                 srf_updates[i].hdr_mult.value != srf_updates->surface->hdr_mult.value) ||
4384                                 srf_updates[i].in_transfer_func ||
4385                                 srf_updates[i].func_shaper ||
4386                                 srf_updates[i].lut3d_func ||
4387                                 srf_updates[i].surface->force_full_update ||
4388                                 (srf_updates[i].flip_addr &&
4389                                 srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) ||
4390                                 !is_surface_in_context(context, srf_updates[i].surface)))
4391                         return true;
4392         }
4393
4394         if (stream_update &&
4395                         (((stream_update->src.height != 0 && stream_update->src.width != 0) ||
4396                         (stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
4397                         stream_update->integer_scaling_update) ||
4398                         stream_update->hdr_static_metadata ||
4399                         stream_update->abm_level ||
4400                         stream_update->periodic_interrupt ||
4401                         stream_update->vrr_infopacket ||
4402                         stream_update->vsc_infopacket ||
4403                         stream_update->vsp_infopacket ||
4404                         stream_update->hfvsif_infopacket ||
4405                         stream_update->vtem_infopacket ||
4406                         stream_update->adaptive_sync_infopacket ||
4407                         stream_update->dpms_off ||
4408                         stream_update->allow_freesync ||
4409                         stream_update->vrr_active_variable ||
4410                         stream_update->vrr_active_fixed ||
4411                         stream_update->gamut_remap ||
4412                         stream_update->output_color_space ||
4413                         stream_update->dither_option ||
4414                         stream_update->wb_update ||
4415                         stream_update->dsc_config ||
4416                         stream_update->mst_bw_update ||
4417                         stream_update->func_shaper ||
4418                         stream_update->lut3d_func ||
4419                         stream_update->pending_test_pattern ||
4420                         stream_update->crtc_timing_adjust))
4421                 return true;
4422
4423         if (stream) {
4424                 stream_status = dc_stream_get_status(stream);
4425                 if (stream_status == NULL || stream_status->plane_count != surface_count)
4426                         return true;
4427         }
4428         if (dc->idle_optimizations_allowed)
4429                 return true;
4430
4431         return false;
4432 }
4433
4434 static bool fast_update_only(struct dc *dc,
4435                 struct dc_fast_update *fast_update,
4436                 struct dc_surface_update *srf_updates,
4437                 int surface_count,
4438                 struct dc_stream_update *stream_update,
4439                 struct dc_stream_state *stream)
4440 {
4441         return fast_updates_exist(fast_update, surface_count)
4442                         && !full_update_required(dc, srf_updates, surface_count, stream_update, stream);
4443 }
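
/*
 * Sketch of how the helpers above combine at a call site such as
 * dc_update_planes_and_stream() below: populate the fast-update array first,
 * then take the fast commit path only when fast updates exist, nothing forces
 * a full update, and the legacy fast-update path is not enabled.
 */
static bool take_fast_path_sketch(struct dc *dc,
		struct dc_surface_update *srf_updates, int surface_count,
		struct dc_stream_update *stream_update, struct dc_stream_state *stream)
{
	struct dc_fast_update fast_update[MAX_SURFACES] = {0};

	populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
	return fast_update_only(dc, fast_update, srf_updates, surface_count,
			stream_update, stream) &&
			!dc->debug.enable_legacy_fast_update;
}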
4444
4445 bool dc_update_planes_and_stream(struct dc *dc,
4446                 struct dc_surface_update *srf_updates, int surface_count,
4447                 struct dc_stream_state *stream,
4448                 struct dc_stream_update *stream_update)
4449 {
4450         struct dc_state *context;
4451         enum surface_update_type update_type;
4452         int i;
4453         struct dc_fast_update fast_update[MAX_SURFACES] = {0};
4454
4455         /* In cases where MPO and split or ODM are used, transitions can
4456          * cause underflow. Apply stream configuration with minimal pipe
4457          * split first to avoid unsupported transitions for active pipes.
4458          */
4459         bool force_minimal_pipe_splitting = false;
4460         bool is_plane_addition = false;
4461         bool is_fast_update_only;
4462
4463         dc_exit_ips_for_hw_access(dc);
4464
4465         populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
4466         is_fast_update_only = fast_update_only(dc, fast_update, srf_updates,
4467                         surface_count, stream_update, stream);
4468         force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes(
4469                         dc,
4470                         stream,
4471                         srf_updates,
4472                         surface_count,
4473                         &is_plane_addition);
4474
4475         /* on plane addition, minimal state is the current one */
4476         if (force_minimal_pipe_splitting && is_plane_addition &&
4477                 !commit_minimal_transition_state_legacy(dc, dc->current_state))
4478                 return false;
4479
4480         if (!update_planes_and_stream_state(
4481                         dc,
4482                         srf_updates,
4483                         surface_count,
4484                         stream,
4485                         stream_update,
4486                         &update_type,
4487                         &context))
4488                 return false;
4489
4490         /* on plane removal, minimal state is the new one */
4491         if (force_minimal_pipe_splitting && !is_plane_addition) {
4492                 if (!commit_minimal_transition_state_legacy(dc, context)) {
4493                         dc_state_release(context);
4494                         return false;
4495                 }
4496                 update_type = UPDATE_TYPE_FULL;
4497         }
4498
4499         if (dc->hwss.is_pipe_topology_transition_seamless &&
4500                         !dc->hwss.is_pipe_topology_transition_seamless(
4501                                         dc, dc->current_state, context)) {
4502                 commit_minimal_transition_state(dc,
4503                                 context, stream);
4504         }
4505         update_seamless_boot_flags(dc, context, surface_count, stream);
4506         if (is_fast_update_only && !dc->debug.enable_legacy_fast_update) {
4507                 commit_planes_for_stream_fast(dc,
4508                                 srf_updates,
4509                                 surface_count,
4510                                 stream,
4511                                 stream_update,
4512                                 update_type,
4513                                 context);
4514         } else {
4515                 if (!stream_update &&
4516                                 dc->hwss.is_pipe_topology_transition_seamless &&
4517                                 !dc->hwss.is_pipe_topology_transition_seamless(
4518                                                 dc, dc->current_state, context)) {
4519                         DC_LOG_ERROR("performing non-seamless pipe topology transition with surface only update!\n");
4520                         BREAK_TO_DEBUGGER();
4521                 }
4522                 commit_planes_for_stream(
4523                                 dc,
4524                                 srf_updates,
4525                                 surface_count,
4526                                 stream,
4527                                 stream_update,
4528                                 update_type,
4529                                 context);
4530         }
4531
4532         if (dc->current_state != context) {
4533
4534                 /* Since memory free requires elevated IRQL, an interrupt
4535                  * request is generated by mem free. If this happens
4536                  * between freeing and reassigning the context, our vsync
4537                  * interrupt will call into dc and cause a memory
4538                  * corruption BSOD. Hence, we first reassign the context,
4539                  * then free the old context.
4540                  */
4541
4542                 struct dc_state *old = dc->current_state;
4543
4544                 dc->current_state = context;
4545                 dc_state_release(old);
4546
4547                 // clear any forced full updates
4548                 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4549                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
4550
4551                         if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
4552                                 pipe_ctx->plane_state->force_full_update = false;
4553                 }
4554         }
4555         return true;
4556 }
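
/*
 * Minimal sketch of the swap-before-release ordering explained in the comment
 * inside dc_update_planes_and_stream() above: publish the new context pointer
 * first so a vsync interrupt arriving mid-switch never dereferences a freed
 * state, then drop the reference on the old context.
 */
static void swap_current_state_sketch(struct dc *dc, struct dc_state *context)
{
	struct dc_state *old = dc->current_state;

	dc->current_state = context;	/* publish the new context first */
	dc_state_release(old);		/* then release the old one */
}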
4557
4558 void dc_commit_updates_for_stream(struct dc *dc,
4559                 struct dc_surface_update *srf_updates,
4560                 int surface_count,
4561                 struct dc_stream_state *stream,
4562                 struct dc_stream_update *stream_update,
4563                 struct dc_state *state)
4564 {
4565         const struct dc_stream_status *stream_status;
4566         enum surface_update_type update_type;
4567         struct dc_state *context;
4568         struct dc_context *dc_ctx = dc->ctx;
4569         int i, j;
4570         struct dc_fast_update fast_update[MAX_SURFACES] = {0};
4571
4572         dc_exit_ips_for_hw_access(dc);
4573
4574         populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
4575         stream_status = dc_stream_get_status(stream);
4576         context = dc->current_state;
4577
4578         update_type = dc_check_update_surfaces_for_stream(
4579                                 dc, srf_updates, surface_count, stream_update, stream_status);
4580
4581         /* TODO: Since changing the commit sequence can have a huge impact,
4582          * we decided to only enable it for DCN3x. However, as soon as
4583          * we get more confident about this change we'll need to enable
4584          * the new sequence for all ASICs.
4585          */
4586         if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
4587                 /*
4588                  * Previous frame finished and HW is ready for optimization.
4589                  */
4590                 if (update_type == UPDATE_TYPE_FAST)
4591                         dc_post_update_surfaces_to_stream(dc);
4592
4593                 dc_update_planes_and_stream(dc, srf_updates,
4594                                             surface_count, stream,
4595                                             stream_update);
4596                 return;
4597         }
4598
4599         if (update_type >= update_surface_trace_level)
4600                 update_surface_trace(dc, srf_updates, surface_count);
4601
4602
4603         if (update_type >= UPDATE_TYPE_FULL) {
4604
4605                 /* initialize scratch memory for building context */
4606                 context = dc_state_create_copy(state);
4607                 if (context == NULL) {
4608                         DC_ERROR("Failed to allocate new validate context!\n");
4609                         return;
4610                 }
4611
4612                 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4613                         struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
4614                         struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
4615
4616                         if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
4617                                 new_pipe->plane_state->force_full_update = true;
4618                 }
4619         } else if (update_type == UPDATE_TYPE_FAST) {
4620                 /*
4621                  * Previous frame finished and HW is ready for optimization.
4622                  */
4623                 dc_post_update_surfaces_to_stream(dc);
4624         }
4625
4626
4627         for (i = 0; i < surface_count; i++) {
4628                 struct dc_plane_state *surface = srf_updates[i].surface;
4629
4630                 copy_surface_update_to_plane(surface, &srf_updates[i]);
4631
4632                 if (update_type >= UPDATE_TYPE_MED) {
4633                         for (j = 0; j < dc->res_pool->pipe_count; j++) {
4634                                 struct pipe_ctx *pipe_ctx =
4635                                         &context->res_ctx.pipe_ctx[j];
4636
4637                                 if (pipe_ctx->plane_state != surface)
4638                                         continue;
4639
4640                                 resource_build_scaling_params(pipe_ctx);
4641                         }
4642                 }
4643         }
4644
4645         copy_stream_update_to_stream(dc, context, stream, stream_update);
4646
4647         if (update_type >= UPDATE_TYPE_FULL) {
4648                 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
4649                         DC_ERROR("Mode validation failed for stream update!\n");
4650                         dc_state_release(context);
4651                         return;
4652                 }
4653         }
4654
4655         TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
4656
4657         update_seamless_boot_flags(dc, context, surface_count, stream);
4658         if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) &&
4659                         !dc->debug.enable_legacy_fast_update) {
4660                 commit_planes_for_stream_fast(dc,
4661                                 srf_updates,
4662                                 surface_count,
4663                                 stream,
4664                                 stream_update,
4665                                 update_type,
4666                                 context);
4667         } else {
4668                 commit_planes_for_stream(
4669                                 dc,
4670                                 srf_updates,
4671                                 surface_count,
4672                                 stream,
4673                                 stream_update,
4674                                 update_type,
4675                                 context);
4676         }
4677         /* update current_state */
4678         if (dc->current_state != context) {
4679
4680                 struct dc_state *old = dc->current_state;
4681
4682                 dc->current_state = context;
4683                 dc_state_release(old);
4684
4685                 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4686                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
4687
4688                         if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
4689                                 pipe_ctx->plane_state->force_full_update = false;
4690                 }
4691         }
4692
4693         /* Legacy optimization path for DCE. */
4694         if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) {
4695                 dc_post_update_surfaces_to_stream(dc);
4696                 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
4697         }
4698
4699         return;
4700
4701 }
4702
4703 uint8_t dc_get_current_stream_count(struct dc *dc)
4704 {
4705         return dc->current_state->stream_count;
4706 }
4707
4708 struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
4709 {
4710         if (i < dc->current_state->stream_count)
4711                 return dc->current_state->streams[i];
4712         return NULL;
4713 }
4714
4715 enum dc_irq_source dc_interrupt_to_irq_source(
4716                 struct dc *dc,
4717                 uint32_t src_id,
4718                 uint32_t ext_id)
4719 {
4720         return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
4721 }
4722
4723 /*
4724  * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
4725  */
4726 bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
4727 {
4728
4729         if (dc == NULL)
4730                 return false;
4731
4732         return dal_irq_service_set(dc->res_pool->irqs, src, enable);
4733 }
4734
4735 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
4736 {
4737         dal_irq_service_ack(dc->res_pool->irqs, src);
4738 }
4739
4740 void dc_power_down_on_boot(struct dc *dc)
4741 {
4742         if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW &&
4743                         dc->hwss.power_down_on_boot)
4744                 dc->hwss.power_down_on_boot(dc);
4745 }
4746
4747 void dc_set_power_state(
4748         struct dc *dc,
4749         enum dc_acpi_cm_power_state power_state)
4750 {
4751         if (!dc->current_state)
4752                 return;
4753
4754         switch (power_state) {
4755         case DC_ACPI_CM_POWER_STATE_D0:
4756                 dc_state_construct(dc, dc->current_state);
4757
4758                 dc_exit_ips_for_hw_access(dc);
4759
4760                 dc_z10_restore(dc);
4761
4762                 dc->hwss.init_hw(dc);
4763
4764                 if (dc->hwss.init_sys_ctx != NULL &&
4765                         dc->vm_pa_config.valid) {
4766                         dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
4767                 }
4768
4769                 break;
4770         default:
4771                 ASSERT(dc->current_state->stream_count == 0);
4772
4773                 dc_state_destruct(dc->current_state);
4774
4775                 break;
4776         }
4777 }
4778
4779 void dc_resume(struct dc *dc)
4780 {
4781         uint32_t i;
4782
4783         for (i = 0; i < dc->link_count; i++)
4784                 dc->link_srv->resume(dc->links[i]);
4785 }
4786
4787 bool dc_is_dmcu_initialized(struct dc *dc)
4788 {
4789         struct dmcu *dmcu = dc->res_pool->dmcu;
4790
4791         if (dmcu)
4792                 return dmcu->funcs->is_dmcu_initialized(dmcu);
4793         return false;
4794 }
4795
4796 void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
4797 {
4798         info->displayClock                              = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
4799         info->engineClock                               = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
4800         info->memoryClock                               = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
4801         info->maxSupportedDppClock              = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
4802         info->dppClock                                  = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
4803         info->socClock                                  = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
4804         info->dcfClockDeepSleep                 = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
4805         info->fClock                                    = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
4806         info->phyClock                                  = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
4807 }
4808 enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
4809 {
4810         if (dc->hwss.set_clock)
4811                 return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping);
4812         return DC_ERROR_UNEXPECTED;
4813 }
4814 void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg)
4815 {
4816         if (dc->hwss.get_clock)
4817                 dc->hwss.get_clock(dc, clock_type, clock_cfg);
4818 }
4819
4820 /* enable/disable eDP PSR without specifying a stream for eDP */
4821 bool dc_set_psr_allow_active(struct dc *dc, bool enable)
4822 {
4823         int i;
4824         bool allow_active;
4825
4826         for (i = 0; i < dc->current_state->stream_count; i++) {
4827                 struct dc_link *link;
4828                 struct dc_stream_state *stream = dc->current_state->streams[i];
4829
4830                 link = stream->link;
4831                 if (!link)
4832                         continue;
4833
4834                 if (link->psr_settings.psr_feature_enabled) {
4835                         if (enable && !link->psr_settings.psr_allow_active) {
4836                                 allow_active = true;
4837                                 if (!dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL))
4838                                         return false;
4839                         } else if (!enable && link->psr_settings.psr_allow_active) {
4840                                 allow_active = false;
4841                                 if (!dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL))
4842                                         return false;
4843                         }
4844                 }
4845         }
4846
4847         return true;
4848 }
4849
4850 /* enable/disable eDP Replay without specifying a stream for eDP */
4851 bool dc_set_replay_allow_active(struct dc *dc, bool active)
4852 {
4853         int i;
4854         bool allow_active;
4855
4856         for (i = 0; i < dc->current_state->stream_count; i++) {
4857                 struct dc_link *link;
4858                 struct dc_stream_state *stream = dc->current_state->streams[i];
4859
4860                 link = stream->link;
4861                 if (!link)
4862                         continue;
4863
4864                 if (link->replay_settings.replay_feature_enabled) {
4865                         if (active && !link->replay_settings.replay_allow_active) {
4866                                 allow_active = true;
4867                                 if (!dc_link_set_replay_allow_active(link, &allow_active,
4868                                         false, false, NULL))
4869                                         return false;
4870                         } else if (!active && link->replay_settings.replay_allow_active) {
4871                                 allow_active = false;
4872                                 if (!dc_link_set_replay_allow_active(link, &allow_active,
4873                                         true, false, NULL))
4874                                         return false;
4875                         }
4876                 }
4877         }
4878
4879         return true;
4880 }
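
/*
 * dc_set_psr_allow_active() and dc_set_replay_allow_active() above share the
 * same toggle idiom, sketched here with a hypothetical apply() callback: only
 * call into the link layer when the requested state differs from the cached
 * per-link state, so redundant enables/disables become no-ops.
 */
static bool toggle_if_changed_sketch(bool enable, bool currently_active,
		bool (*apply)(bool allow_active))
{
	if (enable == currently_active)
		return true;	/* already in the requested state */
	return apply(enable);
}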
4881
4882 void dc_allow_idle_optimizations(struct dc *dc, bool allow)
4883 {
4884         if (dc->debug.disable_idle_power_optimizations)
4885                 return;
4886
4887         if (dc->caps.ips_support && (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL))
4888                 return;
4889
4890         if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present)
4891                 if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr))
4892                         return;
4893
4894         if (allow == dc->idle_optimizations_allowed)
4895                 return;
4896
4897         if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow))
4898                 dc->idle_optimizations_allowed = allow;
4899 }
4900
4901 void dc_exit_ips_for_hw_access(struct dc *dc)
4902 {
4903         if (dc->caps.ips_support)
4904                 dc_allow_idle_optimizations(dc, false);
4905 }
4906
4907 bool dc_dmub_is_ips_idle_state(struct dc *dc)
4908 {
4909         if (dc->debug.disable_idle_power_optimizations)
4910                 return false;
4911
4912         if (!dc->caps.ips_support || (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL))
4913                 return false;
4914
4915         if (!dc->ctx->dmub_srv)
4916                 return false;
4917
4918         return dc->ctx->dmub_srv->idle_allowed;
4919 }
4920
4921 /* set min and max memory clock to lowest and highest DPM level, respectively */
4922 void dc_unlock_memory_clock_frequency(struct dc *dc)
4923 {
4924         if (dc->clk_mgr->funcs->set_hard_min_memclk)
4925                 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false);
4926
4927         if (dc->clk_mgr->funcs->set_hard_max_memclk)
4928                 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
4929 }
4930
4931 /* set min memory clock to the min required for current mode, max to maxDPM */
4932 void dc_lock_memory_clock_frequency(struct dc *dc)
4933 {
4934         if (dc->clk_mgr->funcs->get_memclk_states_from_smu)
4935                 dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr);
4936
4937         if (dc->clk_mgr->funcs->set_hard_min_memclk)
4938                 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true);
4939
4940         if (dc->clk_mgr->funcs->set_hard_max_memclk)
4941                 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
4942 }
4943
4944 static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz)
4945 {
4946         struct dc_state *context = dc->current_state;
4947         struct hubp *hubp;
4948         struct pipe_ctx *pipe;
4949         int i;
4950
4951         for (i = 0; i < dc->res_pool->pipe_count; i++) {
4952                 pipe = &context->res_ctx.pipe_ctx[i];
4953
4954                 if (pipe->stream != NULL) {
4955                         dc->hwss.disable_pixel_data(dc, pipe, true);
4956
4957                         // wait for double buffer
4958                         pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
4959                         pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK);
4960                         pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
4961
4962                         hubp = pipe->plane_res.hubp;
4963                         hubp->funcs->set_blank_regs(hubp, true);
4964                 }
4965         }
4966
4967         dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz);
4968         dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz);
4969
4970         for (i = 0; i < dc->res_pool->pipe_count; i++) {
4971                 pipe = &context->res_ctx.pipe_ctx[i];
4972
4973                 if (pipe->stream != NULL) {
4974                         dc->hwss.disable_pixel_data(dc, pipe, false);
4975
4976                         hubp = pipe->plane_res.hubp;
4977                         hubp->funcs->set_blank_regs(hubp, false);
4978                 }
4979         }
4980 }
4981
4982
4983 /**
4984  * dc_enable_dcmode_clk_limit() - lower clocks in dc (battery) mode
4985  * @dc: pointer to dc of the dm calling this
4986  * @enable: True = transition to DC mode, false = transition back to AC mode
4987  *
4988  * Some SoCs define additional clock limits when in DC mode; the DM should
4989  * invoke this function when the platform undergoes a power source transition
4990  * so DC can apply/unapply the limit. This interface may be disruptive to
4991  * the onscreen content.
4992  *
4993  * Context: Triggered by OS through DM interface, or manually by escape calls.
4994  * Need to hold a dclock when doing so.
4995  *
4996  * Return: none (void function)
4997  *
4998  */
4999 void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable)
5000 {
5001         unsigned int softMax = 0, maxDPM = 0, funcMin = 0, i;
5002         bool p_state_change_support;
5003
5004         if (!dc->config.dc_mode_clk_limit_support)
5005                 return;
5006
5007         softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk;
5008         for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries; i++) {
5009                 if (dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz > maxDPM)
5010                         maxDPM = dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz;
5011         }
5012         funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000;
5013         p_state_change_support = dc->clk_mgr->clks.p_state_change_support;
5014
5015         if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) {
5016                 if (p_state_change_support) {
5017                         if (funcMin <= softMax)
5018                                 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax);
5019                         // else: No-Op
5020                 } else {
5021                         if (funcMin <= softMax)
5022                                 blank_and_force_memclk(dc, true, softMax);
5023                         // else: No-Op
5024                 }
5025         } else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) {
5026                 if (p_state_change_support) {
5027                         if (funcMin <= softMax)
5028                                 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM);
5029                         // else: No-Op
5030                 } else {
5031                         if (funcMin <= softMax)
5032                                 blank_and_force_memclk(dc, true, maxDPM);
5033                         // else: No-Op
5034                 }
5035         }
5036         dc->clk_mgr->dc_mode_softmax_enabled = enable;
5037 }
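
/*
 * Illustrative sketch only (not part of this file): how a DM-side power
 * source handler might drive the DC-mode clock limit. The handler name and
 * the on_battery flag are hypothetical; per the kernel-doc above, the caller
 * is assumed to hold the dc lock.
 */
static void __maybe_unused example_dm_power_source_changed(struct dc *dc, bool on_battery)
{
        /* true applies the DC-mode soft-max memclk; false restores max DPM */
        dc_enable_dcmode_clk_limit(dc, on_battery);
}
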
5038 bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane,
5039                 struct dc_cursor_attributes *cursor_attr)
5040 {
5041         if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, plane, cursor_attr))
5042                 return true;
5043         return false;
5044 }
5045
5046 /* cleanup on driver unload */
5047 void dc_hardware_release(struct dc *dc)
5048 {
5049         dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(dc);
5050
5051         if (dc->hwss.hardware_release)
5052                 dc->hwss.hardware_release(dc);
5053 }
5054
5055 void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc)
5056 {
5057         if (dc->current_state)
5058                 dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down = true;
5059 }
5060
5061 /**
5062  * dc_is_dmub_outbox_supported - Check if DMUB firmware supports outbox notifications
5063  *
5064  * @dc: [in] dc structure
5065  *
5066  * Checks whether the DMUB FW supports outbox notifications. If supported,
5067  * the DM should register the outbox interrupt prior to actually enabling
5068  * interrupts via dc_enable_dmub_outbox().
5069  *
5070  * Return:
5071  * True if DMUB FW supports outbox notifications, False otherwise
5072  */
5073 bool dc_is_dmub_outbox_supported(struct dc *dc)
5074 {
5075         switch (dc->ctx->asic_id.chip_family) {
5076
5077         case FAMILY_YELLOW_CARP:
5078                 /* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */
5079                 if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
5080                     !dc->debug.dpia_debug.bits.disable_dpia)
5081                         return true;
5082                 break;
5083
5084         case AMDGPU_FAMILY_GC_11_0_1:
5085         case AMDGPU_FAMILY_GC_11_5_0:
5086                 if (!dc->debug.dpia_debug.bits.disable_dpia)
5087                         return true;
5088                 break;
5089
5090         default:
5091                 break;
5092         }
5093
5094         /* dmub aux needs dmub notifications to be enabled */
5095         return dc->debug.enable_dmub_aux_for_legacy_ddc;
5096
5097 }
5098
5099 /**
5100  * dc_enable_dmub_notifications - Check if dmub fw supports outbox notifications
5101  *
5102  * @dc: [in] dc structure
5103  *
5104  * Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox
5105  * notifications. All DMs shall switch to dc_is_dmub_outbox_supported.  This
5106  * API shall be removed after switching.
5107  *
5108  * Return:
5109  * True if DMUB FW supports outbox notifications, False otherwise
5110  */
5111 bool dc_enable_dmub_notifications(struct dc *dc)
5112 {
5113         return dc_is_dmub_outbox_supported(dc);
5114 }
5115
5116 /**
5117  * dc_enable_dmub_outbox - Enables DMUB unsolicited notification
5118  *
5119  * @dc: [in] dc structure
5120  *
5121  * Enables DMUB unsolicited notifications to x86 via outbox.
5122  */
5123 void dc_enable_dmub_outbox(struct dc *dc)
5124 {
5125         struct dc_context *dc_ctx = dc->ctx;
5126
5127         dmub_enable_outbox_notification(dc_ctx->dmub_srv);
5128         DC_LOG_DC("%s: dmub outbox notifications enabled\n", __func__);
5129 }
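
/*
 * Illustrative sketch only (not part of this file): the intended DM bring-up
 * order for DMUB outbox notifications. The interrupt-registration step is
 * DM-specific and shown only as a placeholder comment.
 */
static void __maybe_unused example_dm_setup_dmub_outbox(struct dc *dc)
{
        if (!dc_is_dmub_outbox_supported(dc))
                return;

        /* 1. Register the DM's outbox interrupt handler here (not shown). */

        /* 2. Only then let DMUB start sending unsolicited notifications. */
        dc_enable_dmub_outbox(dc);
}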
5130
5131 /**
5132  * dc_process_dmub_aux_transfer_async - Submits aux command to dmub via inbox message
5133  *                                      Sets port index appropriately for legacy DDC
5134  * @dc: dc structure
5135  * @link_index: link index
5136  * @payload: aux payload
5137  *
5138  * Return: true if successful, false on failure
5139  */
5140 bool dc_process_dmub_aux_transfer_async(struct dc *dc,
5141                                 uint32_t link_index,
5142                                 struct aux_payload *payload)
5143 {
5144         uint8_t action;
5145         union dmub_rb_cmd cmd = {0};
5146
5147         ASSERT(payload->length <= 16);
5148
5149         cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS;
5150         cmd.dp_aux_access.header.payload_bytes = 0;
5151         /* For dpia, ddc_pin is set to NULL */
5152         if (!dc->links[link_index]->ddc->ddc_pin)
5153                 cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_DPIA;
5154         else
5155                 cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC;
5156
5157         cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst;
5158         cmd.dp_aux_access.aux_control.sw_crc_enabled = 0;
5159         cmd.dp_aux_access.aux_control.timeout = 0;
5160         cmd.dp_aux_access.aux_control.dpaux.address = payload->address;
5161         cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = payload->i2c_over_aux;
5162         cmd.dp_aux_access.aux_control.dpaux.length = payload->length;
5163
5164         /* set aux action */
5165         if (payload->i2c_over_aux) {
5166                 if (payload->write) {
5167                         if (payload->mot)
5168                                 action = DP_AUX_REQ_ACTION_I2C_WRITE_MOT;
5169                         else
5170                                 action = DP_AUX_REQ_ACTION_I2C_WRITE;
5171                 } else {
5172                         if (payload->mot)
5173                                 action = DP_AUX_REQ_ACTION_I2C_READ_MOT;
5174                         else
5175                                 action = DP_AUX_REQ_ACTION_I2C_READ;
5176                 }
5177         } else {
5178                 if (payload->write)
5179                         action = DP_AUX_REQ_ACTION_DPCD_WRITE;
5180                 else
5181                         action = DP_AUX_REQ_ACTION_DPCD_READ;
5182         }
5183
5184         cmd.dp_aux_access.aux_control.dpaux.action = action;
5185
5186         if (payload->length && payload->write) {
5187                 memcpy(cmd.dp_aux_access.aux_control.dpaux.data,
5188                         payload->data,
5189                         payload->length
5190                         );
5191         }
5192
5193         dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
5194
5195         return true;
5196 }
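
/*
 * Illustrative sketch only (not part of this file): building a minimal DPCD
 * read for dc_process_dmub_aux_transfer_async(). The 0x200 (DP_SINK_COUNT)
 * address and single-byte length are example inputs; the reply data arrives
 * later through a DMUB outbox AUX notification.
 */
static bool __maybe_unused example_dm_dpcd_read_async(struct dc *dc,
                                                      uint32_t link_index,
                                                      uint8_t *buf)
{
        struct aux_payload payload = {0};

        payload.address = 0x200; /* DP_SINK_COUNT, example address only */
        payload.data = buf;
        payload.length = 1;
        payload.write = false;
        payload.i2c_over_aux = false;

        return dc_process_dmub_aux_transfer_async(dc, link_index, &payload);
}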
5197
5198 uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
5199                                             uint8_t dpia_port_index)
5200 {
5201         uint8_t index, link_index = 0xFF;
5202
5203         for (index = 0; index < dc->link_count; index++) {
5204                 /* ddc_hw_inst has dpia port index for dpia links
5205                  * and ddc instance for legacy links
5206                  */
5207                 if (!dc->links[index]->ddc->ddc_pin) {
5208                         if (dc->links[index]->ddc_hw_inst == dpia_port_index) {
5209                                 link_index = index;
5210                                 break;
5211                         }
5212                 }
5213         }
5214         ASSERT(link_index != 0xFF);
5215         return link_index;
5216 }
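
/*
 * Illustrative sketch only (not part of this file): mapping a DPIA port
 * index (e.g. taken from a DMUB notification in the DM's handler) back to
 * its dc_link. The helper name is hypothetical.
 */
static __maybe_unused struct dc_link *example_dm_link_from_dpia_port(const struct dc *dc,
                                                                     uint8_t dpia_port_index)
{
        uint8_t link_index = get_link_index_from_dpia_port_index(dc, dpia_port_index);

        return link_index == 0xFF ? NULL : dc->links[link_index];
}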
5217
5218 /**
5219  * dc_process_dmub_set_config_async - Submits set_config command
5220  *
5221  * @dc: [in] dc structure
5222  * @link_index: [in] link index
5223  * @payload: [in] aux payload
5224  * @notify: [out] set_config immediate reply
5225  *
5226  * Submits set_config command to dmub via inbox message.
5227  *
5228  * Return:
5229  * True if successful, False if failure
5230  */
5231 bool dc_process_dmub_set_config_async(struct dc *dc,
5232                                 uint32_t link_index,
5233                                 struct set_config_cmd_payload *payload,
5234                                 struct dmub_notification *notify)
5235 {
5236         union dmub_rb_cmd cmd = {0};
5237         bool is_cmd_complete = true;
5238
5239         /* prepare SET_CONFIG command */
5240         cmd.set_config_access.header.type = DMUB_CMD__DPIA;
5241         cmd.set_config_access.header.sub_type = DMUB_CMD__DPIA_SET_CONFIG_ACCESS;
5242
5243         cmd.set_config_access.set_config_control.instance = dc->links[link_index]->ddc_hw_inst;
5244         cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type;
5245         cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data;
5246
5247         if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) {
5248                 /* command is not processed by dmub */
5249                 notify->sc_status = SET_CONFIG_UNKNOWN_ERROR;
5250                 return is_cmd_complete;
5251         }
5252
5253         /* command processed by dmub, if ret_status is 1, it is completed instantly */
5254         if (cmd.set_config_access.header.ret_status == 1)
5255                 notify->sc_status = cmd.set_config_access.set_config_control.immed_status;
5256         else
5257                 /* cmd pending, will receive notification via outbox */
5258                 is_cmd_complete = false;
5259
5260         return is_cmd_complete;
5261 }
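
/*
 * Illustrative sketch only (not part of this file): issuing a SET_CONFIG
 * request and telling an immediate reply apart from a deferred one. The
 * msg_type/msg_data values are placeholders, not real SET_CONFIG opcodes.
 */
static void __maybe_unused example_dm_send_set_config(struct dc *dc, uint32_t link_index)
{
        struct set_config_cmd_payload payload = {0};
        struct dmub_notification notify = {0};

        payload.msg_type = 0; /* placeholder message type */
        payload.msg_data = 0; /* placeholder message data */

        if (dc_process_dmub_set_config_async(dc, link_index, &payload, &notify)) {
                /* Completed instantly; notify.sc_status holds the reply. */
        } else {
                /* Pending; the final status arrives via an outbox notification. */
        }
}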
5262
5263 /**
5264  * dc_process_dmub_set_mst_slots - Submits MST slot allocation
5265  *
5266  * @dc: [in] dc structure
5267  * @link_index: [in] link index
5268  * @mst_alloc_slots: [in] mst slots to be allotted
5269  * @mst_slots_in_use: [out] mst slots in use returned in failure case
5270  *
5271  * Submits mst slot allocation command to dmub via inbox message
5272  *
5273  * Return:
5274  * DC_OK if successful; DC_ERROR_UNEXPECTED, DC_FAIL_UNSUPPORTED_1 or DC_NOT_SUPPORTED on failure
5275  */
5276 enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
5277                                 uint32_t link_index,
5278                                 uint8_t mst_alloc_slots,
5279                                 uint8_t *mst_slots_in_use)
5280 {
5281         union dmub_rb_cmd cmd = {0};
5282
5283         /* prepare MST_ALLOC_SLOTS command */
5284         cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA;
5285         cmd.set_mst_alloc_slots.header.sub_type = DMUB_CMD__DPIA_MST_ALLOC_SLOTS;
5286
5287         cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst;
5288         cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots;
5289
5290         if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
5291                 /* command is not processed by dmub */
5292                 return DC_ERROR_UNEXPECTED;
5293
5294         /* command was processed by dmub; any ret_status other than 1 is an error */
5295         if (cmd.set_mst_alloc_slots.header.ret_status != 1)
5296                 /* command processing error */
5297                 return DC_ERROR_UNEXPECTED;
5298
5299         /* command processed and we have a status of 2, mst not enabled in dpia */
5300         if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 2)
5301                 return DC_FAIL_UNSUPPORTED_1;
5302
5303         /* previously configured mst alloc and used slots did not match */
5304         if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 3) {
5305                 *mst_slots_in_use = cmd.set_mst_alloc_slots.mst_slots_control.mst_slots_in_use;
5306                 return DC_NOT_SUPPORTED;
5307         }
5308
5309         return DC_OK;
5310 }
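
/*
 * Illustrative sketch only (not part of this file): interpreting the status
 * codes documented above. The requested slot count (4) is arbitrary.
 */
static void __maybe_unused example_dm_alloc_mst_slots(const struct dc *dc,
                                                      uint32_t link_index)
{
        uint8_t slots_in_use = 0;
        enum dc_status status;

        status = dc_process_dmub_set_mst_slots(dc, link_index, 4, &slots_in_use);

        if (status == DC_FAIL_UNSUPPORTED_1) {
                /* MST is not enabled in the DPIA. */
        } else if (status == DC_NOT_SUPPORTED) {
                /* Allocation mismatch; slots_in_use reports the used slots. */
        } else if (status != DC_OK) {
                /* DMUB did not process the command or reported an error. */
        }
}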
5311
5312 /**
5313  * dc_process_dmub_dpia_hpd_int_enable - Submits DPIA HPD interrupt enable
5314  *
5315  * @dc: [in] dc structure
5316  * @hpd_int_enable: [in] 1 for hpd int enable, 0 to disable
5317  *
5318  * Submits dpia hpd int enable command to dmub via inbox message
5319  */
5320 void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
5321                                 uint32_t hpd_int_enable)
5322 {
5323         union dmub_rb_cmd cmd = {0};
5324
5325         cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE;
5326         cmd.dpia_hpd_int_enable.enable = hpd_int_enable;
5327
5328         dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
5329
5330         DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable);
5331 }
5332
5333 /**
5334  * dc_print_dmub_diagnostic_data - Print DMUB diagnostic data for debugging
5335  *
5336  * @dc: [in] dc structure
5337  */
5340 void dc_print_dmub_diagnostic_data(const struct dc *dc)
5341 {
5342         dc_dmub_srv_log_diagnostic_data(dc->ctx->dmub_srv);
5343 }
5344
5345 /**
5346  * dc_disable_accelerated_mode - disable accelerated mode
5347  * @dc: dc structure
5348  */
5349 void dc_disable_accelerated_mode(struct dc *dc)
5350 {
5351         bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0);
5352 }
5353
5354
5355 /**
5356  *  dc_notify_vsync_int_state - notifies vsync enable/disable state
5357  *  @dc: dc structure
5358  *  @stream: stream where vsync int state changed
5359  *  @enable: whether vsync is enabled or disabled
5360  *
5361  *  Called when vsync is enabled/disabled. Will notify DMUB to start/stop ABM
5362  *  interrupts after steady state is reached.
5363  */
5364 void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable)
5365 {
5366         int i;
5367         int edp_num;
5368         struct pipe_ctx *pipe = NULL;
5369         struct dc_link *link = stream->sink->link;
5370         struct dc_link *edp_links[MAX_NUM_EDP];
5371
5373         if (link->psr_settings.psr_feature_enabled)
5374                 return;
5375
5376         if (link->replay_settings.replay_feature_enabled)
5377                 return;
5378
5379         /*find primary pipe associated with stream*/
5380         for (i = 0; i < MAX_PIPES; i++) {
5381                 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
5382
5383                 if (pipe->stream == stream && pipe->stream_res.tg)
5384                         break;
5385         }
5386
5387         if (i == MAX_PIPES) {
5388                 ASSERT(0);
5389                 return;
5390         }
5391
5392         dc_get_edp_links(dc, edp_links, &edp_num);
5393
5394         /* Determine panel inst */
5395         for (i = 0; i < edp_num; i++) {
5396                 if (edp_links[i] == link)
5397                         break;
5398         }
5399
5400         if (i == edp_num)
5401                 return;
5403
5404         if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
5405                 pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
5406 }
5407
5408 /*****************************************************************************
5409  *  dc_abm_save_restore() - Interface to DC for save+pause and restore+un-pause
5410  *                          ABM
5411  *  @dc: dc structure
5412  *  @stream: stream associated with the ABM instance being saved/restored
5413  *  @pData: abm hw states
5414  *
5415  ****************************************************************************/
5416 bool dc_abm_save_restore(
5417                 struct dc *dc,
5418                 struct dc_stream_state *stream,
5419                 struct abm_save_restore *pData)
5420 {
5421         int i;
5422         int edp_num;
5423         struct pipe_ctx *pipe = NULL;
5424         struct dc_link *link = stream->sink->link;
5425         struct dc_link *edp_links[MAX_NUM_EDP];
5426
5427         if (link->replay_settings.replay_feature_enabled)
5428                 return false;
5429
5430         /*find primary pipe associated with stream*/
5431         for (i = 0; i < MAX_PIPES; i++) {
5432                 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
5433
5434                 if (pipe->stream == stream && pipe->stream_res.tg)
5435                         break;
5436         }
5437
5438         if (i == MAX_PIPES) {
5439                 ASSERT(0);
5440                 return false;
5441         }
5442
5443         dc_get_edp_links(dc, edp_links, &edp_num);
5444
5445         /* Determine panel inst */
5446         for (i = 0; i < edp_num; i++)
5447                 if (edp_links[i] == link)
5448                         break;
5449
5450         if (i == edp_num)
5451                 return false;
5452
5453         if (pipe->stream_res.abm &&
5454                 pipe->stream_res.abm->funcs->save_restore)
5455                 return pipe->stream_res.abm->funcs->save_restore(
5456                                 pipe->stream_res.abm,
5457                                 i,
5458                                 pData);
5459         return false;
5460 }
5461
5462 void dc_query_current_properties(struct dc *dc, struct dc_current_properties *properties)
5463 {
5464         unsigned int i;
5465         bool subvp_sw_cursor_req = false;
5466
5467         for (i = 0; i < dc->current_state->stream_count; i++) {
5468                 if (check_subvp_sw_cursor_fallback_req(dc, dc->current_state->streams[i])) {
5469                         subvp_sw_cursor_req = true;
5470                         break;
5471                 }
5472         }
5473         properties->cursor_size_limit = subvp_sw_cursor_req ? 64 : dc->caps.max_cursor_size;
5474 }
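
/*
 * Illustrative sketch only (not part of this file): checking a requested
 * cursor size against the current limit. The size parameter is hypothetical
 * DM input.
 */
static bool __maybe_unused example_dm_cursor_size_ok(struct dc *dc, unsigned int size)
{
        struct dc_current_properties props = {0};

        dc_query_current_properties(dc, &props);

        /* The limit drops to 64 when subvp forces the SW-cursor fallback. */
        return size <= props.cursor_size_limit;
}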
5475
5476 /**
5477  * dc_set_edp_power() - DM controls eDP power to be ON/OFF
5478  *
5479  * Called when the DM wants to power the eDP panel on or off. Only works on
5480  * links that have the skip_implict_edp_power_control flag set.
5481  *
5482  * @dc: Current DC state
5483  * @edp_link: a link with eDP connector signal type
5484  * @powerOn: power on/off eDP
5485  *
5486  * Return: void
5487  */
5488 void dc_set_edp_power(const struct dc *dc, struct dc_link *edp_link,
5489                                  bool powerOn)
5490 {
5491         if (edp_link->connector_signal != SIGNAL_TYPE_EDP)
5492                 return;
5493
5494         if (!edp_link->skip_implict_edp_power_control)
5495                 return;
5496
5497         edp_link->dc->link_srv->edp_set_panel_power(edp_link, powerOn);
5498 }
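
/*
 * Illustrative sketch only (not part of this file): a DM explicitly powering
 * an eDP panel down. This is a no-op unless the link opted out of implicit
 * power control, as the kernel-doc above describes.
 */
static void __maybe_unused example_dm_edp_panel_off(const struct dc *dc,
                                                    struct dc_link *edp_link)
{
        dc_set_edp_power(dc, edp_link, false);
}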
5499
5500 /*
5501  *****************************************************************************
5502  * dc_get_power_profile_for_dc_state() - extracts power profile from dc state
5503  *
5504  * Called when the DM wants to make power-policy decisions based on a dc_state
5505  *
5506  *****************************************************************************
5507  */
5508 struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state *context)
5509 {
5510         struct dc_power_profile profile = { 0 };
5511
5512         profile.power_level += !context->bw_ctx.bw.dcn.clk.p_state_change_support;
5513
5514         return profile;
5515 }
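
/*
 * Illustrative sketch only (not part of this file): using the power profile
 * for a DM-side policy decision. power_level is currently 0 or 1; it
 * increments when the state cannot support p-state switching.
 */
static bool __maybe_unused example_dm_state_is_power_friendly(const struct dc_state *context)
{
        struct dc_power_profile profile = dc_get_power_profile_for_dc_state(context);

        return profile.power_level == 0;
}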
5516