/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
25 #include "dm_services.h"
31 #include "core_status.h"
32 #include "core_types.h"
33 #include "hw_sequencer.h"
34 #include "dce/dce_hwseq.h"
38 #include "dc_state_priv.h"
40 #include "gpio_service_interface.h"
42 #include "clock_source.h"
43 #include "dc_bios_types.h"
45 #include "bios_parser_interface.h"
46 #include "bios/bios_parser_helper.h"
47 #include "include/irq_service_interface.h"
48 #include "transform.h"
51 #include "timing_generator.h"
53 #include "virtual/virtual_link_encoder.h"
56 #include "link_hwss.h"
57 #include "link_encoder.h"
58 #include "link_enc_cfg.h"
61 #include "dm_helpers.h"
62 #include "mem_input.h"
64 #include "dc_dmub_srv.h"
68 #include "vm_helper.h"
70 #include "dce/dce_i2c.h"
72 #include "dmub/dmub_srv.h"
74 #include "dce/dmub_psr.h"
76 #include "dce/dmub_hw_lock_mgr.h"
80 #include "hw_sequencer_private.h"
82 #include "dml2/dml2_internal_types.h"
84 #include "dce/dmub_outbox.h"
92 static const char DC_BUILD_ID[] = "production-build";
/**
 * DC is the OS-agnostic component of the amdgpu DC driver.
 *
 * DC maintains and validates a set of structs representing the state of the
 * driver and writes that state to AMD hardware.
 *
 * Main DC HW structs:
 *
 * struct dc - The central struct. One per driver. Created on driver load,
 * destroyed on driver unload.
 *
 * struct dc_context - One per driver.
 * Used as a backpointer by most other structs in dc.
 *
 * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
 * plugpoints). Created on driver load, destroyed on driver unload.
 *
 * struct dc_sink - One per display. Created on boot or hotplug.
 * Destroyed on shutdown or hotunplug. A dc_link can have a local sink
 * (the display directly attached). It may also have one or more remote
 * sinks (in the Multi-Stream Transport case).
 *
 * struct resource_pool - One per driver. Represents the hw blocks not in the
 * main pipeline. Not directly accessible by dm.
 *
 * Main dc state structs:
 *
 * These structs can be created and destroyed as needed. There is a full set of
 * these structs in dc->current_state representing the currently programmed state.
 *
 * struct dc_state - The global DC state to track global state information,
 * such as bandwidth values.
 *
 * struct dc_stream_state - Represents the hw configuration for the pipeline from
 * a framebuffer to a display. Maps one-to-one with dc_sink.
 *
 * struct dc_plane_state - Represents a framebuffer. Each stream has at least one,
 * and may have more in the Multi-Plane Overlay case.
 *
 * struct resource_context - Represents the programmable state of everything in
 * the resource_pool. Not directly accessible by dm.
 *
 * struct pipe_ctx - A member of struct resource_context. Represents the
 * internal hardware pipeline components. Each dc_plane_state has either
 * one or two (in the pipe-split case).
 */
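/*
 * Illustrative sketch (not part of the driver): one plausible traversal of
 * the object graph described above, as a DM layer might perform it. The
 * fields match the descriptions above; the dm_handle_sink() helper is
 * hypothetical.
 *
 *	int i;
 *
 *	for (i = 0; i < dc->link_count; i++) {
 *		struct dc_link *link = dc->links[i];
 *
 *		if (link->local_sink)
 *			dm_handle_sink(link->local_sink);	// one dc_sink per display
 *	}
 */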
/* Private functions */
static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)

static void destroy_links(struct dc *dc)
	for (i = 0; i < dc->link_count; i++) {
		if (NULL != dc->links[i])
			dc->link_srv->destroy_link(&dc->links[i]);

static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_links)
	for (i = 0; i < num_links; i++) {
		if (links[i]->connector_signal == SIGNAL_TYPE_EDP ||
				links[i]->is_internal_display)

static int get_seamless_boot_stream_count(struct dc_state *ctx)
	uint8_t seamless_boot_stream_count = 0;

	for (i = 0; i < ctx->stream_count; i++)
		if (ctx->streams[i]->apply_seamless_boot_optimization)
			seamless_boot_stream_count++;

	return seamless_boot_stream_count;
static bool create_links(
		uint32_t num_virtual_links)
	struct dc_bios *bios = dc->ctx->dc_bios;

	connectors_num = bios->funcs->get_connectors_number(bios);

	DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num);

	if (connectors_num > ENUM_ID_COUNT) {
			"DC: Number of connectors %d exceeds maximum of %d!\n",

	dm_output_to_console(
		"DC: %s: connectors_num: physical:%d, virtual:%d\n",

	for (i = 0; i < connectors_num; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);

		link_init_params.ctx = dc->ctx;
		/* next BIOS object table connector */
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link = dc->link_srv->create_link(&link_init_params);
			dc->links[dc->link_count] = link;

	DC_LOG_DC("BIOS object table - end");

	/* Create a link for each usb4 dpia port */
	for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		link_init_params.ctx = dc->ctx;
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link_init_params.is_dpia_link = true;

		link = dc->link_srv->create_link(&link_init_params);
			dc->links[dc->link_count] = link;

	for (i = 0; i < num_virtual_links; i++) {
		struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
		struct encoder_init_data enc_init = {0};

		link->link_index = dc->link_count;
		dc->links[dc->link_count] = link;

		link->connector_signal = SIGNAL_TYPE_VIRTUAL;
		link->link_id.type = OBJECT_TYPE_CONNECTOR;
		link->link_id.id = CONNECTOR_ID_VIRTUAL;
		link->link_id.enum_id = ENUM_ID_1;
		link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);

		if (!link->link_enc) {

		link->link_status.dpcd_caps = &link->dpcd_caps;

		enc_init.ctx = dc->ctx;
		enc_init.channel = CHANNEL_ID_UNKNOWN;
		enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
		enc_init.transmitter = TRANSMITTER_UNKNOWN;
		enc_init.connector = link->link_id;
		enc_init.encoder.type = OBJECT_TYPE_ENCODER;
		enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
		enc_init.encoder.enum_id = ENUM_ID_1;
		virtual_link_encoder_construct(link->link_enc, &enc_init);

	dc->caps.num_of_internal_disp = get_num_of_internal_disp(dc->links, dc->link_count);
/* Create additional DIG link encoder objects if fewer than the platform
 * supports were created during link construction. This can happen if the
 * number of physical connectors is less than the number of DIGs.
 */
static bool create_link_encoders(struct dc *dc)
	unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
	unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;

	/* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
	 * link encoders and physical display endpoints and does not require
	 * additional link encoder objects.
	 */
	if (num_usb4_dpia == 0)

	/* Create as many link encoder objects as the platform supports. DPIA
	 * endpoints can be programmably mapped to any DIG.
	 */
	if (num_dig_link_enc > dc->res_pool->dig_link_enc_count) {
		for (i = 0; i < num_dig_link_enc; i++) {
			struct link_encoder *link_enc = dc->res_pool->link_encoders[i];

			if (!link_enc && dc->res_pool->funcs->link_enc_create_minimal) {
				link_enc = dc->res_pool->funcs->link_enc_create_minimal(dc->ctx,
						(enum engine_id)(ENGINE_ID_DIGA + i));
				dc->res_pool->link_encoders[i] = link_enc;
				dc->res_pool->dig_link_enc_count++;
/* Destroy any additional DIG link encoder objects created by
 * create_link_encoders().
 * NB: Must only be called after destroy_links().
 */
static void destroy_link_encoders(struct dc *dc)
	unsigned int num_usb4_dpia;
	unsigned int num_dig_link_enc;

	num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
	num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;

	/* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
	 * link encoders and physical display endpoints and does not require
	 * additional link encoder objects.
	 */
	if (num_usb4_dpia == 0)

	for (i = 0; i < num_dig_link_enc; i++) {
		struct link_encoder *link_enc = dc->res_pool->link_encoders[i];

			link_enc->funcs->destroy(&link_enc);
			dc->res_pool->link_encoders[i] = NULL;
			dc->res_pool->dig_link_enc_count--;

static struct dc_perf_trace *dc_perf_trace_create(void)
	return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);

static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
/**
 * dc_stream_adjust_vmin_vmax - look up pipe context & update parts of DRR
 * @dc: dc reference
 * @stream: Initial dc stream state
 * @adjust: Updated parameters for vertical_total_min and vertical_total_max
 *
 * Looks up the pipe context of dc_stream_state and updates the
 * vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh
 * Rate, which is a power-saving feature that targets reducing panel
 * refresh rate while the screen is static.
 *
 * Return: %true if the pipe context is found and adjusted;
 *	%false if the pipe context is not found.
 */
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_crtc_timing_adjust *adjust)
	/*
	 * Don't adjust DRR while there are bandwidth optimizations pending to
	 * avoid conflicting with firmware updates.
	 */
	if (dc->ctx->dce_version > DCE_VERSION_MAX)
		if (dc->optimized_required || dc->wm_optimized_required)

	dc_exit_ips_for_hw_access(dc);

	stream->adjust.v_total_max = adjust->v_total_max;
	stream->adjust.v_total_mid = adjust->v_total_mid;
	stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
	stream->adjust.v_total_min = adjust->v_total_min;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			dc->hwss.set_drr(&pipe,
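/*
 * Worked example (illustrative, not from the driver): with DRR, the
 * effective refresh rate is pix_clk / (h_total * v_total). For a timing
 * with h_total = 2200 and pix_clk = 148.5 MHz, v_total = 1125 gives 60 Hz;
 * letting v_total stretch toward a v_total_max of 1687 drops the effective
 * refresh rate to roughly 40 Hz while the screen is static, saving panel
 * power.
 */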
/**
 * dc_stream_get_last_used_drr_vtotal - Looks up the pipe context of
 * dc_stream_state and gets the last VTOTAL used by DRR (Dynamic Refresh Rate)
 *
 * @dc: [in] dc reference
 * @stream: [in] Initial dc stream state
 * @refresh_rate: [out] new refresh_rate
 *
 * Return: %true if the pipe context is found and there is an associated
 *	timing_generator for the DC;
 *	%false if the pipe context is not found or there is no
 *	timing_generator for the DC.
 */
bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
		struct dc_stream_state *stream,
		uint32_t *refresh_rate)
	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			/* Only execute if a function pointer has been defined for
			 * the DC version in question
			 */
			if (pipe->stream_res.tg->funcs->get_last_used_drr_vtotal) {
				pipe->stream_res.tg->funcs->get_last_used_drr_vtotal(pipe->stream_res.tg, refresh_rate);
bool dc_stream_get_crtc_position(struct dc *dc,
		struct dc_stream_state **streams, int num_streams,
		unsigned int *v_pos, unsigned int *nom_v_pos)
	/* TODO: Support multiple streams */
	const struct dc_stream_state *stream = streams[0];
	struct crtc_position position;

	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe =
				&dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.stream_enc) {
			dc->hwss.get_position(&pipe, 1, &position);

			*v_pos = position.vertical_count;
			*nom_v_pos = position.nominal_vcount;
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
dc_stream_forward_dmub_crc_window(struct dc_dmub_srv *dmub_srv,
		struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)
	union dmub_rb_cmd cmd = {0};

	cmd.secure_display.roi_info.phy_id = mux_mapping->phy_output_num;
	cmd.secure_display.roi_info.otg_id = mux_mapping->otg_output_num;

		cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
		cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_STOP_UPDATE;

		cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
		cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_WIN_NOTIFY;
		cmd.secure_display.roi_info.x_start = rect->x;
		cmd.secure_display.roi_info.y_start = rect->y;
		cmd.secure_display.roi_info.x_end = rect->x + rect->width;
		cmd.secure_display.roi_info.y_end = rect->y + rect->height;

	dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);

dc_stream_forward_dmcu_crc_window(struct dmcu *dmcu,
		struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)
		dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping);
		dmcu->funcs->forward_crc_window(dmcu, rect, mux_mapping);

dc_stream_forward_crc_window(struct dc_stream_state *stream,
		struct rect *rect, bool is_stop)
	struct dc_dmub_srv *dmub_srv;
	struct otg_phy_mux mux_mapping;
	struct pipe_ctx *pipe;
	struct dc *dc = stream->ctx->dc;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)

	/* Stream not found */

	mux_mapping.phy_output_num = stream->link->link_enc_hw_inst;
	mux_mapping.otg_output_num = pipe->stream_res.tg->inst;

	dmcu = dc->res_pool->dmcu;
	dmub_srv = dc->ctx->dmub_srv;

	/* forward to dmub */
		dc_stream_forward_dmub_crc_window(dmub_srv, rect, &mux_mapping, is_stop);
	/* forward to dmcu */
	else if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
		dc_stream_forward_dmcu_crc_window(dmcu, rect, &mux_mapping, is_stop);

#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
/**
 * dc_stream_configure_crc() - Configure CRC capture for the given stream.
 * @stream: The stream to configure CRC on.
 * @enable: Enable CRC if true, disable otherwise.
 * @crc_window: CRC window (x/y start/end) information
 * @continuous: Capture CRC on every frame if true. Otherwise, only capture
 *	once.
 *
 * By default, only CRC0 is configured, and the entire frame is used to
 * calculate the CRC.
 *
 * Return: %false if the stream is not found or CRC capture is not supported;
 *	%true if the stream has been configured.
 */
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
			     struct crc_params *crc_window, bool enable, bool continuous)
	struct pipe_ctx *pipe;
	struct crc_params param;
	struct timing_generator *tg;

	pipe = resource_get_otg_master_for_stream(
			&dc->current_state->res_ctx, stream);

	/* Stream not found */

	dc_exit_ips_for_hw_access(dc);

	/* By default, capture the full frame */
	param.windowa_x_start = 0;
	param.windowa_y_start = 0;
	param.windowa_x_end = pipe->stream->timing.h_addressable;
	param.windowa_y_end = pipe->stream->timing.v_addressable;
	param.windowb_x_start = 0;
	param.windowb_y_start = 0;
	param.windowb_x_end = pipe->stream->timing.h_addressable;
	param.windowb_y_end = pipe->stream->timing.v_addressable;

		param.windowa_x_start = crc_window->windowa_x_start;
		param.windowa_y_start = crc_window->windowa_y_start;
		param.windowa_x_end = crc_window->windowa_x_end;
		param.windowa_y_end = crc_window->windowa_y_end;
		param.windowb_x_start = crc_window->windowb_x_start;
		param.windowb_y_start = crc_window->windowb_y_start;
		param.windowb_x_end = crc_window->windowb_x_end;
		param.windowb_y_end = crc_window->windowb_y_end;

	param.dsc_mode = pipe->stream->timing.flags.DSC ? 1 : 0;
	param.odm_mode = pipe->next_odm_pipe ? 1 : 0;

	/* Default to the union of both windows */
	param.selection = UNION_WINDOW_A_B;
	param.continuous_mode = continuous;
	param.enable = enable;

	tg = pipe->stream_res.tg;

	/* Only call if supported */
	if (tg->funcs->configure_crc)
		return tg->funcs->configure_crc(tg, &param);

	DC_LOG_WARNING("CRC capture not supported.");
/**
 * dc_stream_get_crc() - Get CRC values for the given stream.
 *
 * @stream: The DC stream state of the stream to get CRCs from.
 * @r_cr: CRC value for the red component.
 * @g_y: CRC value for the green component.
 * @b_cb: CRC value for the blue component.
 *
 * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
 *
 * Return:
 * %false if stream is not found, or if CRCs are not enabled.
 */
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
		uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
	struct pipe_ctx *pipe;
	struct timing_generator *tg;

	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream)

	/* Stream not found */

	tg = pipe->stream_res.tg;

	if (tg->funcs->get_crc)
		return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);

	DC_LOG_WARNING("CRC capture not supported.");
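/*
 * Usage sketch (hedged, not from the driver): configure a one-shot,
 * full-frame CRC capture and read it back. A NULL crc_window selects the
 * full-frame defaults set up above; the caller is assumed to wait at least
 * one frame before polling dc_stream_get_crc().
 *
 *	uint32_t r_cr, g_y, b_cb;
 *
 *	if (dc_stream_configure_crc(dc, stream, NULL, true, false) &&
 *	    dc_stream_get_crc(dc, stream, &r_cr, &g_y, &b_cb))
 *		DC_LOG_DC("CRC: %08x %08x %08x", r_cr, g_y, b_cb);
 */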
void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
		enum dc_dynamic_expansion option)
	/* OPP FMT dyn expansion updates */
	struct pipe_ctx *pipe_ctx;

	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream
			pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
			pipe_ctx->stream_res.opp->dyn_expansion = option;
			pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
					pipe_ctx->stream_res.opp,
					COLOR_SPACE_YCBCR601,
					stream->timing.display_color_depth,
void dc_stream_set_dither_option(struct dc_stream_state *stream,
		enum dc_dither_option option)
	struct bit_depth_reduction_params params;
	struct dc_link *link = stream->link;
	struct pipe_ctx *pipes = NULL;

	for (i = 0; i < MAX_PIPES; i++) {
		if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
			pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];

	if (option > DITHER_OPTION_MAX)

	dc_exit_ips_for_hw_access(stream->ctx->dc);

	stream->dither_option = option;

	memset(&params, 0, sizeof(params));
	resource_build_bit_depth_reduction_params(stream, &params);
	stream->bit_depth_params = params;

	if (pipes->plane_res.xfm &&
			pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
		pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
			pipes->plane_res.xfm,
			pipes->plane_res.scl_data.lb_params.depth,
			&stream->bit_depth_params);

	pipes->stream_res.opp->funcs->
		opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
	struct pipe_ctx *pipes;

	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_gamut_remap(pipes);

bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
	struct pipe_ctx *pipes;

	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream
			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_output_csc(dc,
					stream->output_color_space,
					stream->csc_color_matrix.matrix,
					pipes->stream_res.opp->inst);
void dc_stream_set_static_screen_params(struct dc *dc,
		struct dc_stream_state **streams,
		const struct dc_static_screen_params *params)
	struct pipe_ctx *pipes_affected[MAX_PIPES];
	int num_pipes_affected = 0;

	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < num_streams; i++) {
		struct dc_stream_state *stream = streams[i];

		for (j = 0; j < MAX_PIPES; j++) {
			if (dc->current_state->res_ctx.pipe_ctx[j].stream
				pipes_affected[num_pipes_affected++] =
					&dc->current_state->res_ctx.pipe_ctx[j];

	dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);
static void dc_destruct(struct dc *dc)
	// reset link encoder assignment table on destruct
	if (dc->res_pool && dc->res_pool->funcs->link_encs_assign)
		link_enc_cfg_init(dc, dc->current_state);

	if (dc->current_state) {
		dc_state_release(dc->current_state);
		dc->current_state = NULL;

	destroy_link_encoders(dc);

	dc_destroy_clk_mgr(dc->clk_mgr);

	dc_destroy_resource_pool(dc);

	link_destroy_link_service(&dc->link_srv);

	if (dc->ctx->gpio_service)
		dal_gpio_service_destroy(&dc->ctx->gpio_service);

	if (dc->ctx->created_bios)
		dal_bios_parser_destroy(&dc->ctx->dc_bios);

	kfree(dc->ctx->logger);
	dc_perf_trace_destroy(&dc->ctx->perf_trace);

	kfree(dc->vm_helper);
	dc->vm_helper = NULL;
static bool dc_construct_ctx(struct dc *dc,
		const struct dc_init_data *init_params)
	struct dc_context *dc_ctx;

	dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);

	dc_ctx->cgs_device = init_params->cgs_device;
	dc_ctx->driver_context = init_params->driver;

	dc_ctx->asic_id = init_params->asic_id;
	dc_ctx->dc_sink_id_count = 0;
	dc_ctx->dc_stream_id_count = 0;
	dc_ctx->dce_environment = init_params->dce_environment;
	dc_ctx->dcn_reg_offsets = init_params->dcn_reg_offsets;
	dc_ctx->nbio_reg_offsets = init_params->nbio_reg_offsets;
	dc_ctx->clk_reg_offsets = init_params->clk_reg_offsets;

	dc_ctx->logger = kmalloc(sizeof(*dc_ctx->logger), GFP_KERNEL);

	if (!dc_ctx->logger) {

	dc_ctx->logger->dev = adev_to_drm(init_params->driver);
	dc->dml.logger = dc_ctx->logger;

	dc_ctx->dce_version = resource_parse_asic_id(init_params->asic_id);

	dc_ctx->perf_trace = dc_perf_trace_create();
	if (!dc_ctx->perf_trace) {
		ASSERT_CRITICAL(false);

	dc->link_srv = link_create_link_service();
static bool dc_construct(struct dc *dc,
		const struct dc_init_data *init_params)
	struct dc_context *dc_ctx;
	struct bw_calcs_dceip *dc_dceip;
	struct bw_calcs_vbios *dc_vbios;
	struct dcn_soc_bounding_box *dcn_soc;
	struct dcn_ip_params *dcn_ip;

	dc->config = init_params->flags;

	// Allocate memory for the vm_helper
	dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
	if (!dc->vm_helper) {
		dm_error("%s: failed to create dc->vm_helper\n", __func__);

	memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));

	dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
		dm_error("%s: failed to create dceip\n", __func__);

	dc->bw_dceip = dc_dceip;

	dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
		dm_error("%s: failed to create vbios\n", __func__);

	dc->bw_vbios = dc_vbios;

	dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
		dm_error("%s: failed to create dcn_soc\n", __func__);

	dc->dcn_soc = dcn_soc;

	dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
		dm_error("%s: failed to create dcn_ip\n", __func__);

	if (!dc_construct_ctx(dc, init_params)) {
		dm_error("%s: failed to create ctx\n", __func__);

	/* Resource should construct all asic specific resources.
	 * This should be the only place where we need to parse the asic id
	 */
	if (init_params->vbios_override)
		dc_ctx->dc_bios = init_params->vbios_override;

		/* Create BIOS parser */
		struct bp_init_data bp_init_data;

		bp_init_data.ctx = dc_ctx;
		bp_init_data.bios = init_params->asic_id.atombios_base_address;

		dc_ctx->dc_bios = dal_bios_parser_create(
				&bp_init_data, dc_ctx->dce_version);

		if (!dc_ctx->dc_bios) {
			ASSERT_CRITICAL(false);

		dc_ctx->created_bios = true;

	dc->vendor_signature = init_params->vendor_signature;

	/* Create GPIO service */
	dc_ctx->gpio_service = dal_gpio_service_create(
			dc_ctx->dce_version,
			dc_ctx->dce_environment,

	if (!dc_ctx->gpio_service) {
		ASSERT_CRITICAL(false);

	dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);

	/* set i2c speed if not done by the respective dcnxxx__resource.c */
	if (dc->caps.i2c_speed_in_khz_hdcp == 0)
		dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;
	if (dc->caps.max_optimizable_video_width == 0)
		dc->caps.max_optimizable_video_width = 5120;

	dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);

#ifdef CONFIG_DRM_AMD_DC_FP
	dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;

	if (dc->res_pool->funcs->update_bw_bounding_box) {
		dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);

	if (!create_links(dc, init_params->num_virtual_links))

	/* Create additional DIG link encoder objects if fewer than the platform
	 * supports were created during link construction.
	 */
	if (!create_link_encoders(dc))

	/* Creation of current_state must occur after dc->dml
	 * is initialized in dc_create_resource_pool because
	 * on creation it copies the contents of dc->dml
	 */
	dc->current_state = dc_state_create(dc);

	if (!dc->current_state) {
		dm_error("%s: failed to create validate ctx\n", __func__);
static void disable_all_writeback_pipes_for_stream(
		const struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_state *context)
	for (i = 0; i < stream->num_wb_info; i++)
		stream->writeback_info[i].wb_enabled = false;

static void apply_ctx_interdependent_lock(struct dc *dc,
		struct dc_state *context,
		struct dc_stream_state *stream,
	/* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */
	if (dc->hwss.interdependent_update_lock)
		dc->hwss.interdependent_update_lock(dc, context, lock);

		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
			struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

			// Copied conditions that were previously in dce110_apply_ctx_for_surface
			if (stream == pipe_ctx->stream) {
				if (resource_is_pipe_type(pipe_ctx, OPP_HEAD) &&
						(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
					dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
	if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
		memset(&pipe_ctx->visual_confirm_color, 0, sizeof(struct tg_color));

		if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
			get_hdr_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
		else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
			get_surface_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
		else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
			get_surface_tile_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));

			if (dc->ctx->dce_version < DCN_VERSION_2_0)
				color_space_to_black_color(
					dc, pipe_ctx->stream->output_color_space, &(pipe_ctx->visual_confirm_color));

		if (dc->ctx->dce_version >= DCN_VERSION_2_0) {
			if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE)
				get_mpctree_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
			else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP)
				get_subvp_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
			else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH)
				get_mclk_switch_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
	struct dc_state *dangling_context = dc_state_create_current_copy(dc);
	struct dc_state *current_ctx;
	struct pipe_ctx *pipe;
	struct timing_generator *tg;

	if (dangling_context == NULL)

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *old_stream =
				dc->current_state->res_ctx.pipe_ctx[i].stream;
		bool should_disable = true;
		bool pipe_split_change = false;

		if ((context->res_ctx.pipe_ctx[i].top_pipe) &&
			(dc->current_state->res_ctx.pipe_ctx[i].top_pipe))
			pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe->pipe_idx !=
				dc->current_state->res_ctx.pipe_ctx[i].top_pipe->pipe_idx;
			pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe !=
				dc->current_state->res_ctx.pipe_ctx[i].top_pipe;

		for (j = 0; j < context->stream_count; j++) {
			if (old_stream == context->streams[j]) {
				should_disable = false;

		if (!should_disable && pipe_split_change &&
				dc->current_state->stream_count != context->stream_count)
			should_disable = true;

		if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe &&
				!dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe) {
			struct pipe_ctx *old_pipe, *new_pipe;

			old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
			new_pipe = &context->res_ctx.pipe_ctx[i];

			if (old_pipe->plane_state && !new_pipe->plane_state)
				should_disable = true;

		if (should_disable && old_stream) {
			bool is_phantom = dc_state_get_stream_subvp_type(dc->current_state, old_stream) == SUBVP_PHANTOM;

			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
			tg = pipe->stream_res.tg;
			/* When disabling plane for a phantom pipe, we must turn on the
			 * phantom OTG so the disable programming gets the double buffer
			 * update. Otherwise the pipe will be left in a partially disabled
			 * state that can result in underflow or hang when enabling it
			 * again for a different use.
			 */
			if (tg->funcs->enable_crtc) {
				int main_pipe_width, main_pipe_height;
				struct dc_stream_state *old_paired_stream = dc_state_get_paired_subvp_stream(dc->current_state, old_stream);

				main_pipe_width = old_paired_stream->dst.width;
				main_pipe_height = old_paired_stream->dst.height;
				if (dc->hwss.blank_phantom)
					dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height);
				tg->funcs->enable_crtc(tg);

				dc_state_rem_all_phantom_planes_for_stream(dc, old_stream, dangling_context, true);

				dc_state_rem_all_planes_for_stream(dc, old_stream, dangling_context);
			disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);

			if (pipe->stream && pipe->plane_state) {
				set_p_state_switch_method(dc, context, pipe);
				dc_update_visual_confirm_color(dc, context, pipe);

			if (dc->hwss.apply_ctx_for_surface) {
				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
				dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
				dc->hwss.post_unlock_program_front_end(dc, dangling_context);

			if (dc->hwss.program_front_end_for_ctx) {
				dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
				dc->hwss.program_front_end_for_ctx(dc, dangling_context);
				dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
				dc->hwss.post_unlock_program_front_end(dc, dangling_context);

			/* We need to put the phantom OTG back into its default (disabled) state or we
			 * can get corruption when transitioning from one SubVP config to a different one.
			 * The OTG is set to disable on falling edge of VUPDATE so the plane disable
			 * will still get its double buffer update.
			 */
			if (tg->funcs->disable_phantom_crtc)
				tg->funcs->disable_phantom_crtc(tg);

	current_ctx = dc->current_state;
	dc->current_state = dangling_context;
	dc_state_release(current_ctx);
static void disable_vbios_mode_if_required(
		struct dc_state *context)
	/* check if timing changed, disable stream */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *stream = NULL;
		struct dc_link *link = NULL;
		struct pipe_ctx *pipe = NULL;

		pipe = &context->res_ctx.pipe_ctx[i];
		stream = pipe->stream;

		if (stream->apply_seamless_boot_optimization)

		// only looking for first odm pipe
		if (pipe->prev_odm_pipe)

		if (stream->link->local_sink &&
			stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
			link = stream->link;

		if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			unsigned int enc_inst, tg_inst = 0;
			unsigned int pix_clk_100hz;

			enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
			if (enc_inst != ENGINE_ID_UNKNOWN) {
				for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
					if (dc->res_pool->stream_enc[j]->id == enc_inst) {
						tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg(
							dc->res_pool->stream_enc[j]);

			dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
				dc->res_pool->dp_clock_source,
				tg_inst, &pix_clk_100hz);

			if (link->link_status.link_active) {
				uint32_t requested_pix_clk_100hz =
					pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;

				if (pix_clk_100hz != requested_pix_clk_100hz) {
					dc->link_srv->set_dpms_off(pipe);
					pipe->stream->dpms_off = false;
/**
 * wait_for_blank_complete - wait for all active OPPs to finish pending blank
 * pattern updates
 *
 * @dc: [in] dc reference
 * @context: [in] hardware context in use
 */
static void wait_for_blank_complete(struct dc *dc,
		struct dc_state *context)
	struct pipe_ctx *opp_head;
	struct dce_hwseq *hws = dc->hwseq;

	if (!hws->funcs.wait_for_blank_complete)

	for (i = 0; i < MAX_PIPES; i++) {
		opp_head = &context->res_ctx.pipe_ctx[i];

		if (!resource_is_pipe_type(opp_head, OPP_HEAD) ||
				dc_state_get_pipe_subvp_type(context, opp_head) == SUBVP_PHANTOM)

		hws->funcs.wait_for_blank_complete(opp_head->stream_res.opp);
static void wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *context)
	struct pipe_ctx *otg_master;
	struct timing_generator *tg;

	for (i = 0; i < MAX_PIPES; i++) {
		otg_master = &context->res_ctx.pipe_ctx[i];
		if (!resource_is_pipe_type(otg_master, OTG_MASTER) ||
				dc_state_get_pipe_subvp_type(context, otg_master) == SUBVP_PHANTOM)
		tg = otg_master->stream_res.tg;
		if (tg->funcs->wait_odm_doublebuffer_pending_clear)
			tg->funcs->wait_odm_doublebuffer_pending_clear(tg);

	/* An ODM update may require reprogramming the blank pattern for each OPP */
	wait_for_blank_complete(dc, context);
static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (!pipe->plane_state || dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)

		/* Timeout 100 ms */
		while (count < 100000) {
			/* Must set to false to start with, due to OR in update function */
			pipe->plane_state->status.is_flip_pending = false;
			dc->hwss.update_pending_status(pipe);
			if (!pipe->plane_state->status.is_flip_pending)

		ASSERT(!pipe->plane_state->status.is_flip_pending);

/* Public functions */
struct dc *dc_create(const struct dc_init_data *init_params)
	struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
	unsigned int full_pipe_count;

	if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
		if (!dc_construct_ctx(dc, init_params))

		if (!dc_construct(dc, init_params))

		full_pipe_count = dc->res_pool->pipe_count;
		if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)

		dc->caps.max_streams = min(
				dc->res_pool->stream_enc_count);

		dc->caps.max_links = dc->link_count;
		dc->caps.max_audios = dc->res_pool->audio_count;
		dc->caps.linear_pitch_alignment = 64;

		dc->caps.max_dp_protocol_version = DP_VERSION_1_4;

		dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator;

		if (dc->res_pool->dmcu != NULL)
			dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;

	dc->dcn_reg_offsets = init_params->dcn_reg_offsets;
	dc->nbio_reg_offsets = init_params->nbio_reg_offsets;
	dc->clk_reg_offsets = init_params->clk_reg_offsets;

	/* Populate versioning information */
	dc->versions.dc_ver = DC_VER;

	dc->build_id = DC_BUILD_ID;

	DC_LOG_DC("Display Core initialized\n");
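/*
 * Typical bring-up sequence (a hedged sketch of how a DM layer might use
 * these entry points; error handling and DM specifics elided):
 *
 *	struct dc_init_data init_data = {0};	// filled in by the DM
 *	struct dc *dc;
 *
 *	dc = dc_create(&init_data);
 *	if (dc) {
 *		dc_hardware_init(dc);
 *		// ... detect links, build streams, commit state ...
 *		dc_destroy(&dc);
 *	}
 */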
static void detect_edp_presence(struct dc *dc)
	struct dc_link *edp_links[MAX_NUM_EDP];
	struct dc_link *edp_link = NULL;
	enum dc_connection_type type;

	dc_get_edp_links(dc, edp_links, &edp_num);

	for (i = 0; i < edp_num; i++) {
		edp_link = edp_links[i];
		if (dc->config.edp_not_connected) {
			edp_link->edp_sink_present = false;

			dc_link_detect_connection_type(edp_link, &type);
			edp_link->edp_sink_present = (type != dc_connection_none);

void dc_hardware_init(struct dc *dc)
	detect_edp_presence(dc);
	if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
		dc->hwss.init_hw(dc);

void dc_init_callbacks(struct dc *dc,
		const struct dc_callback_init *init_params)
	dc->ctx->cp_psp = init_params->cp_psp;

void dc_deinit_callbacks(struct dc *dc)
	memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));

void dc_destroy(struct dc **dc)
static void enable_timing_multisync(
		struct dc_state *ctx)
	int i, multisync_count = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream ||
				!ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
		if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
		multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];

	if (multisync_count > 0) {
		dc->hwss.enable_per_frame_crtc_position_reset(
			dc, multisync_count, multisync_pipes);
static void program_timing_sync(
		struct dc_state *ctx)
	int group_index = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream
				|| ctx->res_ctx.pipe_ctx[i].top_pipe
				|| ctx->res_ctx.pipe_ctx[i].prev_odm_pipe)

		unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];

	for (i = 0; i < pipe_count; i++) {
		enum timing_synchronization_type sync_type = NOT_SYNCHRONIZABLE;
		struct pipe_ctx *pipe_set[MAX_PIPES];

		if (!unsynced_pipes[i])

		pipe_set[0] = unsynced_pipes[i];
		unsynced_pipes[i] = NULL;

		/* Add tg to the set, search the rest of the tgs for ones with
		 * the same timing, and add all tgs with the same timing to the group
		 */
		for (j = i + 1; j < pipe_count; j++) {
			if (!unsynced_pipes[j])
			if (sync_type != TIMING_SYNCHRONIZABLE &&
				dc->hwss.enable_vblanks_synchronization &&
				unsynced_pipes[j]->stream_res.tg->funcs->align_vblanks &&
				resource_are_vblanks_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				sync_type = VBLANK_SYNCHRONIZABLE;
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
			if (sync_type != VBLANK_SYNCHRONIZABLE &&
				resource_are_streams_timing_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				sync_type = TIMING_SYNCHRONIZABLE;
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;

		/* set first unblanked pipe as master */
		for (j = 0; j < group_size; j++) {
			if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
					pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
					pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);

				swap(pipe_set[0], pipe_set[j]);

		for (k = 0; k < group_size; k++) {
			struct dc_stream_status *status = dc_state_get_stream_status(ctx, pipe_set[k]->stream);

			status->timing_sync_info.group_id = num_group;
			status->timing_sync_info.group_size = group_size;
				status->timing_sync_info.master = true;
				status->timing_sync_info.master = false;

		/* remove any other unblanked pipes as they have already been synced */
		if (dc->config.use_pipe_ctx_sync_logic) {
			/* check pipe's syncd to decide which pipe to be removed */
			for (j = 1; j < group_size; j++) {
				if (pipe_set[j]->pipe_idx_syncd == pipe_set[0]->pipe_idx_syncd) {
					pipe_set[j] = pipe_set[group_size];
					/* link slave pipe's syncd with master pipe */
					pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd;

			/* remove any other pipes by checking valid plane */
			for (j = j + 1; j < group_size; j++) {
				if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
						pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
						pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
					pipe_set[j] = pipe_set[group_size];

		if (group_size > 1) {
			if (sync_type == TIMING_SYNCHRONIZABLE) {
				dc->hwss.enable_timing_synchronization(
					dc, ctx, group_index, group_size, pipe_set);
			if (sync_type == VBLANK_SYNCHRONIZABLE) {
				dc->hwss.enable_vblanks_synchronization(
					dc, group_index, group_size, pipe_set);
static bool streams_changed(struct dc *dc,
		struct dc_stream_state *streams[],
		uint8_t stream_count)
	if (stream_count != dc->current_state->stream_count)

	for (i = 0; i < dc->current_state->stream_count; i++) {
		if (dc->current_state->streams[i] != streams[i])
		if (!streams[i]->link->link_state_valid)
bool dc_validate_boot_timing(const struct dc *dc,
		const struct dc_sink *sink,
		struct dc_crtc_timing *crtc_timing)
	struct timing_generator *tg;
	struct stream_encoder *se = NULL;

	struct dc_crtc_timing hw_crtc_timing = {0};

	struct dc_link *link = sink->link;
	unsigned int i, enc_inst, tg_inst = 0;

	/* Support seamless boot on EDP displays only */
	if (sink->sink_signal != SIGNAL_TYPE_EDP) {

	if (dc->debug.force_odm_combine)

	/* Check for enabled DIG to identify enabled display */
	if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))

	enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);

	if (enc_inst == ENGINE_ID_UNKNOWN)

	for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
		if (dc->res_pool->stream_enc[i]->id == enc_inst) {

			se = dc->res_pool->stream_enc[i];

			tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
				dc->res_pool->stream_enc[i]);

	// tg_inst not found
	if (i == dc->res_pool->stream_enc_count)

	if (tg_inst >= dc->res_pool->timing_generator_count)

	if (tg_inst != link->link_enc->preferred_engine)

	tg = dc->res_pool->timing_generators[tg_inst];

	if (!tg->funcs->get_hw_timing)

	if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))

	if (crtc_timing->h_total != hw_crtc_timing.h_total)

	if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)

	if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)

	if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)

	if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)

	if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)

	if (crtc_timing->v_total != hw_crtc_timing.v_total)

	if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)

	if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)

	if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)

	if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)

	if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)

	/* block DSC for now, as VBIOS does not currently support DSC timings */
	if (crtc_timing->flags.DSC)

	if (dc_is_dp_signal(link->connector_signal)) {
		unsigned int pix_clk_100hz;
		uint32_t numOdmPipes = 1;
		uint32_t id_src[4] = {0};

		dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
			dc->res_pool->dp_clock_source,
			tg_inst, &pix_clk_100hz);

		if (tg->funcs->get_optc_source)
			tg->funcs->get_optc_source(tg,
					&numOdmPipes, &id_src[0], &id_src[1]);

		if (numOdmPipes == 2)
		if (numOdmPipes == 4)

		// Note: In rare cases, HW pixclk may differ from crtc's pixclk
		// slightly due to rounding issues in 10 kHz units.
		if (crtc_timing->pix_clk_100hz != pix_clk_100hz)

		if (!se->funcs->dp_get_pixel_format)

		if (!se->funcs->dp_get_pixel_format(
			&hw_crtc_timing.pixel_encoding,
			&hw_crtc_timing.display_color_depth))

		if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)

		if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)

	if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {

	if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED)

	if (dc->link_srv->edp_is_ilr_optimization_required(link, crtc_timing)) {
		DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
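/*
 * Usage sketch (hedged): a DM layer deciding whether the firmware-lit eDP
 * timing can be kept without a full modeset; sink and stream are assumed
 * to come from the DM's detection path.
 *
 *	if (dc_validate_boot_timing(dc, sink, &stream->timing))
 *		stream->apply_seamless_boot_optimization = true;
 */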
static inline bool should_update_pipe_for_stream(
		struct dc_state *context,
		struct pipe_ctx *pipe_ctx,
		struct dc_stream_state *stream)
	return (pipe_ctx->stream && pipe_ctx->stream == stream);

static inline bool should_update_pipe_for_plane(
		struct dc_state *context,
		struct pipe_ctx *pipe_ctx,
		struct dc_plane_state *plane_state)
	return (pipe_ctx->plane_state == plane_state);
void dc_enable_stereo(
	struct dc *dc,
	struct dc_state *context,
	struct dc_stream_state *streams[],
	uint8_t stream_count)
	struct pipe_ctx *pipe;

	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < MAX_PIPES; i++) {
		if (context != NULL) {
			pipe = &context->res_ctx.pipe_ctx[i];
			context = dc->current_state;
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		for (j = 0; pipe && j < stream_count; j++) {
			if (should_update_pipe_for_stream(context, pipe, streams[j]) &&
				dc->hwss.setup_stereo)
				dc->hwss.setup_stereo(pipe, dc);

void dc_trigger_sync(struct dc *dc, struct dc_state *context)
	if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
		dc_exit_ips_for_hw_access(dc);

		enable_timing_multisync(dc, context);
		program_timing_sync(dc, context);
static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
	unsigned int stream_mask = 0;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (context->res_ctx.pipe_ctx[i].stream)
			stream_mask |= 1 << i;

void dc_z10_restore(const struct dc *dc)
	if (dc->hwss.z10_restore)
		dc->hwss.z10_restore(dc);

void dc_z10_save_init(struct dc *dc)
	if (dc->hwss.z10_save_init)
		dc->hwss.z10_save_init(dc);
/**
 * dc_commit_state_no_check - Apply context to the hardware
 *
 * @dc: DC object with the current status to be updated
 * @context: New state that will become the current status at the end of this function
 *
 * Applies the given context to the hardware and copies it into the current
 * context. It's up to the user to release the src context afterwards.
 *
 * Return: an enum dc_status result code for the operation
 */
static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
	struct dc_bios *dcb = dc->ctx->dc_bios;
	enum dc_status result = DC_ERROR_UNEXPECTED;
	struct pipe_ctx *pipe;
	struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
	struct dc_state *old_state;
	bool subvp_prev_use = false;

	dc_allow_idle_optimizations(dc, false);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		/* Check old context for SubVP */
		subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM);

	for (i = 0; i < context->stream_count; i++)
		dc_streams[i] = context->streams[i];

	if (!dcb->funcs->is_accelerated_mode(dcb)) {
		disable_vbios_mode_if_required(dc, context);
		dc->hwss.enable_accelerated_mode(dc, context);

	if (context->stream_count > get_seamless_boot_stream_count(context) ||
			context->stream_count == 0)
		dc->hwss.prepare_bandwidth(dc, context);

	/* When SubVP is active, all HW programming must be done while
	 * SubVP lock is acquired
	 */
	if (dc->hwss.subvp_pipe_control_lock)
		dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);

	if (dc->hwss.update_dsc_pg)
		dc->hwss.update_dsc_pg(dc, context, false);

	disable_dangling_plane(dc, context);
	/* re-program planes for existing stream, in case we need to
	 * free up plane resource for later use
	 */
	if (dc->hwss.apply_ctx_for_surface) {
		for (i = 0; i < context->stream_count; i++) {
			if (context->streams[i]->mode_changed)

			apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
			dc->hwss.apply_ctx_for_surface(
					dc, context->streams[i],
					context->stream_status[i].plane_count,
					context); /* use new pipe config in new context */
			apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
			dc->hwss.post_unlock_program_front_end(dc, context);

	/* Program hardware */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];
		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);

	result = dc->hwss.apply_ctx_to_hw(dc, context);

	if (result != DC_OK) {
		/* Application of dc_state to hardware stopped. */
		dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;

	dc_trigger_sync(dc, context);

	/* Full update should unconditionally be triggered when dc_commit_state_no_check is called */
	for (i = 0; i < context->stream_count; i++) {
		uint32_t prev_dsc_changed = context->streams[i]->update_flags.bits.dsc_changed;

		context->streams[i]->update_flags.raw = 0xFFFFFFFF;
		context->streams[i]->update_flags.bits.dsc_changed = prev_dsc_changed;

	/* Program all planes within new context */
	if (dc->hwss.program_front_end_for_ctx) {
		dc->hwss.interdependent_update_lock(dc, context, true);
		dc->hwss.program_front_end_for_ctx(dc, context);
		dc->hwss.interdependent_update_lock(dc, context, false);
		dc->hwss.post_unlock_program_front_end(dc, context);

	if (dc->hwss.commit_subvp_config)
		dc->hwss.commit_subvp_config(dc, context);
	if (dc->hwss.subvp_pipe_control_lock)
		dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use);

	for (i = 0; i < context->stream_count; i++) {
		const struct dc_link *link = context->streams[i]->link;

		if (!context->streams[i]->mode_changed)

		if (dc->hwss.apply_ctx_for_surface) {
			apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
			dc->hwss.apply_ctx_for_surface(
					dc, context->streams[i],
					context->stream_status[i].plane_count,
			apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
			dc->hwss.post_unlock_program_front_end(dc, context);

		/*
		 * TODO rework dc_enable_stereo call to work with validation sets?
		 */
		for (k = 0; k < MAX_PIPES; k++) {
			pipe = &context->res_ctx.pipe_ctx[k];

			for (l = 0 ; pipe && l < context->stream_count; l++) {
				if (context->streams[l] &&
						context->streams[l] == pipe->stream &&
						dc->hwss.setup_stereo)
					dc->hwss.setup_stereo(pipe, dc);

		CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",
				context->streams[i]->timing.h_addressable,
				context->streams[i]->timing.v_addressable,
				context->streams[i]->timing.h_total,
				context->streams[i]->timing.v_total,
				context->streams[i]->timing.pix_clk_100hz / 10);

	dc_enable_stereo(dc, context, dc_streams, context->stream_count);

	if (context->stream_count > get_seamless_boot_stream_count(context) ||
			context->stream_count == 0) {
		/* Must wait for no flips to be pending before doing optimize bw */
		wait_for_no_pipes_pending(dc, context);
		/*
		 * optimized dispclk depends on ODM setup. Need to wait for ODM
		 * update pending complete before optimizing bandwidth.
		 */
		wait_for_odm_update_pending_complete(dc, context);
		/* pplib is notified if disp_num changed */
		dc->hwss.optimize_bandwidth(dc, context);
		/* Need to do otg sync again as otg could be out of sync due to otg
		 * workaround applied during clock update
		 */
		dc_trigger_sync(dc, context);

	if (dc->hwss.update_dsc_pg)
		dc->hwss.update_dsc_pg(dc, context, true);

	if (dc->ctx->dce_version >= DCE_VERSION_MAX)
		TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
		TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);

	context->stream_mask = get_stream_mask(dc, context);

	if (context->stream_mask != dc->current_state->stream_mask)
		dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask);

	for (i = 0; i < context->stream_count; i++)
		context->streams[i]->mode_changed = false;

	/* Clear update flags that were set earlier to avoid redundant programming */
	for (i = 0; i < context->stream_count; i++) {
		context->streams[i]->update_flags.raw = 0x0;

	old_state = dc->current_state;
	dc->current_state = context;
	dc_state_release(old_state);

	dc_state_retain(dc->current_state);

static bool commit_minimal_transition_state_legacy(struct dc *dc,
		struct dc_state *transition_base_context);
/**
 * dc_commit_streams - Commit current stream state
 *
 * @dc: DC object with the commit state to be configured in the hardware
 * @streams: Array with a list of stream state
 * @stream_count: Total of streams
 *
 * Function responsible for committing the stream changes to the hardware.
 *
 * Return: DC_OK if everything works as expected; otherwise, a dc_status
 * error code.
 */
2107 enum dc_status dc_commit_streams(struct dc *dc,
2108 struct dc_stream_state *streams[],
2109 uint8_t stream_count)
{
	int i, j;
	struct dc_state *context;
	enum dc_status res = DC_OK;
	struct dc_validation_set set[MAX_STREAMS] = {0};
	struct pipe_ctx *pipe;
	bool handle_exit_odm2to1 = false;

	if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW)
		return res;

	if (!streams_changed(dc, streams, stream_count))
		return res;

	dc_exit_ips_for_hw_access(dc);

	DC_LOG_DC("%s: %d streams\n", __func__, stream_count);

	for (i = 0; i < stream_count; i++) {
		struct dc_stream_state *stream = streams[i];
		struct dc_stream_status *status = dc_stream_get_status(stream);

		dc_stream_log(dc, stream);

		set[i].stream = stream;

		if (status) {
			set[i].plane_count = status->plane_count;
			for (j = 0; j < status->plane_count; j++)
				set[i].plane_states[j] = status->plane_states[j];
		}
	}

	/* ODM Combine 2:1 power optimization is only applied for the single
	 * stream scenario; it uses more pipes than needed to reduce power
	 * consumption. We need to switch off this feature to make room for
	 * new streams.
	 */
	if (stream_count > dc->current_state->stream_count &&
			dc->current_state->stream_count == 1) {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
			if (pipe->next_odm_pipe)
				handle_exit_odm2to1 = true;
		}
	}

	if (handle_exit_odm2to1)
		res = commit_minimal_transition_state_legacy(dc, dc->current_state);

	context = dc_state_create_current_copy(dc);
	if (!context)
		goto context_alloc_fail;

	res = dc_validate_with_context(dc, set, stream_count, context, false);
	if (res != DC_OK) {
		BREAK_TO_DEBUGGER();
		goto fail;
	}

	res = dc_commit_state_no_check(dc, context);

	for (i = 0; i < stream_count; i++) {
		for (j = 0; j < context->stream_count; j++) {
			if (streams[i]->stream_id == context->streams[j]->stream_id)
				streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst;

			if (dc_is_embedded_signal(streams[i]->signal)) {
				struct dc_stream_status *status = dc_state_get_stream_status(context, streams[i]);

				if (dc->hwss.is_abm_supported)
					status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, streams[i]);
				else
					status->is_abm_supported = true;
			}
		}
	}

fail:
	dc_state_release(context);

context_alloc_fail:

	DC_LOG_DC("%s Finished.\n", __func__);

	return res;
}
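/*
 * Illustrative sketch (not part of the driver): committing a single
 * stream. A display manager normally gathers every active stream and
 * commits them together so validation sees the whole configuration.
 * The example_ name is hypothetical.
 */
static inline enum dc_status example_commit_one_stream(struct dc *dc,
		struct dc_stream_state *stream)
{
	struct dc_stream_state *streams[1] = { stream };

	return dc_commit_streams(dc, streams, 1);
}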
bool dc_acquire_release_mpc_3dlut(
		struct dc *dc, bool acquire,
		struct dc_stream_state *stream,
		struct dc_3dlut **lut,
		struct dc_transfer_func **shaper)
{
	int pipe_idx;
	bool ret = false;
	bool found_pipe_idx = false;
	const struct resource_pool *pool = dc->res_pool;
	struct resource_context *res_ctx = &dc->current_state->res_ctx;
	int mpcc_id = 0;

	if (pool && res_ctx) {
		if (acquire) {
			/*find pipe idx for the given stream*/
			for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) {
				if (res_ctx->pipe_ctx[pipe_idx].stream == stream) {
					found_pipe_idx = true;
					mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst;
					break;
				}
			}
		} else
			found_pipe_idx = true;/*for release pipe_idx is not required*/

		if (found_pipe_idx) {
			if (acquire && pool->funcs->acquire_post_bldn_3dlut)
				ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper);
			else if (!acquire && pool->funcs->release_post_bldn_3dlut)
				ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper);
		}
	}
	return ret;
}
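/*
 * Illustrative sketch (not part of the driver): acquire and release are
 * expected to be paired on the same stream. The example_ name is
 * hypothetical.
 */
static inline void example_use_post_blend_3dlut(struct dc *dc,
		struct dc_stream_state *stream)
{
	struct dc_3dlut *lut = NULL;
	struct dc_transfer_func *shaper = NULL;

	if (dc_acquire_release_mpc_3dlut(dc, true, stream, &lut, &shaper)) {
		/* ... fill and program lut/shaper here ... */
		dc_acquire_release_mpc_3dlut(dc, false, stream, &lut, &shaper);
	}
}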
static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
{
	int i;
	struct pipe_ctx *pipe;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		// Don't check flip pending on phantom pipes
		if (!pipe->plane_state || (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM))
			continue;

		/* Must set to false to start with, due to OR in update function */
		pipe->plane_state->status.is_flip_pending = false;
		dc->hwss.update_pending_status(pipe);
		if (pipe->plane_state->status.is_flip_pending)
			return true;
	}
	return false;
}
/* Perform updates here which need to be deferred until next vupdate
 *
 * i.e. blnd lut, 3dlut, and shaper lut bypass regs are double buffered
 * but forcing lut memory to shutdown state is immediate. This causes
 * single frame corruption as lut gets disabled mid-frame unless shutdown
 * is deferred until after entering bypass.
 */
static void process_deferred_updates(struct dc *dc)
{
	int i = 0;

	if (dc->debug.enable_mem_low_power.bits.cm) {
		ASSERT(dc->dcn_ip->max_num_dpp);
		for (i = 0; i < dc->dcn_ip->max_num_dpp; i++)
			if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update)
				dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]);
	}
}
void dc_post_update_surfaces_to_stream(struct dc *dc)
{
	int i;
	struct dc_state *context = dc->current_state;

	if ((!dc->optimized_required) || get_seamless_boot_stream_count(context) > 0)
		return;

	post_surface_trace(dc);

	/*
	 * Only relevant for DCN behavior where we can guarantee the optimization
	 * is safe to apply - retain the legacy behavior for DCE.
	 */
	if (dc->ctx->dce_version < DCE_VERSION_MAX)
		TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
	else {
		TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);

		if (is_flip_pending_in_pipes(dc, context))
			return;

		for (i = 0; i < dc->res_pool->pipe_count; i++)
			if (context->res_ctx.pipe_ctx[i].stream == NULL ||
					context->res_ctx.pipe_ctx[i].plane_state == NULL) {
				context->res_ctx.pipe_ctx[i].pipe_idx = i;
				dc->hwss.disable_plane(dc, context, &context->res_ctx.pipe_ctx[i]);
			}

		process_deferred_updates(dc);

		dc->hwss.optimize_bandwidth(dc, context);

		if (dc->hwss.update_dsc_pg)
			dc->hwss.update_dsc_pg(dc, context, true);
	}

	dc->optimized_required = false;
	dc->wm_optimized_required = false;
}
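/*
 * Expected call order (sketch, assuming the usual DM flow): commit an
 * update, let the resulting flips complete, then call
 * dc_post_update_surfaces_to_stream() so clocks and watermarks can be
 * lowered again. The early is_flip_pending_in_pipes() return above is
 * what makes a premature call harmless.
 */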
bool dc_set_generic_gpio_for_stereo(bool enable,
		struct gpio_service *gpio_service)
{
	enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR;
	struct gpio_pin_info pin_info;
	struct gpio *generic;
	struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config),
			   GFP_KERNEL);

	if (!config)
		return false;
	pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0);

	if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) {
		kfree(config);
		return false;
	} else {
		generic = dal_gpio_service_create_generic_mux(
				gpio_service,
				pin_info.offset,
				pin_info.mask);
	}

	if (!generic) {
		kfree(config);
		return false;
	}

	gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT);

	config->enable_output_from_mux = enable;
	config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC;

	if (gpio_result == GPIO_RESULT_OK)
		gpio_result = dal_mux_setup_config(generic, config);

	if (gpio_result == GPIO_RESULT_OK) {
		dal_gpio_close(generic);
		dal_gpio_destroy_generic_mux(&generic);
		kfree(config);
		return true;
	} else {
		dal_gpio_close(generic);
		dal_gpio_destroy_generic_mux(&generic);
		kfree(config);
		return false;
	}
}
static bool is_surface_in_context(
		const struct dc_state *context,
		const struct dc_plane_state *plane_state)
{
	int j;

	for (j = 0; j < MAX_PIPES; j++) {
		const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (plane_state == pipe_ctx->plane_state) {
			return true;
		}
	}

	return false;
}
static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
{
	union surface_update_flags *update_flags = &u->surface->update_flags;
	enum surface_update_type update_type = UPDATE_TYPE_FAST;

	if (!u->plane_info)
		return UPDATE_TYPE_FAST;

	if (u->plane_info->color_space != u->surface->color_space) {
		update_flags->bits.color_space_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
		update_flags->bits.horizontal_mirror_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->rotation != u->surface->rotation) {
		update_flags->bits.rotation_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->format != u->surface->format) {
		update_flags->bits.pixel_format_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->stereo_format != u->surface->stereo_format) {
		update_flags->bits.stereo_format_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
		update_flags->bits.per_pixel_alpha_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
		update_flags->bits.global_alpha_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->dcc.enable != u->surface->dcc.enable
			|| u->plane_info->dcc.dcc_ind_blk != u->surface->dcc.dcc_ind_blk
			|| u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
		/* During DCC on/off, stutter period is calculated before
		 * DCC has fully transitioned. This results in incorrect
		 * stutter period calculation. Triggering a full update will
		 * recalculate stutter period.
		 */
		update_flags->bits.dcc_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (resource_pixel_format_to_bpp(u->plane_info->format) !=
			resource_pixel_format_to_bpp(u->surface->format)) {
		/* different bytes per element will require full bandwidth
		 * and DML calculation
		 */
		update_flags->bits.bpp_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
			|| u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
		update_flags->bits.plane_size_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
			sizeof(union dc_tiling_info)) != 0) {
		update_flags->bits.swizzle_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);

		/* todo: below are HW dependent, we should add a hook to
		 * DCE/N resource and validate there.
		 */
		if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
			/* swizzled mode requires RQ to be setup properly,
			 * thus need to run DML to calculate RQ settings
			 */
			update_flags->bits.bandwidth_change = 1;
			elevate_update_type(&update_type, UPDATE_TYPE_FULL);
		}
	}

	/* This should be UPDATE_TYPE_FAST if nothing has changed. */
	return update_type;
}
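/*
 * For reference (sketch, not verbatim): elevate_update_type(), defined
 * earlier in this file, only ever raises the severity, roughly
 *
 *	if (new > *original)
 *		*original = new;
 *
 * so the per-field checks above accumulate the worst-case update type.
 */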
static enum surface_update_type get_scaling_info_update_type(
		const struct dc *dc,
		const struct dc_surface_update *u)
{
	union surface_update_flags *update_flags = &u->surface->update_flags;

	if (!u->scaling_info)
		return UPDATE_TYPE_FAST;

	if (u->scaling_info->dst_rect.width != u->surface->dst_rect.width
			|| u->scaling_info->dst_rect.height != u->surface->dst_rect.height
			|| u->scaling_info->scaling_quality.integer_scaling !=
					u->surface->scaling_quality.integer_scaling) {
		update_flags->bits.scaling_change = 1;

		if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
				|| u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
				&& (u->scaling_info->dst_rect.width < u->surface->src_rect.width
					|| u->scaling_info->dst_rect.height < u->surface->src_rect.height))
			/* Making dst rect smaller requires a bandwidth change */
			update_flags->bits.bandwidth_change = 1;
	}

	if (u->scaling_info->src_rect.width != u->surface->src_rect.width
			|| u->scaling_info->src_rect.height != u->surface->src_rect.height) {
		update_flags->bits.scaling_change = 1;
		if (u->scaling_info->src_rect.width > u->surface->src_rect.width
				|| u->scaling_info->src_rect.height > u->surface->src_rect.height)
			/* Making src rect bigger requires a bandwidth change */
			update_flags->bits.clock_change = 1;
	}

	if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width &&
			(u->scaling_info->clip_rect.width > u->surface->clip_rect.width ||
			 u->scaling_info->clip_rect.height > u->surface->clip_rect.height))
		/* Changing clip size of a large surface may result in MPC slice count change */
		update_flags->bits.bandwidth_change = 1;

	if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width ||
			u->scaling_info->clip_rect.height != u->surface->clip_rect.height)
		update_flags->bits.clip_size_change = 1;

	if (u->scaling_info->src_rect.x != u->surface->src_rect.x
			|| u->scaling_info->src_rect.y != u->surface->src_rect.y
			|| u->scaling_info->clip_rect.x != u->surface->clip_rect.x
			|| u->scaling_info->clip_rect.y != u->surface->clip_rect.y
			|| u->scaling_info->dst_rect.x != u->surface->dst_rect.x
			|| u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
		update_flags->bits.position_change = 1;

	if (update_flags->bits.clock_change
			|| update_flags->bits.bandwidth_change
			|| update_flags->bits.scaling_change)
		return UPDATE_TYPE_FULL;

	if (update_flags->bits.position_change ||
			update_flags->bits.clip_size_change)
		return UPDATE_TYPE_MED;

	return UPDATE_TYPE_FAST;
}
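/*
 * Worked example (illustrative): shrinking a 3840x2160 dst_rect to
 * 1920x1080 while keeping the same src_rect sets scaling_change and
 * bandwidth_change, so the update classifies as UPDATE_TYPE_FULL and a
 * fresh bandwidth validation is forced; moving a rect without resizing
 * it only sets position_change and stays UPDATE_TYPE_MED.
 */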
static enum surface_update_type det_surface_update(const struct dc *dc,
		const struct dc_surface_update *u)
{
	const struct dc_state *context = dc->current_state;
	enum surface_update_type type;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;
	union surface_update_flags *update_flags = &u->surface->update_flags;

	if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
		update_flags->raw = 0xFFFFFFFF;
		return UPDATE_TYPE_FULL;
	}

	update_flags->raw = 0; // Reset all flags

	type = get_plane_info_update_type(u);
	elevate_update_type(&overall_type, type);

	type = get_scaling_info_update_type(dc, u);
	elevate_update_type(&overall_type, type);

	if (u->flip_addr) {
		update_flags->bits.addr_update = 1;
		if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) {
			update_flags->bits.tmz_changed = 1;
			elevate_update_type(&overall_type, UPDATE_TYPE_FULL);
		}
	}

	if (u->in_transfer_func)
		update_flags->bits.in_transfer_func_change = 1;

	if (u->input_csc_color_matrix)
		update_flags->bits.input_csc_change = 1;

	if (u->coeff_reduction_factor)
		update_flags->bits.coeff_reduction_change = 1;

	if (u->gamut_remap_matrix)
		update_flags->bits.gamut_remap_change = 1;

	if (u->blend_tf)
		update_flags->bits.gamma_change = 1;

	if (u->gamma) {
		enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;

		if (u->plane_info)
			format = u->plane_info->format;
		else if (u->surface)
			format = u->surface->format;

		if (dce_use_lut(format))
			update_flags->bits.gamma_change = 1;
	}

	if (u->lut3d_func || u->func_shaper)
		update_flags->bits.lut_3d = 1;

	if (u->hdr_mult.value)
		if (u->hdr_mult.value != u->surface->hdr_mult.value) {
			update_flags->bits.hdr_mult = 1;
			elevate_update_type(&overall_type, UPDATE_TYPE_MED);
		}

	if (update_flags->bits.in_transfer_func_change) {
		type = UPDATE_TYPE_MED;
		elevate_update_type(&overall_type, type);
	}

	if (update_flags->bits.lut_3d) {
		type = UPDATE_TYPE_FULL;
		elevate_update_type(&overall_type, type);
	}

	if (dc->debug.enable_legacy_fast_update &&
			(update_flags->bits.gamma_change ||
			update_flags->bits.gamut_remap_change ||
			update_flags->bits.input_csc_change ||
			update_flags->bits.coeff_reduction_change)) {
		type = UPDATE_TYPE_FULL;
		elevate_update_type(&overall_type, type);
	}
	return overall_type;
}
static enum surface_update_type check_update_surfaces_for_stream(
		struct dc *dc,
		struct dc_surface_update *updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		const struct dc_stream_status *stream_status)
{
	int i;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;

	if (dc->idle_optimizations_allowed)
		overall_type = UPDATE_TYPE_FULL;

	if (stream_status == NULL || stream_status->plane_count != surface_count)
		overall_type = UPDATE_TYPE_FULL;

	if (stream_update && stream_update->pending_test_pattern) {
		overall_type = UPDATE_TYPE_FULL;
	}

	/* some stream updates require passive update */
	if (stream_update) {
		union stream_update_flags *su_flags = &stream_update->stream->update_flags;

		if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
				(stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
				stream_update->integer_scaling_update)
			su_flags->bits.scaling = 1;

		if (dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func)
			su_flags->bits.out_tf = 1;

		if (stream_update->abm_level)
			su_flags->bits.abm_level = 1;

		if (stream_update->dpms_off)
			su_flags->bits.dpms_off = 1;

		if (stream_update->gamut_remap)
			su_flags->bits.gamut_remap = 1;

		if (stream_update->wb_update)
			su_flags->bits.wb_update = 1;

		if (stream_update->dsc_config)
			su_flags->bits.dsc_changed = 1;

		if (stream_update->mst_bw_update)
			su_flags->bits.mst_bw = 1;

		if (stream_update->stream && stream_update->stream->freesync_on_desktop &&
				(stream_update->vrr_infopacket || stream_update->allow_freesync ||
				stream_update->vrr_active_variable || stream_update->vrr_active_fixed))
			su_flags->bits.fams_changed = 1;

		if (su_flags->raw != 0)
			overall_type = UPDATE_TYPE_FULL;

		if (stream_update->output_csc_transform || stream_update->output_color_space)
			su_flags->bits.out_csc = 1;

		/* Output transfer function changes do not require bandwidth recalculation,
		 * so don't trigger a full update
		 */
		if (!dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func)
			su_flags->bits.out_tf = 1;
	}

	for (i = 0 ; i < surface_count; i++) {
		enum surface_update_type type =
				det_surface_update(dc, &updates[i]);

		elevate_update_type(&overall_type, type);
	}

	return overall_type;
}
/**
 * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
 *
 * See :c:type:`enum surface_update_type <surface_update_type>` for an
 * explanation of the update types.
 */
enum surface_update_type dc_check_update_surfaces_for_stream(
		struct dc *dc,
		struct dc_surface_update *updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		const struct dc_stream_status *stream_status)
{
	int i;
	enum surface_update_type type;

	if (stream_update)
		stream_update->stream->update_flags.raw = 0;
	for (i = 0; i < surface_count; i++)
		updates[i].surface->update_flags.raw = 0;

	type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
	if (type == UPDATE_TYPE_FULL) {
		if (stream_update) {
			uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;

			stream_update->stream->update_flags.raw = 0xFFFFFFFF;
			stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
		}
		for (i = 0; i < surface_count; i++)
			updates[i].surface->update_flags.raw = 0xFFFFFFFF;
	}

	if (type == UPDATE_TYPE_FAST) {
		// If there's an available clock comparator, we use that.
		if (dc->clk_mgr->funcs->are_clock_states_equal) {
			if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
				dc->optimized_required = true;
		// Else we fall back to a mem compare.
		} else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
			dc->optimized_required = true;
		}

		dc->optimized_required |= dc->wm_optimized_required;
	}

	return type;
}
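/*
 * Illustrative sketch (not part of the driver): classifying a flip-only
 * update. A bare address flip normally comes back as UPDATE_TYPE_FAST,
 * letting the caller take the fast programming path below. The example_
 * name is hypothetical.
 */
static inline bool example_is_fast_flip(struct dc *dc,
		struct dc_surface_update *updates, int surface_count,
		struct dc_stream_state *stream)
{
	return dc_check_update_surfaces_for_stream(dc, updates, surface_count,
			NULL, dc_stream_get_status(stream)) == UPDATE_TYPE_FAST;
}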
static struct dc_stream_status *stream_get_status(
		struct dc_state *ctx,
		struct dc_stream_state *stream)
{
	uint8_t i;

	for (i = 0; i < ctx->stream_count; i++) {
		if (stream == ctx->streams[i]) {
			return &ctx->stream_status[i];
		}
	}

	return NULL;
}

static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
static void copy_surface_update_to_plane(
		struct dc_plane_state *surface,
		struct dc_surface_update *srf_update)
{
	if (srf_update->flip_addr) {
		surface->address = srf_update->flip_addr->address;
		surface->flip_immediate =
			srf_update->flip_addr->flip_immediate;
		surface->time.time_elapsed_in_us[surface->time.index] =
			srf_update->flip_addr->flip_timestamp_in_us -
				surface->time.prev_update_time_in_us;
		surface->time.prev_update_time_in_us =
			srf_update->flip_addr->flip_timestamp_in_us;
		surface->time.index++;
		if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
			surface->time.index = 0;

		surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips;
	}

	if (srf_update->scaling_info) {
		surface->scaling_quality =
				srf_update->scaling_info->scaling_quality;
		surface->dst_rect =
				srf_update->scaling_info->dst_rect;
		surface->src_rect =
				srf_update->scaling_info->src_rect;
		surface->clip_rect =
				srf_update->scaling_info->clip_rect;
	}

	if (srf_update->plane_info) {
		surface->color_space =
				srf_update->plane_info->color_space;
		surface->format =
				srf_update->plane_info->format;
		surface->plane_size =
				srf_update->plane_info->plane_size;
		surface->rotation =
				srf_update->plane_info->rotation;
		surface->horizontal_mirror =
				srf_update->plane_info->horizontal_mirror;
		surface->stereo_format =
				srf_update->plane_info->stereo_format;
		surface->tiling_info =
				srf_update->plane_info->tiling_info;
		surface->visible =
				srf_update->plane_info->visible;
		surface->per_pixel_alpha =
				srf_update->plane_info->per_pixel_alpha;
		surface->global_alpha =
				srf_update->plane_info->global_alpha;
		surface->global_alpha_value =
				srf_update->plane_info->global_alpha_value;
		surface->dcc =
				srf_update->plane_info->dcc;
		surface->layer_index =
				srf_update->plane_info->layer_index;
	}

	if (srf_update->gamma &&
			(surface->gamma_correction !=
					srf_update->gamma)) {
		memcpy(&surface->gamma_correction->entries,
			&srf_update->gamma->entries,
			sizeof(struct dc_gamma_entries));
		surface->gamma_correction->is_identity =
			srf_update->gamma->is_identity;
		surface->gamma_correction->num_entries =
			srf_update->gamma->num_entries;
		surface->gamma_correction->type =
			srf_update->gamma->type;
	}

	if (srf_update->in_transfer_func &&
			(surface->in_transfer_func !=
				srf_update->in_transfer_func)) {
		surface->in_transfer_func->sdr_ref_white_level =
			srf_update->in_transfer_func->sdr_ref_white_level;
		surface->in_transfer_func->tf =
			srf_update->in_transfer_func->tf;
		surface->in_transfer_func->type =
			srf_update->in_transfer_func->type;
		memcpy(&surface->in_transfer_func->tf_pts,
			&srf_update->in_transfer_func->tf_pts,
			sizeof(struct dc_transfer_func_distributed_points));
	}

	if (srf_update->func_shaper &&
			(surface->in_shaper_func !=
				srf_update->func_shaper))
		memcpy(surface->in_shaper_func, srf_update->func_shaper,
			sizeof(*surface->in_shaper_func));

	if (srf_update->lut3d_func &&
			(surface->lut3d_func !=
				srf_update->lut3d_func))
		memcpy(surface->lut3d_func, srf_update->lut3d_func,
			sizeof(*surface->lut3d_func));

	if (srf_update->hdr_mult.value)
		surface->hdr_mult =
				srf_update->hdr_mult;

	if (srf_update->blend_tf &&
			(surface->blend_tf !=
				srf_update->blend_tf))
		memcpy(surface->blend_tf, srf_update->blend_tf,
			sizeof(*surface->blend_tf));

	if (srf_update->input_csc_color_matrix)
		surface->input_csc_color_matrix =
			*srf_update->input_csc_color_matrix;

	if (srf_update->coeff_reduction_factor)
		surface->coeff_reduction_factor =
			*srf_update->coeff_reduction_factor;

	if (srf_update->gamut_remap_matrix)
		surface->gamut_remap_matrix =
			*srf_update->gamut_remap_matrix;
}
static void copy_stream_update_to_stream(struct dc *dc,
					 struct dc_state *context,
					 struct dc_stream_state *stream,
					 struct dc_stream_update *update)
{
	struct dc_context *dc_ctx = dc->ctx;

	if (update == NULL || stream == NULL)
		return;

	if (update->src.height && update->src.width)
		stream->src = update->src;

	if (update->dst.height && update->dst.width)
		stream->dst = update->dst;

	if (update->out_transfer_func &&
			stream->out_transfer_func != update->out_transfer_func) {
		stream->out_transfer_func->sdr_ref_white_level =
			update->out_transfer_func->sdr_ref_white_level;
		stream->out_transfer_func->tf = update->out_transfer_func->tf;
		stream->out_transfer_func->type =
			update->out_transfer_func->type;
		memcpy(&stream->out_transfer_func->tf_pts,
			&update->out_transfer_func->tf_pts,
			sizeof(struct dc_transfer_func_distributed_points));
	}

	if (update->hdr_static_metadata)
		stream->hdr_static_metadata = *update->hdr_static_metadata;

	if (update->abm_level)
		stream->abm_level = *update->abm_level;

	if (update->periodic_interrupt)
		stream->periodic_interrupt = *update->periodic_interrupt;

	if (update->gamut_remap)
		stream->gamut_remap_matrix = *update->gamut_remap;

	/* Note: this being updated after mode set is currently not a use case
	 * however if it arises OCSC would need to be reprogrammed at the
	 * minimum
	 */
	if (update->output_color_space)
		stream->output_color_space = *update->output_color_space;

	if (update->output_csc_transform)
		stream->csc_color_matrix = *update->output_csc_transform;

	if (update->vrr_infopacket)
		stream->vrr_infopacket = *update->vrr_infopacket;

	if (update->allow_freesync)
		stream->allow_freesync = *update->allow_freesync;

	if (update->vrr_active_variable)
		stream->vrr_active_variable = *update->vrr_active_variable;

	if (update->vrr_active_fixed)
		stream->vrr_active_fixed = *update->vrr_active_fixed;

	if (update->crtc_timing_adjust)
		stream->adjust = *update->crtc_timing_adjust;

	if (update->dpms_off)
		stream->dpms_off = *update->dpms_off;

	if (update->hfvsif_infopacket)
		stream->hfvsif_infopacket = *update->hfvsif_infopacket;

	if (update->vtem_infopacket)
		stream->vtem_infopacket = *update->vtem_infopacket;

	if (update->vsc_infopacket)
		stream->vsc_infopacket = *update->vsc_infopacket;

	if (update->vsp_infopacket)
		stream->vsp_infopacket = *update->vsp_infopacket;

	if (update->adaptive_sync_infopacket)
		stream->adaptive_sync_infopacket = *update->adaptive_sync_infopacket;

	if (update->dither_option)
		stream->dither_option = *update->dither_option;

	if (update->pending_test_pattern)
		stream->test_pattern = *update->pending_test_pattern;
	/* update current stream with writeback info */
	if (update->wb_update) {
		int i;

		stream->num_wb_info = update->wb_update->num_wb_info;
		ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
		for (i = 0; i < stream->num_wb_info; i++)
			stream->writeback_info[i] =
				update->wb_update->writeback_info[i];
	}
	if (update->dsc_config) {
		struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
		uint32_t old_dsc_enabled = stream->timing.flags.DSC;
		uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
				       update->dsc_config->num_slices_v != 0);

		/* Use temporary context for validating new DSC config */
		struct dc_state *dsc_validate_context = dc_state_create_copy(dc->current_state);

		if (dsc_validate_context) {
			stream->timing.dsc_cfg = *update->dsc_config;
			stream->timing.flags.DSC = enable_dsc;
			if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
				stream->timing.dsc_cfg = old_dsc_cfg;
				stream->timing.flags.DSC = old_dsc_enabled;
				update->dsc_config = NULL;
			}

			dc_state_release(dsc_validate_context);
		} else {
			DC_ERROR("Failed to allocate new validate context for DSC change\n");
			update->dsc_config = NULL;
		}
	}
}
static void backup_planes_and_stream_state(
		struct dc_scratch_space *scratch,
		struct dc_stream_state *stream)
{
	int i;
	struct dc_stream_status *status = dc_stream_get_status(stream);

	if (!status)
		return;

	for (i = 0; i < status->plane_count; i++) {
		scratch->plane_states[i] = *status->plane_states[i];
		scratch->gamma_correction[i] = *status->plane_states[i]->gamma_correction;
		scratch->in_transfer_func[i] = *status->plane_states[i]->in_transfer_func;
		scratch->lut3d_func[i] = *status->plane_states[i]->lut3d_func;
		scratch->in_shaper_func[i] = *status->plane_states[i]->in_shaper_func;
		scratch->blend_tf[i] = *status->plane_states[i]->blend_tf;
	}
	scratch->stream_state = *stream;
	if (stream->out_transfer_func)
		scratch->out_transfer_func = *stream->out_transfer_func;
}
static void restore_planes_and_stream_state(
		struct dc_scratch_space *scratch,
		struct dc_stream_state *stream)
{
	int i;
	struct dc_stream_status *status = dc_stream_get_status(stream);

	if (!status)
		return;

	for (i = 0; i < status->plane_count; i++) {
		*status->plane_states[i] = scratch->plane_states[i];
		*status->plane_states[i]->gamma_correction = scratch->gamma_correction[i];
		*status->plane_states[i]->in_transfer_func = scratch->in_transfer_func[i];
		*status->plane_states[i]->lut3d_func = scratch->lut3d_func[i];
		*status->plane_states[i]->in_shaper_func = scratch->in_shaper_func[i];
		*status->plane_states[i]->blend_tf = scratch->blend_tf[i];
	}
	*stream = scratch->stream_state;
	if (stream->out_transfer_func)
		*stream->out_transfer_func = scratch->out_transfer_func;
}
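/*
 * The backup/restore pair above is used by the commit paths later in
 * this file: plane and stream state is snapshotted into the scratch
 * space before a (possibly minimal-transition) commit mutates it, so
 * the caller-visible state can be put back exactly as it was.
 */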
static bool update_planes_and_stream_state(struct dc *dc,
		struct dc_surface_update *srf_updates, int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		enum surface_update_type *new_update_type,
		struct dc_state **new_context)
{
	struct dc_state *context;
	int i, j;
	enum surface_update_type update_type;
	const struct dc_stream_status *stream_status;
	struct dc_context *dc_ctx = dc->ctx;

	stream_status = dc_stream_get_status(stream);

	if (!stream_status) {
		if (surface_count) /* Only an error condition if surf_count non-zero*/
			ASSERT(false);

		return false; /* Cannot commit surface to stream that is not committed */
	}

	context = dc->current_state;
	backup_planes_and_stream_state(&dc->current_state->scratch, stream);
	update_type = dc_check_update_surfaces_for_stream(
			dc, srf_updates, surface_count, stream_update, stream_status);

	/* update current stream with the new updates */
	copy_stream_update_to_stream(dc, context, stream, stream_update);

	/* do not perform surface update if surface has invalid dimensions
	 * (all zero) and no scaling_info is provided
	 */
	if (surface_count > 0) {
		for (i = 0; i < surface_count; i++) {
			if ((srf_updates[i].surface->src_rect.width == 0 ||
				 srf_updates[i].surface->src_rect.height == 0 ||
				 srf_updates[i].surface->dst_rect.width == 0 ||
				 srf_updates[i].surface->dst_rect.height == 0) &&
				(!srf_updates[i].scaling_info ||
				  srf_updates[i].scaling_info->src_rect.width == 0 ||
				  srf_updates[i].scaling_info->src_rect.height == 0 ||
				  srf_updates[i].scaling_info->dst_rect.width == 0 ||
				  srf_updates[i].scaling_info->dst_rect.height == 0)) {
				DC_ERROR("Invalid src/dst rects in surface update!\n");
				return false;
			}
		}
	}

	if (update_type >= update_surface_trace_level)
		update_surface_trace(dc, srf_updates, surface_count);

	for (i = 0; i < surface_count; i++)
		copy_surface_update_to_plane(srf_updates[i].surface, &srf_updates[i]);

	if (update_type >= UPDATE_TYPE_FULL) {
		struct dc_plane_state *new_planes[MAX_SURFACES] = {0};

		for (i = 0; i < surface_count; i++)
			new_planes[i] = srf_updates[i].surface;

		/* initialize scratch memory for building context */
		context = dc_state_create_copy(dc->current_state);
		if (context == NULL) {
			DC_ERROR("Failed to allocate new validate context!\n");
			return false;
		}

		/* For each full update, remove all existing phantom pipes first.
		 * Ensures that we have enough pipes for newly added MPO planes
		 */
		dc_state_remove_phantom_streams_and_planes(dc, context);
		dc_state_release_phantom_streams_and_planes(dc, context);

		/*remove old surfaces from context */
		if (!dc_state_rem_all_planes_for_stream(dc, stream, context)) {
			BREAK_TO_DEBUGGER();
			goto fail;
		}

		/* add surface to context */
		if (!dc_state_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
			BREAK_TO_DEBUGGER();
			goto fail;
		}
	}

	/* save update parameters into surface */
	for (i = 0; i < surface_count; i++) {
		struct dc_plane_state *surface = srf_updates[i].surface;

		if (update_type >= UPDATE_TYPE_MED) {
			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

				if (pipe_ctx->plane_state != surface)
					continue;

				resource_build_scaling_params(pipe_ctx);
			}
		}
	}

	if (update_type == UPDATE_TYPE_FULL) {
		if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
			BREAK_TO_DEBUGGER();
			goto fail;
		}

		for (i = 0; i < context->stream_count; i++) {
			struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(&context->res_ctx,
					context->streams[i]);

			if (otg_master && otg_master->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE)
				resource_build_test_pattern_params(&context->res_ctx, otg_master);
		}
	}

	*new_context = context;
	*new_update_type = update_type;
	backup_planes_and_stream_state(&context->scratch, stream);

	return true;

fail:
	dc_state_release(context);

	return false;
}
static void commit_planes_do_stream_update(struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		enum surface_update_type update_type,
		struct dc_state *context)
{
	int j;

	// Stream updates
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && pipe_ctx->stream == stream) {

			if (stream_update->periodic_interrupt && dc->hwss.setup_periodic_interrupt)
				dc->hwss.setup_periodic_interrupt(dc, pipe_ctx);

			if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
					stream_update->vrr_infopacket ||
					stream_update->vsc_infopacket ||
					stream_update->vsp_infopacket ||
					stream_update->hfvsif_infopacket ||
					stream_update->adaptive_sync_infopacket ||
					stream_update->vtem_infopacket) {
				resource_build_info_frame(pipe_ctx);
				dc->hwss.update_info_frame(pipe_ctx);

				if (dc_is_dp_signal(pipe_ctx->stream->signal))
					dc->link_srv->dp_trace_source_sequence(
							pipe_ctx->stream->link,
							DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
			}

			if (stream_update->hdr_static_metadata &&
					stream->use_dynamic_meta &&
					dc->hwss.set_dmdata_attributes &&
					pipe_ctx->stream->dmdata_address.quad_part != 0)
				dc->hwss.set_dmdata_attributes(pipe_ctx);

			if (stream_update->gamut_remap)
				dc_stream_set_gamut_remap(dc, stream);

			if (stream_update->output_csc_transform)
				dc_stream_program_csc_matrix(dc, stream);

			if (stream_update->dither_option) {
				struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;

				resource_build_bit_depth_reduction_params(pipe_ctx->stream,
						&pipe_ctx->stream->bit_depth_params);
				pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
						&stream->bit_depth_params,
						&stream->clamping);
				while (odm_pipe) {
					odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
							&stream->bit_depth_params,
							&stream->clamping);
					odm_pipe = odm_pipe->next_odm_pipe;
				}
			}

			/* Full fe update*/
			if (update_type == UPDATE_TYPE_FAST)
				continue;

			if (stream_update->dsc_config)
				dc->link_srv->update_dsc_config(pipe_ctx);

			if (stream_update->mst_bw_update) {
				if (stream_update->mst_bw_update->is_increase)
					dc->link_srv->increase_mst_payload(pipe_ctx,
							stream_update->mst_bw_update->mst_stream_bw);
				else
					dc->link_srv->reduce_mst_payload(pipe_ctx,
							stream_update->mst_bw_update->mst_stream_bw);
			}

			if (stream_update->pending_test_pattern) {
				dc_link_dp_set_test_pattern(stream->link,
					stream->test_pattern.type,
					stream->test_pattern.color_space,
					stream->test_pattern.p_link_settings,
					stream->test_pattern.p_custom_pattern,
					stream->test_pattern.cust_pattern_size);
			}

			if (stream_update->dpms_off) {
				if (*stream_update->dpms_off) {
					dc->link_srv->set_dpms_off(pipe_ctx);
					/* for dpms, keep acquired resources*/
					if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
						pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

					dc->optimized_required = true;

				} else {
					if (get_seamless_boot_stream_count(context) == 0)
						dc->hwss.prepare_bandwidth(dc, dc->current_state);
					dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx);
				}
			} else if (pipe_ctx->stream->link->wa_flags.blank_stream_on_ocs_change && stream_update->output_color_space
					&& !stream->dpms_off && dc_is_dp_signal(pipe_ctx->stream->signal)) {
				/*
				 * Workaround for firmware issue in some receivers where they don't pick up
				 * correct output color space unless DP link is disabled/re-enabled
				 */
				dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx);
			}

			if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
				bool should_program_abm = true;

				// if otg funcs defined check if blanked before programming
				if (pipe_ctx->stream_res.tg->funcs->is_blanked)
					if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
						should_program_abm = false;

				if (should_program_abm) {
					if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
						dc->hwss.set_abm_immediate_disable(pipe_ctx);
					} else {
						pipe_ctx->stream_res.abm->funcs->set_abm_level(
							pipe_ctx->stream_res.abm, stream->abm_level);
					}
				}
			}
		}
	}
}
static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_state *stream)
{
	if ((stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1
			|| stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
			&& stream->ctx->dce_version >= DCN_VERSION_3_1)
		return true;

	if (stream->link->replay_settings.config.replay_supported)
		return true;

	if (stream->ctx->dce_version >= DCN_VERSION_3_5 && stream->abm_level)
		return true;

	return false;
}
void dc_dmub_update_dirty_rect(struct dc *dc,
			       int surface_count,
			       struct dc_stream_state *stream,
			       struct dc_surface_update *srf_updates,
			       struct dc_state *context)
{
	union dmub_rb_cmd cmd;
	struct dmub_cmd_update_dirty_rect_data *update_dirty_rect;
	unsigned int i, j;
	unsigned int panel_inst = 0;

	if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream))
		return;

	if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst))
		return;

	memset(&cmd, 0x0, sizeof(cmd));
	cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT;
	cmd.update_dirty_rect.header.sub_type = 0;
	cmd.update_dirty_rect.header.payload_bytes =
		sizeof(cmd.update_dirty_rect) -
		sizeof(cmd.update_dirty_rect.header);
	update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data;
	for (i = 0; i < surface_count; i++) {
		struct dc_plane_state *plane_state = srf_updates[i].surface;
		const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr;

		if (!srf_updates[i].surface || !flip_addr)
			continue;
		/* Do not send in immediate flip mode */
		if (srf_updates[i].surface->flip_immediate)
			continue;

		update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count;
		memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects,
				sizeof(flip_addr->dirty_rects));

		for (j = 0; j < dc->res_pool->pipe_count; j++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

			if (pipe_ctx->stream != stream)
				continue;
			if (pipe_ctx->plane_state != plane_state)
				continue;

			update_dirty_rect->panel_inst = panel_inst;
			update_dirty_rect->pipe_idx = j;
			dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
		}
	}
}
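/*
 * The same dirty-rect payload can either be sent immediately (above) or
 * batched into a command list (below) so it can be flushed while the
 * OTG lock is held during the fast update path.
 */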
static void build_dmub_update_dirty_rect(
		struct dc *dc,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_surface_update *srf_updates,
		struct dc_state *context,
		struct dc_dmub_cmd dc_dmub_cmd[],
		unsigned int *dmub_cmd_count)
{
	union dmub_rb_cmd cmd;
	struct dmub_cmd_update_dirty_rect_data *update_dirty_rect;
	unsigned int i, j;
	unsigned int panel_inst = 0;

	if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream))
		return;

	if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst))
		return;

	memset(&cmd, 0x0, sizeof(cmd));
	cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT;
	cmd.update_dirty_rect.header.sub_type = 0;
	cmd.update_dirty_rect.header.payload_bytes =
		sizeof(cmd.update_dirty_rect) -
		sizeof(cmd.update_dirty_rect.header);
	update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data;
	for (i = 0; i < surface_count; i++) {
		struct dc_plane_state *plane_state = srf_updates[i].surface;
		const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr;

		if (!srf_updates[i].surface || !flip_addr)
			continue;
		/* Do not send in immediate flip mode */
		if (srf_updates[i].surface->flip_immediate)
			continue;
		update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
		update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count;
		memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects,
				sizeof(flip_addr->dirty_rects));

		for (j = 0; j < dc->res_pool->pipe_count; j++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

			if (pipe_ctx->stream != stream)
				continue;
			if (pipe_ctx->plane_state != plane_state)
				continue;

			update_dirty_rect->panel_inst = panel_inst;
			update_dirty_rect->pipe_idx = j;
			dc_dmub_cmd[*dmub_cmd_count].dmub_cmd = cmd;
			dc_dmub_cmd[*dmub_cmd_count].wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
			(*dmub_cmd_count)++;
		}
	}
}
/**
 * build_dmub_cmd_list() - Build an array of DMCUB commands to be sent to DMCUB
 *
 * @dc: Current DC state
 * @srf_updates: Array of surface updates
 * @surface_count: Number of surfaces that have been updated
 * @stream: Corresponding stream to be updated in the current flip
 * @context: New DC state to be programmed
 *
 * @dc_dmub_cmd: Array of DMCUB commands to be sent to DMCUB
 * @dmub_cmd_count: Count indicating the number of DMCUB commands in dc_dmub_cmd array
 *
 * This function builds an array of DMCUB commands to be sent to DMCUB. This function is required
 * to build an array of commands and have them sent while the OTG lock is acquired.
 *
 * Return: void
 */
static void build_dmub_cmd_list(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_state *context,
		struct dc_dmub_cmd dc_dmub_cmd[],
		unsigned int *dmub_cmd_count)
{
	// Initialize cmd count to 0
	*dmub_cmd_count = 0;
	build_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context, dc_dmub_cmd, dmub_cmd_count);
}
static void commit_planes_for_stream_fast(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		enum surface_update_type update_type,
		struct dc_state *context)
{
	int i, j;
	struct pipe_ctx *top_pipe_to_program = NULL;
	struct dc_stream_status *stream_status = NULL;

	dc_exit_ips_for_hw_access(dc);

	dc_z10_restore(dc);

	top_pipe_to_program = resource_get_otg_master_for_stream(
			&context->res_ctx,
			stream);

	if (!top_pipe_to_program)
		return;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->stream && pipe->plane_state) {
			set_p_state_switch_method(dc, context, pipe);

			if (dc->debug.visual_confirm)
				dc_update_visual_confirm_color(dc, context, pipe);
		}
	}

	for (i = 0; i < surface_count; i++) {
		struct dc_plane_state *plane_state = srf_updates[i].surface;
		/*set logical flag for lock/unlock use*/
		for (j = 0; j < dc->res_pool->pipe_count; j++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

			if (!pipe_ctx->plane_state)
				continue;
			if (should_update_pipe_for_plane(context, pipe_ctx, plane_state))
				continue;

			pipe_ctx->plane_state->triplebuffer_flips = false;
			if (update_type == UPDATE_TYPE_FAST &&
					dc->hwss.program_triplebuffer &&
					!pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
				/*triple buffer for VUpdate only*/
				pipe_ctx->plane_state->triplebuffer_flips = true;
			}
		}
	}

	stream_status = dc_state_get_stream_status(context, stream);

	build_dmub_cmd_list(dc,
			srf_updates,
			surface_count,
			stream,
			context,
			context->dc_dmub_cmd,
			&(context->dmub_cmd_count));
	hwss_build_fast_sequence(dc,
			context->dc_dmub_cmd,
			context->dmub_cmd_count,
			context->block_sequence,
			&(context->block_sequence_steps),
			top_pipe_to_program,
			stream_status);
	hwss_execute_sequence(dc,
			context->block_sequence,
			context->block_sequence_steps);
	/* Clear update flags so the next flip doesn't have redundant programming
	 * (if there's no stream update, the update flags are not cleared).
	 * Surface updates are cleared unconditionally at the beginning of each flip,
	 * so no need to clear here.
	 */
	if (top_pipe_to_program->stream)
		top_pipe_to_program->stream->update_flags.raw = 0;
}
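/*
 * Fast-path summary: rather than locking, programming each block, and
 * unlocking step by step, the fast path batches the DMCUB commands and
 * register programming into a block sequence and executes it in one go,
 * minimizing the time spent holding the pipe lock.
 */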
static void wait_for_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_context)
{
/*
 * This function calls HWSS to wait for any potentially double buffered
 * operations to complete. It should be invoked as a pre-amble prior
 * to full update programming before asserting any HW locks.
 */
	int pipe_idx;
	int opp_inst;
	int opp_count = dc->res_pool->res_cap->num_opp;
	struct hubp *hubp;
	int mpcc_inst;
	const struct pipe_ctx *pipe_ctx;

	for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) {
		pipe_ctx = &dc_context->res_ctx.pipe_ctx[pipe_idx];

		if (!pipe_ctx->stream)
			continue;

		if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear)
			pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg);

		hubp = pipe_ctx->plane_res.hubp;
		if (!hubp)
			continue;

		mpcc_inst = hubp->inst;
		// MPCC inst is equal to pipe index in practice
		for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
			if ((dc->res_pool->opps[opp_inst] != NULL) &&
					(dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst])) {
				dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
				dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
				break;
			}
		}
	}
	wait_for_odm_update_pending_complete(dc, dc_context);
}
static void commit_planes_for_stream(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		enum surface_update_type update_type,
		struct dc_state *context)
{
	int i, j;
	struct pipe_ctx *top_pipe_to_program = NULL;
	bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
	bool subvp_prev_use = false;
	bool subvp_curr_use = false;
	uint8_t current_stream_mask = 0;

	// Once we apply the new subvp context to hardware it won't be in the
	// dc->current_state anymore, so we have to cache it before we apply
	// the new SubVP context
	subvp_prev_use = false;
	dc_exit_ips_for_hw_access(dc);

	dc_z10_restore(dc);
	if (update_type == UPDATE_TYPE_FULL)
		wait_for_outstanding_hw_updates(dc, context);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->stream && pipe->plane_state) {
			set_p_state_switch_method(dc, context, pipe);

			if (dc->debug.visual_confirm)
				dc_update_visual_confirm_color(dc, context, pipe);
		}
	}

	if (update_type == UPDATE_TYPE_FULL) {
		dc_allow_idle_optimizations(dc, false);

		if (get_seamless_boot_stream_count(context) == 0)
			dc->hwss.prepare_bandwidth(dc, context);

		if (dc->hwss.update_dsc_pg)
			dc->hwss.update_dsc_pg(dc, context, false);

		context_clock_trace(dc, context);
	}

	top_pipe_to_program = resource_get_otg_master_for_stream(
			&context->res_ctx,
			stream);
	ASSERT(top_pipe_to_program != NULL);
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		// Check old context for SubVP
		subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM);
		if (subvp_prev_use)
			break;
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
			subvp_curr_use = true;
			break;
		}
	}

	if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
		struct pipe_ctx *mpcc_pipe;
		struct pipe_ctx *odm_pipe;

		for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
			for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
				odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
	}

	if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
		if (top_pipe_to_program &&
				top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
			if (should_use_dmub_lock(stream->link)) {
				union dmub_hw_lock_flags hw_locks = { 0 };
				struct dmub_hw_lock_inst_flags inst_flags = { 0 };

				hw_locks.bits.lock_dig = 1;
				inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;

				dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
						true,
						&hw_locks,
						&inst_flags);
			} else
				top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
						top_pipe_to_program->stream_res.tg);
		}

	if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
		if (dc->hwss.subvp_pipe_control_lock)
			dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use);
		dc->hwss.interdependent_update_lock(dc, context, true);
	} else {
		if (dc->hwss.subvp_pipe_control_lock)
			dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
		/* Lock the top pipe while updating plane addrs, since freesync requires
		 * plane addr update event triggers to be synchronized.
		 * top_pipe_to_program is expected to never be NULL
		 */
		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
	}
	dc_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context);

	if (stream_update)
		commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);

	if (surface_count == 0) {
		/*
		 * In case of turning off screen, no need to program front end a second time.
		 * just return after program blank.
		 */
		if (dc->hwss.apply_ctx_for_surface)
			dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
		if (dc->hwss.program_front_end_for_ctx)
			dc->hwss.program_front_end_for_ctx(dc, context);

		if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
			dc->hwss.interdependent_update_lock(dc, context, false);
		} else {
			dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
		}
		dc->hwss.post_unlock_program_front_end(dc, context);

		if (update_type != UPDATE_TYPE_FAST)
			if (dc->hwss.commit_subvp_config)
				dc->hwss.commit_subvp_config(dc, context);

		/* Since phantom pipe programming is moved to post_unlock_program_front_end,
		 * move the SubVP lock to after the phantom pipes have been setup
		 */
		if (dc->hwss.subvp_pipe_control_lock)
			dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes,
					NULL, subvp_prev_use);
		return;
	}

	if (update_type != UPDATE_TYPE_FAST) {
		for (j = 0; j < dc->res_pool->pipe_count; j++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

			if ((dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP ||
					dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH) &&
					pipe_ctx->stream && pipe_ctx->plane_state) {
				/* Only update visual confirm for SUBVP and Mclk switching here.
				 * The bar appears on all pipes, so we need to update the bar on all displays,
				 * so the information doesn't get stale.
				 */
				dc->hwss.update_visual_confirm_color(dc, pipe_ctx,
						pipe_ctx->plane_res.hubp->inst);
			}
		}
	}
	for (i = 0; i < surface_count; i++) {
		struct dc_plane_state *plane_state = srf_updates[i].surface;
		/*set logical flag for lock/unlock use*/
		for (j = 0; j < dc->res_pool->pipe_count; j++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

			if (!pipe_ctx->plane_state)
				continue;
			if (should_update_pipe_for_plane(context, pipe_ctx, plane_state))
				continue;

			pipe_ctx->plane_state->triplebuffer_flips = false;
			if (update_type == UPDATE_TYPE_FAST &&
					dc->hwss.program_triplebuffer != NULL &&
					!pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
				/*triple buffer for VUpdate only*/
				pipe_ctx->plane_state->triplebuffer_flips = true;
			}
		}
		if (update_type == UPDATE_TYPE_FULL) {
			/* force vsync flip when reconfiguring pipes to prevent underflow */
			plane_state->flip_immediate = false;
		}
	}

	// Update Type FULL, Surface updates
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (!pipe_ctx->top_pipe &&
				!pipe_ctx->prev_odm_pipe &&
				should_update_pipe_for_stream(context, pipe_ctx, stream)) {
			struct dc_stream_status *stream_status = NULL;

			if (!pipe_ctx->plane_state)
				continue;

			/* Full fe update*/
			if (update_type == UPDATE_TYPE_FAST)
				continue;

			ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);

			if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
				/*turn off triple buffer for full update*/
				dc->hwss.program_triplebuffer(
					dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
			}
			stream_status =
					stream_get_status(context, pipe_ctx->stream);

			if (dc->hwss.apply_ctx_for_surface)
				dc->hwss.apply_ctx_for_surface(
					dc, pipe_ctx->stream, stream_status->plane_count, context);
		}
	}
	if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
		dc->hwss.program_front_end_for_ctx(dc, context);
		if (dc->debug.validate_dml_output) {
			for (i = 0; i < dc->res_pool->pipe_count; i++) {
				struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i];

				if (cur_pipe->stream == NULL)
					continue;

				cur_pipe->plane_res.hubp->funcs->validate_dml_output(
						cur_pipe->plane_res.hubp, dc->ctx,
						&context->res_ctx.pipe_ctx[i].rq_regs,
						&context->res_ctx.pipe_ctx[i].dlg_regs,
						&context->res_ctx.pipe_ctx[i].ttu_regs);
			}
		}
	}
	// Update Type FAST, Surface updates
	if (update_type == UPDATE_TYPE_FAST) {
		if (dc->hwss.set_flip_control_gsl)
			for (i = 0; i < surface_count; i++) {
				struct dc_plane_state *plane_state = srf_updates[i].surface;

				for (j = 0; j < dc->res_pool->pipe_count; j++) {
					struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

					if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
						continue;

					if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
						continue;

					// GSL has to be used for flip immediate
					dc->hwss.set_flip_control_gsl(pipe_ctx,
							pipe_ctx->plane_state->flip_immediate);
				}
			}

		/* Perform requested Updates */
		for (i = 0; i < surface_count; i++) {
			struct dc_plane_state *plane_state = srf_updates[i].surface;

			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

				if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
					continue;

				if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
					continue;

				/*program triple buffer after lock based on flip type*/
				if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
					/*only enable triplebuffer for fast_update*/
					dc->hwss.program_triplebuffer(
						dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
				}
				if (pipe_ctx->plane_state->update_flags.bits.addr_update)
					dc->hwss.update_plane_addr(dc, pipe_ctx);
			}
		}
	}
	if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
		dc->hwss.interdependent_update_lock(dc, context, false);
	} else {
		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
	}

	if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
		if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
			top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
					top_pipe_to_program->stream_res.tg,
					CRTC_STATE_VACTIVE);
			top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
					top_pipe_to_program->stream_res.tg,
					CRTC_STATE_VBLANK);
			top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
					top_pipe_to_program->stream_res.tg,
					CRTC_STATE_VACTIVE);

			if (should_use_dmub_lock(stream->link)) {
				union dmub_hw_lock_flags hw_locks = { 0 };
				struct dmub_hw_lock_inst_flags inst_flags = { 0 };

				hw_locks.bits.lock_dig = 1;
				inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;

				dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
						false,
						&hw_locks,
						&inst_flags);
			} else
				top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
						top_pipe_to_program->stream_res.tg);
		}

	if (subvp_curr_use) {
		/* If enabling subvp or transitioning from subvp->subvp, enable the
		 * phantom streams before we program front end for the phantom pipes.
		 */
		if (update_type != UPDATE_TYPE_FAST) {
			if (dc->hwss.enable_phantom_streams)
				dc->hwss.enable_phantom_streams(dc, context);
		}
	}

	if (update_type != UPDATE_TYPE_FAST)
		dc->hwss.post_unlock_program_front_end(dc, context);

	if (subvp_prev_use && !subvp_curr_use) {
		/* If disabling subvp, disable phantom streams after front end
		 * programming has completed (we turn on phantom OTG in order
		 * to complete the plane disable for phantom pipes).
		 */
		if (dc->hwss.disable_phantom_streams)
			dc->hwss.disable_phantom_streams(dc, context);
	}

	if (update_type != UPDATE_TYPE_FAST)
		if (dc->hwss.commit_subvp_config)
			dc->hwss.commit_subvp_config(dc, context);
	/* Since phantom pipe programming is moved to post_unlock_program_front_end,
	 * move the SubVP lock to after the phantom pipes have been setup
	 */
	if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
		if (dc->hwss.subvp_pipe_control_lock)
			dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
	} else {
		if (dc->hwss.subvp_pipe_control_lock)
			dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
	}

	// Fire manual trigger only when bottom plane is flipped
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (!pipe_ctx->plane_state)
			continue;

		if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe ||
				!pipe_ctx->stream || !should_update_pipe_for_stream(context, pipe_ctx, stream) ||
				!pipe_ctx->plane_state->update_flags.bits.addr_update ||
				pipe_ctx->plane_state->skip_manual_trigger)
			continue;

		if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
			pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
	}

	current_stream_mask = get_stream_mask(dc, context);
	if (current_stream_mask != context->stream_mask) {
		context->stream_mask = current_stream_mask;
		dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, current_stream_mask);
	}
}
3977 * could_mpcc_tree_change_for_active_pipes - Check if an OPP associated with MPCC might change
3979 * @dc: Used to get the current state status
3980 * @stream: Target stream, which we want to remove the attached planes
3981 * @srf_updates: Array of surface updates
3982 * @surface_count: Number of surface update
3983 * @is_plane_addition: [in] Fill out with true if it is a plane addition case
3985 * DCN32x and newer support a feature named Dynamic ODM which can conflict with
3986 * the MPO if used simultaneously in some specific configurations (e.g.,
3987 * 4k@144). This function checks if the incoming context requires applying a
3988 * transition state with unnecessary pipe splitting and ODM disabled to
3989 * circumvent our hardware limitations to prevent this edge case. If the OPP
3990 * associated with an MPCC might change due to plane additions, this function
3991 * returns true.
3993 * Return:
3994 * Return true if OPP and MPCC might change, otherwise, return false.
3995 */
3996 static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
3997 struct dc_stream_state *stream,
3998 struct dc_surface_update *srf_updates,
3999 int surface_count,
4000 bool *is_plane_addition)
4001 {
4003 struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
4004 bool force_minimal_pipe_splitting = false;
4005 bool subvp_active = false;
4006 uint32_t i;
4008 *is_plane_addition = false;
4010 if (cur_stream_status &&
4011 dc->current_state->stream_count > 0 &&
4012 dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) {
4013 /* determine if minimal transition is required due to MPC*/
4014 if (surface_count > 0) {
4015 if (cur_stream_status->plane_count > surface_count) {
4016 force_minimal_pipe_splitting = true;
4017 } else if (cur_stream_status->plane_count < surface_count) {
4018 force_minimal_pipe_splitting = true;
4019 *is_plane_addition = true;
4020 }
4021 }
4022 }
4024 if (cur_stream_status &&
4025 dc->current_state->stream_count == 1 &&
4026 dc->debug.enable_single_display_2to1_odm_policy) {
4027 /* determine if minimal transition is required due to dynamic ODM*/
4028 if (surface_count > 0) {
4029 if (cur_stream_status->plane_count > 2 && cur_stream_status->plane_count > surface_count) {
4030 force_minimal_pipe_splitting = true;
4031 } else if (surface_count > 2 && cur_stream_status->plane_count < surface_count) {
4032 force_minimal_pipe_splitting = true;
4033 *is_plane_addition = true;
4034 }
4035 }
4036 }
4038 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4039 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
4041 if (dc_state_get_pipe_subvp_type(dc->current_state, pipe) != SUBVP_NONE) {
4042 subvp_active = true;
4043 break;
4044 }
4045 }
4047 /* For SubVP when adding or removing planes we need to add a minimal transition
4048 * (even when disabling all planes). Whenever disabling a phantom pipe, we
4049 * must use the minimal transition path to disable the pipe correctly.
4051 * We want to use the minimal transition whenever subvp is active, not only if
4052 * a plane is being added / removed from a subvp stream (MPO plane can be added
4053 * to a DRR pipe of SubVP + DRR config, in which case we still want to run through
4054 * a min transition to disable subvp.
4055 */
4056 if (cur_stream_status && subvp_active) {
4057 /* determine if minimal transition is required due to SubVP*/
4058 if (cur_stream_status->plane_count > surface_count) {
4059 force_minimal_pipe_splitting = true;
4060 } else if (cur_stream_status->plane_count < surface_count) {
4061 force_minimal_pipe_splitting = true;
4062 *is_plane_addition = true;
4063 }
4064 }
4066 return force_minimal_pipe_splitting;
4067 }
4069 struct pipe_split_policy_backup {
4070 bool dynamic_odm_policy;
4071 bool subvp_policy;
4072 enum pipe_split_policy mpc_policy;
4073 };
4075 static void release_minimal_transition_state(struct dc *dc,
4076 struct dc_state *context, struct pipe_split_policy_backup *policy)
4077 {
4078 dc_state_release(context);
4079 /* restore previous pipe split and odm policy */
4080 if (!dc->config.is_vmin_only_asic)
4081 dc->debug.pipe_split_policy = policy->mpc_policy;
4082 dc->debug.enable_single_display_2to1_odm_policy = policy->dynamic_odm_policy;
4083 dc->debug.force_disable_subvp = policy->subvp_policy;
4084 }
4086 static struct dc_state *create_minimal_transition_state(struct dc *dc,
4087 struct dc_state *base_context, struct pipe_split_policy_backup *policy)
4088 {
4089 struct dc_state *minimal_transition_context = NULL;
4090 unsigned int i, j;
4092 if (!dc->config.is_vmin_only_asic) {
4093 policy->mpc_policy = dc->debug.pipe_split_policy;
4094 dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
4095 }
4096 policy->dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy;
4097 dc->debug.enable_single_display_2to1_odm_policy = false;
4098 policy->subvp_policy = dc->debug.force_disable_subvp;
4099 dc->debug.force_disable_subvp = true;
4101 minimal_transition_context = dc_state_create_copy(base_context);
4102 if (!minimal_transition_context)
4103 return NULL;
4105 /* commit minimal state */
4106 if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false)) {
4107 for (i = 0; i < minimal_transition_context->stream_count; i++) {
4108 struct dc_stream_status *stream_status = &minimal_transition_context->stream_status[i];
4110 for (j = 0; j < stream_status->plane_count; j++) {
4111 struct dc_plane_state *plane_state = stream_status->plane_states[j];
4113 /* force vsync flip when reconfiguring pipes to prevent underflow
4114 * and corruption
4115 */
4116 plane_state->flip_immediate = false;
4117 }
4118 }
4119 } else {
4120 /* this should never happen */
4121 release_minimal_transition_state(dc, minimal_transition_context, policy);
4122 BREAK_TO_DEBUGGER();
4123 minimal_transition_context = NULL;
4124 }
4125 return minimal_transition_context;
4126 }
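/* Editorial sketch: the two helpers above are intended to be used as a
 * create/commit/release triple. A minimal, hedged illustration (assuming a
 * valid dc and base context):
 *
 *	struct pipe_split_policy_backup policy;
 *	struct dc_state *mid;
 *
 *	mid = create_minimal_transition_state(dc, base_context, &policy);
 *	if (mid) {
 *		if (dc_commit_state_no_check(dc, mid) != DC_OK)
 *			BREAK_TO_DEBUGGER();
 *		release_minimal_transition_state(dc, mid, &policy);
 *	}
 */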
4128 /**
4130 * commit_minimal_transition_state - Commit a minimal state based on current or new context
4132 * @dc: DC structure, used to get the current state
4133 * @context: New context
4134 * @stream: Stream getting the update for the flip
4136 * The function takes in current state and new state and determines a minimal transition state
4137 * as the intermediate step which could make the transition between current and new states
4138 * seamless. If found, it will commit the minimal transition state and update current state to
4139 * this minimal transition state and return true; if not, it will return false.
4141 * Return:
4142 * Return True if the minimal transition succeeded, false otherwise
4143 */
4144 static bool commit_minimal_transition_state(struct dc *dc,
4145 struct dc_state *context,
4146 struct dc_stream_state *stream)
4147 {
4148 bool success = false;
4149 struct dc_state *minimal_transition_context;
4150 struct pipe_split_policy_backup policy;
4152 /* commit based on new context */
4153 minimal_transition_context = create_minimal_transition_state(dc,
4154 context, &policy);
4155 if (minimal_transition_context) {
4156 if (dc->hwss.is_pipe_topology_transition_seamless(
4157 dc, dc->current_state, minimal_transition_context) &&
4158 dc->hwss.is_pipe_topology_transition_seamless(
4159 dc, minimal_transition_context, context)) {
4160 DC_LOG_DC("%s base = new state\n", __func__);
4162 success = dc_commit_state_no_check(dc, minimal_transition_context) == DC_OK;
4163 }
4164 release_minimal_transition_state(dc, minimal_transition_context, &policy);
4165 }
4167 if (!success) {
4168 /* commit based on current context */
4169 restore_planes_and_stream_state(&dc->current_state->scratch, stream);
4170 minimal_transition_context = create_minimal_transition_state(dc,
4171 dc->current_state, &policy);
4172 if (minimal_transition_context) {
4173 if (dc->hwss.is_pipe_topology_transition_seamless(
4174 dc, dc->current_state, minimal_transition_context) &&
4175 dc->hwss.is_pipe_topology_transition_seamless(
4176 dc, minimal_transition_context, context)) {
4177 DC_LOG_DC("%s base = current state\n", __func__);
4178 success = dc_commit_state_no_check(dc, minimal_transition_context) == DC_OK;
4179 }
4180 release_minimal_transition_state(dc, minimal_transition_context, &policy);
4181 }
4182 restore_planes_and_stream_state(&context->scratch, stream);
4183 }
4185 ASSERT(success);
4186 return success;
4187 }
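/* Editorial note: commit_minimal_transition_state() above only commits the
 * intermediate state when both hops are seamless, i.e. conceptually:
 *
 *	bool ok = dc->hwss.is_pipe_topology_transition_seamless(dc, cur, mid) &&
 *		  dc->hwss.is_pipe_topology_transition_seamless(dc, mid, new);
 *
 * where cur/mid/new stand for the current, minimal-transition and target
 * contexts respectively (names are illustrative only).
 */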
4189 /**
4190 * commit_minimal_transition_state_legacy - Create a transition pipe split state
4192 * @dc: Used to get the current state status
4193 * @transition_base_context: New transition state
4195 * In some specific configurations, such as pipe split on multi-display with
4196 * MPO and/or Dynamic ODM, removing a plane may cause unsupported pipe
4197 * programming when moving to new planes. To mitigate those types of problems,
4198 * this function adds a transition state that minimizes pipe usage before
4199 * programming the new configuration. When adding a new plane, the current
4200 * state requires the least pipes, so it is applied without splitting. When
4201 * removing a plane, the new state requires the least pipes, so it is applied
4202 * without splitting.
4203 * Return:
4205 * Return false if something is wrong in the transition state.
4206 */
4207 static bool commit_minimal_transition_state_legacy(struct dc *dc,
4208 struct dc_state *transition_base_context)
4209 {
4210 struct dc_state *transition_context;
4211 struct pipe_split_policy_backup policy;
4212 enum dc_status ret = DC_ERROR_UNEXPECTED;
4213 unsigned int i, j;
4214 unsigned int pipe_in_use = 0;
4215 bool subvp_in_use = false;
4216 bool odm_in_use = false;
4218 /* check current pipes in use*/
4219 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4220 struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i];
4222 if (pipe->plane_state)
4223 pipe_in_use++;
4224 }
4226 /* If SubVP is enabled and we are adding or removing planes from any main subvp
4227 * pipe, we must use the minimal transition.
4228 */
4229 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4230 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
4232 if (pipe->stream && dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) {
4233 subvp_in_use = true;
4234 break;
4235 }
4236 }
4238 /* If ODM is enabled and we are adding or removing planes from any ODM
4239 * pipe, we must use the minimal transition.
4240 */
4241 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4242 struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i];
4244 if (resource_is_pipe_type(pipe, OTG_MASTER)) {
4245 odm_in_use = resource_get_odm_slice_count(pipe) > 1;
4246 break;
4247 }
4248 }
4250 /* When the OS adds a new surface while all pipes are already in use for
4251 * ODM combine and MPC split, we must use commit_minimal_transition_state
4252 * to transition safely. After the OS exits MPO, it returns to using ODM
4253 * and MPC split with all pipes, so we must call it again; otherwise
4254 * return true to skip. This reduces the scenarios that need
4255 * dc_commit_state_no_check at flip time, especially entering/exiting MPO
4256 * while DCN still has enough resources.
4257 */
4258 if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use && !odm_in_use)
4259 return true;
4261 DC_LOG_DC("%s base = %s state, reason = %s\n", __func__,
4262 dc->current_state == transition_base_context ? "current" : "new",
4263 subvp_in_use ? "Subvp In Use" :
4264 odm_in_use ? "ODM in Use" :
4265 dc->debug.pipe_split_policy != MPC_SPLIT_AVOID ? "MPC in Use" :
4266 "Unknown");
4268 transition_context = create_minimal_transition_state(dc,
4269 transition_base_context, &policy);
4270 if (transition_context) {
4271 ret = dc_commit_state_no_check(dc, transition_context);
4272 release_minimal_transition_state(dc, transition_context, &policy);
4273 }
4275 if (ret != DC_OK) {
4276 /* this should never happen */
4277 BREAK_TO_DEBUGGER();
4278 return false;
4279 }
4281 /* force full surface update */
4282 for (i = 0; i < dc->current_state->stream_count; i++) {
4283 for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
4284 dc->current_state->stream_status[i].plane_states[j]->update_flags.raw = 0xFFFFFFFF;
4285 }
4286 }
4288 return true;
4289 }
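/* Editorial note: writing 0xFFFFFFFF to update_flags.raw above sets every
 * bit of the per-plane update-flags union at once; it is shorthand for
 * something like (illustrative only):
 *
 *	plane_state->update_flags.bits.full_update = 1;
 *	// ... and likewise for every other update_flags.bits.* field
 */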
4291 /**
4292 * update_seamless_boot_flags() - Helper function for updating seamless boot flags
4294 * @dc: Current DC state
4295 * @context: New DC state to be programmed
4296 * @surface_count: Number of surfaces that have an update
4297 * @stream: Corresponding stream to be updated in the current flip
4299 * Updating seamless boot flags does not need to be part of the commit sequence. This
4300 * helper function will update the seamless boot flags on each flip (if required)
4301 * outside of the HW commit sequence (fast or slow).
4303 * Return: void
4304 */
4305 static void update_seamless_boot_flags(struct dc *dc,
4306 struct dc_state *context,
4307 int surface_count,
4308 struct dc_stream_state *stream)
4309 {
4310 if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
4311 /* Optimize seamless boot flag keeps clocks and watermarks high until
4312 * first flip. After first flip, optimization is required to lower
4313 * bandwidth. Important to note that it is expected UEFI will
4314 * only light up a single display on POST, therefore we only expect
4315 * one stream with seamless boot flag set.
4316 */
4317 if (stream->apply_seamless_boot_optimization) {
4318 stream->apply_seamless_boot_optimization = false;
4320 if (get_seamless_boot_stream_count(context) == 0)
4321 dc->optimized_required = true;
4322 }
4323 }
4324 }
4326 static void populate_fast_updates(struct dc_fast_update *fast_update,
4327 struct dc_surface_update *srf_updates,
4328 int surface_count,
4329 struct dc_stream_update *stream_update)
4330 {
4331 int i = 0;
4333 if (stream_update) {
4334 fast_update[0].out_transfer_func = stream_update->out_transfer_func;
4335 fast_update[0].output_csc_transform = stream_update->output_csc_transform;
4336 }
4338 for (i = 0; i < surface_count; i++) {
4339 fast_update[i].flip_addr = srf_updates[i].flip_addr;
4340 fast_update[i].gamma = srf_updates[i].gamma;
4341 fast_update[i].gamut_remap_matrix = srf_updates[i].gamut_remap_matrix;
4342 fast_update[i].input_csc_color_matrix = srf_updates[i].input_csc_color_matrix;
4343 fast_update[i].coeff_reduction_factor = srf_updates[i].coeff_reduction_factor;
4344 }
4345 }
4347 static bool fast_updates_exist(struct dc_fast_update *fast_update, int surface_count)
4348 {
4349 int i;
4351 if (fast_update[0].out_transfer_func ||
4352 fast_update[0].output_csc_transform)
4353 return true;
4355 for (i = 0; i < surface_count; i++) {
4356 if (fast_update[i].flip_addr ||
4357 fast_update[i].gamma ||
4358 fast_update[i].gamut_remap_matrix ||
4359 fast_update[i].input_csc_color_matrix ||
4360 fast_update[i].coeff_reduction_factor)
4361 return true;
4362 }
4364 return false;
4365 }
4367 static bool full_update_required(struct dc *dc,
4368 struct dc_surface_update *srf_updates,
4369 int surface_count,
4370 struct dc_stream_update *stream_update,
4371 struct dc_stream_state *stream)
4372 {
4373 int i;
4375 struct dc_stream_status *stream_status;
4376 const struct dc_state *context = dc->current_state;
4378 for (i = 0; i < surface_count; i++) {
4379 if (srf_updates &&
4380 (srf_updates[i].plane_info ||
4381 srf_updates[i].scaling_info ||
4382 (srf_updates[i].hdr_mult.value &&
4383 srf_updates[i].hdr_mult.value != srf_updates->surface->hdr_mult.value) ||
4384 srf_updates[i].in_transfer_func ||
4385 srf_updates[i].func_shaper ||
4386 srf_updates[i].lut3d_func ||
4387 srf_updates[i].surface->force_full_update ||
4388 (srf_updates[i].flip_addr &&
4389 srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) ||
4390 !is_surface_in_context(context, srf_updates[i].surface)))
4391 return true;
4392 }
4394 if (stream_update &&
4395 (((stream_update->src.height != 0 && stream_update->src.width != 0) ||
4396 (stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
4397 stream_update->integer_scaling_update) ||
4398 stream_update->hdr_static_metadata ||
4399 stream_update->abm_level ||
4400 stream_update->periodic_interrupt ||
4401 stream_update->vrr_infopacket ||
4402 stream_update->vsc_infopacket ||
4403 stream_update->vsp_infopacket ||
4404 stream_update->hfvsif_infopacket ||
4405 stream_update->vtem_infopacket ||
4406 stream_update->adaptive_sync_infopacket ||
4407 stream_update->dpms_off ||
4408 stream_update->allow_freesync ||
4409 stream_update->vrr_active_variable ||
4410 stream_update->vrr_active_fixed ||
4411 stream_update->gamut_remap ||
4412 stream_update->output_color_space ||
4413 stream_update->dither_option ||
4414 stream_update->wb_update ||
4415 stream_update->dsc_config ||
4416 stream_update->mst_bw_update ||
4417 stream_update->func_shaper ||
4418 stream_update->lut3d_func ||
4419 stream_update->pending_test_pattern ||
4420 stream_update->crtc_timing_adjust))
4421 return true;
4423 if (stream) {
4424 stream_status = dc_stream_get_status(stream);
4425 if (stream_status == NULL || stream_status->plane_count != surface_count)
4426 return true;
4427 }
4428 if (dc->idle_optimizations_allowed)
4429 return true;
4431 return false;
4432 }
4434 static bool fast_update_only(struct dc *dc,
4435 struct dc_fast_update *fast_update,
4436 struct dc_surface_update *srf_updates,
4437 int surface_count,
4438 struct dc_stream_update *stream_update,
4439 struct dc_stream_state *stream)
4440 {
4441 return fast_updates_exist(fast_update, surface_count)
4442 && !full_update_required(dc, srf_updates, surface_count, stream_update, stream);
4443 }
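/* Editorial sketch of how the predicate above is typically consumed
 * (mirrors the callers later in this file):
 *
 *	if (fast_update_only(dc, fast_update, srf_updates, surface_count,
 *			     stream_update, stream) &&
 *	    !dc->debug.enable_legacy_fast_update)
 *		; // take commit_planes_for_stream_fast()
 *	else
 *		; // fall back to the full commit_planes_for_stream()
 */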
4445 bool dc_update_planes_and_stream(struct dc *dc,
4446 struct dc_surface_update *srf_updates, int surface_count,
4447 struct dc_stream_state *stream,
4448 struct dc_stream_update *stream_update)
4449 {
4450 struct dc_state *context;
4451 enum surface_update_type update_type;
4452 int i;
4453 struct dc_fast_update fast_update[MAX_SURFACES] = {0};
4455 /* In cases where MPO and split or ODM are used transitions can
4456 * cause underflow. Apply stream configuration with minimal pipe
4457 * split first to avoid unsupported transitions for active pipes.
4458 */
4459 bool force_minimal_pipe_splitting = false;
4460 bool is_plane_addition = false;
4461 bool is_fast_update_only;
4463 dc_exit_ips_for_hw_access(dc);
4465 populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
4466 is_fast_update_only = fast_update_only(dc, fast_update, srf_updates,
4467 surface_count, stream_update, stream);
4468 force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes(
4469 dc,
4470 stream,
4471 srf_updates,
4472 surface_count,
4473 &is_plane_addition);
4475 /* on plane addition, minimal state is the current one */
4476 if (force_minimal_pipe_splitting && is_plane_addition &&
4477 !commit_minimal_transition_state_legacy(dc, dc->current_state))
4478 return false;
4480 if (!update_planes_and_stream_state(
4481 dc,
4482 srf_updates,
4483 surface_count,
4484 stream,
4485 stream_update,
4486 &update_type,
4487 &context))
4488 return false;
4490 /* on plane removal, minimal state is the new one */
4491 if (force_minimal_pipe_splitting && !is_plane_addition) {
4492 if (!commit_minimal_transition_state_legacy(dc, context)) {
4493 dc_state_release(context);
4494 return false;
4495 }
4496 update_type = UPDATE_TYPE_FULL;
4497 }
4499 if (dc->hwss.is_pipe_topology_transition_seamless &&
4500 !dc->hwss.is_pipe_topology_transition_seamless(
4501 dc, dc->current_state, context)) {
4502 commit_minimal_transition_state(dc,
4503 context, stream);
4504 }
4505 update_seamless_boot_flags(dc, context, surface_count, stream);
4506 if (is_fast_update_only && !dc->debug.enable_legacy_fast_update) {
4507 commit_planes_for_stream_fast(dc,
4508 srf_updates,
4509 surface_count,
4510 stream,
4511 stream_update,
4512 update_type,
4513 context);
4514 } else {
4515 if (!stream_update &&
4516 dc->hwss.is_pipe_topology_transition_seamless &&
4517 !dc->hwss.is_pipe_topology_transition_seamless(
4518 dc, dc->current_state, context)) {
4519 DC_LOG_ERROR("performing non-seamless pipe topology transition with surface only update!\n");
4520 BREAK_TO_DEBUGGER();
4521 }
4522 commit_planes_for_stream(
4523 dc,
4524 srf_updates,
4525 surface_count,
4526 stream,
4527 stream_update,
4528 update_type,
4529 context);
4530 }
4532 if (dc->current_state != context) {
4534 /* Since memory free requires elevated IRQL, an interrupt
4535 * request is generated by mem free. If this happens
4536 * between freeing and reassigning the context, our vsync
4537 * interrupt will call into dc and cause a memory
4538 * corruption BSOD. Hence, we first reassign the context,
4539 * then free the old context.
4540 */
4542 struct dc_state *old = dc->current_state;
4544 dc->current_state = context;
4545 dc_state_release(old);
4547 // clear any forced full updates
4548 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4549 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
4551 if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
4552 pipe_ctx->plane_state->force_full_update = false;
4553 }
4554 }
4556 return true;
4557 }
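/* Editorial usage sketch (hypothetical caller, names not from this file):
 * a DM flipping a single plane to a new address might do:
 *
 *	struct dc_surface_update update = {0};
 *
 *	update.surface = plane_state;		// existing plane
 *	update.flip_addr = &new_flip_addr;	// only the address changes
 *	if (!dc_update_planes_and_stream(dc, &update, 1, stream, NULL))
 *		DC_LOG_DC("update rejected\n");
 */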
4558 void dc_commit_updates_for_stream(struct dc *dc,
4559 struct dc_surface_update *srf_updates,
4560 int surface_count,
4561 struct dc_stream_state *stream,
4562 struct dc_stream_update *stream_update,
4563 struct dc_state *state)
4564 {
4565 const struct dc_stream_status *stream_status;
4566 enum surface_update_type update_type;
4567 struct dc_state *context;
4568 struct dc_context *dc_ctx = dc->ctx;
4569 int i, j;
4570 struct dc_fast_update fast_update[MAX_SURFACES] = {0};
4572 dc_exit_ips_for_hw_access(dc);
4574 populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
4575 stream_status = dc_stream_get_status(stream);
4576 context = dc->current_state;
4578 update_type = dc_check_update_surfaces_for_stream(
4579 dc, srf_updates, surface_count, stream_update, stream_status);
4581 /* TODO: Since change commit sequence can have a huge impact,
4582 * we decided to only enable it for DCN3x. However, as soon as
4583 * we get more confident about this change we'll need to enable
4584 * the new sequence for all ASICs.
4585 */
4586 if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
4587 /*
4588 * Previous frame finished and HW is ready for optimization.
4589 */
4590 if (update_type == UPDATE_TYPE_FAST)
4591 dc_post_update_surfaces_to_stream(dc);
4593 dc_update_planes_and_stream(dc, srf_updates,
4594 surface_count, stream,
4595 stream_update);
4596 return;
4597 }
4599 if (update_type >= update_surface_trace_level)
4600 update_surface_trace(dc, srf_updates, surface_count);
4603 if (update_type >= UPDATE_TYPE_FULL) {
4605 /* initialize scratch memory for building context */
4606 context = dc_state_create_copy(state);
4607 if (context == NULL) {
4608 DC_ERROR("Failed to allocate new validate context!\n");
4609 return;
4610 }
4612 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4613 struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
4614 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
4616 if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
4617 new_pipe->plane_state->force_full_update = true;
4618 }
4619 } else if (update_type == UPDATE_TYPE_FAST) {
4620 /*
4621 * Previous frame finished and HW is ready for optimization.
4622 */
4623 dc_post_update_surfaces_to_stream(dc);
4624 }
4627 for (i = 0; i < surface_count; i++) {
4628 struct dc_plane_state *surface = srf_updates[i].surface;
4630 copy_surface_update_to_plane(surface, &srf_updates[i]);
4632 if (update_type >= UPDATE_TYPE_MED) {
4633 for (j = 0; j < dc->res_pool->pipe_count; j++) {
4634 struct pipe_ctx *pipe_ctx =
4635 &context->res_ctx.pipe_ctx[j];
4637 if (pipe_ctx->plane_state != surface)
4638 continue;
4640 resource_build_scaling_params(pipe_ctx);
4641 }
4642 }
4643 }
4645 copy_stream_update_to_stream(dc, context, stream, stream_update);
4647 if (update_type >= UPDATE_TYPE_FULL) {
4648 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
4649 DC_ERROR("Mode validation failed for stream update!\n");
4650 dc_state_release(context);
4651 return;
4652 }
4653 }
4655 TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
4657 update_seamless_boot_flags(dc, context, surface_count, stream);
4658 if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) &&
4659 !dc->debug.enable_legacy_fast_update) {
4660 commit_planes_for_stream_fast(dc,
4661 srf_updates,
4662 surface_count,
4663 stream,
4664 stream_update,
4665 update_type,
4666 context);
4667 } else {
4668 commit_planes_for_stream(
4669 dc,
4670 srf_updates,
4671 surface_count,
4672 stream,
4673 stream_update,
4674 update_type,
4675 context);
4676 }
4677 /* update current_state */
4678 if (dc->current_state != context) {
4680 struct dc_state *old = dc->current_state;
4682 dc->current_state = context;
4683 dc_state_release(old);
4685 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4686 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
4688 if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
4689 pipe_ctx->plane_state->force_full_update = false;
4690 }
4691 }
4693 /* Legacy optimization path for DCE. */
4694 if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) {
4695 dc_post_update_surfaces_to_stream(dc);
4696 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
4697 }
4699 return;
4700 }
4703 uint8_t dc_get_current_stream_count(struct dc *dc)
4704 {
4705 return dc->current_state->stream_count;
4706 }
4708 struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
4709 {
4710 if (i < dc->current_state->stream_count)
4711 return dc->current_state->streams[i];
4712 return NULL;
4713 }
4715 enum dc_irq_source dc_interrupt_to_irq_source(
4716 struct dc *dc,
4717 uint32_t src_id,
4718 uint32_t ext_id)
4719 {
4720 return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
4721 }
4723 /**
4724 * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
4725 */
4726 bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
4727 {
4728 if (dc == NULL)
4729 return false;
4732 return dal_irq_service_set(dc->res_pool->irqs, src, enable);
4733 }
4735 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
4736 {
4737 dal_irq_service_ack(dc->res_pool->irqs, src);
4738 }
4740 void dc_power_down_on_boot(struct dc *dc)
4741 {
4742 if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW &&
4743 dc->hwss.power_down_on_boot)
4744 dc->hwss.power_down_on_boot(dc);
4745 }
4747 void dc_set_power_state(
4748 struct dc *dc,
4749 enum dc_acpi_cm_power_state power_state)
4750 {
4751 if (!dc->current_state)
4752 return;
4754 switch (power_state) {
4755 case DC_ACPI_CM_POWER_STATE_D0:
4756 dc_state_construct(dc, dc->current_state);
4758 dc_exit_ips_for_hw_access(dc);
4762 dc->hwss.init_hw(dc);
4764 if (dc->hwss.init_sys_ctx != NULL &&
4765 dc->vm_pa_config.valid) {
4766 dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
4767 }
4768 break;
4769 default:
4771 ASSERT(dc->current_state->stream_count == 0);
4773 dc_state_destruct(dc->current_state);
4775 break;
4776 }
4777 }
4779 void dc_resume(struct dc *dc)
4780 {
4781 uint32_t i;
4783 for (i = 0; i < dc->link_count; i++)
4784 dc->link_srv->resume(dc->links[i]);
4785 }
4787 bool dc_is_dmcu_initialized(struct dc *dc)
4788 {
4789 struct dmcu *dmcu = dc->res_pool->dmcu;
4791 if (dmcu)
4792 return dmcu->funcs->is_dmcu_initialized(dmcu);
4793 return false;
4794 }
4796 void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
4797 {
4798 info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
4799 info->engineClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
4800 info->memoryClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
4801 info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
4802 info->dppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
4803 info->socClock = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
4804 info->dcfClockDeepSleep = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
4805 info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
4806 info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
4807 }
4808 enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
4809 {
4810 if (dc->hwss.set_clock)
4811 return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping);
4812 return DC_ERROR_UNEXPECTED;
4813 }
4814 void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg)
4815 {
4816 if (dc->hwss.get_clock)
4817 dc->hwss.get_clock(dc, clock_type, clock_cfg);
4818 }
4820 /* enable/disable eDP PSR without specifying a stream for eDP */
4821 bool dc_set_psr_allow_active(struct dc *dc, bool enable)
4822 {
4823 uint32_t i;
4824 bool allow_active;
4826 for (i = 0; i < dc->current_state->stream_count; i++) {
4827 struct dc_link *link;
4828 struct dc_stream_state *stream = dc->current_state->streams[i];
4830 link = stream->link;
4831 if (!link)
4832 continue;
4834 if (link->psr_settings.psr_feature_enabled) {
4835 if (enable && !link->psr_settings.psr_allow_active) {
4836 allow_active = true;
4837 if (!dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL))
4838 return false;
4839 } else if (!enable && link->psr_settings.psr_allow_active) {
4840 allow_active = false;
4841 if (!dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL))
4842 return false;
4843 }
4844 }
4845 }
4847 return true;
4848 }
4850 /* enable/disable eDP Replay without specifying a stream for eDP */
4851 bool dc_set_replay_allow_active(struct dc *dc, bool active)
4852 {
4853 uint32_t i;
4854 bool allow_active;
4856 for (i = 0; i < dc->current_state->stream_count; i++) {
4857 struct dc_link *link;
4858 struct dc_stream_state *stream = dc->current_state->streams[i];
4860 link = stream->link;
4861 if (!link)
4862 continue;
4864 if (link->replay_settings.replay_feature_enabled) {
4865 if (active && !link->replay_settings.replay_allow_active) {
4866 allow_active = true;
4867 if (!dc_link_set_replay_allow_active(link, &allow_active,
4868 false, false, NULL))
4869 return false;
4870 } else if (!active && link->replay_settings.replay_allow_active) {
4871 allow_active = false;
4872 if (!dc_link_set_replay_allow_active(link, &allow_active,
4873 true, false, NULL))
4874 return false;
4875 }
4876 }
4877 }
4879 return true;
4880 }
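/* Editorial usage sketch: a DM typically toggles these helpers around user
 * activity, e.g. (hypothetical call sites):
 *
 *	dc_set_psr_allow_active(dc, false);	// wake panel before interaction
 *	dc_set_replay_allow_active(dc, false);
 *	// ... user interaction ...
 *	dc_set_psr_allow_active(dc, true);	// re-arm once idle again
 */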
4882 void dc_allow_idle_optimizations(struct dc *dc, bool allow)
4883 {
4884 if (dc->debug.disable_idle_power_optimizations)
4885 return;
4887 if (dc->caps.ips_support && (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL))
4888 return;
4890 if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present)
4891 if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr))
4892 return;
4894 if (allow == dc->idle_optimizations_allowed)
4895 return;
4897 if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow))
4898 dc->idle_optimizations_allowed = allow;
4899 }
4901 void dc_exit_ips_for_hw_access(struct dc *dc)
4902 {
4903 if (dc->caps.ips_support)
4904 dc_allow_idle_optimizations(dc, false);
4905 }
4907 bool dc_dmub_is_ips_idle_state(struct dc *dc)
4908 {
4909 if (dc->debug.disable_idle_power_optimizations)
4910 return false;
4912 if (!dc->caps.ips_support || (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL))
4913 return false;
4915 if (!dc->ctx->dmub_srv)
4916 return false;
4918 return dc->ctx->dmub_srv->idle_allowed;
4919 }
4921 /* set min and max memory clock to lowest and highest DPM level, respectively */
4922 void dc_unlock_memory_clock_frequency(struct dc *dc)
4923 {
4924 if (dc->clk_mgr->funcs->set_hard_min_memclk)
4925 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false);
4927 if (dc->clk_mgr->funcs->set_hard_max_memclk)
4928 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
4929 }
4931 /* set min memory clock to the min required for current mode, max to maxDPM */
4932 void dc_lock_memory_clock_frequency(struct dc *dc)
4933 {
4934 if (dc->clk_mgr->funcs->get_memclk_states_from_smu)
4935 dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr);
4937 if (dc->clk_mgr->funcs->set_hard_min_memclk)
4938 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true);
4940 if (dc->clk_mgr->funcs->set_hard_max_memclk)
4941 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
4942 }
4944 static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz)
4945 {
4946 struct dc_state *context = dc->current_state;
4947 struct hubp *hubp;
4948 struct pipe_ctx *pipe;
4949 int i;
4951 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4952 pipe = &context->res_ctx.pipe_ctx[i];
4954 if (pipe->stream != NULL) {
4955 dc->hwss.disable_pixel_data(dc, pipe, true);
4957 // wait for double buffer
4958 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
4959 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK);
4960 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
4962 hubp = pipe->plane_res.hubp;
4963 hubp->funcs->set_blank_regs(hubp, true);
4964 }
4965 }
4967 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz);
4968 dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz);
4970 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4971 pipe = &context->res_ctx.pipe_ctx[i];
4973 if (pipe->stream != NULL) {
4974 dc->hwss.disable_pixel_data(dc, pipe, false);
4976 hubp = pipe->plane_res.hubp;
4977 hubp->funcs->set_blank_regs(hubp, false);
4978 }
4979 }
4980 }
4983 /**
4984 * dc_enable_dcmode_clk_limit() - lower clocks in dc (battery) mode
4985 * @dc: pointer to dc of the dm calling this
4986 * @enable: True = transition to DC mode, false = transition back to AC mode
4988 * Some SoCs define additional clock limits when in DC mode, DM should
4989 * invoke this function when the platform undergoes a power source transition
4990 * so DC can apply/unapply the limit. This interface may be disruptive to
4991 * the onscreen content.
4993 * Context: Triggered by OS through DM interface, or manually by escape calls.
4994 * Need to hold a dclock when doing so.
4996 * Return: none (void function)
4998 */
4999 void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable)
5000 {
5001 unsigned int softMax = 0, maxDPM = 0, funcMin = 0, i;
5002 bool p_state_change_support;
5004 if (!dc->config.dc_mode_clk_limit_support)
5005 return;
5007 softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk;
5008 for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries; i++) {
5009 if (dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz > maxDPM)
5010 maxDPM = dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz;
5011 }
5012 funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000;
5013 p_state_change_support = dc->clk_mgr->clks.p_state_change_support;
5015 if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) {
5016 if (p_state_change_support) {
5017 if (funcMin <= softMax)
5018 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax);
5020 } else {
5021 if (funcMin <= softMax)
5022 blank_and_force_memclk(dc, true, softMax);
5024 }
5025 } else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) {
5026 if (p_state_change_support) {
5027 if (funcMin <= softMax)
5028 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM);
5030 } else {
5031 if (funcMin <= softMax)
5032 blank_and_force_memclk(dc, true, maxDPM);
5033 }
5034 }
5036 dc->clk_mgr->dc_mode_softmax_enabled = enable;
5037 }
5038 bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane,
5039 struct dc_cursor_attributes *cursor_attr)
5040 {
5041 if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, plane, cursor_attr))
5042 return true;
5044 return false;
5045 }
5046 /* cleanup on driver unload */
5047 void dc_hardware_release(struct dc *dc)
5048 {
5049 dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(dc);
5051 if (dc->hwss.hardware_release)
5052 dc->hwss.hardware_release(dc);
5053 }
5055 void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc)
5056 {
5057 if (dc->current_state)
5058 dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down = true;
5059 }
5061 /**
5062 * dc_is_dmub_outbox_supported - Check if DMUB firmware supports outbox notification
5064 * @dc: [in] dc structure
5066 * Checks whether DMUB FW supports outbox notifications, if supported DM
5067 * should register outbox interrupt prior to actually enabling interrupts
5068 * via dc_enable_dmub_outbox
5070 * Return:
5071 * True if DMUB FW supports outbox notifications, False otherwise
5072 */
5073 bool dc_is_dmub_outbox_supported(struct dc *dc)
5074 {
5075 switch (dc->ctx->asic_id.chip_family) {
5077 case FAMILY_YELLOW_CARP:
5078 /* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */
5079 if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
5080 !dc->debug.dpia_debug.bits.disable_dpia)
5081 return true;
5082 break;
5084 case AMDGPU_FAMILY_GC_11_0_1:
5085 case AMDGPU_FAMILY_GC_11_5_0:
5086 if (!dc->debug.dpia_debug.bits.disable_dpia)
5087 return true;
5088 break;
5090 default:
5091 break;
5092 }
5094 /* dmub aux needs dmub notifications to be enabled */
5095 return dc->debug.enable_dmub_aux_for_legacy_ddc;
5096 }
5099 /**
5100 * dc_enable_dmub_notifications - Check if dmub fw supports outbox
5102 * @dc: [in] dc structure
5104 * Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox
5105 * notifications. All DMs shall switch to dc_is_dmub_outbox_supported. This
5106 * API shall be removed after switching.
5108 * Return:
5109 * True if DMUB FW supports outbox notifications, False otherwise
5110 */
5111 bool dc_enable_dmub_notifications(struct dc *dc)
5112 {
5113 return dc_is_dmub_outbox_supported(dc);
5114 }
5116 /**
5117 * dc_enable_dmub_outbox - Enables DMUB unsolicited notification
5119 * @dc: [in] dc structure
5121 * Enables DMUB unsolicited notifications to x86 via outbox.
5122 */
5123 void dc_enable_dmub_outbox(struct dc *dc)
5124 {
5125 struct dc_context *dc_ctx = dc->ctx;
5127 dmub_enable_outbox_notification(dc_ctx->dmub_srv);
5128 DC_LOG_DC("%s: dmub outbox notifications enabled\n", __func__);
5129 }
5131 /**
5132 * dc_process_dmub_aux_transfer_async - Submits aux command to dmub via inbox message
5133 * Sets port index appropriately for legacy DDC
5134 * @dc: dc structure
5135 * @link_index: link index
5136 * @payload: aux payload
5138 * Returns: True if successful, False if failure
5139 */
5140 bool dc_process_dmub_aux_transfer_async(struct dc *dc,
5141 uint32_t link_index,
5142 struct aux_payload *payload)
5143 {
5144 uint8_t action;
5145 union dmub_rb_cmd cmd = {0};
5147 ASSERT(payload->length <= 16);
5149 cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS;
5150 cmd.dp_aux_access.header.payload_bytes = 0;
5151 /* For dpia, ddc_pin is set to NULL */
5152 if (!dc->links[link_index]->ddc->ddc_pin)
5153 cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_DPIA;
5154 else
5155 cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC;
5157 cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst;
5158 cmd.dp_aux_access.aux_control.sw_crc_enabled = 0;
5159 cmd.dp_aux_access.aux_control.timeout = 0;
5160 cmd.dp_aux_access.aux_control.dpaux.address = payload->address;
5161 cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = payload->i2c_over_aux;
5162 cmd.dp_aux_access.aux_control.dpaux.length = payload->length;
5164 /* set aux action */
5165 if (payload->i2c_over_aux) {
5166 if (payload->write) {
5167 if (payload->mot)
5168 action = DP_AUX_REQ_ACTION_I2C_WRITE_MOT;
5169 else
5170 action = DP_AUX_REQ_ACTION_I2C_WRITE;
5171 } else {
5172 if (payload->mot)
5173 action = DP_AUX_REQ_ACTION_I2C_READ_MOT;
5174 else
5175 action = DP_AUX_REQ_ACTION_I2C_READ;
5176 }
5177 } else {
5178 if (payload->write)
5179 action = DP_AUX_REQ_ACTION_DPCD_WRITE;
5180 else
5181 action = DP_AUX_REQ_ACTION_DPCD_READ;
5182 }
5184 cmd.dp_aux_access.aux_control.dpaux.action = action;
5186 if (payload->length && payload->write) {
5187 memcpy(cmd.dp_aux_access.aux_control.dpaux.data,
5188 payload->data,
5189 payload->length);
5190 }
5193 dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
5195 return true;
5196 }
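/* Editorial usage sketch (hypothetical values): a one-byte DPCD read of
 * DP_SINK_COUNT (DPCD address 0x200) on link 0 could be queued as:
 *
 *	struct aux_payload payload = {0};
 *
 *	payload.address = 0x200;
 *	payload.length = 1;
 *	payload.write = false;
 *	payload.i2c_over_aux = false;
 *	dc_process_dmub_aux_transfer_async(dc, 0, &payload);
 *	// the reply is delivered later through a DMUB outbox notification
 */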
5198 uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
5199 uint8_t dpia_port_index)
5200 {
5201 uint8_t index, link_index = 0xFF;
5203 for (index = 0; index < dc->link_count; index++) {
5204 /* ddc_hw_inst has dpia port index for dpia links
5205 * and ddc instance for legacy links
5206 */
5207 if (!dc->links[index]->ddc->ddc_pin) {
5208 if (dc->links[index]->ddc_hw_inst == dpia_port_index) {
5209 link_index = index;
5210 break;
5211 }
5212 }
5213 }
5214 ASSERT(link_index != 0xFF);
5215 return link_index;
5216 }
5218 /**
5219 * dc_process_dmub_set_config_async - Submits set_config command
5221 * @dc: [in] dc structure
5222 * @link_index: [in] link_index: link index
5223 * @payload: [in] aux payload
5224 * @notify: [out] set_config immediate reply
5226 * Submits set_config command to dmub via inbox message.
5228 * Return:
5229 * True if successful, False if failure
5230 */
5231 bool dc_process_dmub_set_config_async(struct dc *dc,
5232 uint32_t link_index,
5233 struct set_config_cmd_payload *payload,
5234 struct dmub_notification *notify)
5235 {
5236 union dmub_rb_cmd cmd = {0};
5237 bool is_cmd_complete = true;
5239 /* prepare SET_CONFIG command */
5240 cmd.set_config_access.header.type = DMUB_CMD__DPIA;
5241 cmd.set_config_access.header.sub_type = DMUB_CMD__DPIA_SET_CONFIG_ACCESS;
5243 cmd.set_config_access.set_config_control.instance = dc->links[link_index]->ddc_hw_inst;
5244 cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type;
5245 cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data;
5247 if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) {
5248 /* command is not processed by dmub */
5249 notify->sc_status = SET_CONFIG_UNKNOWN_ERROR;
5250 return is_cmd_complete;
5251 }
5253 /* command processed by dmub, if ret_status is 1, it is completed instantly */
5254 if (cmd.set_config_access.header.ret_status == 1)
5255 notify->sc_status = cmd.set_config_access.set_config_control.immed_status;
5256 else
5257 /* cmd pending, will receive notification via outbox */
5258 is_cmd_complete = false;
5260 return is_cmd_complete;
5261 }
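/* Editorial usage sketch (hypothetical): completion of SET_CONFIG may be
 * immediate or deferred to an outbox notification:
 *
 *	struct set_config_cmd_payload payload = {0};
 *	struct dmub_notification notify = {0};
 *
 *	payload.msg_type = msg_type;	// DP tunneling SET_CONFIG message type
 *	payload.msg_data = msg_data;
 *	if (dc_process_dmub_set_config_async(dc, link_index, &payload, &notify))
 *		; // done: notify.sc_status holds the immediate reply
 *	else
 *		; // pending: reply arrives via the outbox
 */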
5263 /**
5264 * dc_process_dmub_set_mst_slots - Submits MST slot allocation
5266 * @dc: [in] dc structure
5267 * @link_index: [in] link index
5268 * @mst_alloc_slots: [in] mst slots to be allotted
5269 * @mst_slots_in_use: [out] mst slots in use returned in failure case
5271 * Submits mst slot allocation command to dmub via inbox message
5273 * Return:
5274 * DC_OK if successful, DC_ERROR if failure
5275 */
5276 enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
5277 uint32_t link_index,
5278 uint8_t mst_alloc_slots,
5279 uint8_t *mst_slots_in_use)
5280 {
5281 union dmub_rb_cmd cmd = {0};
5283 /* prepare MST_ALLOC_SLOTS command */
5284 cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA;
5285 cmd.set_mst_alloc_slots.header.sub_type = DMUB_CMD__DPIA_MST_ALLOC_SLOTS;
5287 cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst;
5288 cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots;
5290 if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
5291 /* command is not processed by dmub */
5292 return DC_ERROR_UNEXPECTED;
5294 /* command processed by dmub, if ret_status is 1 */
5295 if (cmd.set_config_access.header.ret_status != 1)
5296 /* command processing error */
5297 return DC_ERROR_UNEXPECTED;
5299 /* command processed and we have a status of 2, mst not enabled in dpia */
5300 if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 2)
5301 return DC_FAIL_UNSUPPORTED_1;
5303 /* previously configured mst alloc and used slots did not match */
5304 if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 3) {
5305 *mst_slots_in_use = cmd.set_mst_alloc_slots.mst_slots_control.mst_slots_in_use;
5306 return DC_NOT_SUPPORTED;
5307 }
5309 return DC_OK;
5310 }
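/* Editorial usage sketch (hypothetical): callers can distinguish the
 * failure modes of the helper above by its return code:
 *
 *	uint8_t slots_in_use = 0;
 *	enum dc_status status;
 *
 *	status = dc_process_dmub_set_mst_slots(dc, link_index, 8, &slots_in_use);
 *	if (status == DC_NOT_SUPPORTED)
 *		; // allocation mismatch: slots_in_use holds the DPIA's count
 *	else if (status == DC_FAIL_UNSUPPORTED_1)
 *		; // MST not enabled on this DPIA
 */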
5312 /**
5313 * dc_process_dmub_dpia_hpd_int_enable - Submits DPIA HPD interrupt enable
5315 * @dc: [in] dc structure
5316 * @hpd_int_enable: [in] 1 for hpd int enable, 0 to disable
5318 * Submits dpia hpd int enable command to dmub via inbox message
5319 */
5320 void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
5321 uint32_t hpd_int_enable)
5322 {
5323 union dmub_rb_cmd cmd = {0};
5325 cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE;
5326 cmd.dpia_hpd_int_enable.enable = hpd_int_enable;
5328 dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
5330 DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable);
5331 }
5333 /**
5334 * dc_print_dmub_diagnostic_data - Print DMUB diagnostic data for debugging
5336 * @dc: [in] dc structure
5339 */
5340 void dc_print_dmub_diagnostic_data(const struct dc *dc)
5341 {
5342 dc_dmub_srv_log_diagnostic_data(dc->ctx->dmub_srv);
5343 }
5345 /**
5346 * dc_disable_accelerated_mode - disable accelerated mode
5347 * @dc: dc structure
5348 */
5349 void dc_disable_accelerated_mode(struct dc *dc)
5350 {
5351 bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0);
5352 }
5355 /**
5356 * dc_notify_vsync_int_state - notifies vsync enable/disable state
5357 * @dc: dc structure
5358 * @stream: stream where vsync int state changed
5359 * @enable: whether vsync is enabled or disabled
5361 * Called when vsync is enabled/disabled. Will notify DMUB to start/stop ABM
5362 * interrupts after steady state is reached.
5363 */
5364 void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable)
5365 {
5366 int i;
5367 int edp_num;
5368 struct pipe_ctx *pipe = NULL;
5369 struct dc_link *link = stream->sink->link;
5370 struct dc_link *edp_links[MAX_NUM_EDP];
5373 if (link->psr_settings.psr_feature_enabled)
5374 return;
5376 if (link->replay_settings.replay_feature_enabled)
5377 return;
5379 /*find primary pipe associated with stream*/
5380 for (i = 0; i < MAX_PIPES; i++) {
5381 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
5383 if (pipe->stream == stream && pipe->stream_res.tg)
5384 break;
5385 }
5387 if (i == MAX_PIPES) {
5388 ASSERT(0);
5389 return;
5390 }
5392 dc_get_edp_links(dc, edp_links, &edp_num);
5394 /* Determine panel inst */
5395 for (i = 0; i < edp_num; i++) {
5396 if (edp_links[i] == link)
5397 break;
5398 }
5400 if (i == edp_num)
5401 return;
5404 if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
5405 pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
5406 }
5408 /*****************************************************************************
5409 * dc_abm_save_restore() - Interface to DC for save+pause and restore+un-pause
5410 * ABM
5411 * @dc: dc structure
5412 * @stream: stream whose ABM state is saved/restored
5413 * @pData: abm hw states
5415 ****************************************************************************/
5416 bool dc_abm_save_restore(
5417 struct dc *dc,
5418 struct dc_stream_state *stream,
5419 struct abm_save_restore *pData)
5420 {
5421 int i;
5422 int edp_num;
5423 struct pipe_ctx *pipe = NULL;
5424 struct dc_link *link = stream->sink->link;
5425 struct dc_link *edp_links[MAX_NUM_EDP];
5427 if (link->replay_settings.replay_feature_enabled)
5428 return false;
5430 /*find primary pipe associated with stream*/
5431 for (i = 0; i < MAX_PIPES; i++) {
5432 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
5434 if (pipe->stream == stream && pipe->stream_res.tg)
5435 break;
5436 }
5438 if (i == MAX_PIPES) {
5439 ASSERT(0);
5440 return false;
5441 }
5443 dc_get_edp_links(dc, edp_links, &edp_num);
5445 /* Determine panel inst */
5446 for (i = 0; i < edp_num; i++)
5447 if (edp_links[i] == link)
5448 break;
5450 if (i == edp_num)
5451 return false;
5453 if (pipe->stream_res.abm &&
5454 pipe->stream_res.abm->funcs->save_restore)
5455 return pipe->stream_res.abm->funcs->save_restore(
5456 pipe->stream_res.abm,
5457 pData);
5459 return false;
5460 }
5462 void dc_query_current_properties(struct dc *dc, struct dc_current_properties *properties)
5463 {
5464 unsigned int i;
5465 bool subvp_sw_cursor_req = false;
5467 for (i = 0; i < dc->current_state->stream_count; i++) {
5468 if (check_subvp_sw_cursor_fallback_req(dc, dc->current_state->streams[i])) {
5469 subvp_sw_cursor_req = true;
5470 break;
5471 }
5472 }
5473 properties->cursor_size_limit = subvp_sw_cursor_req ? 64 : dc->caps.max_cursor_size;
5474 }
5476 /**
5477 * dc_set_edp_power() - DM controls eDP power to be ON/OFF
5479 * Called when DM wants to power on/off eDP.
5480 * Only works on links with the skip_implict_edp_power_control flag set.
5482 * @dc: Current DC state
5483 * @edp_link: a link with eDP connector signal type
5484 * @powerOn: power on/off eDP
5486 * Return: void
5487 */
5488 void dc_set_edp_power(const struct dc *dc, struct dc_link *edp_link,
5489 bool powerOn)
5490 {
5491 if (edp_link->connector_signal != SIGNAL_TYPE_EDP)
5492 return;
5494 if (edp_link->skip_implict_edp_power_control == false)
5495 return;
5497 edp_link->dc->link_srv->edp_set_panel_power(edp_link, powerOn);
5498 }
5500 /*
5501 *****************************************************************************
5502 * dc_get_power_profile_for_dc_state() - extracts power profile from dc state
5504 * Called when DM wants to make power policy decisions based on dc_state
5506 *****************************************************************************
5507 */
5508 struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state *context)
5509 {
5510 struct dc_power_profile profile = { 0 };
5512 profile.power_level += !context->bw_ctx.bw.dcn.clk.p_state_change_support;