[PATCH 25/27] drm/amd/display: add bw logging for dcn

From: Dmytro Laktyushkin <Dmytro.Laktyushkin@xxxxxxx>
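
Log bandwidth calculation state for DCN: dump the SoC bounding box and
IP parameters when syncing the bw calcs with DML, trace the current and
newly calculated clocks after bandwidth validation, and print the DML
RQ/DLG/TTU register values and watermarks when a front end is powered
on and the context is applied. The clock trace is gated by a new
clock_trace debug flag, enabled by default for both driver and diag
configurations; the new output goes to the LOG_BANDWIDTH_CALCS log
category. While at it, flatten an unneeded scope block in
dcn10_power_on_fe.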

Change-Id: I6242cae575f292728fea23069514379b87f421f5
Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <Harry.Wentland@amd.com>
---
 drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c   | 138 +++++++++++
 drivers/gpu/drm/amd/display/dc/core/dc.c           |   4 +-
 drivers/gpu/drm/amd/display/dc/core/dc_debug.c     |  36 +++
 drivers/gpu/drm/amd/display/dc/dc.h                |   1 +
 .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c  | 260 +++++++++++++++++----
 .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.c  |   2 +
 .../gpu/drm/amd/display/include/logger_interface.h |   4 +
 7 files changed, 396 insertions(+), 49 deletions(-)
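
Note on usage: the clock trace rides on the existing dc_debug plumbing
and the LOG_BANDWIDTH_CALCS log category. As a minimal sketch (not part
of the patch), a DM layer that already holds the dc pointer returned by
dc_create() could silence the trace at runtime roughly like this, where
my_get_dc() is a hypothetical accessor:

	/* clock_trace defaults to true in debug_defaults_drv, so
	 * context_clock_trace() output shows up until the flag is
	 * cleared. Sketch only; my_get_dc() is hypothetical.
	 */
	struct dc *dc = my_get_dc();

	dc->debug.clock_trace = false;	/* mute CLOCK_TRACE output */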

diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 6d8bc6c74a73..f0f688b99d37 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -1312,6 +1312,144 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct core_dc *dc)
 void dcn_bw_sync_calcs_and_dml(struct core_dc *dc)
 {
 	kernel_fpu_begin();
+	dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+			"sr_exit_time: %d ns\n"
+			"sr_enter_plus_exit_time: %d ns\n"
+			"urgent_latency: %d ns\n"
+			"write_back_latency: %d ns\n"
+			"percent_of_ideal_drambw_received_after_urg_latency: %d %\n"
+			"max_request_size: %d bytes\n"
+			"dcfclkv_max0p9: %d kHz\n"
+			"dcfclkv_nom0p8: %d kHz\n"
+			"dcfclkv_mid0p72: %d kHz\n"
+			"dcfclkv_min0p65: %d kHz\n"
+			"max_dispclk_vmax0p9: %d kHz\n"
+			"max_dispclk_vnom0p8: %d kHz\n"
+			"max_dispclk_vmid0p72: %d kHz\n"
+			"max_dispclk_vmin0p65: %d kHz\n"
+			"max_dppclk_vmax0p9: %d kHz\n"
+			"max_dppclk_vnom0p8: %d kHz\n"
+			"max_dppclk_vmid0p72: %d kHz\n"
+			"max_dppclk_vmin0p65: %d kHz\n"
+			"socclk: %d kHz\n"
+			"fabric_and_dram_bandwidth_vmax0p9: %d MB/s\n"
+			"fabric_and_dram_bandwidth_vnom0p8: %d MB/s\n"
+			"fabric_and_dram_bandwidth_vmid0p72: %d MB/s\n"
+			"fabric_and_dram_bandwidth_vmin0p65: %d MB/s\n"
+			"phyclkv_max0p9: %d kHz\n"
+			"phyclkv_nom0p8: %d kHz\n"
+			"phyclkv_mid0p72: %d kHz\n"
+			"phyclkv_min0p65: %d kHz\n"
+			"downspreading: %d %\n"
+			"round_trip_ping_latency_cycles: %d DCFCLK Cycles\n"
+			"urgent_out_of_order_return_per_channel: %d Bytes\n"
+			"number_of_channels: %d\n"
+			"vmm_page_size: %d Bytes\n"
+			"dram_clock_change_latency: %d ns\n"
+			"return_bus_width: %d Bytes\n",
+			(int)(dc->dcn_soc.sr_exit_time * 1000),
+			(int)(dc->dcn_soc.sr_enter_plus_exit_time * 1000),
+			(int)(dc->dcn_soc.urgent_latency * 1000),
+			(int)(dc->dcn_soc.write_back_latency * 1000),
+			(int)(dc->dcn_soc.percent_of_ideal_drambw_received_after_urg_latency),
+			(int)(dc->dcn_soc.max_request_size),
+			(int)(dc->dcn_soc.dcfclkv_max0p9 * 1000),
+			(int)(dc->dcn_soc.dcfclkv_nom0p8 * 1000),
+			(int)(dc->dcn_soc.dcfclkv_mid0p72 * 1000),
+			(int)(dc->dcn_soc.dcfclkv_min0p65 * 1000),
+			(int)(dc->dcn_soc.max_dispclk_vmax0p9 * 1000),
+			(int)(dc->dcn_soc.max_dispclk_vnom0p8 * 1000),
+			(int)(dc->dcn_soc.max_dispclk_vmid0p72 * 1000),
+			(int)(dc->dcn_soc.max_dispclk_vmin0p65 * 1000),
+			(int)(dc->dcn_soc.max_dppclk_vmax0p9 * 1000),
+			(int)(dc->dcn_soc.max_dppclk_vnom0p8 * 1000),
+			(int)(dc->dcn_soc.max_dppclk_vmid0p72 * 1000),
+			(int)(dc->dcn_soc.max_dppclk_vmin0p65 * 1000),
+			(int)(dc->dcn_soc.socclk * 1000),
+			(int)(dc->dcn_soc.fabric_and_dram_bandwidth_vmax0p9 * 1000),
+			(int)(dc->dcn_soc.fabric_and_dram_bandwidth_vnom0p8 * 1000),
+			(int)(dc->dcn_soc.fabric_and_dram_bandwidth_vmid0p72 * 1000),
+			(int)(dc->dcn_soc.fabric_and_dram_bandwidth_vmin0p65 * 1000),
+			(int)(dc->dcn_soc.phyclkv_max0p9 * 1000),
+			(int)(dc->dcn_soc.phyclkv_nom0p8 * 1000),
+			(int)(dc->dcn_soc.phyclkv_mid0p72 * 1000),
+			(int)(dc->dcn_soc.phyclkv_min0p65 * 1000),
+			(int)(dc->dcn_soc.downspreading * 100),
+			(int)(dc->dcn_soc.round_trip_ping_latency_cycles),
+			(int)(dc->dcn_soc.urgent_out_of_order_return_per_channel),
+			(int)(dc->dcn_soc.number_of_channels),
+			(int)(dc->dcn_soc.vmm_page_size),
+			(int)(dc->dcn_soc.dram_clock_change_latency * 1000),
+			(int)(dc->dcn_soc.return_bus_width));
+	dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+			"rob_buffer_size_in_kbyte: %d\n"
+			"det_buffer_size_in_kbyte: %d\n"
+			"dpp_output_buffer_pixels: %d\n"
+			"opp_output_buffer_lines: %d\n"
+			"pixel_chunk_size_in_kbyte: %d\n"
+			"pte_enable: %d\n"
+			"pte_chunk_size: %d kbytes\n"
+			"meta_chunk_size: %d kbytes\n"
+			"writeback_chunk_size: %d kbytes\n"
+			"odm_capability: %d\n"
+			"dsc_capability: %d\n"
+			"line_buffer_size: %d bits\n"
+			"max_line_buffer_lines: %d\n"
+			"is_line_buffer_bpp_fixed: %d\n"
+			"line_buffer_fixed_bpp: %d\n"
+			"writeback_luma_buffer_size: %d kbytes\n"
+			"writeback_chroma_buffer_size: %d kbytes\n"
+			"max_num_dpp: %d\n"
+			"max_num_writeback: %d\n"
+			"max_dchub_topscl_throughput: %d pixels/dppclk\n"
+			"max_pscl_tolb_throughput: %d pixels/dppclk\n"
+			"max_lb_tovscl_throughput: %d pixels/dppclk\n"
+			"max_vscl_tohscl_throughput: %d pixels/dppclk\n"
+			"max_hscl_ratio: %d\n"
+			"max_vscl_ratio: %d\n"
+			"max_hscl_taps: %d\n"
+			"max_vscl_taps: %d\n"
+			"pte_buffer_size_in_requests: %d\n"
+			"dispclk_ramping_margin: %d %\n"
+			"under_scan_factor: %d %\n"
+			"max_inter_dcn_tile_repeaters: %d\n"
+			"can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one: %d\n"
+			"bug_forcing_luma_and_chroma_request_to_same_size_fixed: %d\n"
+			"dcfclk_cstate_latency: %d\n",
+			(int)(dc->dcn_ip.rob_buffer_size_in_kbyte),
+			(int)(dc->dcn_ip.det_buffer_size_in_kbyte),
+			(int)(dc->dcn_ip.dpp_output_buffer_pixels),
+			(int)(dc->dcn_ip.opp_output_buffer_lines),
+			(int)(dc->dcn_ip.pixel_chunk_size_in_kbyte),
+			(int)(dc->dcn_ip.pte_enable),
+			(int)(dc->dcn_ip.pte_chunk_size),
+			(int)(dc->dcn_ip.meta_chunk_size),
+			(int)(dc->dcn_ip.writeback_chunk_size),
+			(int)(dc->dcn_ip.odm_capability),
+			(int)(dc->dcn_ip.dsc_capability),
+			(int)(dc->dcn_ip.line_buffer_size),
+			(int)(dc->dcn_ip.max_line_buffer_lines),
+			(int)(dc->dcn_ip.is_line_buffer_bpp_fixed),
+			(int)(dc->dcn_ip.line_buffer_fixed_bpp),
+			(int)(dc->dcn_ip.writeback_luma_buffer_size),
+			(int)(dc->dcn_ip.writeback_chroma_buffer_size),
+			(int)(dc->dcn_ip.max_num_dpp),
+			(int)(dc->dcn_ip.max_num_writeback),
+			(int)(dc->dcn_ip.max_dchub_topscl_throughput),
+			(int)(dc->dcn_ip.max_pscl_tolb_throughput),
+			(int)(dc->dcn_ip.max_lb_tovscl_throughput),
+			(int)(dc->dcn_ip.max_vscl_tohscl_throughput),
+			(int)(dc->dcn_ip.max_hscl_ratio),
+			(int)(dc->dcn_ip.max_vscl_ratio),
+			(int)(dc->dcn_ip.max_hscl_taps),
+			(int)(dc->dcn_ip.max_vscl_taps),
+			(int)(dc->dcn_ip.pte_buffer_size_in_requests),
+			(int)(dc->dcn_ip.dispclk_ramping_margin),
+			(int)(dc->dcn_ip.under_scan_factor * 100),
+			(int)(dc->dcn_ip.max_inter_dcn_tile_repeaters),
+			(int)(dc->dcn_ip.can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one),
+			(int)(dc->dcn_ip.bug_forcing_luma_and_chroma_request_to_same_size_fixed),
+			(int)(dc->dcn_ip.dcfclk_cstate_latency));
 	dc->dml.soc.vmin.socclk_mhz = dc->dcn_soc.socclk;
 	dc->dml.soc.vmid.socclk_mhz = dc->dcn_soc.socclk;
 	dc->dml.soc.vnom.socclk_mhz = dc->dcn_soc.socclk;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index fb5bacb50ebc..47870a640f37 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1379,8 +1379,10 @@ void dc_update_surfaces_and_stream(struct dc *dc,
 		if (!core_dc->res_pool->funcs->validate_bandwidth(core_dc, context)) {
 			BREAK_TO_DEBUGGER();
 			goto fail;
-		} else
+		} else {
 			core_dc->hwss.set_bandwidth(core_dc, context, false);
+			context_clock_trace(dc, context);
+		}
 	}
 
 	if (!surface_count)  /* reset */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index c60b59f41693..263dab6337a6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -29,6 +29,13 @@
 				##__VA_ARGS__); \
 } while (0)
 
+#define CLOCK_TRACE(...) do {\
+	if (dc->debug.clock_trace) \
+		dm_logger_write(logger, \
+				LOG_BANDWIDTH_CALCS, \
+				##__VA_ARGS__); \
+} while (0)
+
 void pre_surface_trace(
 		const struct dc *dc,
 		const struct dc_surface *const *surfaces,
@@ -314,3 +321,32 @@ void context_timing_trace(
 				h_pos[i], v_pos[i]);
 	}
 }
+
+void context_clock_trace(
+		const struct dc *dc,
+		struct validate_context *context)
+{
+	struct core_dc *core_dc = DC_TO_CORE(dc);
+	struct dal_logger *logger = core_dc->ctx->logger;
+
+	CLOCK_TRACE("Current: dispclk_khz:%d  dppclk_div:%d  dcfclk_khz:%d\n"
+			"dcfclk_deep_sleep_khz:%d  fclk_khz:%d\n"
+			"dram_ccm_us:%d  min_active_dram_ccm_us:%d\n",
+			context->bw.dcn.cur_clk.dispclk_khz,
+			context->bw.dcn.cur_clk.dppclk_div,
+			context->bw.dcn.cur_clk.dcfclk_khz,
+			context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz,
+			context->bw.dcn.cur_clk.fclk_khz,
+			context->bw.dcn.cur_clk.dram_ccm_us,
+			context->bw.dcn.cur_clk.min_active_dram_ccm_us);
+	CLOCK_TRACE("Calculated: dispclk_khz:%d  dppclk_div:%d  dcfclk_khz:%d\n"
+			"dcfclk_deep_sleep_khz:%d  fclk_khz:%d\n"
+			"dram_ccm_us:%d  min_active_dram_ccm_us:%d\n",
+			context->bw.dcn.calc_clk.dispclk_khz,
+			context->bw.dcn.calc_clk.dppclk_div,
+			context->bw.dcn.calc_clk.dcfclk_khz,
+			context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
+			context->bw.dcn.calc_clk.fclk_khz,
+			context->bw.dcn.calc_clk.dram_ccm_us,
+			context->bw.dcn.calc_clk.min_active_dram_ccm_us);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 7191b2519334..e08e532cafb8 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -160,6 +160,7 @@ struct dc_debug {
 	bool max_disp_clk;
 	bool surface_trace;
 	bool timing_trace;
+	bool clock_trace;
 	bool validation_trace;
 	bool disable_stutter;
 	bool disable_dcc;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 0a346aafacce..93a34e2ef175 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -1412,6 +1412,128 @@ static void dcn10_enable_timing_synchronization(
 	DC_SYNC_INFO("Sync complete\n");
 }
 
+static void print_rq_dlg_ttu(
+		struct core_dc *core_dc,
+		struct pipe_ctx *pipe_ctx)
+{
+	dm_logger_write(core_dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+			"\n============== DML TTU Output parameters [%d] ==============\n"
+			"qos_level_low_wm: %d, \n"
+			"qos_level_high_wm: %d, \n"
+			"min_ttu_vblank: %d, \n"
+			"qos_level_flip: %d, \n"
+			"refcyc_per_req_delivery_l: %d, \n"
+			"qos_level_fixed_l: %d, \n"
+			"qos_ramp_disable_l: %d, \n"
+			"refcyc_per_req_delivery_pre_l: %d, \n"
+			"refcyc_per_req_delivery_c: %d, \n"
+			"qos_level_fixed_c: %d, \n"
+			"qos_ramp_disable_c: %d, \n"
+			"refcyc_per_req_delivery_pre_c: %d\n"
+			"=============================================================\n",
+			pipe_ctx->pipe_idx,
+			pipe_ctx->ttu_regs.qos_level_low_wm,
+			pipe_ctx->ttu_regs.qos_level_high_wm,
+			pipe_ctx->ttu_regs.min_ttu_vblank,
+			pipe_ctx->ttu_regs.qos_level_flip,
+			pipe_ctx->ttu_regs.refcyc_per_req_delivery_l,
+			pipe_ctx->ttu_regs.qos_level_fixed_l,
+			pipe_ctx->ttu_regs.qos_ramp_disable_l,
+			pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_l,
+			pipe_ctx->ttu_regs.refcyc_per_req_delivery_c,
+			pipe_ctx->ttu_regs.qos_level_fixed_c,
+			pipe_ctx->ttu_regs.qos_ramp_disable_c,
+			pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_c
+			);
+
+	dm_logger_write(core_dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+			"\n============== DML DLG Output parameters [%d] ==============\n"
+			"refcyc_h_blank_end: %d, \n"
+			"dlg_vblank_end: %d, \n"
+			"min_dst_y_next_start: %d, \n"
+			"refcyc_per_htotal: %d, \n"
+			"refcyc_x_after_scaler: %d, \n"
+			"dst_y_after_scaler: %d, \n"
+			"dst_y_prefetch: %d, \n"
+			"dst_y_per_vm_vblank: %d, \n"
+			"dst_y_per_row_vblank: %d, \n"
+			"ref_freq_to_pix_freq: %d, \n"
+			"vratio_prefetch: %d, \n"
+			"refcyc_per_pte_group_vblank_l: %d, \n"
+			"refcyc_per_meta_chunk_vblank_l: %d, \n"
+			"dst_y_per_pte_row_nom_l: %d, \n"
+			"refcyc_per_pte_group_nom_l: %d, \n",
+			pipe_ctx->pipe_idx,
+			pipe_ctx->dlg_regs.refcyc_h_blank_end,
+			pipe_ctx->dlg_regs.dlg_vblank_end,
+			pipe_ctx->dlg_regs.min_dst_y_next_start,
+			pipe_ctx->dlg_regs.refcyc_per_htotal,
+			pipe_ctx->dlg_regs.refcyc_x_after_scaler,
+			pipe_ctx->dlg_regs.dst_y_after_scaler,
+			pipe_ctx->dlg_regs.dst_y_prefetch,
+			pipe_ctx->dlg_regs.dst_y_per_vm_vblank,
+			pipe_ctx->dlg_regs.dst_y_per_row_vblank,
+			pipe_ctx->dlg_regs.ref_freq_to_pix_freq,
+			pipe_ctx->dlg_regs.vratio_prefetch,
+			pipe_ctx->dlg_regs.refcyc_per_pte_group_vblank_l,
+			pipe_ctx->dlg_regs.refcyc_per_meta_chunk_vblank_l,
+			pipe_ctx->dlg_regs.dst_y_per_pte_row_nom_l,
+			pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_l
+			);
+
+	dm_logger_write(core_dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+			"\ndst_y_per_meta_row_nom_l: %d, \n"
+			"refcyc_per_meta_chunk_nom_l: %d, \n"
+			"refcyc_per_line_delivery_pre_l: %d, \n"
+			"refcyc_per_line_delivery_l: %d, \n"
+			"vratio_prefetch_c: %d, \n"
+			"refcyc_per_pte_group_vblank_c: %d, \n"
+			"refcyc_per_meta_chunk_vblank_c: %d, \n"
+			"dst_y_per_pte_row_nom_c: %d, \n"
+			"refcyc_per_pte_group_nom_c: %d, \n"
+			"dst_y_per_meta_row_nom_c: %d, \n"
+			"refcyc_per_meta_chunk_nom_c: %d, \n"
+			"refcyc_per_line_delivery_pre_c: %d, \n"
+			"refcyc_per_line_delivery_c: %d \n"
+			"========================================================\n",
+			pipe_ctx->dlg_regs.dst_y_per_meta_row_nom_l,
+			pipe_ctx->dlg_regs.refcyc_per_meta_chunk_nom_l,
+			pipe_ctx->dlg_regs.refcyc_per_line_delivery_pre_l,
+			pipe_ctx->dlg_regs.refcyc_per_line_delivery_l,
+			pipe_ctx->dlg_regs.vratio_prefetch_c,
+			pipe_ctx->dlg_regs.refcyc_per_pte_group_vblank_c,
+			pipe_ctx->dlg_regs.refcyc_per_meta_chunk_vblank_c,
+			pipe_ctx->dlg_regs.dst_y_per_pte_row_nom_c,
+			pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_c,
+			pipe_ctx->dlg_regs.dst_y_per_meta_row_nom_c,
+			pipe_ctx->dlg_regs.refcyc_per_meta_chunk_nom_c,
+			pipe_ctx->dlg_regs.refcyc_per_line_delivery_pre_c,
+			pipe_ctx->dlg_regs.refcyc_per_line_delivery_c
+			);
+
+	dm_logger_write(core_dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+			"\n============== DML RQ Output parameters [%d] ==============\n"
+			"chunk_size: %d \n"
+			"min_chunk_size: %d \n"
+			"meta_chunk_size: %d \n"
+			"min_meta_chunk_size: %d \n"
+			"dpte_group_size: %d \n"
+			"mpte_group_size: %d \n"
+			"swath_height: %d \n"
+			"pte_row_height_linear: %d \n"
+			"========================================================\n",
+			pipe_ctx->pipe_idx,
+			pipe_ctx->rq_regs.rq_regs_l.chunk_size,
+			pipe_ctx->rq_regs.rq_regs_l.min_chunk_size,
+			pipe_ctx->rq_regs.rq_regs_l.meta_chunk_size,
+			pipe_ctx->rq_regs.rq_regs_l.min_meta_chunk_size,
+			pipe_ctx->rq_regs.rq_regs_l.dpte_group_size,
+			pipe_ctx->rq_regs.rq_regs_l.mpte_group_size,
+			pipe_ctx->rq_regs.rq_regs_l.swath_height,
+			pipe_ctx->rq_regs.rq_regs_l.pte_row_height_linear
+			);
+}
+
 static void dcn10_power_on_fe(
 	struct core_dc *dc,
 	struct pipe_ctx *pipe_ctx,
@@ -1419,57 +1541,52 @@ static void dcn10_power_on_fe(
 {
 	struct dc_surface *dc_surface = &pipe_ctx->surface->public;
 
-	/* power up DCHUP and DPP from pseudo code pipe_move.c */
-	 /*TODO: function: power_on_plane. If already power up, skip
-	 */
-	{
-		power_on_plane(dc->ctx,
-			pipe_ctx->pipe_idx, pipe_ctx->tg->inst);
+	power_on_plane(dc->ctx,
+		pipe_ctx->pipe_idx, pipe_ctx->tg->inst);
 
-		/* enable DCFCLK current DCHUB */
-		enable_dcfclk(dc->ctx,
+	/* enable DCFCLK current DCHUB */
+	enable_dcfclk(dc->ctx,
+			pipe_ctx->pipe_idx,
+			pipe_ctx->pix_clk_params.requested_pix_clk,
+			context->bw.dcn.calc_clk.dppclk_div);
+	dc->current_context->bw.dcn.cur_clk.dppclk_div =
+			context->bw.dcn.calc_clk.dppclk_div;
+	context->bw.dcn.cur_clk.dppclk_div = context->bw.dcn.calc_clk.dppclk_div;
+
+	if (dc_surface) {
+		dm_logger_write(dc->ctx->logger, LOG_DC,
+				"Pipe:%d 0x%x: addr hi:0x%x, "
+				"addr low:0x%x, "
+				"src: %d, %d, %d,"
+				" %d; dst: %d, %d, %d, %d;\n",
 				pipe_ctx->pipe_idx,
-				pipe_ctx->pix_clk_params.requested_pix_clk,
-				context->bw.dcn.calc_clk.dppclk_div);
-		dc->current_context->bw.dcn.cur_clk.dppclk_div =
-				context->bw.dcn.calc_clk.dppclk_div;
-		context->bw.dcn.cur_clk.dppclk_div = context->bw.dcn.calc_clk.dppclk_div;
-
-		if (dc_surface) {
-			dm_logger_write(dc->ctx->logger, LOG_DC,
-					"Pipe:%d 0x%x: addr hi:0x%x, "
-					"addr low:0x%x, "
-					"src: %d, %d, %d,"
-					" %d; dst: %d, %d, %d, %d;\n",
-					pipe_ctx->pipe_idx,
-					dc_surface,
-					dc_surface->address.grph.addr.high_part,
-					dc_surface->address.grph.addr.low_part,
-					dc_surface->src_rect.x,
-					dc_surface->src_rect.y,
-					dc_surface->src_rect.width,
-					dc_surface->src_rect.height,
-					dc_surface->dst_rect.x,
-					dc_surface->dst_rect.y,
-					dc_surface->dst_rect.width,
-					dc_surface->dst_rect.height);
-
-			dm_logger_write(dc->ctx->logger, LOG_HW_SET_MODE,
-					"Pipe %d: width, height, x, y\n"
-					"viewport:%d, %d, %d, %d\n"
-					"recout:  %d, %d, %d, %d\n",
-					pipe_ctx->pipe_idx,
-					pipe_ctx->scl_data.viewport.width,
-					pipe_ctx->scl_data.viewport.height,
-					pipe_ctx->scl_data.viewport.x,
-					pipe_ctx->scl_data.viewport.y,
-					pipe_ctx->scl_data.recout.width,
-					pipe_ctx->scl_data.recout.height,
-					pipe_ctx->scl_data.recout.x,
-					pipe_ctx->scl_data.recout.y);
-		}
+				dc_surface,
+				dc_surface->address.grph.addr.high_part,
+				dc_surface->address.grph.addr.low_part,
+				dc_surface->src_rect.x,
+				dc_surface->src_rect.y,
+				dc_surface->src_rect.width,
+				dc_surface->src_rect.height,
+				dc_surface->dst_rect.x,
+				dc_surface->dst_rect.y,
+				dc_surface->dst_rect.width,
+				dc_surface->dst_rect.height);
+
+		dm_logger_write(dc->ctx->logger, LOG_HW_SET_MODE,
+				"Pipe %d: width, height, x, y\n"
+				"viewport:%d, %d, %d, %d\n"
+				"recout:  %d, %d, %d, %d\n",
+				pipe_ctx->pipe_idx,
+				pipe_ctx->scl_data.viewport.width,
+				pipe_ctx->scl_data.viewport.height,
+				pipe_ctx->scl_data.viewport.x,
+				pipe_ctx->scl_data.viewport.y,
+				pipe_ctx->scl_data.recout.width,
+				pipe_ctx->scl_data.recout.height,
+				pipe_ctx->scl_data.recout.x,
+				pipe_ctx->scl_data.recout.y);
+		print_rq_dlg_ttu(dc, pipe_ctx);
 	}
-
 }
 
 static void program_gamut_remap(struct pipe_ctx *pipe_ctx)
@@ -1743,6 +1860,53 @@ static void dcn10_apply_ctx_for_surface(
 		}
 	}
 
+	dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+			"\n============== Watermark parameters ==============\n"
+			"a.urgent_ns: %d \n"
+			"a.cstate_enter_plus_exit: %d \n"
+			"a.cstate_exit: %d \n"
+			"a.pstate_change: %d \n"
+			"a.pte_meta_urgent: %d \n"
+			"b.urgent_ns: %d \n"
+			"b.cstate_enter_plus_exit: %d \n"
+			"b.cstate_exit: %d \n"
+			"b.pstate_change: %d \n"
+			"b.pte_meta_urgent: %d \n",
+			context->bw.dcn.watermarks.a.urgent_ns,
+			context->bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns,
+			context->bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns,
+			context->bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns,
+			context->bw.dcn.watermarks.a.pte_meta_urgent_ns,
+			context->bw.dcn.watermarks.b.urgent_ns,
+			context->bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns,
+			context->bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns,
+			context->bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns,
+			context->bw.dcn.watermarks.b.pte_meta_urgent_ns
+			);
+	dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+			"\nc.urgent_ns: %d \n"
+			"c.cstate_enter_plus_exit: %d \n"
+			"c.cstate_exit: %d \n"
+			"c.pstate_change: %d \n"
+			"c.pte_meta_urgent: %d \n"
+			"d.urgent_ns: %d \n"
+			"d.cstate_enter_plus_exit: %d \n"
+			"d.cstate_exit: %d \n"
+			"d.pstate_change: %d \n"
+			"d.pte_meta_urgent: %d \n"
+			"========================================================\n",
+			context->bw.dcn.watermarks.c.urgent_ns,
+			context->bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns,
+			context->bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns,
+			context->bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns,
+			context->bw.dcn.watermarks.c.pte_meta_urgent_ns,
+			context->bw.dcn.watermarks.d.urgent_ns,
+			context->bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns,
+			context->bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns,
+			context->bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns,
+			context->bw.dcn.watermarks.d.pte_meta_urgent_ns
+			);
+
 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index e527d10b3e1f..6ada9a262721 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -429,6 +429,7 @@ static const struct dc_debug debug_defaults_drv = {
 		.disable_dmcu = true,
 		.force_abm_enable = false,
 		.timing_trace = false,
+		.clock_trace = true,
 		.disable_pplib_clock_request = true,
 		.disable_pplib_wm_range = false,
 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
@@ -441,6 +442,7 @@ static const struct dc_debug debug_defaults_diags = {
 		.disable_dmcu = true,
 		.force_abm_enable = false,
 		.timing_trace = true,
+		.clock_trace = true,
 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
 		.disable_pplib_clock_request = true,
 		.disable_pplib_wm_range = true,
diff --git a/drivers/gpu/drm/amd/display/include/logger_interface.h b/drivers/gpu/drm/amd/display/include/logger_interface.h
index 08a6911d3a3a..b75c343f8680 100644
--- a/drivers/gpu/drm/amd/display/include/logger_interface.h
+++ b/drivers/gpu/drm/amd/display/include/logger_interface.h
@@ -32,6 +32,7 @@ struct dc_context;
 struct dc_link;
 struct dc_surface_update;
 struct resource_context;
+struct validate_context;
 
 /*
  *
@@ -90,6 +91,9 @@ void context_timing_trace(
 		const struct dc *dc,
 		struct resource_context *res_ctx);
 
+void context_clock_trace(
+		const struct dc *dc,
+		struct validate_context *context);
 
 /* Any function which is empty or have incomplete implementation should be
  * marked by this macro.
-- 
2.11.0


