@@ -126,6 +126,8 @@ enum dpu_enc_rc_states {
  * @base: drm_encoder base class for registration with DRM
  * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
  * @enabled: True if the encoder is active, protected by enc_lock
+ * @commit_done_timedout: True if there has been a timeout on commit after
+ *			enabling the encoder.
  * @num_phys_encs: Actual number of physical encoders contained.
  * @phys_encs: Container of physical encoders managed.
  * @cur_master: Pointer to the current master in this mode. Optimization
@@ -172,6 +174,7 @@ struct dpu_encoder_virt {
 	spinlock_t enc_spinlock;
 
 	bool enabled;
+	bool commit_done_timedout;
 
 	unsigned int num_phys_encs;
 	struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
@@ -218,12 +221,59 @@ static u32 dither_matrix[DITHER_MATRIX_SZ] = {
 	15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10
 };
 
+u32 dpu_encoder_get_drm_fmt(struct dpu_encoder_phys *phys_enc)
+{
+	struct drm_encoder *drm_enc;
+	struct dpu_encoder_virt *dpu_enc;
+	struct drm_display_info *info;
+	struct drm_display_mode *mode;
+
+	drm_enc = phys_enc->parent;
+	dpu_enc = to_dpu_encoder_virt(drm_enc);
+	info = &dpu_enc->connector->display_info;
+	mode = &phys_enc->cached_mode;
+
+	if (drm_mode_is_420_only(info, mode))
+		return DRM_FORMAT_YUV420;
+
+	return DRM_FORMAT_RGB888;
+}
+
+bool dpu_encoder_needs_periph_flush(struct dpu_encoder_phys *phys_enc)
+{
+	struct drm_encoder *drm_enc;
+	struct dpu_encoder_virt *dpu_enc;
+	struct msm_display_info *disp_info;
+	struct msm_drm_private *priv;
+	struct drm_display_mode *mode;
+
+	drm_enc = phys_enc->parent;
+	dpu_enc = to_dpu_encoder_virt(drm_enc);
+	disp_info = &dpu_enc->disp_info;
+	priv = drm_enc->dev->dev_private;
+	mode = &phys_enc->cached_mode;
+
+	return phys_enc->hw_intf->cap->type == INTF_DP &&
+		msm_dp_needs_periph_flush(priv->dp[disp_info->h_tile_instance[0]], mode);
+}
 
 bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc)
 {
-	const struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+	const struct dpu_encoder_virt *dpu_enc;
+	struct msm_drm_private *priv = drm_enc->dev->dev_private;
+	const struct msm_display_info *disp_info;
+	int index;
 
-	return dpu_enc->wide_bus_en;
+	dpu_enc = to_dpu_encoder_virt(drm_enc);
+	disp_info = &dpu_enc->disp_info;
+	index = disp_info->h_tile_instance[0];
+
+	if (disp_info->intf_type == INTF_DP)
+		return msm_dp_wide_bus_available(priv->dp[index]);
+	else if (disp_info->intf_type == INTF_DSI)
+		return msm_dsi_wide_bus_enabled(priv->dsi[index]);
+
+	return false;
 }
 
 bool dpu_encoder_is_dsc_enabled(const struct drm_encoder *drm_enc)
@@ -588,6 +638,7 @@ static int dpu_encoder_virt_atomic_check(
 	struct dpu_kms *dpu_kms;
 	struct drm_display_mode *adj_mode;
 	struct msm_display_topology topology;
+	struct msm_display_info *disp_info;
 	struct dpu_global_state *global_state;
 	struct drm_framebuffer *fb;
 	struct drm_dsc_config *dsc;
@@ -603,6 +654,7 @@ static int dpu_encoder_virt_atomic_check(
 	DPU_DEBUG_ENC(dpu_enc, "\n");
 
 	priv = drm_enc->dev->dev_private;
+	disp_info = &dpu_enc->disp_info;
 	dpu_kms = to_dpu_kms(priv->kms);
 	adj_mode = &crtc_state->adjusted_mode;
 	global_state = dpu_kms_get_global_state(crtc_state->state);
@@ -616,21 +668,24 @@ static int dpu_encoder_virt_atomic_check(
 	topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode, crtc_state, dsc);
 
 	/*
-	 * Use CDM only for writeback at the moment as other interfaces cannot handle it.
-	 * if writeback itself cannot handle cdm for some reason it will fail in its atomic_check()
+	 * Use CDM only for writeback or DP at the moment as other interfaces cannot handle it.
+	 * If writeback itself cannot handle cdm for some reason it will fail in its atomic_check()
 	 * earlier.
 	 */
-	if (dpu_enc->disp_info.intf_type == INTF_WB && conn_state->writeback_job) {
+	if (disp_info->intf_type == INTF_WB && conn_state->writeback_job) {
 		fb = conn_state->writeback_job->fb;
 
 		if (fb && DPU_FORMAT_IS_YUV(to_dpu_format(msm_framebuffer_format(fb))))
 			topology.needs_cdm = true;
-		if (topology.needs_cdm && !dpu_enc->cur_master->hw_cdm)
-			crtc_state->mode_changed = true;
-		else if (!topology.needs_cdm && dpu_enc->cur_master->hw_cdm)
-			crtc_state->mode_changed = true;
+	} else if (disp_info->intf_type == INTF_DP) {
+		if (msm_dp_is_yuv_420_enabled(priv->dp[disp_info->h_tile_instance[0]], adj_mode))
+			topology.needs_cdm = true;
 	}
 
+	if (topology.needs_cdm && !dpu_enc->cur_master->hw_cdm)
+		crtc_state->mode_changed = true;
+	else if (!topology.needs_cdm && dpu_enc->cur_master->hw_cdm)
+		crtc_state->mode_changed = true;
 	/*
 	 * Release and Allocate resources on every modeset
 	 * Dont allocate when active is false.
@@ -1102,7 +1157,8 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
 
 	dpu_enc->dsc_mask = dsc_mask;
 
-	if (dpu_enc->disp_info.intf_type == INTF_WB && conn_state->writeback_job) {
+	if ((dpu_enc->disp_info.intf_type == INTF_WB && conn_state->writeback_job) ||
+	    dpu_enc->disp_info.intf_type == INTF_DP) {
 		struct dpu_hw_blk *hw_cdm = NULL;
 
 		dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
@@ -1209,26 +1265,20 @@ static void dpu_encoder_virt_atomic_enable(struct drm_encoder *drm_enc,
 	struct dpu_encoder_virt *dpu_enc = NULL;
 	int ret = 0;
 	struct drm_display_mode *cur_mode = NULL;
-	struct msm_drm_private *priv = drm_enc->dev->dev_private;
-	struct msm_display_info *disp_info;
-	int index;
 
 	dpu_enc = to_dpu_encoder_virt(drm_enc);
-	disp_info = &dpu_enc->disp_info;
-	index = disp_info->h_tile_instance[0];
-
 	dpu_enc->dsc = dpu_encoder_get_dsc_config(drm_enc);
 
 	atomic_set(&dpu_enc->frame_done_timeout_cnt, 0);
 
-	if (disp_info->intf_type == INTF_DP)
-		dpu_enc->wide_bus_en = msm_dp_wide_bus_available(priv->dp[index]);
-	else if (disp_info->intf_type == INTF_DSI)
-		dpu_enc->wide_bus_en = msm_dsi_wide_bus_enabled(priv->dsi[index]);
-
 	mutex_lock(&dpu_enc->enc_lock);
+
+	dpu_enc->commit_done_timedout = false;
+
 	cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;
 
+	dpu_enc->wide_bus_en = dpu_encoder_is_widebus_enabled(drm_enc);
+
 	trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
 			     cur_mode->vdisplay);
 
@@ -1282,7 +1332,7 @@ static void dpu_encoder_virt_atomic_disable(struct drm_encoder *drm_enc,
 	trace_dpu_enc_disable(DRMID(drm_enc));
 
 	/* wait for idle */
-	dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
+	dpu_encoder_wait_for_tx_complete(drm_enc);
 
 	dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP);
 
@@ -2133,6 +2183,84 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
 	ctl->ops.clear_pending_flush(ctl);
 }
 
+void dpu_encoder_helper_phys_setup_cdm(struct dpu_encoder_phys *phys_enc,
+				       const struct dpu_format *dpu_fmt,
+				       u32 output_type)
+{
+	struct dpu_hw_cdm *hw_cdm;
+	struct dpu_hw_cdm_cfg *cdm_cfg;
+	struct dpu_hw_pingpong *hw_pp;
+	int ret;
+
+	if (!phys_enc)
+		return;
+
+	cdm_cfg = &phys_enc->cdm_cfg;
+	hw_pp = phys_enc->hw_pp;
+	hw_cdm = phys_enc->hw_cdm;
+
+	if (!hw_cdm)
+		return;
+
+	if (!DPU_FORMAT_IS_YUV(dpu_fmt)) {
+		DPU_DEBUG("[enc:%d] cdm_disable fmt:%x\n", DRMID(phys_enc->parent),
+			  dpu_fmt->base.pixel_format);
+		if (hw_cdm->ops.bind_pingpong_blk)
+			hw_cdm->ops.bind_pingpong_blk(hw_cdm, PINGPONG_NONE);
+
+		return;
+	}
+
+	memset(cdm_cfg, 0, sizeof(struct dpu_hw_cdm_cfg));
+
+	cdm_cfg->output_width = phys_enc->cached_mode.hdisplay;
+	cdm_cfg->output_height = phys_enc->cached_mode.vdisplay;
+	cdm_cfg->output_fmt = dpu_fmt;
+	cdm_cfg->output_type = output_type;
+	cdm_cfg->output_bit_depth = DPU_FORMAT_IS_DX(dpu_fmt) ?
+			CDM_CDWN_OUTPUT_10BIT : CDM_CDWN_OUTPUT_8BIT;
+	cdm_cfg->csc_cfg = &dpu_csc10_rgb2yuv_601l;
+
+	/* enable 10 bit logic */
+	switch (cdm_cfg->output_fmt->chroma_sample) {
+	case DPU_CHROMA_RGB:
+		cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE;
+		cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
+		break;
+	case DPU_CHROMA_H2V1:
+		cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE;
+		cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
+		break;
+	case DPU_CHROMA_420:
+		cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE;
+		cdm_cfg->v_cdwn_type = CDM_CDWN_OFFSITE;
+		break;
+	case DPU_CHROMA_H1V2:
+	default:
+		DPU_ERROR("[enc:%d] unsupported chroma sampling type\n",
+			  DRMID(phys_enc->parent));
+		cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE;
+		cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
+		break;
+	}
+
+	DPU_DEBUG("[enc:%d] cdm_enable:%d,%d,%X,%d,%d,%d,%d]\n",
+		  DRMID(phys_enc->parent), cdm_cfg->output_width,
+		  cdm_cfg->output_height, cdm_cfg->output_fmt->base.pixel_format,
+		  cdm_cfg->output_type, cdm_cfg->output_bit_depth,
+		  cdm_cfg->h_cdwn_type, cdm_cfg->v_cdwn_type);
+
+	if (hw_cdm->ops.enable) {
+		cdm_cfg->pp_id = hw_pp->idx;
+		ret = hw_cdm->ops.enable(hw_cdm, cdm_cfg);
+		if (ret < 0) {
+			DPU_ERROR("[enc:%d] failed to enable CDM; ret:%d\n",
+				  DRMID(phys_enc->parent), ret);
+			return;
+		}
+	}
+}
+
 #ifdef CONFIG_DEBUG_FS
 static int _dpu_encoder_status_show(struct seq_file *s, void *data)
 {
@@ -2402,10 +2530,18 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
 	return &dpu_enc->base;
 }
 
-int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
-	enum msm_event_wait event)
+/**
+ * dpu_encoder_wait_for_commit_done() - Wait for encoder to flush pending state
+ * @drm_enc: encoder pointer
+ *
+ * Wait for hardware to have flushed the current pending changes to hardware at
+ * a vblank or CTL_START. Physical encoders will map this differently depending
+ * on the type: vid mode -> vsync_irq, cmd mode -> CTL_START.
+ *
+ * Return: 0 on success, -EWOULDBLOCK if already signaled, error otherwise
+ */
+int dpu_encoder_wait_for_commit_done(struct drm_encoder *drm_enc)
 {
-	int (*fn_wait)(struct dpu_encoder_phys *phys_enc) = NULL;
 	struct dpu_encoder_virt *dpu_enc = NULL;
 	int i, ret = 0;
 
@@ -2419,23 +2555,51 @@ int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
 		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
 
-		switch (event) {
-		case MSM_ENC_COMMIT_DONE:
-			fn_wait = phys->ops.wait_for_commit_done;
-			break;
-		case MSM_ENC_TX_COMPLETE:
-			fn_wait = phys->ops.wait_for_tx_complete;
-			break;
-		default:
-			DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n",
-					event);
-			return -EINVAL;
+		if (phys->ops.wait_for_commit_done) {
+			DPU_ATRACE_BEGIN("wait_for_commit_done");
+			ret = phys->ops.wait_for_commit_done(phys);
+			DPU_ATRACE_END("wait_for_commit_done");
+			if (ret == -ETIMEDOUT && !dpu_enc->commit_done_timedout) {
+				dpu_enc->commit_done_timedout = true;
+				msm_disp_snapshot_state(drm_enc->dev);
+			}
+			if (ret)
+				return ret;
 		}
+	}
+
+	return ret;
+}
+
+/**
+ * dpu_encoder_wait_for_tx_complete() - Wait for encoder to transfer pixels to panel
+ * @drm_enc: encoder pointer
+ *
+ * Wait for the hardware to transfer all the pixels to the panel. Physical
+ * encoders will map this differently depending on the type: vid mode -> vsync_irq,
+ * cmd mode -> pp_done.
+ *
+ * Return: 0 on success, -EWOULDBLOCK if already signaled, error otherwise
+ */
+int dpu_encoder_wait_for_tx_complete(struct drm_encoder *drm_enc)
+{
+	struct dpu_encoder_virt *dpu_enc = NULL;
+	int i, ret = 0;
+
+	if (!drm_enc) {
+		DPU_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+	dpu_enc = to_dpu_encoder_virt(drm_enc);
+	DPU_DEBUG_ENC(dpu_enc, "\n");
+
+	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
 
-		if (fn_wait) {
-			DPU_ATRACE_BEGIN("wait_for_completion_event");
-			ret = fn_wait(phys);
-			DPU_ATRACE_END("wait_for_completion_event");
+		if (phys->ops.wait_for_tx_complete) {
+			DPU_ATRACE_BEGIN("wait_for_tx_complete");
+			ret = phys->ops.wait_for_tx_complete(phys);
+			DPU_ATRACE_END("wait_for_tx_complete");
 			if (ret)
 				return ret;
 		}