Skip to content

Commit d6eaadb

Browse files
authored
chore: adjust elapsed time of various log messages (#15517)
* chore: adjust elapsed times of various logs. Adjust the time part in the log to use Duration instead of specific precision time (e.g., seconds), to avoid imprecise log information such as '0s'. * chore: add timing log for some schema APIs - update_table_meta - update_multi_table_meta
1 parent 05805e5 commit d6eaadb

File tree

17 files changed

+97
-81
lines changed

17 files changed

+97
-81
lines changed

src/query/ee/src/storages/fuse/io/snapshots.rs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -78,11 +78,11 @@ where
7878
{
7979
count += chunk.len();
8080
let status = format!(
81-
"gc orphan: read snapshot files:{}/{}, segment files: {}, cost:{} sec",
81+
"gc orphan: read snapshot files:{}/{}, segment files: {}, cost:{:?}",
8282
count,
8383
snapshot_files.len(),
8484
segments.len(),
85-
start.elapsed().as_secs()
85+
start.elapsed()
8686
);
8787
info!("{}", status);
8888
(status_callback)(status);

src/query/ee/src/storages/fuse/operations/vacuum_drop_tables.rs

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -74,10 +74,10 @@ pub async fn do_vacuum_drop_table(
7474
};
7575

7676
info!(
77-
"vacuum drop table {:?} dir {:?}, cost:{} sec",
77+
"vacuum drop table {:?} dir {:?}, cost:{:?}",
7878
table_info.name,
7979
dir,
80-
start.elapsed().as_secs()
80+
start.elapsed()
8181
);
8282
}
8383
Ok(if dry_run_limit.is_some() {
@@ -150,9 +150,9 @@ pub async fn do_vacuum_drop_tables(
150150
};
151151

152152
info!(
153-
"do_vacuum_drop_tables {} tables, cost:{} sec",
153+
"do_vacuum_drop_tables {} tables, cost:{:?}",
154154
tables_len,
155-
start.elapsed().as_secs()
155+
start.elapsed()
156156
);
157157

158158
Ok(result)

src/query/ee/src/storages/fuse/operations/vacuum_table.rs

Lines changed: 23 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -177,11 +177,11 @@ pub async fn do_gc_orphan_files(
177177
None => return Ok(()),
178178
};
179179
let status = format!(
180-
"gc orphan: read referenced files:{},{},{}, cost:{} sec",
180+
"gc orphan: read referenced files:{},{},{}, cost:{:?}",
181181
referenced_files.segments.len(),
182182
referenced_files.blocks.len(),
183183
referenced_files.blocks_index.len(),
184-
start.elapsed().as_secs()
184+
start.elapsed()
185185
);
186186
ctx.set_status_info(&status);
187187

@@ -191,9 +191,9 @@ pub async fn do_gc_orphan_files(
191191
get_orphan_files_to_be_purged(fuse_table, referenced_files.segments, retention_time)
192192
.await?;
193193
let status = format!(
194-
"gc orphan: read segment_locations_to_be_purged:{}, cost:{} sec, retention_time: {}",
194+
"gc orphan: read segment_locations_to_be_purged:{}, cost:{:?}, retention_time: {}",
195195
segment_locations_to_be_purged.len(),
196-
start.elapsed().as_secs(),
196+
start.elapsed(),
197197
retention_time
198198
);
199199
ctx.set_status_info(&status);
@@ -207,9 +207,9 @@ pub async fn do_gc_orphan_files(
207207
)
208208
.await?;
209209
let status = format!(
210-
"gc orphan: purged segment files:{}, cost:{} sec",
210+
"gc orphan: purged segment files:{}, cost:{:?}",
211211
purged_file_num,
212-
start.elapsed().as_secs()
212+
start.elapsed()
213213
);
214214
ctx.set_status_info(&status);
215215

@@ -218,9 +218,9 @@ pub async fn do_gc_orphan_files(
218218
let block_locations_to_be_purged =
219219
get_orphan_files_to_be_purged(fuse_table, referenced_files.blocks, retention_time).await?;
220220
let status = format!(
221-
"gc orphan: read block_locations_to_be_purged:{}, cost:{} sec",
221+
"gc orphan: read block_locations_to_be_purged:{}, cost:{:?}",
222222
block_locations_to_be_purged.len(),
223-
start.elapsed().as_secs()
223+
start.elapsed()
224224
);
225225
ctx.set_status_info(&status);
226226

@@ -233,9 +233,9 @@ pub async fn do_gc_orphan_files(
233233
)
234234
.await?;
235235
let status = format!(
236-
"gc orphan: purged block files:{}, cost:{} sec",
236+
"gc orphan: purged block files:{}, cost:{:?}",
237237
purged_file_num,
238-
start.elapsed().as_secs()
238+
start.elapsed()
239239
);
240240
ctx.set_status_info(&status);
241241

@@ -245,9 +245,9 @@ pub async fn do_gc_orphan_files(
245245
get_orphan_files_to_be_purged(fuse_table, referenced_files.blocks_index, retention_time)
246246
.await?;
247247
let status = format!(
248-
"gc orphan: read index_locations_to_be_purged:{}, cost:{} sec",
248+
"gc orphan: read index_locations_to_be_purged:{}, cost:{:?}",
249249
index_locations_to_be_purged.len(),
250-
start.elapsed().as_secs()
250+
start.elapsed()
251251
);
252252
ctx.set_status_info(&status);
253253

@@ -260,9 +260,9 @@ pub async fn do_gc_orphan_files(
260260
)
261261
.await?;
262262
let status = format!(
263-
"gc orphan: purged block index files:{}, cost:{} sec",
263+
"gc orphan: purged block index files:{}, cost:{:?}",
264264
purged_file_num,
265-
start.elapsed().as_secs()
265+
start.elapsed()
266266
);
267267
ctx.set_status_info(&status);
268268

@@ -284,11 +284,11 @@ pub async fn do_dry_run_orphan_files(
284284
None => return Ok(()),
285285
};
286286
let status = format!(
287-
"dry_run orphan: read referenced files:{},{},{}, cost:{} sec",
287+
"dry_run orphan: read referenced files:{},{},{}, cost:{:?}",
288288
referenced_files.segments.len(),
289289
referenced_files.blocks.len(),
290290
referenced_files.blocks_index.len(),
291-
start.elapsed().as_secs()
291+
start.elapsed()
292292
);
293293
ctx.set_status_info(&status);
294294

@@ -297,9 +297,9 @@ pub async fn do_dry_run_orphan_files(
297297
get_orphan_files_to_be_purged(fuse_table, referenced_files.segments, retention_time)
298298
.await?;
299299
let status = format!(
300-
"dry_run orphan: read segment_locations_to_be_purged:{}, cost:{} sec",
300+
"dry_run orphan: read segment_locations_to_be_purged:{}, cost:{:?}",
301301
segment_locations_to_be_purged.len(),
302-
start.elapsed().as_secs()
302+
start.elapsed()
303303
);
304304
ctx.set_status_info(&status);
305305

@@ -312,9 +312,9 @@ pub async fn do_dry_run_orphan_files(
312312
let block_locations_to_be_purged =
313313
get_orphan_files_to_be_purged(fuse_table, referenced_files.blocks, retention_time).await?;
314314
let status = format!(
315-
"dry_run orphan: read block_locations_to_be_purged:{}, cost:{} sec",
315+
"dry_run orphan: read block_locations_to_be_purged:{}, cost:{:?}",
316316
block_locations_to_be_purged.len(),
317-
start.elapsed().as_secs()
317+
start.elapsed()
318318
);
319319
ctx.set_status_info(&status);
320320
purge_files.extend(block_locations_to_be_purged);
@@ -327,9 +327,9 @@ pub async fn do_dry_run_orphan_files(
327327
get_orphan_files_to_be_purged(fuse_table, referenced_files.blocks_index, retention_time)
328328
.await?;
329329
let status = format!(
330-
"dry_run orphan: read index_locations_to_be_purged:{}, cost:{} sec",
330+
"dry_run orphan: read index_locations_to_be_purged:{}, cost:{:?}",
331331
index_locations_to_be_purged.len(),
332-
start.elapsed().as_secs()
332+
start.elapsed()
333333
);
334334
ctx.set_status_info(&status);
335335

@@ -352,10 +352,7 @@ pub async fn do_vacuum(
352352
let purge_files_opt = fuse_table
353353
.purge(ctx.clone(), instant, dry_run_limit, true, dry_run)
354354
.await?;
355-
let status = format!(
356-
"do_vacuum: purged table, cost:{} sec",
357-
start.elapsed().as_secs()
358-
);
355+
let status = format!("do_vacuum: purged table, cost:{:?}", start.elapsed());
359356
ctx.set_status_info(&status);
360357
let retention = Duration::days(ctx.get_settings().get_data_retention_time_in_days()? as i64);
361358
// use min(now - get_retention_period(), retention_time) as gc orphan files retention time

src/query/service/src/catalogs/default/mutable_catalog.rs

Lines changed: 20 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@ use std::any::Any;
1616
use std::fmt::Debug;
1717
use std::fmt::Formatter;
1818
use std::sync::Arc;
19+
use std::time::Instant;
1920

2021
use databend_common_catalog::catalog::Catalog;
2122
use databend_common_config::InnerConfig;
@@ -511,7 +512,14 @@ impl Catalog for MutableCatalog {
511512
table_info.desc,
512513
req.copied_files.is_some()
513514
);
514-
Ok(self.ctx.meta.update_table_meta(req).await?)
515+
let begin = Instant::now();
516+
let res = self.ctx.meta.update_table_meta(req).await;
517+
info!(
518+
"update table meta done. table id: {:?}, time used {:?}",
519+
table_info.ident,
520+
begin.elapsed()
521+
);
522+
Ok(res?)
515523
}
516524
DatabaseType::ShareDB(share_ident) => {
517525
let tenant = Tenant::new_or_err(share_ident.tenant_name(), func_name!())?;
@@ -523,7 +531,17 @@ impl Catalog for MutableCatalog {
523531

524532
#[async_backtrace::framed]
525533
async fn update_multi_table_meta(&self, reqs: UpdateMultiTableMetaReq) -> Result<()> {
526-
Ok(self.ctx.meta.update_multi_table_meta(reqs).await?)
534+
info!(
535+
"updating multi table meta. number of tables: {}",
536+
reqs.update_table_metas.len()
537+
);
538+
let begin = Instant::now();
539+
let res = self.ctx.meta.update_multi_table_meta(reqs).await;
540+
info!(
541+
"update multi table meta done. time used {:?}",
542+
begin.elapsed()
543+
);
544+
Ok(res?)
527545
}
528546

529547
async fn set_table_column_mask_policy(

src/query/service/src/interpreters/interpreter_table_recluster.rs

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -184,9 +184,8 @@ impl Interpreter for ReclusterTableInterpreter {
184184
// Status.
185185
{
186186
let status = format!(
187-
"recluster: run recluster tasks:{} times, cost:{} sec",
188-
times,
189-
elapsed_time.as_secs()
187+
"recluster: run recluster tasks:{} times, cost:{:?}",
188+
times, elapsed_time
190189
);
191190
ctx.set_status_info(&status);
192191
}
@@ -197,8 +196,8 @@ impl Interpreter for ReclusterTableInterpreter {
197196

198197
if elapsed_time >= timeout {
199198
warn!(
200-
"Recluster stopped because the runtime was over {} secs",
201-
timeout.as_secs()
199+
"Recluster stopped because the runtime was over {:?}",
200+
timeout
202201
);
203202
break;
204203
}

src/query/storages/fuse/src/io/snapshots.rs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -192,10 +192,10 @@ impl SnapshotsIO {
192192
{
193193
count += chunk.len();
194194
let status = format!(
195-
"read snapshot files:{}/{}, cost:{} sec",
195+
"read snapshot files:{}/{}, cost:{:?}",
196196
count,
197197
snapshot_files.len(),
198-
start.elapsed().as_secs()
198+
start.elapsed()
199199
);
200200
info!("{}", status);
201201
(status_callback)(status);

src/query/storages/fuse/src/operations/analyze.rs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -263,10 +263,10 @@ pub async fn regenerate_statistics(
263263
{
264264
read_segment_count += chunk.len();
265265
let status = format!(
266-
"analyze: read segment files:{}/{}, cost:{} sec",
266+
"analyze: read segment files:{}/{}, cost:{:?}",
267267
read_segment_count,
268268
number_segments,
269-
start.elapsed().as_secs()
269+
start.elapsed()
270270
);
271271
ctx.set_status_info(&status);
272272
}

src/query/storages/fuse/src/operations/changes.rs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -296,9 +296,9 @@ impl FuseTable {
296296
let pruning_stats = pruner.pruning_stats();
297297

298298
info!(
299-
"prune snapshot block end, final block numbers:{}, cost:{}",
299+
"prune snapshot block end, final block numbers:{}, cost:{:?}",
300300
block_metas.len(),
301-
start.elapsed().as_secs()
301+
start.elapsed()
302302
);
303303

304304
let block_metas = block_metas

src/query/storages/fuse/src/operations/common/processors/sink_commit.rs

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -430,12 +430,12 @@ where F: SnapshotGenerator + Send + 'static
430430
}
431431
metrics_inc_commit_mutation_success();
432432
{
433-
let elapsed_time = self.start_time.elapsed().as_millis();
433+
let elapsed_time = self.start_time.elapsed();
434434
let status = format!(
435-
"commit mutation success after {} retries, which took {} ms",
435+
"commit mutation success after {} retries, which took {:?}",
436436
self.retries, elapsed_time
437437
);
438-
metrics_inc_commit_milliseconds(elapsed_time);
438+
metrics_inc_commit_milliseconds(elapsed_time.as_millis());
439439
self.ctx.set_status_info(&status);
440440
}
441441
if let Some(files) = &self.copied_files {

src/query/storages/fuse/src/operations/common/processors/transform_mutation_aggregator.rs

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -136,10 +136,10 @@ impl TableMutationAggregator {
136136
// Refresh status
137137
{
138138
let status = format!(
139-
"{}: run tasks:{}, cost:{} sec",
139+
"{}: run tasks:{}, cost:{:?}",
140140
self.kind,
141141
self.finished_tasks,
142-
self.start_time.elapsed().as_secs()
142+
self.start_time.elapsed()
143143
);
144144
self.ctx.set_status_info(&status);
145145
}
@@ -262,11 +262,11 @@ impl TableMutationAggregator {
262262
{
263263
count += chunk.len();
264264
let status = format!(
265-
"{}: generate new segment files:{}/{}, cost:{} sec",
265+
"{}: generate new segment files:{}/{}, cost:{:?}",
266266
self.kind,
267267
count,
268268
segment_indices.len(),
269-
start.elapsed().as_secs()
269+
start.elapsed()
270270
);
271271
self.ctx.set_status_info(&status);
272272
}

0 commit comments

Comments
 (0)