@@ -27,29 +27,29 @@ import (
 // by the function and test if it is an instance of kafka.WriteErrors in order to
 // identify which messages have succeeded or failed, for example:
 //
-// // Construct a synchronous writer (the default mode).
-// w := &kafka.Writer{
-//     Addr:         kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
-//     Topic:        "topic-A",
-//     RequiredAcks: kafka.RequireAll,
-// }
+//	// Construct a synchronous writer (the default mode).
+//	w := &kafka.Writer{
+//		Addr:         kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
+//		Topic:        "topic-A",
+//		RequiredAcks: kafka.RequireAll,
+//	}
 //
-// ...
+//	...
 //
-// // Passing a context can prevent the operation from blocking indefinitely.
-// switch err := w.WriteMessages(ctx, msgs...).(type) {
-// case nil:
-// case kafka.WriteErrors:
-//     for i := range msgs {
-//         if err[i] != nil {
-//             // handle the error writing msgs[i]
-//             ...
+//	// Passing a context can prevent the operation from blocking indefinitely.
+//	switch err := w.WriteMessages(ctx, msgs...).(type) {
+//	case nil:
+//	case kafka.WriteErrors:
+//		for i := range msgs {
+//			if err[i] != nil {
+//				// handle the error writing msgs[i]
+//				...
+//			}
 //		}
+//	default:
+//		// handle other errors
+//		...
 //	}
-// default:
-//     // handle other errors
-//     ...
-// }
 //
 // In asynchronous mode, the program may configure a completion handler on the
 // writer to receive notifications of messages being written to kafka:
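For context, the synchronous usage described in the doc comment above can be exercised end to end roughly as follows. This is only a sketch: the broker addresses, topic name, timeout, and message payloads are placeholder values.

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/segmentio/kafka-go"
)

func main() {
	// Construct a synchronous writer (the default mode).
	w := &kafka.Writer{
		Addr:         kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
		Topic:        "topic-A",
		RequiredAcks: kafka.RequireAll,
	}
	defer w.Close()

	msgs := []kafka.Message{
		{Key: []byte("k1"), Value: []byte("v1")},
		{Key: []byte("k2"), Value: []byte("v2")},
	}

	// Passing a context can prevent the operation from blocking indefinitely.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	switch err := w.WriteMessages(ctx, msgs...).(type) {
	case nil:
		// all messages were written successfully
	case kafka.WriteErrors:
		for i := range msgs {
			if err[i] != nil {
				// handle the error writing msgs[i]
				log.Printf("failed to write msgs[%d]: %v", i, err[i])
			}
		}
	default:
		// handle other errors
		log.Printf("write failed: %v", err)
	}
}
```

kafka.WriteErrors is a slice of errors aligned with the input messages, which is why err[i] maps back to msgs[i] in the loop above.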
@@ -348,12 +348,13 @@ type WriterStats struct {
 	Bytes  int64 `metric:"kafka.writer.message.bytes" type:"counter"`
 	Errors int64 `metric:"kafka.writer.error.count"   type:"counter"`
 
-	BatchTime  DurationStats `metric:"kafka.writer.batch.seconds"`
-	WriteTime  DurationStats `metric:"kafka.writer.write.seconds"`
-	WaitTime   DurationStats `metric:"kafka.writer.wait.seconds"`
-	Retries    int64         `metric:"kafka.writer.retries.count" type:"counter"`
-	BatchSize  SummaryStats  `metric:"kafka.writer.batch.size"`
-	BatchBytes SummaryStats  `metric:"kafka.writer.batch.bytes"`
+	BatchTime      DurationStats `metric:"kafka.writer.batch.seconds"`
+	BatchQueueTime DurationStats `metric:"kafka.writer.batch.queue.seconds"`
+	WriteTime      DurationStats `metric:"kafka.writer.write.seconds"`
+	WaitTime       DurationStats `metric:"kafka.writer.wait.seconds"`
+	Retries        int64         `metric:"kafka.writer.retries.count" type:"counter"`
+	BatchSize      SummaryStats  `metric:"kafka.writer.batch.size"`
+	BatchBytes     SummaryStats  `metric:"kafka.writer.batch.bytes"`
 
 	MaxAttempts     int64         `metric:"kafka.writer.attempts.max" type:"gauge"`
 	WriteBackoffMin time.Duration `metric:"kafka.writer.backoff.min"  type:"gauge"`
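The new BatchQueueTime field is exported like the other duration metrics, so callers of Writer.Stats() pick it up automatically. Below is a minimal sketch of polling it, assuming the Avg/Min/Max fields of DurationStats and an arbitrary 30-second reporting interval; how the values are exported (here just logged) is up to the application.

```go
package kafkametrics

import (
	"context"
	"log"
	"time"

	"github.com/segmentio/kafka-go"
)

// reportStats periodically snapshots the writer's metrics and logs how long
// batches spent queued before being written (BatchQueueTime).
func reportStats(ctx context.Context, w *kafka.Writer) {
	ticker := time.NewTicker(30 * time.Second)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			// Stats returns a snapshot of the writer stats since the last call.
			stats := w.Stats()
			log.Printf("batch queue time: avg=%s min=%s max=%s",
				stats.BatchQueueTime.Avg,
				stats.BatchQueueTime.Min,
				stats.BatchQueueTime.Max)
		}
	}
}
```

Because Stats() reports the interval since the previous call, each log line covers only the most recent reporting window.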
@@ -398,6 +399,7 @@ type writerStats struct {
 	errors          counter
 	dialTime        summary
 	batchTime       summary
+	batchQueueTime  summary
 	writeTime       summary
 	waitTime        summary
 	retries         counter
@@ -880,6 +882,7 @@ func (w *Writer) Stats() WriterStats {
 		Errors:         stats.errors.snapshot(),
 		DialTime:       stats.dialTime.snapshotDuration(),
 		BatchTime:      stats.batchTime.snapshotDuration(),
+		BatchQueueTime: stats.batchQueueTime.snapshotDuration(),
 		WriteTime:      stats.writeTime.snapshotDuration(),
 		WaitTime:       stats.waitTime.snapshotDuration(),
 		Retries:        stats.retries.snapshot(),
@@ -1088,6 +1091,8 @@ func (ptw *partitionWriter) awaitBatch(batch *writeBatch) {
 		// having it leak until it expires.
 		batch.timer.Stop()
 	}
+	stats := ptw.w.stats()
+	stats.batchQueueTime.observe(int64(time.Since(batch.time)))
 }
 
 func (ptw *partitionWriter) writeBatch(batch *writeBatch) {
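The observation added to awaitBatch measures how long the batch sat in the queue: batch.time (presumably recorded when the batch is created) is compared against the moment awaitBatch stops waiting, whether the batch filled up, was flushed, or its timer fired. The following is a standalone sketch of that same measurement pattern, independent of the writer internals; the job, worker, and enqueue names are illustrative only.

```go
package queuetime

import (
	"log"
	"time"
)

// job stands in for a queued batch: it records when it entered the queue.
type job struct {
	enqueued time.Time
	payload  []byte
}

// enqueue timestamps the job at the moment it is put on the queue.
func enqueue(queue chan<- job, payload []byte) {
	queue <- job{enqueued: time.Now(), payload: payload}
}

// worker drains the queue and observes how long each job sat in it before
// being processed, mirroring the time.Since(batch.time) observation that
// feeds kafka.writer.batch.queue.seconds.
func worker(queue <-chan job) {
	for j := range queue {
		queueTime := time.Since(j.enqueued)
		log.Printf("queue time: %s", queueTime)
		// ... process j.payload ...
	}
}
```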