Skip to content

Commit 8cda0de

Browse files
committed
ksql client with example docker/prometheus configuration and setup
1 parent 9e79a1b commit 8cda0de

File tree

4 files changed

+164
-1
lines changed

4 files changed

+164
-1
lines changed

docker-compose.yml

Lines changed: 32 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,14 @@
22
version: "3.2"
33

44
services:
5+
prometheus:
6+
image: prom/prometheus
7+
container_name: prometheus
8+
ports:
9+
- 9090:9090
10+
- 8080:8080
11+
volumes:
12+
- ./prometheus/:/etc/prometheus/
513
zookeeper:
614
image: confluentinc/cp-zookeeper:7.0.1
715
hostname: zookeeper
@@ -35,13 +43,15 @@ services:
3543
hostname: ksqldb-server
3644
container_name: ksqldb-server
3745
volumes:
46+
- ./jmx_exporter:/usr/share/jmx_exporter/
3847
- type: bind
3948
source: ./
4049
target: /home/appuser
4150
depends_on:
4251
- broker
4352
ports:
4453
- "8088:8088"
54+
- "1090:1099"
4555
environment:
4656
KSQL_LISTENERS: http://0.0.0.0:8088
4757
KSQL_BOOTSTRAP_SERVERS: broker:9092
@@ -59,7 +69,12 @@ services:
5969
# KSQL_SSL_KEY_PASSWORD: ${SSL_PASSWORD}
6070
KSQL_KSQL_HEARTBEAT_ENABLE: "true"
6171
KSQL_KSQL_LAG_REPORTING_ENABLE: "true"
62-
72+
KSQL_KSQL_PULL_METRICS_ENABLED: "true"
73+
KSQL_JMX_OPTS: >
74+
-Dcom.sun.management.jmxremote.authenticate=false
75+
-Dcom.sun.management.jmxremote.ssl=false
76+
-Djava.util.logging.config.file=logging.properties
77+
-javaagent:/usr/share/jmx_exporter/jmx_prometheus_javaagent-0.17.0.jar=7010:/usr/share/jmx_exporter/ksqldb.yml
6378
ksqldb-cli:
6479
image: confluentinc/ksqldb-cli:0.25.1
6580
container_name: ksqldb-cli
@@ -68,3 +83,19 @@ services:
6883
- ksqldb-server
6984
entrypoint: /bin/sh
7085
tty: true
86+
prometheus:
87+
image: prom/prometheus
88+
container_name: prometheus
89+
ports:
90+
- 9090:9090
91+
- 8080:8080
92+
volumes:
93+
- ./prometheus/:/etc/prometheus/
94+
95+
# Possible JMX OPT alternative?
96+
# -Djava.rmi.server.hostname=localhost
97+
# -Dcom.sun.management.jmxremote
98+
# -Dcom.sun.management.jmxremote.port=1099
99+
# -Dcom.sun.management.jmxremote.authenticate=false
100+
# -Dcom.sun.management.jmxremote.ssl=false
101+
# -Dcom.sun.management.jmxremote.rmi.port=1099
Binary file not shown.

jmx_exporter/ksqldb.yml

Lines changed: 124 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,124 @@
1+
---
2+
lowercaseOutputName: true
3+
lowercaseOutputLabelNames: true
4+
whitelistObjectNames:
5+
- "io.confluent.ksql.metrics:*"
6+
# The three lines below are used to pull the Kafka Client Producer, Consumer & Streams metrics from the KSQL Client.
7+
# If you care about Producer/Consumer/Streams metrics for KSQL, please uncomment the 3 lines below.
8+
# Please note that this increases the scrape duration to about 1 second as it needs to parse a lot of data.
9+
- "kafka.consumer:*"
10+
- "kafka.producer:*"
11+
- "kafka.streams:*"
12+
blacklistObjectNames:
13+
- kafka.streams:type=kafka-metrics-count
14+
# This will ignore the admin client metrics from KSQL server and will blacklist certain metrics
15+
# that do not make sense for ingestion.
16+
- "kafka.admin.client:*"
17+
- "kafka.consumer:type=*,id=*"
18+
- "kafka.consumer:type=*,client-id=*"
19+
- "kafka.consumer:type=*,client-id=*,node-id=*"
20+
- "kafka.producer:type=*,id=*"
21+
- "kafka.producer:type=*,client-id=*"
22+
- "kafka.producer:type=*,client-id=*,node-id=*"
23+
- "kafka.streams:type=stream-processor-node-metrics,thread-id=*,task-id=*,processor-node-id=*"
24+
- "kafka.*:type=kafka-metrics-count,*"
25+
rules:
26+
# "io.confluent.ksql.metrics:type=producer-metrics,key=*,id=*"
27+
# "io.confluent.ksql.metrics:type=consumer-metrics,key=*,id=*"
28+
- pattern: io.confluent.ksql.metrics<type=(.+), key=(.+), id=(.+)><>([^:]+)
29+
name: ksql_$1_$4
30+
labels:
31+
key: "$2"
32+
id: "$3"
33+
# "io.confluent.ksql.metrics:type=_confluent-ksql-<cluster-id>ksql-engine-query-stats"
34+
# The below statement parses KSQL Cluster Name and adds a new label so that per cluster data is searchable.
35+
- pattern: io.confluent.ksql.metrics<type=_confluent-ksql-(.+)ksql-engine-query-stats><>([^:]+)
36+
name: "ksql_ksql_engine_query_stats_$2"
37+
labels:
38+
ksql_cluster: $1
39+
# "io.confluent.ksql.metrics:type=ksql-queries,status=_confluent-ksql-<cluster-id>_query_<query>"
40+
# The below statement parses KSQL query specific status
41+
- pattern: "io.confluent.ksql.metrics<type=(.+), status=_confluent-ksql-(.+)query_(.+)><>(.+): (.+)"
42+
value: 1
43+
name: ksql_ksql_metrics_$1_$4
44+
labels:
45+
ksql_query: $3
46+
ksql_cluster: $2
47+
$4: $5
48+
# kafka.streams:type=stream-processor-node-metrics,processor-node-id=*,task-id=*,thread-id=*
49+
# kafka.streams:type=stream-record-cache-metrics,record-cache-id=*,task-id=*,thread-id=*
50+
# kafka.streams:type=stream-state-metrics,rocksdb-state-id=*,task-id=*,thread-id=*
51+
# kafka.streams:type=stream-state-metrics,rocksdb-state-id=*,task-id=*,thread-id=*
52+
# - pattern: "kafka.streams<type=(.+), thread-id=(.+), task-id=(.+), (.+)=(.+)><>(.+):"
53+
# name: kafka_streams_$1_$6
54+
# type: GAUGE
55+
# labels:
56+
# thread_id: "$2"
57+
# task_id: "$3"
58+
# $4: "$5"
59+
# kafka.streams:type=stream-task-metrics,task-id=*,thread-id=*
60+
# - pattern: "kafka.streams<type=(.+), thread-id=(.+), task-id=(.+)><>(.+):"
61+
# name: kafka_streams_$1_$4
62+
# type: GAUGE
63+
# labels:
64+
# thread_id: "$2"
65+
# task_id: "$3"
66+
# kafka.streams:type=stream-metrics,client-id=*
67+
# - pattern: "kafka.streams<type=stream-metrics, (.+)=(.+)><>(state|alive-stream-threads|commit-id|version|application-id): (.+)"
68+
# name: kafka_streams_stream_metrics
69+
# value: 1
70+
# type: UNTYPED
71+
# labels:
72+
# $1: "$2"
73+
# $3: "$4"
74+
# kafka.streams:type=stream-thread-metrics,thread-id=*
75+
# - pattern: "kafka.streams<type=(.+), (.+)=(.+)><>([^:]+)"
76+
# name: kafka_streams_$1_$4
77+
# type: GAUGE
78+
# labels:
79+
# $2: "$3"
80+
# "kafka.consumer:type=app-info,client-id=*"
81+
# "kafka.producer:type=app-info,client-id=*"
82+
# - pattern: "kafka.(.+)<type=app-info, client-id=(.+)><>(.+): (.+)"
83+
# value: 1
84+
# name: kafka_$1_app_info
85+
# labels:
86+
# client_type: $1
87+
# client_id: $2
88+
# $3: $4
89+
# type: UNTYPED
90+
# "kafka.consumer:type=consumer-metrics,client-id=*, protocol=*, cipher=*"
91+
# "kafka.consumer:type=type=consumer-fetch-manager-metrics,client-id=*, topic=*, partition=*"
92+
# "kafka.producer:type=producer-metrics,client-id=*, protocol=*, cipher=*"
93+
# - pattern: "kafka.(.+)<type=(.+), (.+)=(.+), (.+)=(.+), (.+)=(.+)><>(.+):"
94+
# name: kafka_$1_$2_$9
95+
# type: GAUGE
96+
# labels:
97+
# client_type: $1
98+
# $3: "$4"
99+
# $5: "$6"
100+
# $7: "$8"
101+
# "kafka.consumer:type=consumer-node-metrics,client-id=*, node-id=*"
102+
# "kafka.consumer:type=consumer-fetch-manager-metrics,client-id=*, topic=*"
103+
# "kafka.producer:type=producer-node-metrics,client-id=*, node-id=*"
104+
# "kafka.producer:type=producer-topic-metrics,client-id=*, topic=*"
105+
# - pattern: "kafka.(.+)<type=(.+), (.+)=(.+), (.+)=(.+)><>(.+):"
106+
# name: kafka_$1_$2_$7
107+
# type: GAUGE
108+
# labels:
109+
# client_type: $1
110+
# $3: "$4"
111+
# $5: "$6"
112+
# "kafka.consumer:type=consumer-fetch-manager-metrics,client-id=*"
113+
# "kafka.consumer:type=consumer-metrics,client-id=*"
114+
# "kafka.producer:type=producer-metrics,client-id=*"
115+
# - pattern: "kafka.(.+)<type=(.+), (.+)=(.+)><>(.+):"
116+
# name: kafka_$1_$2_$5
117+
# type: GAUGE
118+
# labels:
119+
# client_type: $1
120+
# $3: "$4"
121+
# - pattern: "kafka.(.+)<type=(.+)><>(.+):"
122+
# name: kafka_$1_$2_$3
123+
# labels:
124+
# client_type: $1

prometheus/prometheus.yml

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,8 @@
1+
global:
2+
scrape_interval: 10s
3+
evaluation_interval: 10s
4+
scrape_configs:
5+
- job_name: "ksqldb"
6+
static_configs:
7+
- targets:
8+
- ksqldb-server:7010

0 commit comments

Comments
 (0)