
Commit 0bb6586

Charith Ellawala committed
Switch to using ExternalResource
1 parent b1bc7c0 commit 0bb6586

4 files changed: +152 -62 lines

README.md

Lines changed: 13 additions & 2 deletions
@@ -13,14 +13,25 @@ Use https://jitpack.io/ until I get around to doing a proper release to Maven Ce
 Usage
 ------
 
-Create an instance of the rule in your test class and annotate it with `@Rule`.
+Create an instance of the rule in your test class and annotate it with `@Rule`. This will start and stop the
+broker between each test invocation.
 
 ```java
 @Rule
 public KafkaJunitRule kafkaRule = new KafkaJunitRule();
 ```
 
-`kafkaRule` can now be referenced from within your test methods.
+
+To spin up the broker at the beginning of a test suite and tear it down at the end, use `@ClassRule`.
+
+```java
+@ClassRule
+public static KafkaJunitRule kafkaRule = new KafkaJunitRule();
+```
+
+
+
+`kafkaRule` can be referenced from within your test methods to obtain information about the Kafka broker.
 
 ```java
 @Test
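
Note: the README hunk above ends inside its `@Test` example, so the remainder of that snippet is not shown here. As a rough sketch only, using the rule's accessors that appear elsewhere in this commit (`producerConfig()`, `kafkaBrokerPort()`, `zookeeperConnectionString()`), such a test might look like the following; the class name, topic, key and value are illustrative and not part of the commit:

```java
package com.github.charithe.kafka;

import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;
import org.junit.Rule;
import org.junit.Test;

import static org.hamcrest.CoreMatchers.*;
import static org.junit.Assert.assertThat;

// Illustrative only: the class name, topic, key and value below are made up.
public class KafkaJunitRuleUsageExample {

    @Rule
    public KafkaJunitRule kafkaRule = new KafkaJunitRule();

    @Test
    public void publishesToTheEmbeddedBroker() {
        // The rule exposes details of the embedded broker and Zookeeper it started in before().
        assertThat(kafkaRule.zookeeperConnectionString(), is(notNullValue()));
        assertThat(kafkaRule.kafkaBrokerPort(), is(not(0)));

        // producerConfig() is already pointed at the embedded broker.
        ProducerConfig conf = kafkaRule.producerConfig();
        Producer<String, String> producer = new Producer<>(conf);
        producer.send(new KeyedMessage<>("example-topic", "key", "value"));
        producer.close();
    }
}
```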

src/main/java/com/github/charithe/kafka/KafkaJunitRule.java

Lines changed: 47 additions & 52 deletions
@@ -22,9 +22,7 @@
 import kafka.server.KafkaServerStartable;
 import org.apache.curator.test.InstanceSpec;
 import org.apache.curator.test.TestingServer;
-import org.junit.rules.TestRule;
-import org.junit.runner.Description;
-import org.junit.runners.model.Statement;
+import org.junit.rules.ExternalResource;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -39,41 +37,66 @@
 /**
  * Starts up a local Zookeeper and a Kafka broker
  */
-public class KafkaJunitRule implements TestRule {
+public class KafkaJunitRule extends ExternalResource {
 
     private static final Logger LOGGER = LoggerFactory.getLogger(KafkaJunitRule.class);
 
     private TestingServer zookeeper;
     private KafkaServerStartable kafkaServer;
 
+    private int zookeeperPort;
+    private String zookeeperConnectionString;
     private int kafkaPort = 9092;
     private Path kafkaLogDir;
 
-    @Override
-    public Statement apply(final Statement statement, Description description) {
-        return new Statement() {
-            @Override
-            public void evaluate() throws Throwable {
-                try {
-                    startKafkaServer();
-                    statement.evaluate();
-                } finally {
-                    stopKafkaServer();
-                }
-            }
-        };
-    }
 
-    private void startKafkaServer() throws Exception {
+    @Override
+    protected void before() throws Throwable {
         zookeeper = new TestingServer(true);
-        String zkQuorumStr = zookeeper.getConnectString();
-        KafkaConfig kafkaConfig = buildKafkaConfig(zkQuorumStr);
+        zookeeperPort = zookeeper.getPort();
+        zookeeperConnectionString = zookeeper.getConnectString();
+        KafkaConfig kafkaConfig = buildKafkaConfig(zookeeperConnectionString);
 
         LOGGER.info("Starting Kafka server with config: {}", kafkaConfig.props().props());
         kafkaServer = new KafkaServerStartable(kafkaConfig);
         kafkaServer.startup();
     }
 
+    @Override
+    protected void after() {
+        try {
+            if (kafkaServer != null) {
+                LOGGER.info("Shutting down Kafka Server");
+                kafkaServer.shutdown();
+            }
+
+            if (zookeeper != null) {
+                LOGGER.info("Shutting down Zookeeper");
+                zookeeper.close();
+            }
+
+            if (Files.exists(kafkaLogDir)) {
+                LOGGER.info("Deleting the log dir: {}", kafkaLogDir);
+                Files.walkFileTree(kafkaLogDir, new SimpleFileVisitor<Path>() {
+                    @Override
+                    public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
+                        Files.deleteIfExists(file);
+                        return FileVisitResult.CONTINUE;
+                    }
+
+                    @Override
+                    public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
+                        Files.deleteIfExists(dir);
+                        return FileVisitResult.CONTINUE;
+                    }
+                });
+            }
+        }
+        catch(Exception e){
+            LOGGER.error("Failed to clean-up Kafka",e);
+        }
+    }
+
     private KafkaConfig buildKafkaConfig(String zookeeperQuorum) throws IOException {
         kafkaLogDir = Files.createTempDirectory("kafka_junit");
         kafkaPort = InstanceSpec.getRandomPort();
@@ -88,34 +111,6 @@ private KafkaConfig buildKafkaConfig(String zookeeperQuorum) throws IOException
         return new KafkaConfig(props);
     }
 
-    private void stopKafkaServer() throws IOException {
-        if (kafkaServer != null) {
-            LOGGER.info("Shutting down Kafka Server");
-            kafkaServer.shutdown();
-        }
-
-        if (zookeeper != null) {
-            LOGGER.info("Shutting down Zookeeper");
-            zookeeper.close();
-        }
-
-        if (Files.exists(kafkaLogDir)) {
-            LOGGER.info("Deleting the log dir: {}", kafkaLogDir);
-            Files.walkFileTree(kafkaLogDir, new SimpleFileVisitor<Path>() {
-                @Override
-                public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
-                    Files.deleteIfExists(file);
-                    return FileVisitResult.CONTINUE;
-                }
-
-                @Override
-                public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
-                    Files.deleteIfExists(dir);
-                    return FileVisitResult.CONTINUE;
-                }
-            });
-        }
-    }
 
     /**
      * Create a producer configuration.
@@ -140,7 +135,7 @@ public ProducerConfig producerConfig() {
      */
     public ConsumerConfig consumerConfig() {
         Properties props = new Properties();
-        props.put("zookeeper.connect", zookeeper.getConnectString());
+        props.put("zookeeper.connect", zookeeperConnectionString);
         props.put("group.id", "kafka-junit-consumer");
         props.put("zookeeper.session.timeout.ms", "400");
        props.put("zookeeper.sync.time.ms", "200");
@@ -170,14 +165,14 @@ public int kafkaBrokerPort(){
      * @return zookeeper port
      */
     public int zookeeperPort(){
-        return zookeeper.getPort();
+        return zookeeperPort;
     }
 
     /**
      * Get the zookeeper connection string
      * @return zookeeper connection string
      */
     public String zookeeperConnectionString(){
-        return zookeeper.getConnectString();
+        return zookeeperConnectionString;
     }
 }
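
For background on this change: `org.junit.rules.ExternalResource` is JUnit 4's base class for rules that manage an external resource, so a subclass only overrides `before()` and `after()` instead of hand-rolling `apply(Statement, Description)` as the deleted code above did. A minimal sketch of the pattern, with a hypothetical `TempDirRule` that is not part of this commit:

```java
import org.junit.rules.ExternalResource;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

// Sketch of the ExternalResource pattern that KafkaJunitRule now follows:
// acquire the resource in before(), release it in after().
public class TempDirRule extends ExternalResource {

    private Path dir;

    @Override
    protected void before() throws Throwable {
        // Runs before each test when used with @Rule, or once per class with @ClassRule.
        dir = Files.createTempDirectory("example");
    }

    @Override
    protected void after() {
        // Runs after the test (or test class) finishes, even when a test fails.
        try {
            Files.deleteIfExists(dir);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    public Path tempDir() {
        return dir;
    }
}
```

Used with `@ClassRule`, the same `before()`/`after()` pair wraps the whole test class rather than each test, which is what the new `KafkaJunitClassRuleTest` below exercises.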

src/test/java/com/github/charithe/kafka/KafkaJunitClassRuleTest.java

Lines changed: 78 additions & 0 deletions
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2015 Charith Ellawala
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.github.charithe.kafka;
+
+import kafka.consumer.Consumer;
+import kafka.consumer.ConsumerConfig;
+import kafka.consumer.ConsumerIterator;
+import kafka.consumer.KafkaStream;
+import kafka.javaapi.consumer.ConsumerConnector;
+import kafka.javaapi.producer.Producer;
+import kafka.message.MessageAndMetadata;
+import kafka.producer.KeyedMessage;
+import kafka.producer.ProducerConfig;
+import kafka.serializer.StringDecoder;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.hamcrest.CoreMatchers.*;
+import static org.junit.Assert.assertThat;
+
+
+public class KafkaJunitClassRuleTest {
+
+    private static final String TOPIC = "topicY";
+    private static final String KEY = "keyY";
+    private static final String VALUE = "valueY";
+
+    @ClassRule
+    public static KafkaJunitRule kafkaRule = new KafkaJunitRule();
+
+    @Test
+    public void testKafkaServerIsUp() {
+        ProducerConfig conf = kafkaRule.producerConfig();
+        Producer<String, String> producer = new Producer<>(conf);
+        producer.send(new KeyedMessage<>(TOPIC, KEY, VALUE));
+        producer.close();
+
+
+        ConsumerConfig consumerConf = kafkaRule.consumerConfig();
+        ConsumerConnector consumer = Consumer.createJavaConsumerConnector(consumerConf);
+        Map<String, Integer> topicCountMap = new HashMap<>();
+        topicCountMap.put(TOPIC, 1);
+        Map<String, List<KafkaStream<String, String>>> consumerMap = consumer
+                .createMessageStreams(topicCountMap, new StringDecoder(consumerConf.props()),
+                                      new StringDecoder(consumerConf.props()));
+        List<KafkaStream<String, String>> streams = consumerMap.get(TOPIC);
+
+        assertThat(streams, is(notNullValue()));
+        assertThat(streams.size(), is(equalTo(1)));
+
+        KafkaStream<String, String> ks = streams.get(0);
+        ConsumerIterator<String, String> iterator = ks.iterator();
+        MessageAndMetadata<String, String> msg = iterator.next();
+
+        assertThat(msg, is(notNullValue()));
+        assertThat(msg.key(), is(equalTo(KEY)));
+        assertThat(msg.message(), is(equalTo(VALUE)));
+    }
+}

src/test/java/com/github/charithe/kafka/KafkaJunitRuleTest.java

Lines changed: 14 additions & 8 deletions
@@ -39,34 +39,40 @@
 
 public class KafkaJunitRuleTest {
 
+    private static final String TOPIC = "topicX";
+    private static final String KEY = "keyX";
+    private static final String VALUE = "valueX";
+
     @Rule
     public KafkaJunitRule kafkaRule = new KafkaJunitRule();
 
     @Test
     public void testKafkaServerIsUp() {
         ProducerConfig conf = kafkaRule.producerConfig();
         Producer<String, String> producer = new Producer<>(conf);
-        producer.send(new KeyedMessage<>("topic", "k1", "value1"));
+        producer.send(new KeyedMessage<>(TOPIC, KEY, VALUE));
         producer.close();
 
+
         ConsumerConfig consumerConf = kafkaRule.consumerConfig();
         ConsumerConnector consumer = Consumer.createJavaConsumerConnector(consumerConf);
-
         Map<String, Integer> topicCountMap = new HashMap<>();
-        topicCountMap.put("topic", 1);
-        Map<String, List<KafkaStream<String, String>>> consumerMap = consumer.createMessageStreams(topicCountMap, new StringDecoder(consumerConf.props()), new StringDecoder(consumerConf.props()));
-        List<KafkaStream<String, String>> streams = consumerMap.get("topic");
-
+        topicCountMap.put(TOPIC, 1);
+        Map<String, List<KafkaStream<String, String>>> consumerMap = consumer
+                .createMessageStreams(topicCountMap, new StringDecoder(consumerConf.props()),
+                                      new StringDecoder(consumerConf.props()));
+        List<KafkaStream<String, String>> streams = consumerMap.get(TOPIC);
 
         assertThat(streams, is(notNullValue()));
         assertThat(streams.size(), is(equalTo(1)));
 
         KafkaStream<String, String> ks = streams.get(0);
         ConsumerIterator<String, String> iterator = ks.iterator();
         MessageAndMetadata<String, String> msg = iterator.next();
+
         assertThat(msg, is(notNullValue()));
-        assertThat(msg.key(), is(equalTo("k1")));
-        assertThat(msg.message(), is(equalTo("value1")));
+        assertThat(msg.key(), is(equalTo(KEY)));
+        assertThat(msg.message(), is(equalTo(VALUE)));
     }
 
 }
