This repository was archived by the owner on Oct 23, 2023. It is now read-only.

Commit 9185032

aquamatthias authored and markglh committed
Make KinesisSourceGraphIntegrationSpec more robust. (#46)
* Make KinesisSourceGraphIntegrationSpec more robust. All test cases assumed exactly-once delivery; they now assume at-least-once delivery, which is what Kinesis actually provides. Let the spec operate on a single shard so conditions on the content are easy to specify, and use `takeWhile` instead of `take`. Specify the batchSize explicitly where the default does not apply. Remove the 4-worker test, since more workers than shards is already covered by the 2-worker setup.
1 parent e731cd6 commit 9185032
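
The heart of the change is replacing a fixed `take(n)` with `takeWhile(..., inclusive = true)`: under at-least-once delivery, duplicates can fill the n slots of `take(n)` before the last distinct message arrives (or, if fewer records than expected are redelivered, the stream never completes), whereas a predicate on the payload ends the stream at the last expected message no matter how many duplicates show up. Below is a minimal, standalone Akka Streams sketch of that pattern; it is not taken from the repository, assumes an Akka 2.5-style ActorMaterializer, and uses plain Long values in place of Kinesis events.

// Standalone sketch: assumes Akka 2.5-style ActorMaterializer; Longs stand in for Kinesis events.
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}

import scala.concurrent.Await
import scala.concurrent.duration._

object TakeWhileInclusiveSketch extends App {
  implicit val system: ActorSystem          = ActorSystem("sketch")
  implicit val materializer: ActorMaterializer = ActorMaterializer()

  // At-least-once delivery may repeat elements; here "2" is delivered twice.
  val delivered   = Source(List(1L, 2L, 2L, 3L))
  val lastMessage = 3L

  // take(3) would complete after 1, 2, 2 and miss the final message.
  // takeWhile with inclusive = true consumes through the duplicate and also
  // emits the first element for which the predicate fails (the last message).
  val result = delivered
    .takeWhile(_ < lastMessage, inclusive = true)
    .runWith(Sink.seq)

  println(Await.result(result, 5.seconds)) // Vector(1, 2, 2, 3)
  system.terminate()
}

Because the predicate only becomes false on the terminating element, `inclusive = true` is needed so that the final message itself is still emitted (and, in the specs, committed).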

Showing 3 changed files with 24 additions and 46 deletions.

src/it/scala/com/weightwatchers/reactive/kinesis/common/KinesisSuite.scala

Lines changed: 1 addition & 1 deletion
@@ -152,7 +152,7 @@ trait KinesisSuite
 
   def TestStreamNrOfMessagesPerShard: Long
 
-  def TestStreamNumberOfShards: Long = 2
+  def TestStreamNumberOfShards: Long = 1
 
   private lazy val kclSetupConfig =
     consumerConfFor(streamName = TestStreamName, appName = suiteName).kclConfiguration

src/it/scala/com/weightwatchers/reactive/kinesis/consumer/ConsumerProcessingManagerIntegrationSpec.scala

Lines changed: 1 addition & 0 deletions
@@ -27,6 +27,7 @@ class ConsumerProcessingManagerIntegrationSpec
     with Eventually {
 
   override def TestStreamNrOfMessagesPerShard: Long = 0
+  override def TestStreamNumberOfShards: Long = 2
   override implicit def patienceConfig: PatienceConfig = PatienceConfig(60.seconds, 1.second)
 
   "A ConsumerProcessingManager on a stream with 2 shards" - {

src/it/scala/com/weightwatchers/reactive/kinesis/stream/KinesisSourceGraphIntegrationSpec.scala

Lines changed: 22 additions & 45 deletions
@@ -26,17 +26,18 @@ class KinesisSourceGraphIntegrationSpec
     "process all messages of a stream with one worker" in new withKinesisConfForApp("1worker") {
       val result = Kinesis
         .source(consumerConf = consumerConf())
-        .take(TestStreamNumberOfShards * TestStreamNrOfMessagesPerShard)
+        .takeWhile(_.payload.payloadAsString().toLong < TestStreamNrOfMessagesPerShard,
+                   inclusive = true)
         .map { event =>
           event.commit()
-          event.payload.payload
+          event.payload.payloadAsString()
         }
         .runWith(Sink.seq)
 
       val grouped = result.futureValue.groupBy(identity)
       result.futureValue.distinct should have size TestStreamNrOfMessagesPerShard
       grouped should have size TestStreamNrOfMessagesPerShard
-      grouped.values.foreach(_ should have size TestStreamNumberOfShards)
+      grouped.values.foreach(_.size.toLong shouldBe >=(TestStreamNumberOfShards))
     }
 
     "process all messages of a stream with 2 workers" in new withKinesisConfForApp("2worker") {
@@ -48,44 +49,18 @@ class KinesisSourceGraphIntegrationSpec
       val source2 = Kinesis.source(consumerConf = consumerConf())
       val result = source1
        .merge(source2)
-        .take(TestStreamNrOfMessagesPerShard * TestStreamNumberOfShards)
+        .takeWhile(_.payload.payloadAsString().toLong < TestStreamNrOfMessagesPerShard,
+                   inclusive = true)
        .map { event =>
          event.commit()
-          event.payload.payload
+          event.payload.payloadAsString()
        }
        .runWith(Sink.seq)
 
      val grouped = result.futureValue.groupBy(identity)
      result.futureValue.distinct should have size TestStreamNrOfMessagesPerShard
      grouped should have size TestStreamNrOfMessagesPerShard
-      grouped.values.foreach(_ should have size TestStreamNumberOfShards)
-    }
-
-    "process all messages of a stream with 4 workers" in new withKinesisConfForApp("4worker") {
-      // Please note: since all sources are started simultaneously, all will assume there is no other worker.
-      // During register all except one will fail and not read any message until retry
-      // Depending on timing one or multiple sources will read all events
-      val batchSize = TestStreamNrOfMessagesPerShard
-      val source1 = Kinesis.source(consumerConf = consumerConf())
-      val source2 = Kinesis.source(consumerConf = consumerConf())
-      val source3 = Kinesis.source(consumerConf = consumerConf())
-      val source4 = Kinesis.source(consumerConf = consumerConf())
-      val result = source1
-        .merge(source2)
-        .merge(source3)
-        .merge(source4)
-        // Since only 2 clients can take batchSize messages, an overall take is needed here to end the stream
-        .take(TestStreamNrOfMessagesPerShard * TestStreamNumberOfShards)
-        .map { event =>
-          event.commit()
-          event.payload.payload
-        }
-        .runWith(Sink.seq)
-
-      val grouped = result.futureValue.groupBy(identity)
-      result.futureValue.distinct should have size TestStreamNrOfMessagesPerShard
-      grouped should have size TestStreamNrOfMessagesPerShard
-      grouped.values.foreach(_ should have size TestStreamNumberOfShards)
+      grouped.values.foreach(_.size.toLong shouldBe >=(TestStreamNumberOfShards))
     }
 
     "maintain the read position in the stream correctly" in new withKinesisConfForApp(
@@ -98,15 +73,16 @@ class KinesisSourceGraphIntegrationSpec
      // - dies after one batch
      // We expect to get all messages by n reads (which means, that the read position was stored correctly)
      val result =
-        for (_ <- 1
+        for (iteration <- 1
            .to((TestStreamNumberOfShards * TestStreamNrOfMessagesPerShard / batchSize).toInt))
          yield {
            Kinesis
-              .source(consumerConf = consumerConf())
-              .take(batchSize)
+              .source(consumerConf = consumerConf(batchSize = batchSize))
+              .takeWhile(_.payload.payloadAsString().toLong < batchSize * iteration,
+                         inclusive = true)
              .map { event =>
                event.commit()
-                event.payload.payload
+                event.payload
              }
              .runWith(Sink.seq)
              .futureValue
@@ -117,38 +93,39 @@ class KinesisSourceGraphIntegrationSpec
      val grouped = allMessages.groupBy(identity)
      allMessages.distinct should have size TestStreamNrOfMessagesPerShard
      grouped should have size TestStreamNrOfMessagesPerShard
+      grouped.values.foreach(_.size.toLong shouldBe >=(TestStreamNumberOfShards))
    }
 
    "not commit the position, if the event is not committed" in new withKinesisConfForApp(
      "not_committed"
    ) {
-      val batchSize = TestStreamNrOfMessagesPerShard / 2 // 2 * NrOfShards batches needed
-
-      // This worker will read batchSize events and will not commit
+      // This worker will read all events and will not commit
      // We expect that the read position will not change
      val uncommitted = Kinesis
        .source(consumerConf())
-        .take(batchSize)
+        .takeWhile(_.payload.payloadAsString().toLong < TestStreamNrOfMessagesPerShard,
+                   inclusive = true)
        .runWith(Sink.seq)
        .futureValue
 
      // This worker will read all available events.
      // This works only, if the first worker has not committed anything
      val committed = Kinesis
        .source(consumerConf = consumerConf())
-        .take(TestStreamNumberOfShards * TestStreamNrOfMessagesPerShard)
+        .takeWhile(_.payload.payloadAsString().toLong < TestStreamNrOfMessagesPerShard,
+                   inclusive = true)
        .map { event =>
          event.commit()
-          event.payload.payload
+          event.payload.payloadAsString()
        }
        .runWith(Sink.seq)
        .futureValue
 
-      uncommitted should have size batchSize
+      uncommitted should have size TestStreamNrOfMessagesPerShard
      val grouped = committed.groupBy(identity)
      committed.distinct should have size TestStreamNrOfMessagesPerShard
      grouped should have size TestStreamNrOfMessagesPerShard
-      grouped.values.foreach(_ should have size TestStreamNumberOfShards)
+      grouped.values.foreach(_.size.toLong shouldBe >=(TestStreamNumberOfShards))
    }
  }
}
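
With duplicates possible, the exact-size assertions on the grouped payloads had to become lower bounds. The relaxed checks amount to the following self-contained ScalaTest sketch; it is not part of the repository, assumes ScalaTest 3.x-style AnyFreeSpec/Matchers imports, and hard-codes one duplicated delivery to show what the >= assertion tolerates.

import org.scalatest.freespec.AnyFreeSpec
import org.scalatest.matchers.should.Matchers

// Hypothetical spec mirroring the duplicate-tolerant assertions in the diff above.
class AtLeastOnceAssertionSketch extends AnyFreeSpec with Matchers {

  "a consumer with at-least-once delivery" - {
    "sees every payload at least once, possibly more than once" in {
      val messagesPerShard = 3L
      val numberOfShards   = 1L

      // Payloads "1".."3" from a single shard, with "2" delivered twice.
      val received = Seq("1", "2", "2", "3")

      val grouped = received.groupBy(identity)
      received.distinct should have size messagesPerShard
      grouped should have size messagesPerShard
      // An exact-size check would fail on the duplicate; >= tolerates it.
      grouped.values.foreach(_.size.toLong shouldBe >=(numberOfShards))
    }
  }
}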
