24
24
import ai .langstream .api .model .Module ;
25
25
import ai .langstream .api .model .StreamingCluster ;
26
26
import ai .langstream .api .model .TopicDefinition ;
27
+ import ai .langstream .api .runner .code .Header ;
27
28
import ai .langstream .api .runner .code .Record ;
28
29
import ai .langstream .api .runner .code .SimpleRecord ;
29
30
import ai .langstream .api .runner .topics .TopicConnectionsRuntimeRegistry ;
39
40
import ai .langstream .kafka .runner .KafkaTopicConnectionsRuntime ;
40
41
import ai .langstream .kafka .runtime .KafkaTopic ;
41
42
import java .util .ArrayList ;
43
+ import java .util .Arrays ;
42
44
import java .util .List ;
43
45
import java .util .Map ;
44
46
import java .util .Set ;
47
+ import java .util .UUID ;
45
48
import java .util .concurrent .ExecutorService ;
46
49
import java .util .concurrent .Executors ;
47
50
import java .util .concurrent .TimeUnit ;
@@ -66,7 +69,7 @@ class KafkaConsumerTest {
66
69
@ ValueSource (ints = {1 , 4 })
67
70
public void testKafkaConsumerCommitOffsets (int numPartitions ) throws Exception {
68
71
final AdminClient admin = kafkaContainer .getAdmin ();
69
- String topicName = "input-topic-" + numPartitions + "parts" ;
72
+ String topicName = "input-topic-" + numPartitions + "parts-" + UUID . randomUUID () ;
70
73
Application applicationInstance =
71
74
ModelBuilder .buildApplicationInstance (
72
75
Map .of (
@@ -190,7 +193,7 @@ public void testKafkaConsumerCommitOffsetsMultiThread() throws Exception {
190
193
int numPartitions = 4 ;
191
194
int numThreads = 8 ;
192
195
final AdminClient admin = kafkaContainer .getAdmin ();
193
- String topicName = "input-topic-" + numPartitions + "-parts-mt" ;
196
+ String topicName = "input-topic-" + numPartitions + "-parts-mt-" + UUID . randomUUID () ;
194
197
Application applicationInstance =
195
198
ModelBuilder .buildApplicationInstance (
196
199
Map .of (
@@ -285,7 +288,7 @@ public void testKafkaConsumerCommitOffsetsMultiThread() throws Exception {
285
288
public void testRestartConsumer () throws Exception {
286
289
int numPartitions = 1 ;
287
290
final AdminClient admin = kafkaContainer .getAdmin ();
288
- String topicName = "input-topic-restart" ;
291
+ String topicName = "input-topic-restart-" + UUID . randomUUID () ;
289
292
Application applicationInstance =
290
293
ModelBuilder .buildApplicationInstance (
291
294
Map .of (
@@ -370,6 +373,105 @@ public void testRestartConsumer() throws Exception {
370
373
}
371
374
}
372
375
376
/**
 * Verifies that a single Kafka topic can carry records with heterogeneous key/value
 * types (Integer vs String) and optional per-record headers, and that a consumer can
 * read them back and commit the offsets.
 */
@Test
public void testMultipleSchemas() throws Exception {
    int numPartitions = 1;
    final AdminClient admin = kafkaContainer.getAdmin();
    // Random suffix isolates this run's topic from earlier runs against the same broker.
    String topicName = "input-topic-multi-schemas-" + UUID.randomUUID();
    // Minimal one-topic application definition; the topic is created on deploy.
    Application applicationInstance =
            ModelBuilder.buildApplicationInstance(
                            Map.of(
                                    "module.yaml",
                                    """
                                    module: "module-1"
                                    id: "pipeline-1"
                                    topics:
                                      - name: %s
                                        creation-mode: create-if-not-exists
                                        partitions: %d
                                    """
                                            .formatted(topicName, numPartitions)),
                            buildInstanceYaml(),
                            null)
                    .getApplication();

    @Cleanup
    ApplicationDeployer deployer =
            ApplicationDeployer.builder()
                    .registry(new ClusterRuntimeRegistry())
                    .pluginsRegistry(new PluginsRegistry())
                    .topicConnectionsRuntimeRegistry(new TopicConnectionsRuntimeRegistry())
                    .build();

    Module module = applicationInstance.getModule("module-1");

    // The execution planner must map the logical topic onto a Kafka topic implementation.
    ExecutionPlan implementation = deployer.createImplementation("app", applicationInstance);
    assertTrue(
            implementation.getConnectionImplementation(
                            module, Connection.fromTopic(TopicDefinition.fromName(topicName)))
                    instanceof KafkaTopic);

    deployer.deploy("tenant", implementation, null);

    // Deploy must have created the topic with the requested partition count.
    Set<String> topics = admin.listTopics().names().get();
    log.info("Topics {}", topics);
    assertTrue(topics.contains(topicName));

    Map<String, TopicDescription> stats = admin.describeTopics(Set.of(topicName)).all().get();
    assertEquals(numPartitions, stats.get(topicName).partitions().size());

    // Undeploying the application removes the topic again.
    deployer.delete("tenant", implementation, null);
    topics = admin.listTopics().names().get();
    log.info("Topics {}", topics);
    assertFalse(topics.contains(topicName));

    StreamingCluster streamingCluster =
            implementation.getApplication().getInstance().streamingCluster();
    KafkaTopicConnectionsRuntime runtime = new KafkaTopicConnectionsRuntime();
    runtime.init(streamingCluster);
    String agentId = "agent-1";
    // NOTE(review): the topic was deleted just above, so the writes below appear to
    // rely on the broker auto-creating the topic — confirm broker configuration.
    try (TopicProducer producer =
            runtime.createProducer(agentId, streamingCluster, Map.of("topic", topicName)); ) {
        producer.start();

        int numIterations = 5;
        for (int i = 0; i < numIterations; i++) {

            // Mixed key/value types on the same topic: (Integer, String) then
            // (String, Integer), with and without headers.
            producer.write(generateRecord(1, "string")).join();
            producer.write(generateRecord("two", 2)).join();

            producer.write(
                            generateRecord(
                                    "two",
                                    2,
                                    new SimpleRecord.SimpleHeader("h1", 7),
                                    new SimpleRecord.SimpleHeader("h2", "bar")))
                    .join();

            producer.write(generateRecord(1, "string")).join();
            producer.write(generateRecord("two", 2)).join();

            producer.write(
                            generateRecord(
                                    "two",
                                    2,
                                    new SimpleRecord.SimpleHeader("h1", 7),
                                    new SimpleRecord.SimpleHeader("h2", "bar")))
                    .join();

            // A fresh consumer per iteration: read back at least the 6 records
            // written in this iteration and commit their offsets.
            try (KafkaConsumerWrapper consumer =
                    (KafkaConsumerWrapper)
                            runtime.createConsumer(
                                    agentId, streamingCluster, Map.of("topic", topicName))) {

                consumer.start();
                List<Record> readFromConsumer = consumeRecords(consumer, 6);
                consumer.commit(readFromConsumer);
            }
        }
    }
}
474
+
373
475
@ NotNull
374
476
private static List <Record > consumeRecords (TopicConsumer consumer , int atLeast ) {
375
477
List <Record > readFromConsumer = new ArrayList <>();
@@ -387,12 +489,17 @@ private static List<Record> consumeRecords(TopicConsumer consumer, int atLeast)
387
489
return readFromConsumer ;
388
490
}
389
491
390
- private static Record generateRecord (String value ) {
492
+ private static Record generateRecord (Object value ) {
493
+ return generateRecord (value , value );
494
+ }
495
+
496
+ private static Record generateRecord (Object key , Object value , Header ... headers ) {
391
497
return SimpleRecord .builder ()
392
- .key (value )
498
+ .key (key )
393
499
.value (value )
394
500
.origin ("origin" )
395
501
.timestamp (System .currentTimeMillis ())
502
+ .headers (headers != null ? Arrays .asList (headers ) : List .of ())
396
503
.build ();
397
504
}
398
505
0 commit comments