@@ -109,11 +109,19 @@ void TRecord::Serialize(IKafkaProtocolWriter* writer, int version) const
        WRITE_KAFKA_FIELD(recordWriter, WriteVarLong, TimestampDelta)
        WRITE_KAFKA_FIELD(recordWriter, WriteVarInt, OffsetDelta)

-       WRITE_KAFKA_FIELD(recordWriter, WriteVarInt, Key.size())
-       WRITE_KAFKA_FIELD(recordWriter, WriteData, Key)
+       if (Key) {
+           WRITE_KAFKA_FIELD(recordWriter, WriteVarInt, Key->size())
+           WRITE_KAFKA_FIELD(recordWriter, WriteData, *Key)
+       } else {
+           WRITE_KAFKA_FIELD(recordWriter, WriteVarInt, -1)
+       }

-       WRITE_KAFKA_FIELD(recordWriter, WriteVarInt, Value.size())
-       WRITE_KAFKA_FIELD(recordWriter, WriteData, Value)
+       if (Value) {
+           WRITE_KAFKA_FIELD(recordWriter, WriteVarInt, Value->size())
+           WRITE_KAFKA_FIELD(recordWriter, WriteData, *Value)
+       } else {
+           WRITE_KAFKA_FIELD(recordWriter, WriteVarInt, -1)
+       }

        WRITE_KAFKA_FIELD(recordWriter, WriteVarInt, Headers.size())
        for (const auto& header : Headers) {
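
Throughout the patch, raw reader->Read*() / writer->Write*() calls give way to
the READ_KAFKA_FIELD / WRITE_KAFKA_FIELD macros. Their definitions are not part
of this diff; judging from the call sites alone, a minimal shape would be the
sketch below (an assumption: the real macros presumably wrap the call with
error context or trace logging):

    // Assumed minimal expansion, inferred only from call sites in this diff.
    // The actual macros are defined elsewhere in the codebase; treat this as
    // illustration, not source.
    #define READ_KAFKA_FIELD(field, method) \
        field = reader->method();
    #define WRITE_KAFKA_FIELD(writer, method, ...) \
        (writer)->method(__VA_ARGS__);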
@@ -131,8 +139,16 @@ void TRecord::Serialize(IKafkaProtocolWriter* writer, int version) const
            writer->WriteInt64(TimestampDelta);
        }

-       writer->WriteBytes(Key);
-       writer->WriteBytes(Value);
+       if (Key) {
+           writer->WriteBytes(*Key);
+       } else {
+           writer->WriteInt32(-1);
+       }
+       if (Value) {
+           writer->WriteBytes(*Value);
+       } else {
+           writer->WriteInt32(-1);
+       }
    } else {
        THROW_ERROR_EXCEPTION("Unsupported Record version %v in serialization", version);
    }
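
In the legacy (pre-v2) message format handled by this branch, a BYTES field is
a fixed big-endian int32 length prefix followed by the payload, and a length of
-1 marks a null field; that is what the WriteInt32(-1) fallback emits. A
standalone sketch of the convention (illustrative names, not the project's
actual writer):

    #include <cstdint>
    #include <optional>
    #include <string>
    #include <vector>

    // Legacy Kafka BYTES encoding: big-endian int32 length prefix, -1 = null.
    void WriteNullableBytes(std::vector<uint8_t>* out, const std::optional<std::string>& value)
    {
        int32_t size = value ? static_cast<int32_t>(value->size()) : -1;
        for (int shift = 24; shift >= 0; shift -= 8) {
            out->push_back(static_cast<uint8_t>(size >> shift));
        }
        if (value) {
            out->insert(out->end(), value->begin(), value->end());
        }
    }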
@@ -153,14 +169,18 @@ void TRecord::Deserialize(IKafkaProtocolReader* reader, int version)

    auto keySize = reader->ReadVarInt();
    YT_LOG_TRACE("Parsing Record (KeySize: %v)", keySize);
-   reader->ReadString(&Key, keySize);
+   if (keySize > 0) {
+       Key = TString{};
+       reader->ReadString(&(*Key), keySize);
+   }

    i32 valueSize;
    READ_KAFKA_FIELD(valueSize, ReadVarInt);

    if (valueSize > 0) {
        YT_LOG_TRACE("Parsing Record (ValueSize: %v)", valueSize);
-       reader->ReadString(&Value, valueSize);
+       Value = TString{};
+       reader->ReadString(&(*Value), valueSize);
    }

    i32 headerCount;
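
The v2 record framing used in the hunks above length-prefixes key and value
with zigzag varints, so the null marker -1 costs a single byte on the wire. A
self-contained sketch of the encoding and its inverse (illustrative, not the
IKafkaProtocolWriter/IKafkaProtocolReader implementation):

    #include <cstdint>
    #include <vector>

    // Zigzag varint as used for record key/value lengths in Kafka message
    // format v2: -1 (null) zigzags to 1 and encodes as the single byte 0x01.
    void WriteVarInt(std::vector<uint8_t>* out, int32_t value)
    {
        auto encoded = (static_cast<uint32_t>(value) << 1) ^ static_cast<uint32_t>(value >> 31);
        while (encoded >= 0x80) {
            out->push_back(static_cast<uint8_t>(encoded) | 0x80);
            encoded >>= 7;
        }
        out->push_back(static_cast<uint8_t>(encoded));
    }

    // Inverse transform: reassemble the 7-bit groups, then undo the zigzag
    // mapping so a wire value of 1 decodes back to the null length -1.
    int32_t ReadVarInt(const uint8_t** pos)
    {
        uint32_t encoded = 0;
        int shift = 0;
        uint8_t byte;
        do {
            byte = *(*pos)++;
            encoded |= static_cast<uint32_t>(byte & 0x7F) << shift;
            shift += 7;
        } while (byte & 0x80);
        return static_cast<int32_t>(encoded >> 1) ^ -static_cast<int32_t>(encoded & 1);
    }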
@@ -329,13 +349,13 @@ void TRspApiVersions::Serialize(IKafkaProtocolWriter* writer, int apiVersion) const
void TReqMetadataTopic::Deserialize(IKafkaProtocolReader* reader, int apiVersion)
{
    if (apiVersion >= 10) {
-       TopicId = reader->ReadUuid();
+       READ_KAFKA_FIELD(TopicId, ReadUuid)
    }

    if (apiVersion < 9) {
-       Topic = reader->ReadString();
+       READ_KAFKA_FIELD(Name, ReadString)
    } else {
-       Topic = reader->ReadCompactString();
+       READ_KAFKA_FIELD(Name, ReadCompactString)
    }
    if (apiVersion >= 9) {
        NKafka::Deserialize(TagBuffer, reader, /*isCompact*/ true);
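
The apiVersion >= 9 branches switch to the flexible-version encodings from
KIP-482: a compact string stores its length as an unsigned varint of
(size + 1), so 0 denotes null and short strings need only one length byte. A
sketch of the non-null case (illustrative, not the project's actual
WriteCompactString):

    #include <cstdint>
    #include <string>
    #include <vector>

    // KIP-482 compact string: unsigned varint of (length + 1), then the bytes.
    void WriteCompactString(std::vector<uint8_t>* out, const std::string& value)
    {
        auto lengthPlusOne = static_cast<uint32_t>(value.size()) + 1;
        while (lengthPlusOne >= 0x80) {
            out->push_back(static_cast<uint8_t>(lengthPlusOne) | 0x80);
            lengthPlusOne >>= 7;
        }
        out->push_back(static_cast<uint8_t>(lengthPlusOne));
        out->insert(out->end(), value.begin(), value.end());
    }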
@@ -347,14 +367,14 @@ void TReqMetadata::Deserialize(IKafkaProtocolReader* reader, int apiVersion)
    NKafka::Deserialize(Topics, reader, apiVersion >= 9, apiVersion);

    if (apiVersion >= 4) {
-       AllowAutoTopicCreation = reader->ReadBool();
+       READ_KAFKA_FIELD(AllowAutoTopicCreation, ReadBool)
    }

    if (apiVersion >= 8) {
        if (apiVersion <= 10) {
-           IncludeClusterAuthorizedOperations = reader->ReadBool();
+           READ_KAFKA_FIELD(IncludeClusterAuthorizedOperations, ReadBool)
        }
-       IncludeTopicAuthorizedOperations = reader->ReadBool();
+       READ_KAFKA_FIELD(IncludeTopicAuthorizedOperations, ReadBool)
    }

    if (apiVersion >= 9) {
@@ -364,11 +384,19 @@ void TReqMetadata::Deserialize(IKafkaProtocolReader* reader, int apiVersion)

void TRspMetadataBroker::Serialize(IKafkaProtocolWriter* writer, int apiVersion) const
{
-   writer->WriteInt32(NodeId);
-   writer->WriteString(Host);
-   writer->WriteInt32(Port);
+   WRITE_KAFKA_FIELD(writer, WriteInt32, NodeId)
+   if (apiVersion < 9) {
+       WRITE_KAFKA_FIELD(writer, WriteString, Host)
+   } else {
+       WRITE_KAFKA_FIELD(writer, WriteCompactString, Host)
+   }
+   WRITE_KAFKA_FIELD(writer, WriteInt32, Port)
    if (apiVersion >= 1) {
-       writer->WriteNullableString(Rack);
+       if (apiVersion < 9) {
+           WRITE_KAFKA_FIELD(writer, WriteNullableString, Rack)
+       } else {
+           WRITE_KAFKA_FIELD(writer, WriteCompactNullableString, Rack)
+       }
    }
    if (apiVersion >= 9) {
        NKafka::Serialize(TagBuffer, writer, /*isCompact*/ true);
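
Each flexible-version branch ends with NKafka::Serialize(TagBuffer, writer,
/*isCompact*/ true): flexible versions (v9+) append a KIP-482 tagged-field
section, and when no tagged fields are set it degenerates to a single byte. A
sketch of that empty case (illustrative helper, assumed behavior of TagBuffer
serialization):

    #include <cstdint>
    #include <vector>

    // An empty KIP-482 tagged-field section is just the field count as an
    // unsigned varint: a single 0x00 byte when no tagged fields are present.
    void WriteEmptyTagBuffer(std::vector<uint8_t>* out)
    {
        out->push_back(0x00);
    }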
@@ -377,45 +405,70 @@ void TRspMetadataBroker::Serialize(IKafkaProtocolWriter* writer, int apiVersion)

void TRspMetadataTopicPartition::Serialize(IKafkaProtocolWriter* writer, int apiVersion) const
{
-   writer->WriteErrorCode(ErrorCode);
-   writer->WriteInt32(PartitionIndex);
-   writer->WriteInt32(LeaderId);
+   WRITE_KAFKA_FIELD(writer, WriteErrorCode, ErrorCode)
+   WRITE_KAFKA_FIELD(writer, WriteInt32, PartitionIndex)
+   WRITE_KAFKA_FIELD(writer, WriteInt32, LeaderId)
+
    // TODO(nadya73): check version.
    writer->WriteInt32(ReplicaNodes.size());
    for (auto replicaNode : ReplicaNodes) {
        writer->WriteInt32(replicaNode);
    }
-   // TODO(nadya73): check version.
+   // TODO(nadya73): check version.
    writer->WriteInt32(IsrNodes.size());
    for (auto isrNode : IsrNodes) {
        writer->WriteInt32(isrNode);
    }
+   if (apiVersion >= 5) {
+       // TODO(nadya73): check version.
+       writer->WriteInt32(OfflineReplicas.size());
+       for (auto offlineReplica : OfflineReplicas) {
+           writer->WriteInt32(offlineReplica);
+       }
+   }
    if (apiVersion >= 9) {
        NKafka::Serialize(TagBuffer, writer, /*isCompact*/ true);
    }
}

void TRspMetadataTopic::Serialize(IKafkaProtocolWriter* writer, int apiVersion) const
{
-   writer->WriteErrorCode(ErrorCode);
-   writer->WriteString(Name);
+   WRITE_KAFKA_FIELD(writer, WriteErrorCode, ErrorCode)
+   if (apiVersion < 9) {
+       WRITE_KAFKA_FIELD(writer, WriteString, Name)
+   } else {
+       WRITE_KAFKA_FIELD(writer, WriteCompactString, Name)
+   }
+   if (apiVersion >= 10) {
+       WRITE_KAFKA_FIELD(writer, WriteUuid, TopicId)
+   }
    if (apiVersion >= 1) {
-       writer->WriteBool(IsInternal);
+       WRITE_KAFKA_FIELD(writer, WriteBool, IsInternal)
    }
    NKafka::Serialize(Partitions, writer, apiVersion >= 9, apiVersion);
+   if (apiVersion >= 8) {
+       WRITE_KAFKA_FIELD(writer, WriteInt32, TopicAuthorizedOperations)
+   }
    if (apiVersion >= 9) {
        NKafka::Serialize(TagBuffer, writer, /*isCompact*/ true);
    }
}

void TRspMetadata::Serialize(IKafkaProtocolWriter* writer, int apiVersion) const
{
+   if (apiVersion >= 3) {
+       WRITE_KAFKA_FIELD(writer, WriteInt32, ThrottleTimeMs)
+   }
    NKafka::Serialize(Brokers, writer, apiVersion >= 9, apiVersion);
    if (apiVersion >= 2) {
-       writer->WriteNullableString(ClusterId);
+       if (apiVersion < 9) {
+           WRITE_KAFKA_FIELD(writer, WriteNullableString, ClusterId)
+       } else {
+           WRITE_KAFKA_FIELD(writer, WriteCompactNullableString, ClusterId)
+       }
    }
    if (apiVersion >= 1) {
-       writer->WriteInt32(ControllerId);
+       WRITE_KAFKA_FIELD(writer, WriteInt32, ControllerId)
    }
    NKafka::Serialize(Topics, writer, apiVersion >= 9, apiVersion);
    if (apiVersion >= 9) {
@@ -841,52 +894,52 @@ void TRspProduce::Serialize(IKafkaProtocolWriter* writer, int apiVersion) const

void TReqListOffsetsTopicPartition::Deserialize(IKafkaProtocolReader* reader, int /*apiVersion*/)
{
-   PartitionIndex = reader->ReadInt32();
-   Timestamp = reader->ReadInt64(); // TODO: use timestamp?
-   MaxNumOffsets = reader->ReadInt32();
+   READ_KAFKA_FIELD(PartitionIndex, ReadInt32)
+   READ_KAFKA_FIELD(Timestamp, ReadInt64) // TODO: use timestamp?
}

void TReqListOffsetsTopic::Deserialize(IKafkaProtocolReader* reader, int apiVersion)
{
-   Name = reader->ReadString();
-   Partitions.resize(reader->ReadInt32());
+   READ_KAFKA_FIELD(Name, ReadString)
+   i32 partitionCount;
+   READ_KAFKA_FIELD(partitionCount, ReadInt32)
+   Partitions.resize(partitionCount);
    for (auto& partition : Partitions) {
        partition.Deserialize(reader, apiVersion);
    }
}

void TReqListOffsets::Deserialize(IKafkaProtocolReader* reader, int apiVersion)
{
-   ReplicaId = reader->ReadInt32();
-   Topics.resize(reader->ReadInt32());
+   READ_KAFKA_FIELD(ReplicaId, ReadInt32)
+   i32 topicCount;
+   READ_KAFKA_FIELD(topicCount, ReadInt32)
+   Topics.resize(topicCount);
    for (auto& topic : Topics) {
        topic.Deserialize(reader, apiVersion);
    }
}

-void TRspListOffsetsTopicPartition::Serialize(IKafkaProtocolWriter* writer, int apiVersion) const
+void TRspListOffsetsTopicPartition::Serialize(IKafkaProtocolWriter* writer, int /*apiVersion*/) const
{
-   writer->WriteInt32(PartitionIndex);
-   writer->WriteErrorCode(ErrorCode);
-
-   if (apiVersion <= 0) {
-       writer->WriteInt32(1); // Size of 'old_style_offsets'.
-   }
-   writer->WriteInt64(Offset);
+   WRITE_KAFKA_FIELD(writer, WriteInt32, PartitionIndex)
+   WRITE_KAFKA_FIELD(writer, WriteErrorCode, ErrorCode)
+   WRITE_KAFKA_FIELD(writer, WriteInt64, Timestamp)
+   WRITE_KAFKA_FIELD(writer, WriteInt64, Offset)
}

void TRspListOffsetsTopic::Serialize(IKafkaProtocolWriter* writer, int apiVersion) const
{
-   writer->WriteString(Name);
-   writer->WriteInt32(Partitions.size());
+   WRITE_KAFKA_FIELD(writer, WriteString, Name)
+   WRITE_KAFKA_FIELD(writer, WriteInt32, Partitions.size())
    for (const auto& partition : Partitions) {
        partition.Serialize(writer, apiVersion);
    }
}

void TRspListOffsets::Serialize(IKafkaProtocolWriter* writer, int apiVersion) const
{
-   writer->WriteInt32(Topics.size());
+   WRITE_KAFKA_FIELD(writer, WriteInt32, Topics.size())
    for (const auto& topic : Topics) {
        topic.Serialize(writer, apiVersion);
    }
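
After the rework, TRspListOffsetsTopicPartition always emits the ListOffsets
v1 partition layout (partition_index: int32, error_code: int16, timestamp:
int64, offset: int64) and drops the v0-only old_style_offsets count. An
illustrative way to exercise the new path; the writer setup, the container
types, and the exact error-code enumerator are assumptions:

    TRspListOffsetsTopicPartition partition;
    partition.PartitionIndex = 0;
    partition.ErrorCode = EErrorCode::None; // assumed enumerator name
    partition.Timestamp = -1;               // -1: no timestamp tracked
    partition.Offset = 42;

    TRspListOffsetsTopic topic;
    topic.Name = "my-topic";
    topic.Partitions = {partition};

    TRspListOffsets response;
    response.Topics = {topic};
    response.Serialize(writer, /*apiVersion*/ 1);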