
Commit 6077958

latency on perftest
Signed-off-by: Gabriele Santomaggio <G.santomaggio@gmail.com>
1 parent 3b0dc4c

File tree: 4 files changed (+102, -45 lines)

  .github/workflows/build_and_test.yml
  examples/tls/getting_started_tls.go
  perfTest/cmd/commands.go
  perfTest/cmd/silent.go


.github/workflows/build_and_test.yml

Lines changed: 20 additions & 0 deletions
@@ -87,3 +87,23 @@ jobs:
       - name: Install GNU make
         run: choco install make
       - run: make test GO_VERSION=${{ steps.setup_go.outputs.go-version }}
+  publish:
+    runs-on: ubuntu-latest
+    needs: [test]
+    steps:
+      - uses: docker/setup-buildx-action@v2
+      - uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+      - uses: actions/checkout@v3
+      - name: Publish Docker Image
+        run: |
+          set -x
+          VERSION=latest
+          export VERSION
+          if [[ ! $GITHUB_REF =~ "/tags/" ]]
+          then
+            VERSION=dev
+          fi
+          make perf-test-docker-push
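The tag choice in the publish step is driven by GITHUB_REF: a branch push (e.g. refs/heads/main) does not contain "/tags/", so VERSION is overridden to dev, while a release ref such as refs/tags/v1.2.3 matches and keeps VERSION=latest. The build and push themselves happen in the Makefile's perf-test-docker-push target, which presumably picks up the exported VERSION.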

examples/tls/getting_started_tls.go

Lines changed: 27 additions & 14 deletions
@@ -4,7 +4,6 @@ import (
 	"bufio"
 	"crypto/tls"
 	"fmt"
-	"github.com/google/uuid"
 	"github.com/rabbitmq/rabbitmq-stream-go-client/pkg/amqp"
 	"github.com/rabbitmq/rabbitmq-stream-go-client/pkg/logs"
 	"github.com/rabbitmq/rabbitmq-stream-go-client/pkg/stream"
@@ -48,13 +47,18 @@ func main() {
 	fmt.Println("Getting started with Streaming TLS client for RabbitMQ")
 	fmt.Println("Connecting to RabbitMQ streaming ...")

+	addressResolver := stream.AddressResolver{
+		Host: "35.234.132.231",
+		Port: 5551,
+	}
 	// Connect to the broker ( or brokers )
 	env, err := stream.NewEnvironment(
 		stream.NewEnvironmentOptions().
-			SetHost("localhost").
-			SetPort(5551). // standard TLS port
-			SetUser("guest").
-			SetPassword("guest").
+			SetAddressResolver(addressResolver).
+			SetPort(addressResolver.Port). // standard TLS port
+			SetHost(addressResolver.Host).
+			SetUser("remote").
+			SetPassword("remote").
 			IsTLS(true).
 			// use tls.Config to customize the TLS configuration
 			// for tests you may need InsecureSkipVerify: true
@@ -73,12 +77,12 @@ func main() {
 	// err = env.DeclareStream(streamName, nil)
 	// it is the best practise to define a size, 1GB for example:

-	streamName := uuid.New().String()
-	err = env.DeclareStream(streamName,
-		&stream.StreamOptions{
-			MaxLengthBytes: stream.ByteCapacity{}.GB(2),
-		},
-	)
+	streamName := "perf-test-go"
+	//err = env.DeclareStream(streamName,
+	//	&stream.StreamOptions{
+	//		MaxLengthBytes: stream.ByteCapacity{}.GB(2),
+	//	},
+	//)

 	CheckErr(err)

@@ -92,7 +96,7 @@ func main() {

 	// the send method automatically aggregates the messages
 	// based on batch size
-	for i := 0; i < 1000; i++ {
+	for i := 0; i < 10; i++ {
 		err := producer.Send(amqp.NewMessage([]byte("hello_world_" + strconv.Itoa(i))))
 		CheckErr(err)
 	}
@@ -107,8 +111,17 @@ func main() {
 	//
 	//}, nil)
 	// if you need to track the offset you need a consumer name like:
+	consumed := 0
 	handleMessages := func(consumerContext stream.ConsumerContext, message *amqp.Message) {
-		fmt.Printf("consumer name: %s, text: %s \n ", consumerContext.Consumer.GetName(), message.Data)
+
+		consumed++
+		if consumed%1000 == 0 {
+
+			fmt.Printf("name: %s, offset %d, chunk entities count: %d, total: %d \n ",
+				consumerContext.Consumer.GetName(), consumerContext.Consumer.GetOffset(), consumerContext.GetEntriesCount(), consumed)
+
+		}
+
 	}

 	consumer, err := env.NewConsumer(
@@ -128,7 +141,7 @@ func main() {
 	err = consumer.Close()
 	time.Sleep(200 * time.Millisecond)
 	CheckErr(err)
-	err = env.DeleteStream(streamName)
+	//err = env.DeleteStream(streamName)
 	CheckErr(err)
 	err = env.Close()
 	CheckErr(err)
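Note the switch from SetHost("localhost") to a stream.AddressResolver: the resolver pins the client to a single externally reachable endpoint, which is typically how the client is pointed at a broker that sits behind a load balancer or NAT and advertises internal hostnames. A minimal standalone sketch of the same setup, with the IP address and remote/remote credentials from the diff replaced by placeholders:

// Sketch of the TLS + AddressResolver connection used in the example above.
// Host and credentials are placeholders, not a real endpoint.
package main

import (
	"crypto/tls"
	"log"

	"github.com/rabbitmq/rabbitmq-stream-go-client/pkg/stream"
)

func main() {
	// Pin every connection to one externally reachable endpoint
	// (e.g. a load balancer in front of the cluster).
	resolver := stream.AddressResolver{
		Host: "stream.example.com", // placeholder
		Port: 5551,                 // standard TLS port
	}
	env, err := stream.NewEnvironment(
		stream.NewEnvironmentOptions().
			SetAddressResolver(resolver).
			SetHost(resolver.Host).
			SetPort(resolver.Port).
			SetUser("guest").     // placeholder credentials
			SetPassword("guest"). // placeholder credentials
			IsTLS(true).
			SetTLSConfig(&tls.Config{}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer env.Close()
}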

perfTest/cmd/commands.go

Lines changed: 2 additions & 0 deletions
@@ -46,6 +46,7 @@ var (
 	crcCheck       bool
 	runDuration    int
 	initialCredits int
+	isBatchSend    bool
 )

 func init() {
@@ -76,6 +77,7 @@ func setupCli(baseCmd *cobra.Command) {
 	baseCmd.PersistentFlags().StringVarP(&maxSegmentSizeBytes, "stream-max-segment-size-bytes", "", "500MB", "Stream segment size bytes, e.g. 10MB, 1GB, etc.")
 	baseCmd.PersistentFlags().StringVarP(&consumerOffset, "consumer-offset", "", "first", "Staring consuming, ex: first,last,next or random")
 	baseCmd.PersistentFlags().IntVarP(&initialCredits, "initial-credits", "", 10, "Consumer initial credits")
+	baseCmd.PersistentFlags().BoolVarP(&isBatchSend, "batch-send", "", false, "Enable batch send")
 	baseCmd.AddCommand(versionCmd)
 	baseCmd.AddCommand(newSilent())
 }
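The new batch-send flag is only registered here; it is consumed by the publisher loop in perfTest/cmd/silent.go (next file), which switches from per-message Send calls to a single BatchSend per batch. A possible invocation through the silent subcommand (the go run entry point is an assumption, not part of this diff):

go run perfTest/perftest.go silent --batch-send --initial-credits 10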

perfTest/cmd/silent.go

Lines changed: 53 additions & 31 deletions
@@ -36,8 +36,10 @@ func newSilent() *cobra.Command {
 }

 var (
-	publisherMessageCount    int32
-	consumerMessageCount     int32
+	publisherMessageCount int32
+	consumerMessageCount  int32
+	//consumerMessageCountPerLatency int32
+	totalLatency             int64
 	confirmedMessageCount    int32
 	notConfirmedMessageCount int32
 	consumersCloseCount      int32
@@ -77,9 +79,15 @@ func printStats() {

 		PMessagesPerSecond := float64(atomic.LoadInt32(&publisherMessageCount)) / float64(v) * 1000
 		CMessagesPerSecond := float64(atomic.LoadInt32(&consumerMessageCount)) / float64(v) * 1000
+		//latency := float64(totalLatency) / float64(atomic.LoadInt32(&consumerMessageCount))
+		averageLatency := int64(0)
+		if atomic.LoadInt32(&consumerMessageCount) > 0 {
+			averageLatency = totalLatency / int64(atomic.LoadInt32(&consumerMessageCount))
+		}
+
 		ConfirmedMessagesPerSecond := float64(atomic.LoadInt32(&confirmedMessageCount)) / float64(v) * 1000
-		logInfo("Published %8.1f msg/s | Confirmed %8.1f msg/s | Consumed %8.1f msg/s | %3v | %3v | msg sent: %3v |",
-			PMessagesPerSecond, ConfirmedMessagesPerSecond, CMessagesPerSecond, decodeRate(), decodeBody(), atomic.LoadInt64(&messagesSent))
+		logInfo("Published %8.1f msg/s | Confirmed %8.1f msg/s | Consumed %8.1f msg/s | %3v | %3v | msg sent: %3v | latency: %d ms",
+			PMessagesPerSecond, ConfirmedMessagesPerSecond, CMessagesPerSecond, decodeRate(), decodeBody(), atomic.LoadInt64(&messagesSent), averageLatency)
 	}
 }

@@ -273,28 +281,9 @@ func startPublisher(streamName string) error {
 		return err
 	}

-	var arr []message.StreamMessage
-	var body []byte
-	for z := 0; z < batchSize; z++ {
-
-		if fixedBody > 0 {
-			body = make([]byte, fixedBody)
-		} else {
-			if variableBody > 0 {
-				rand.Seed(time.Now().UnixNano())
-				body = make([]byte, rand.Intn(variableBody))
-			}
-		}
-		n := time.Now().UnixNano()
-		var buff = make([]byte, 8)
-		binary.BigEndian.PutUint64(buff, uint64(n))
-		/// added to calculate the latency
-		msg := amqp.NewMessage(append(buff, body...))
-		arr = append(arr, msg)
-	}
-
-	go func(prod *ha.ReliableProducer, messages []message.StreamMessage) {
+	go func(prod *ha.ReliableProducer) {
 		for {
+
 			if rate > 0 {
 				rateWithBatchSize := float64(rate) / float64(batchSize)
 				sleepAfterMessage := float64(time.Second) / rateWithBatchSize
@@ -313,21 +302,50 @@ func startPublisher(streamName string) error {
 				}
 				time.Sleep(time.Duration(sleep) * time.Millisecond)
 			}
+			messages := buildMessages()

-			atomic.AddInt64(&messagesSent, int64(len(arr)))
-			for _, streamMessage := range arr {
-				err = prod.Send(streamMessage)
+			atomic.AddInt64(&messagesSent, int64(len(messages)))
+			if isBatchSend {
+				err = prod.BatchSend(messages)
 				checkErr(err)
+			} else {
+				for _, streamMessage := range messages {
+					err = prod.Send(streamMessage)
+					checkErr(err)
+				}
 			}
-			atomic.AddInt32(&publisherMessageCount, int32(len(arr)))
+
+			atomic.AddInt32(&publisherMessageCount, int32(len(messages)))

 		}
-	}(rPublisher, arr)
+	}(rPublisher)

 	return nil

 }

+func buildMessages() []message.StreamMessage {
+	var arr []message.StreamMessage
+	for z := 0; z < batchSize; z++ {
+		//var body []byte
+		if fixedBody > 0 {
+			// body = make([]byte, fixedBody)
+		} else {
+			if variableBody > 0 {
+				rand.Seed(time.Now().UnixNano())
+				// body = make([]byte, rand.Intn(variableBody))
+			}
+		}
+		var buff = make([]byte, 8)
+		sentTime := time.Now().UnixMilli()
+		binary.BigEndian.PutUint64(buff, uint64(sentTime))
+		/// added to calculate the latency
+		msg := amqp.NewMessage(buff)
+		arr = append(arr, msg)
+	}
+	return arr
+}
+
 func startPublishers() error {

 	logInfo("Starting %d publishers...", publishers)
@@ -362,8 +380,12 @@ func handleConsumerClose(channelClose stream.ChannelClose) {
 func startConsumer(consumerName string, streamName string) error {

 	handleMessages := func(consumerContext stream.ConsumerContext, message *amqp.Message) {
-		atomic.AddInt32(&consumerMessageCount, 1)

+		sentTime := binary.BigEndian.Uint64(message.GetData()[:8]) // Decode the timestamp
+		startTimeFromMessage := time.UnixMilli(int64(sentTime))
+		latency := time.Now().Sub(startTimeFromMessage).Milliseconds()
+		totalLatency += latency
+		atomic.AddInt32(&consumerMessageCount, 1)
 	}
 	offsetSpec := stream.OffsetSpecification{}.Last()
 	switch consumerOffset {
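The latency measurement itself is straightforward: buildMessages stamps each payload with the publish time as 8 big-endian bytes of UnixMilli, and the consumer callback decodes those bytes to compute the per-message delay that feeds the averageLatency column in printStats. A self-contained sketch of that round trip, independent of the stream client:

// Standalone sketch of the timestamp round trip behind the latency column:
// the first 8 bytes of each payload carry the publish time as big-endian UnixMilli.
package main

import (
	"encoding/binary"
	"fmt"
	"time"
)

// encodeSentTime builds a payload whose first 8 bytes are the current time.
func encodeSentTime() []byte {
	buff := make([]byte, 8)
	binary.BigEndian.PutUint64(buff, uint64(time.Now().UnixMilli()))
	return buff
}

// latencyMillis decodes the timestamp and returns the elapsed milliseconds.
func latencyMillis(payload []byte) int64 {
	sent := time.UnixMilli(int64(binary.BigEndian.Uint64(payload[:8])))
	return time.Since(sent).Milliseconds()
}

func main() {
	payload := encodeSentTime()      // what buildMessages does on the publish side
	time.Sleep(5 * time.Millisecond) // stand-in for broker/network delay
	fmt.Printf("observed latency: %d ms\n", latencyMillis(payload))
}

One caveat: totalLatency is bumped with a plain += inside the consumer callback while consumerMessageCount uses atomic.AddInt32, so with more than one consumer the callbacks run on separate goroutines and the running sum (and the printed average) can race; an atomic.AddInt64 on totalLatency would keep it exact.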
