
Commit 83de186

Merge pull request #54 from itzmeanjan/develop
Improve chain reorganisation handling, pubsub broker management flow

2 parents: 8946574 + 8289ecb

33 files changed: +1290 −694 lines

README.md

Lines changed: 11 additions & 9 deletions

```diff
@@ -110,15 +110,17 @@ cd ette
 - Set `Production` to `yes` before running it in production; otherwise you can simply skip it
 - `ette` can be run in any of 👇 5 possible modes, which can be set by `EtteMode`
 
-```json
-{
-    "1": "Only Historical Data Query Allowed",
-    "2": "Only Real-time Subscription Allowed",
-    "3": "Both Historical Data Query & Real-time Subscription Allowed",
-    "4": "Attempt to take snapshot from data in backing DB",
-    "5": "Attempt to restore data from snapshot file"
-}
-```
+---
+
+EtteMode | Interpretation
+--- | ---
+1 | Only Historical Data Query Allowed
+2 | Only Real-time Subscription Allowed
+3 | Both Historical Data Query & Real-time Subscription Allowed
+4 | Attempt to take snapshot from data in backing DB
+5 | Attempt to restore data from snapshot file
+
+---
 
 - For testing historical data query using browser based GraphQL Playground in `ette`, you can set `EtteGraphQLPlayGround` to `yes` in config file
 - For processing block(s)/ tx(s) concurrently, it'll create `ConcurrencyFactor * #-of CPUs on machine` workers, who will pick up jobs submitted to them.
```

app/block/block.go

Lines changed: 22 additions & 22 deletions

```diff
@@ -64,17 +64,6 @@ func ProcessBlockContent(client *ethclient.Client, block *types.Block, _db *gorm
 
    }
 
-   if !HasBlockFinalized(status, packedBlock.Block.Number) {
-
-       log.Print(color.LightRed.Sprintf("[x] Non-final block %d with 0 tx(s) [ Took : %s | Latest Block : %d | In Queue : %d ]", packedBlock.Block.Number, time.Now().UTC().Sub(startingAt), status.GetLatestBlockNumber(), GetUnfinalizedQueueLength(redis)))
-
-       // Pushing into unfinalized block queue, to be picked up only when
-       // finality for this block has been achieved
-       PushBlockIntoUnfinalizedQueue(redis, fmt.Sprintf("%d", packedBlock.Block.Number))
-       return true
-
-   }
-
    // If block doesn't contain any tx, we'll attempt to persist only block
    if err := db.StoreBlock(_db, packedBlock, status); err != nil {
 
@@ -86,6 +75,17 @@ func ProcessBlockContent(client *ethclient.Client, block *types.Block, _db *gorm
 
    }
 
+   if !HasBlockFinalized(status, packedBlock.Block.Number) {
+
+       log.Print(color.LightRed.Sprintf("[x] Non-final block %d with 0 tx(s) [ Took : %s | Latest Block : %d | In Queue : %d ]", packedBlock.Block.Number, time.Now().UTC().Sub(startingAt), status.GetLatestBlockNumber(), GetUnfinalizedQueueLength(redis)))
+
+       // Pushing into unfinalized block queue, to be picked up only when
+       // finality for this block has been achieved
+       PushBlockIntoUnfinalizedQueue(redis, fmt.Sprintf("%d", packedBlock.Block.Number))
+       return true
+
+   }
+
    // Successfully processed block
    log.Print(color.Green.Sprintf("[+] Block %d with 0 tx(s) [ Took : %s ]", block.NumberU64(), time.Now().UTC().Sub(startingAt)))
    status.IncrementBlocksProcessed()
@@ -187,17 +187,6 @@ func ProcessBlockContent(client *ethclient.Client, block *types.Block, _db *gorm
 
    }
 
-   if !HasBlockFinalized(status, packedBlock.Block.Number) {
-
-       log.Print(color.LightRed.Sprintf("[x] Non-final block %d with %d tx(s) [ Took : %s | Latest Block : %d | In Queue : %d ]", packedBlock.Block.Number, block.Transactions().Len(), time.Now().UTC().Sub(startingAt), status.GetLatestBlockNumber(), GetUnfinalizedQueueLength(redis)))
-
-       // Pushing into unfinalized block queue, to be picked up only when
-       // finality for this block has been achieved
-       PushBlockIntoUnfinalizedQueue(redis, fmt.Sprintf("%d", packedBlock.Block.Number))
-       return true
-
-   }
-
    // If block doesn't contain any tx, we'll attempt to persist only block
    if err := db.StoreBlock(_db, packedBlock, status); err != nil {
 
@@ -209,6 +198,17 @@ func ProcessBlockContent(client *ethclient.Client, block *types.Block, _db *gorm
 
    }
 
+   if !HasBlockFinalized(status, packedBlock.Block.Number) {
+
+       log.Print(color.LightRed.Sprintf("[x] Non-final block %d with %d tx(s) [ Took : %s | Latest Block : %d | In Queue : %d ]", packedBlock.Block.Number, block.Transactions().Len(), time.Now().UTC().Sub(startingAt), status.GetLatestBlockNumber(), GetUnfinalizedQueueLength(redis)))
+
+       // Pushing into unfinalized block queue, to be picked up only when
+       // finality for this block has been achieved
+       PushBlockIntoUnfinalizedQueue(redis, fmt.Sprintf("%d", packedBlock.Block.Number))
+       return true
+
+   }
+
    // Successfully processed block
    log.Print(color.Green.Sprintf("[+] Block %d with %d tx(s) [ Took : %s ]", block.NumberU64(), block.Transactions().Len(), time.Now().UTC().Sub(startingAt)))
 
```
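
The net effect of these four hunks is an ordering change: a block is now persisted via `db.StoreBlock` first and only then checked for finality, so a non-final block is both stored and queued for a later re-check instead of being deferred before storage. A condensed sketch of the resulting flow; the closure parameters below are illustrative stand-ins, not ette's actual signature:

```go
// processPackedBlock is an illustrative condensation of the reordered logic in
// ProcessBlockContent; `persist` and `enqueueUnfinalized` stand in for
// db.StoreBlock and PushBlockIntoUnfinalizedQueue respectively.
func processPackedBlock(persist func() error, finalized bool, enqueueUnfinalized func()) bool {

	// 1. Persist the packed block first, regardless of finality
	if err := persist(); err != nil {
		return false
	}

	// 2. Non-final blocks are queued to be picked up again once finality is reached
	if !finalized {
		enqueueUnfinalized()
		return true
	}

	// 3. Finalized and stored: processing is complete
	return true
}
```
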
app/block/listener.go

Lines changed: 37 additions & 3 deletions

```diff
@@ -42,12 +42,46 @@ func SubscribeToNewBlocks(connection *d.BlockChainNodeConnection, _db *gorm.DB,
    for {
        select {
        case err := <-subs.Err():
+
            log.Fatal(color.Red.Sprintf("[!] Listener stopped : %s", err.Error()))
-           break
+
        case header := <-headerChan:
 
-           // Latest block number seen, is getting safely updated, as
-           // soon as new block mined data gets propagated to network
+           // At very beginning iteration, newly mined block number
+           // should be greater than max block number obtained from DB
+           if first && !(header.Number.Uint64() > status.MaxBlockNumberAtStartUp()) {
+
+               log.Fatal(color.Red.Sprintf("[!] Bad block received : expected > `%d`\n", status.MaxBlockNumberAtStartUp()))
+
+           }
+
+           // At any iteration other than first one, if received block number
+           // is more than latest block number + 1, it's definite that we've some
+           // block ( >=1 ) missed & the RPC node we're relying on might be feeding us with
+           // wrong data
+           //
+           // It's better stop relying on it, we crash the program
+           // @note This is not the state-of-the art solution, but this is it, as of now
+           // It can be improved.
+           if !first && header.Number.Uint64() > status.GetLatestBlockNumber()+1 {
+
+               log.Fatal(color.Red.Sprintf("[!] Bad block received %d, expected %d", header.Number.Uint64(), status.GetLatestBlockNumber()))
+
+           }
+
+           // At any iteration other than first one, if received block number
+           // not exactly current latest block number + 1, then it's probably one
+           // reorganization, we'll attempt to process this new block
+           if !first && !(header.Number.Uint64() == status.GetLatestBlockNumber()+1) {
+
+               log.Printf(color.Blue.Sprintf("[*] Received block %d again, expected %d, attempting to process", header.Number.Uint64(), status.GetLatestBlockNumber()+1))
+
+           } else {
+
+               log.Printf(color.Blue.Sprintf("[*] Received block %d, attempting to process", header.Number.Uint64()))
+
+           }
+
            status.SetLatestBlockNumber(header.Number.Uint64())
 
            if first {
```
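
The listener now sanity-checks every incoming header number before updating the latest-block marker: the first header must exceed the DB's max block at startup, a jump past `latest + 1` means blocks were missed (and the process crashes), and anything else that isn't exactly `latest + 1` is treated as a probable chain reorganisation and re-processed. A self-contained sketch of that decision procedure, using plain values in place of the `status` holder; the names here are illustrative, not ette's API:

```go
package main

import "fmt"

// classifyHeader mirrors the checks added in SubscribeToNewBlocks; it is an
// illustrative sketch, not ette's actual code.
func classifyHeader(first bool, received, maxAtStartup, latest uint64) (string, error) {

	// On the very first header, anything at or below the DB's max block is suspect
	if first && received <= maxAtStartup {
		return "", fmt.Errorf("bad block %d: expected > %d", received, maxAtStartup)
	}

	// Later on, jumping ahead of latest+1 means at least one block was missed:
	// the RPC node is treated as unreliable and the program stops
	if !first && received > latest+1 {
		return "", fmt.Errorf("bad block %d: expected %d", received, latest+1)
	}

	// A repeated / lower block number is treated as a probable chain
	// reorganisation and is processed again
	if !first && received != latest+1 {
		return "reorg: process again", nil
	}

	return "process", nil
}

func main() {
	// Block 100 arrives again while 100 is already the latest: classified as a reorg
	fmt.Println(classifyHeader(false, 100, 0, 100))
}
```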

app/block/pack_block.go

Lines changed: 5 additions & 1 deletion

```diff
@@ -1,6 +1,7 @@
 package block
 
 import (
+   "github.com/ethereum/go-ethereum/common/hexutil"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/itzmeanjan/ette/app/db"
 )
@@ -19,11 +20,14 @@ func BuildPackedBlock(block *types.Block, txs []*db.PackedTransaction) *db.Packe
        Difficulty:          block.Difficulty().String(),
        GasUsed:             block.GasUsed(),
        GasLimit:            block.GasLimit(),
-       Nonce:               block.Nonce(),
+       Nonce:               hexutil.EncodeUint64(block.Nonce()),
        Miner:               block.Coinbase().Hex(),
        Size:                float64(block.Size()),
+       StateRootHash:       block.Root().Hex(),
+       UncleHash:           block.UncleHash().Hex(),
        TransactionRootHash: block.TxHash().Hex(),
        ReceiptRootHash:     block.ReceiptHash().Hex(),
+       ExtraData:           block.Extra(),
    }
    packedBlock.Transactions = txs
 
```
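
`hexutil.EncodeUint64` comes from go-ethereum and renders a 64-bit value as a `0x`-prefixed hex string, which is how the packed block now carries the nonce instead of a raw `uint64`. A minimal illustration; the nonce value below is arbitrary:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
	// An arbitrary example nonce, not taken from a real block
	var nonce uint64 = 0x7bb9369dcbaec019

	// EncodeUint64 renders it as a 0x-prefixed hex string, which is
	// how the packed block now stores and publishes the nonce
	fmt.Println(hexutil.EncodeUint64(nonce)) // 0x7bb9369dcbaec019
}
```
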
app/block/publish.go

Lines changed: 3 additions & 0 deletions

```diff
@@ -27,8 +27,11 @@ func PublishBlock(block *db.PackedBlock, redis *d.RedisInfo) {
        Nonce:               block.Block.Nonce,
        Miner:               block.Block.Miner,
        Size:                block.Block.Size,
+       StateRootHash:       block.Block.StateRootHash,
+       UncleHash:           block.Block.UncleHash,
        TransactionRootHash: block.Block.TransactionRootHash,
        ReceiptRootHash:     block.Block.ReceiptRootHash,
+       ExtraData:           block.Block.ExtraData,
    }).Err(); err != nil {
        log.Print(color.Red.Sprintf("[!] Failed to publish block %d in channel : %s", block.Block.Number, err.Error()))
        return
```

app/block/retry.go

Lines changed: 19 additions & 10 deletions

```diff
@@ -25,7 +25,7 @@ import (
 // Keeps repeating
 func RetryQueueManager(client *ethclient.Client, _db *gorm.DB, redis *data.RedisInfo, status *d.StatusHolder) {
    sleep := func() {
-       time.Sleep(time.Duration(1000) * time.Millisecond)
+       time.Sleep(time.Duration(100) * time.Millisecond)
    }
 
    // Creating worker pool and submitting jobs as soon as it's determined
@@ -42,8 +42,8 @@ func RetryQueueManager(client *ethclient.Client, _db *gorm.DB, redis *data.Redis
            continue
        }
 
-       attemptCount := GetAttemptCountFromTable(redis, blockNumber)
-       if attemptCount != 0 && attemptCount%3 != 0 {
+       attemptCount, _ := GetAttemptCountFromTable(redis, blockNumber)
+       if attemptCount != 0 && attemptCount%2 != 0 {
 
            PushBlockIntoRetryQueue(redis, blockNumber)
            continue
@@ -102,12 +102,21 @@ func PushBlockIntoRetryQueue(redis *data.RedisInfo, blockNumber string) {
 // IncrementAttemptCountOfBlockNumber - Given block number, increments failed attempt count
 // of processing this block
 //
-// If block doesn't yet exist in tracker table, it'll be inserted first time & counter to be set to 1
+// If block doesn't yet exist in tracker table, it'll be inserted first time & counter to be set to 0
 //
-// It'll be wrapped back to 0 as soon as it reaches 101
+// It'll be wrapped back to 0 as soon as it reaches 100
 func IncrementAttemptCountOfBlockNumber(redis *data.RedisInfo, blockNumber string) {
 
-   wrappedAttemptCount := (GetAttemptCountFromTable(redis, blockNumber) + 1) % 101
+   var wrappedAttemptCount int
+
+   // Attempting to increment 👇, only when it's not first time
+   // when this attempt counter for block number being initialized
+   //
+   // So this ensures for first time it gets initialized to 0
+   attemptCount, err := GetAttemptCountFromTable(redis, blockNumber)
+   if err == nil {
+       wrappedAttemptCount = (int(attemptCount) + 1) % 100
+   }
 
    if _, err := redis.Client.HSet(context.Background(), redis.BlockRetryCountTable, blockNumber, wrappedAttemptCount).Result(); err != nil {
        log.Print(color.Red.Sprintf("[!] Failed to increment attempt count of block %s : %s", blockNumber, err.Error()))
@@ -129,19 +138,19 @@ func CheckBlockInAttemptCounterTable(redis *data.RedisInfo, blockNumber string)
 
 // GetAttemptCountFromTable - Returns current attempt counter from table
 // for given block number
-func GetAttemptCountFromTable(redis *data.RedisInfo, blockNumber string) uint64 {
+func GetAttemptCountFromTable(redis *data.RedisInfo, blockNumber string) (uint64, error) {
 
    count, err := redis.Client.HGet(context.Background(), redis.BlockRetryCountTable, blockNumber).Result()
    if err != nil {
-       return 0
+       return 0, err
    }
 
    parsedCount, err := strconv.ParseUint(count, 10, 64)
    if err != nil {
-       return 0
+       return 0, err
    }
 
-   return parsedCount
+   return parsedCount, nil
 
 }
 
```
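
With `GetAttemptCountFromTable` now returning an error alongside the count, callers can tell an untracked block (or a Redis failure) apart from a genuine zero: the queue manager above simply discards the error, while `IncrementAttemptCountOfBlockNumber` uses it to leave a fresh counter at 0. A sketch of that counting rule, with a hypothetical lookup function standing in for the Redis-backed one:

```go
package main

import (
	"errors"
	"fmt"
)

// errNotTracked stands in for the error Redis HGet returns when a block
// has no counter yet (illustrative only).
var errNotTracked = errors.New("block not tracked")

// nextAttemptCount mirrors the counting rule in IncrementAttemptCountOfBlockNumber:
// an untracked block starts at 0, otherwise the count is incremented and wrapped at 100.
func nextAttemptCount(lookup func(string) (uint64, error), blockNumber string) int {

	var wrapped int

	if count, err := lookup(blockNumber); err == nil {
		wrapped = (int(count) + 1) % 100
	}

	return wrapped
}

func main() {
	fresh := func(string) (uint64, error) { return 0, errNotTracked }
	seen := func(string) (uint64, error) { return 3, nil }
	wrapping := func(string) (uint64, error) { return 99, nil }

	fmt.Println(nextAttemptCount(fresh, "1234"))    // 0 : untracked block starts at 0
	fmt.Println(nextAttemptCount(seen, "1234"))     // 4 : existing count incremented
	fmt.Println(nextAttemptCount(wrapping, "1234")) // 0 : wraps back after reaching 100
}
```
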
app/common/common.go

Lines changed: 69 additions & 1 deletion

```diff
@@ -1,6 +1,11 @@
 package common
 
-import "github.com/ethereum/go-ethereum/common"
+import (
+   "errors"
+   "strconv"
+
+   "github.com/ethereum/go-ethereum/common"
+)
 
 // StringifyEventTopics - Given array of event topic signatures,
 // returns their stringified form, to be required for publishing data to subscribers
@@ -14,3 +19,66 @@ func StringifyEventTopics(data []common.Hash) []string {
 
    return buffer
 }
+
+// CreateEventTopicMap - Given array of event topics, returns map
+// of valid event topics, to be used when performing selective field based
+// queries on event topics
+func CreateEventTopicMap(topics []string) map[uint8]string {
+
+   _topics := make(map[uint8]string)
+
+   if topics[0] != "" {
+       _topics[0] = topics[0]
+   }
+
+   if topics[1] != "" {
+       _topics[1] = topics[1]
+   }
+
+   if topics[2] != "" {
+       _topics[2] = topics[2]
+   }
+
+   if topics[3] != "" {
+       _topics[3] = topics[3]
+   }
+
+   return _topics
+
+}
+
+// ParseNumber - Given an integer as string, attempts to parse it
+func ParseNumber(number string) (uint64, error) {
+
+   _num, err := strconv.ParseUint(number, 10, 64)
+   if err != nil {
+
+       return 0, errors.New("Failed to parse integer")
+
+   }
+
+   return _num, nil
+
+}
+
+// RangeChecker - Checks whether given number range is at max
+// `limit` far away
+func RangeChecker(from string, to string, limit uint64) (uint64, uint64, error) {
+
+   _from, err := ParseNumber(from)
+   if err != nil {
+       return 0, 0, errors.New("Failed to parse integer")
+   }
+
+   _to, err := ParseNumber(to)
+   if err != nil {
+       return 0, 0, errors.New("Failed to parse integer")
+   }
+
+   if !(_to-_from < limit) {
+       return 0, 0, errors.New("Range too long")
+   }
+
+   return _from, _to, nil
+
+}
```
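
A short usage sketch of the two new helpers, assuming the package is importable as `github.com/itzmeanjan/ette/app/common`; the topic strings and limit below are arbitrary placeholders:

```go
package main

import (
	"fmt"

	common "github.com/itzmeanjan/ette/app/common"
)

func main() {
	// Only non-empty topic signatures survive, keyed by their position;
	// the function expects a 4-element slice (topics 0 through 3)
	topics := common.CreateEventTopicMap([]string{"0xtopic0", "", "0xtopic2", ""})
	fmt.Println(len(topics)) // 2 : positions 0 and 2 retained

	// The queried range must span fewer than `limit` blocks
	from, to, err := common.RangeChecker("100", "199", 100)
	fmt.Println(from, to, err) // 100 199 <nil>

	_, _, err = common.RangeChecker("100", "200", 100)
	fmt.Println(err) // Range too long
}
```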

app/data/data.go

Lines changed: 31 additions & 1 deletion

```diff
@@ -186,11 +186,14 @@ type Block struct {
    Difficulty          string  `json:"difficulty" gorm:"column:difficulty"`
    GasUsed             uint64  `json:"gasUsed" gorm:"column:gasused"`
    GasLimit            uint64  `json:"gasLimit" gorm:"column:gaslimit"`
-   Nonce               uint64  `json:"nonce" gorm:"column:nonce"`
+   Nonce               string  `json:"nonce" gorm:"column:nonce"`
    Miner               string  `json:"miner" gorm:"column:miner"`
    Size                float64 `json:"size" gorm:"column:size"`
+   StateRootHash       string  `json:"stateRootHash" gorm:"column:stateroothash"`
+   UncleHash           string  `json:"uncleHash" gorm:"column:unclehash"`
    TransactionRootHash string  `json:"txRootHash" gorm:"column:txroothash"`
    ReceiptRootHash     string  `json:"receiptRootHash" gorm:"column:receiptroothash"`
+   ExtraData           []byte  `json:"extraData" gorm:"column:extradata"`
 }
 
 // MarshalBinary - Implementing binary marshalling function, to be invoked
@@ -199,6 +202,33 @@ func (b *Block) MarshalBinary() ([]byte, error) {
    return json.Marshal(b)
 }
 
+// MarshalJSON - Custom JSON encoder
+func (b *Block) MarshalJSON() ([]byte, error) {
+
+   extraData := ""
+   if _h := hex.EncodeToString(b.ExtraData); _h != "" {
+       extraData = fmt.Sprintf("0x%s", _h)
+   }
+
+   return []byte(fmt.Sprintf(`{"hash":%q,"number":%d,"time":%d,"parentHash":%q,"difficulty":%q,"gasUsed":%d,"gasLimit":%d,"nonce":%q,"miner":%q,"size":%f,"stateRootHash":%q,"uncleHash":%q,"txRootHash":%q,"receiptRootHash":%q,"extraData":%q}`,
+       b.Hash,
+       b.Number,
+       b.Time,
+       b.ParentHash,
+       b.Difficulty,
+       b.GasUsed,
+       b.GasLimit,
+       b.Nonce,
+       b.Miner,
+       b.Size,
+       b.StateRootHash,
+       b.UncleHash,
+       b.TransactionRootHash,
+       b.ReceiptRootHash,
+       extraData)), nil
+
+}
+
 // ToJSON - Encodes into JSON, to be supplied when queried for block data
 func (b *Block) ToJSON() []byte {
    data, err := json.Marshal(b)
```
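
Since `Nonce` is now published as a `0x`-prefixed hex string and `ExtraData` is hex-encoded by the custom `MarshalJSON`, a subscriber consuming the published block JSON has to decode those fields accordingly. A hypothetical consumer-side sketch; the struct below is a trimmed view of the payload and the sample values are made up:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

// blockView is a trimmed-down, consumer-side view of the published block JSON;
// only the fields affected by the new hex encodings are shown.
type blockView struct {
	Number    uint64 `json:"number"`
	Nonce     string `json:"nonce"`
	ExtraData string `json:"extraData"`
}

func main() {
	// Made-up sample payload in the shape produced by Block.MarshalJSON
	payload := []byte(`{"number":1234,"nonce":"0x7bb9369dcbaec019","extraData":"0xd883010a01"}`)

	var b blockView
	if err := json.Unmarshal(payload, &b); err != nil {
		panic(err)
	}

	nonce, _ := hexutil.DecodeUint64(b.Nonce) // hex string back to uint64
	extra, _ := hexutil.Decode(b.ExtraData)   // hex string back to raw bytes

	fmt.Println(b.Number, nonce, len(extra)) // block number, decoded nonce, extra-data length
}
```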
