Skip to content

Commit cbd6ed9

Browse files
core/filtermaps: define APIs for map, epoch calculation (#31659)
This pull request refines the filtermap implementation, defining key APIs for map and epoch calculations to improve readability. It doesn't change any logic; it's a pure cleanup. --------- Co-authored-by: zsfelfoldi <zsfelfoldi@gmail.com>
1 parent cc8d58f commit cbd6ed9

File tree

9 files changed

+161
-86
lines changed

9 files changed

+161
-86
lines changed

core/filtermaps/filtermaps.go

Lines changed: 60 additions & 48 deletions
Original file line numberDiff line numberDiff line change
@@ -185,11 +185,14 @@ type filterMapsRange struct {
185185
initialized bool
186186
headIndexed bool
187187
headDelimiter uint64 // zero if headIndexed is false
188+
188189
// if initialized then all maps are rendered in the maps range
189190
maps common.Range[uint32]
191+
190192
// if tailPartialEpoch > 0 then maps between firstRenderedMap-mapsPerEpoch and
191193
// firstRenderedMap-mapsPerEpoch+tailPartialEpoch-1 are rendered
192194
tailPartialEpoch uint32
195+
193196
// if initialized then all log values in the blocks range are fully
194197
// rendered
195198
// blockLvPointers are available in the blocks range
@@ -223,13 +226,15 @@ type Config struct {
223226
}
224227

225228
// NewFilterMaps creates a new FilterMaps and starts the indexer.
226-
func NewFilterMaps(db ethdb.KeyValueStore, initView *ChainView, historyCutoff, finalBlock uint64, params Params, config Config) *FilterMaps {
229+
func NewFilterMaps(db ethdb.KeyValueStore, initView *ChainView, historyCutoff, finalBlock uint64, params Params, config Config) (*FilterMaps, error) {
227230
rs, initialized, err := rawdb.ReadFilterMapsRange(db)
228231
if err != nil || (initialized && rs.Version != databaseVersion) {
229232
rs, initialized = rawdb.FilterMapsRange{}, false
230233
log.Warn("Invalid log index database version; resetting log index")
231234
}
232-
params.deriveFields()
235+
if err := params.sanitize(); err != nil {
236+
return nil, err
237+
}
233238
f := &FilterMaps{
234239
db: db,
235240
closeCh: make(chan struct{}),
@@ -254,15 +259,14 @@ func NewFilterMaps(db ethdb.KeyValueStore, initView *ChainView, historyCutoff, f
254259
},
255260
// deleting last unindexed epoch might have been interrupted by shutdown
256261
cleanedEpochsBefore: max(rs.MapsFirst>>params.logMapsPerEpoch, 1) - 1,
257-
258-
historyCutoff: historyCutoff,
259-
finalBlock: finalBlock,
260-
matcherSyncCh: make(chan *FilterMapsMatcherBackend),
261-
matchers: make(map[*FilterMapsMatcherBackend]struct{}),
262-
filterMapCache: lru.NewCache[uint32, filterMap](cachedFilterMaps),
263-
lastBlockCache: lru.NewCache[uint32, lastBlockOfMap](cachedLastBlocks),
264-
lvPointerCache: lru.NewCache[uint64, uint64](cachedLvPointers),
265-
renderSnapshots: lru.NewCache[uint64, *renderedMap](cachedRenderSnapshots),
262+
historyCutoff: historyCutoff,
263+
finalBlock: finalBlock,
264+
matcherSyncCh: make(chan *FilterMapsMatcherBackend),
265+
matchers: make(map[*FilterMapsMatcherBackend]struct{}),
266+
filterMapCache: lru.NewCache[uint32, filterMap](cachedFilterMaps),
267+
lastBlockCache: lru.NewCache[uint32, lastBlockOfMap](cachedLastBlocks),
268+
lvPointerCache: lru.NewCache[uint64, uint64](cachedLvPointers),
269+
renderSnapshots: lru.NewCache[uint64, *renderedMap](cachedRenderSnapshots),
266270
}
267271
f.checkRevertRange() // revert maps that are inconsistent with the current chain view
268272

@@ -272,7 +276,7 @@ func NewFilterMaps(db ethdb.KeyValueStore, initView *ChainView, historyCutoff, f
272276
"firstmap", f.indexedRange.maps.First(), "lastmap", f.indexedRange.maps.Last(),
273277
"headindexed", f.indexedRange.headIndexed)
274278
}
275-
return f
279+
return f, nil
276280
}
277281

278282
// Start starts the indexer.
@@ -399,7 +403,7 @@ func (f *FilterMaps) init() error {
399403
batch := f.db.NewBatch()
400404
for epoch := range bestLen {
401405
cp := checkpoints[bestIdx][epoch]
402-
f.storeLastBlockOfMap(batch, (uint32(epoch+1)<<f.logMapsPerEpoch)-1, cp.BlockNumber, cp.BlockId)
406+
f.storeLastBlockOfMap(batch, f.lastEpochMap(uint32(epoch)), cp.BlockNumber, cp.BlockId)
403407
f.storeBlockLvPointer(batch, cp.BlockNumber, cp.FirstIndex)
404408
}
405409
fmr := filterMapsRange{
@@ -408,7 +412,7 @@ func (f *FilterMaps) init() error {
408412
if bestLen > 0 {
409413
cp := checkpoints[bestIdx][bestLen-1]
410414
fmr.blocks = common.NewRange(cp.BlockNumber+1, 0)
411-
fmr.maps = common.NewRange(uint32(bestLen)<<f.logMapsPerEpoch, 0)
415+
fmr.maps = common.NewRange(f.firstEpochMap(uint32(bestLen)), 0)
412416
}
413417
f.setRange(batch, f.targetView, fmr, false)
414418
return batch.Write()
@@ -578,9 +582,11 @@ func (f *FilterMaps) getFilterMapRows(mapIndices []uint32, rowIndex uint32, base
578582
rows := make([]FilterRow, len(mapIndices))
579583
var ptr int
580584
for len(mapIndices) > ptr {
581-
baseRowGroup := mapIndices[ptr] / f.baseRowGroupLength
582-
groupLength := 1
583-
for ptr+groupLength < len(mapIndices) && mapIndices[ptr+groupLength]/f.baseRowGroupLength == baseRowGroup {
585+
var (
586+
groupIndex = f.mapGroupIndex(mapIndices[ptr])
587+
groupLength = 1
588+
)
589+
for ptr+groupLength < len(mapIndices) && f.mapGroupIndex(mapIndices[ptr+groupLength]) == groupIndex {
584590
groupLength++
585591
}
586592
if err := f.getFilterMapRowsOfGroup(rows[ptr:ptr+groupLength], mapIndices[ptr:ptr+groupLength], rowIndex, baseLayerOnly); err != nil {
@@ -594,17 +600,19 @@ func (f *FilterMaps) getFilterMapRows(mapIndices []uint32, rowIndex uint32, base
594600
// getFilterMapRowsOfGroup fetches a set of filter map rows at map indices
595601
// belonging to the same base row group.
596602
func (f *FilterMaps) getFilterMapRowsOfGroup(target []FilterRow, mapIndices []uint32, rowIndex uint32, baseLayerOnly bool) error {
597-
baseRowGroup := mapIndices[0] / f.baseRowGroupLength
598-
baseMapRowIndex := f.mapRowIndex(baseRowGroup*f.baseRowGroupLength, rowIndex)
599-
baseRows, err := rawdb.ReadFilterMapBaseRows(f.db, baseMapRowIndex, f.baseRowGroupLength, f.logMapWidth)
603+
var (
604+
groupIndex = f.mapGroupIndex(mapIndices[0])
605+
mapRowIndex = f.mapRowIndex(groupIndex, rowIndex)
606+
)
607+
baseRows, err := rawdb.ReadFilterMapBaseRows(f.db, mapRowIndex, f.baseRowGroupSize, f.logMapWidth)
600608
if err != nil {
601-
return fmt.Errorf("failed to retrieve base row group %d of row %d: %v", baseRowGroup, rowIndex, err)
609+
return fmt.Errorf("failed to retrieve base row group %d of row %d: %v", groupIndex, rowIndex, err)
602610
}
603611
for i, mapIndex := range mapIndices {
604-
if mapIndex/f.baseRowGroupLength != baseRowGroup {
605-
panic("mapIndices are not in the same base row group")
612+
if f.mapGroupIndex(mapIndex) != groupIndex {
613+
return fmt.Errorf("maps are not in the same base row group, index: %d, group: %d", mapIndex, groupIndex)
606614
}
607-
row := baseRows[mapIndex&(f.baseRowGroupLength-1)]
615+
row := baseRows[f.mapGroupOffset(mapIndex)]
608616
if !baseLayerOnly {
609617
extRow, err := rawdb.ReadFilterMapExtRow(f.db, f.mapRowIndex(mapIndex, rowIndex), f.logMapWidth)
610618
if err != nil {
@@ -621,48 +629,52 @@ func (f *FilterMaps) getFilterMapRowsOfGroup(target []FilterRow, mapIndices []ui
621629
// indices and a shared row index.
622630
func (f *FilterMaps) storeFilterMapRows(batch ethdb.Batch, mapIndices []uint32, rowIndex uint32, rows []FilterRow) error {
623631
for len(mapIndices) > 0 {
624-
baseRowGroup := mapIndices[0] / f.baseRowGroupLength
625-
groupLength := 1
626-
for groupLength < len(mapIndices) && mapIndices[groupLength]/f.baseRowGroupLength == baseRowGroup {
627-
groupLength++
628-
}
629-
if err := f.storeFilterMapRowsOfGroup(batch, mapIndices[:groupLength], rowIndex, rows[:groupLength]); err != nil {
632+
var (
633+
pos = 1
634+
groupIndex = f.mapGroupIndex(mapIndices[0])
635+
)
636+
for pos < len(mapIndices) && f.mapGroupIndex(mapIndices[pos]) == groupIndex {
637+
pos++
638+
}
639+
if err := f.storeFilterMapRowsOfGroup(batch, mapIndices[:pos], rowIndex, rows[:pos]); err != nil {
630640
return err
631641
}
632-
mapIndices, rows = mapIndices[groupLength:], rows[groupLength:]
642+
mapIndices, rows = mapIndices[pos:], rows[pos:]
633643
}
634644
return nil
635645
}
636646

637647
// storeFilterMapRowsOfGroup stores a set of filter map rows at map indices
638648
// belonging to the same base row group.
639649
func (f *FilterMaps) storeFilterMapRowsOfGroup(batch ethdb.Batch, mapIndices []uint32, rowIndex uint32, rows []FilterRow) error {
640-
baseRowGroup := mapIndices[0] / f.baseRowGroupLength
641-
baseMapRowIndex := f.mapRowIndex(baseRowGroup*f.baseRowGroupLength, rowIndex)
642-
var baseRows [][]uint32
643-
if uint32(len(mapIndices)) != f.baseRowGroupLength { // skip base rows read if all rows are replaced
650+
var (
651+
baseRows [][]uint32
652+
groupIndex = f.mapGroupIndex(mapIndices[0])
653+
mapRowIndex = f.mapRowIndex(groupIndex, rowIndex)
654+
)
655+
if uint32(len(mapIndices)) != f.baseRowGroupSize { // skip base rows read if all rows are replaced
644656
var err error
645-
baseRows, err = rawdb.ReadFilterMapBaseRows(f.db, baseMapRowIndex, f.baseRowGroupLength, f.logMapWidth)
657+
baseRows, err = rawdb.ReadFilterMapBaseRows(f.db, mapRowIndex, f.baseRowGroupSize, f.logMapWidth)
646658
if err != nil {
647-
return fmt.Errorf("failed to retrieve base row group %d of row %d for modification: %v", baseRowGroup, rowIndex, err)
659+
return fmt.Errorf("failed to retrieve filter map %d base rows %d for modification: %v", groupIndex, rowIndex, err)
648660
}
649661
} else {
650-
baseRows = make([][]uint32, f.baseRowGroupLength)
662+
baseRows = make([][]uint32, f.baseRowGroupSize)
651663
}
652664
for i, mapIndex := range mapIndices {
653-
if mapIndex/f.baseRowGroupLength != baseRowGroup {
654-
panic("mapIndices are not in the same base row group")
665+
if f.mapGroupIndex(mapIndex) != groupIndex {
666+
return fmt.Errorf("maps are not in the same base row group, index: %d, group: %d", mapIndex, groupIndex)
655667
}
656668
baseRow := []uint32(rows[i])
657669
var extRow FilterRow
658670
if uint32(len(rows[i])) > f.baseRowLength {
659671
extRow = baseRow[f.baseRowLength:]
660672
baseRow = baseRow[:f.baseRowLength]
661673
}
662-
baseRows[mapIndex&(f.baseRowGroupLength-1)] = baseRow
674+
baseRows[f.mapGroupOffset(mapIndex)] = baseRow
663675
rawdb.WriteFilterMapExtRow(batch, f.mapRowIndex(mapIndex, rowIndex), extRow, f.logMapWidth)
664676
}
665-
rawdb.WriteFilterMapBaseRows(batch, baseMapRowIndex, baseRows, f.logMapWidth)
677+
rawdb.WriteFilterMapBaseRows(batch, mapRowIndex, baseRows, f.logMapWidth)
666678
return nil
667679
}
668680

@@ -747,12 +759,12 @@ func (f *FilterMaps) deleteTailEpoch(epoch uint32) (bool, error) {
747759
defer f.indexLock.Unlock()
748760

749761
// determine epoch boundaries
750-
firstMap := epoch << f.logMapsPerEpoch
751-
lastBlock, _, err := f.getLastBlockOfMap(firstMap + f.mapsPerEpoch - 1)
762+
lastBlock, _, err := f.getLastBlockOfMap(f.lastEpochMap(epoch))
752763
if err != nil {
753764
return false, fmt.Errorf("failed to retrieve last block of deleted epoch %d: %v", epoch, err)
754765
}
755766
var firstBlock uint64
767+
firstMap := f.firstEpochMap(epoch)
756768
if epoch > 0 {
757769
firstBlock, _, err = f.getLastBlockOfMap(firstMap - 1)
758770
if err != nil {
@@ -763,8 +775,8 @@ func (f *FilterMaps) deleteTailEpoch(epoch uint32) (bool, error) {
763775
// update rendered range if necessary
764776
var (
765777
fmr = f.indexedRange
766-
firstEpoch = f.indexedRange.maps.First() >> f.logMapsPerEpoch
767-
afterLastEpoch = (f.indexedRange.maps.AfterLast() + f.mapsPerEpoch - 1) >> f.logMapsPerEpoch
778+
firstEpoch = f.mapEpoch(f.indexedRange.maps.First())
779+
afterLastEpoch = f.mapEpoch(f.indexedRange.maps.AfterLast() + f.mapsPerEpoch - 1)
768780
)
769781
if f.indexedRange.tailPartialEpoch != 0 && firstEpoch > 0 {
770782
firstEpoch--
@@ -776,7 +788,7 @@ func (f *FilterMaps) deleteTailEpoch(epoch uint32) (bool, error) {
776788
// first fully or partially rendered epoch and there is at least one
777789
// rendered map in the next epoch; remove from indexed range
778790
fmr.tailPartialEpoch = 0
779-
fmr.maps.SetFirst((epoch + 1) << f.logMapsPerEpoch)
791+
fmr.maps.SetFirst(f.firstEpochMap(epoch + 1))
780792
fmr.blocks.SetFirst(lastBlock + 1)
781793
f.setRange(f.db, f.indexedView, fmr, false)
782794
default:
@@ -857,7 +869,7 @@ func (f *FilterMaps) exportCheckpoints() {
857869
w.WriteString("[\n")
858870
comma := ","
859871
for epoch := uint32(0); epoch < epochCount; epoch++ {
860-
lastBlock, lastBlockId, err := f.getLastBlockOfMap((epoch+1)<<f.logMapsPerEpoch - 1)
872+
lastBlock, lastBlockId, err := f.getLastBlockOfMap(f.lastEpochMap(epoch))
861873
if err != nil {
862874
log.Error("Error fetching last block of epoch", "epoch", epoch, "error", err)
863875
return

core/filtermaps/indexer.go

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -281,7 +281,7 @@ func (f *FilterMaps) tryIndexHead() error {
281281
// is changed.
282282
func (f *FilterMaps) tryIndexTail() (bool, error) {
283283
for {
284-
firstEpoch := f.indexedRange.maps.First() >> f.logMapsPerEpoch
284+
firstEpoch := f.mapEpoch(f.indexedRange.maps.First())
285285
if firstEpoch == 0 || !f.needTailEpoch(firstEpoch-1) {
286286
break
287287
}
@@ -359,7 +359,7 @@ func (f *FilterMaps) tryIndexTail() (bool, error) {
359359
// Note that unindexing is very quick as it only removes continuous ranges of
360360
// data from the database and is also called while running head indexing.
361361
func (f *FilterMaps) tryUnindexTail() (bool, error) {
362-
firstEpoch := f.indexedRange.maps.First() >> f.logMapsPerEpoch
362+
firstEpoch := f.mapEpoch(f.indexedRange.maps.First())
363363
if f.indexedRange.tailPartialEpoch > 0 && firstEpoch > 0 {
364364
firstEpoch--
365365
}
@@ -392,11 +392,11 @@ func (f *FilterMaps) tryUnindexTail() (bool, error) {
392392
// needTailEpoch returns true if the given tail epoch needs to be kept
393393
// according to the current tail target, false if it can be removed.
394394
func (f *FilterMaps) needTailEpoch(epoch uint32) bool {
395-
firstEpoch := f.indexedRange.maps.First() >> f.logMapsPerEpoch
395+
firstEpoch := f.mapEpoch(f.indexedRange.maps.First())
396396
if epoch > firstEpoch {
397397
return true
398398
}
399-
if (epoch+1)<<f.logMapsPerEpoch >= f.indexedRange.maps.AfterLast() {
399+
if f.firstEpochMap(epoch+1) >= f.indexedRange.maps.AfterLast() {
400400
return true
401401
}
402402
if epoch+1 < firstEpoch {
@@ -405,7 +405,7 @@ func (f *FilterMaps) needTailEpoch(epoch uint32) bool {
405405
var lastBlockOfPrevEpoch uint64
406406
if epoch > 0 {
407407
var err error
408-
lastBlockOfPrevEpoch, _, err = f.getLastBlockOfMap(epoch<<f.logMapsPerEpoch - 1)
408+
lastBlockOfPrevEpoch, _, err = f.getLastBlockOfMap(f.lastEpochMap(epoch - 1))
409409
if err != nil {
410410
log.Error("Could not get last block of previous epoch", "epoch", epoch-1, "error", err)
411411
return epoch >= firstEpoch
@@ -414,7 +414,7 @@ func (f *FilterMaps) needTailEpoch(epoch uint32) bool {
414414
if f.historyCutoff > lastBlockOfPrevEpoch {
415415
return false
416416
}
417-
lastBlockOfEpoch, _, err := f.getLastBlockOfMap((epoch+1)<<f.logMapsPerEpoch - 1)
417+
lastBlockOfEpoch, _, err := f.getLastBlockOfMap(f.lastEpochMap(epoch))
418418
if err != nil {
419419
log.Error("Could not get last block of epoch", "epoch", epoch, "error", err)
420420
return epoch >= firstEpoch

core/filtermaps/indexer_test.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,7 @@ var testParams = Params{
4141
logMapWidth: 24,
4242
logMapsPerEpoch: 4,
4343
logValuesPerMap: 4,
44-
baseRowGroupLength: 4,
44+
baseRowGroupSize: 4,
4545
baseRowLengthRatio: 2,
4646
logLayerDiff: 2,
4747
}
@@ -370,7 +370,7 @@ func (ts *testSetup) setHistory(history uint64, noHistory bool) {
370370
History: history,
371371
Disabled: noHistory,
372372
}
373-
ts.fm = NewFilterMaps(ts.db, view, 0, 0, ts.params, config)
373+
ts.fm, _ = NewFilterMaps(ts.db, view, 0, 0, ts.params, config)
374374
ts.fm.testDisableSnapshots = ts.testDisableSnapshots
375375
ts.fm.Start()
376376
}

core/filtermaps/map_renderer.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -284,7 +284,7 @@ func (r *mapRenderer) run(stopCb func() bool, writeCb func()) (bool, error) {
284284
// map finished
285285
r.finishedMaps[r.currentMap.mapIndex] = r.currentMap
286286
r.finished.SetLast(r.finished.AfterLast())
287-
if len(r.finishedMaps) >= maxMapsPerBatch || r.finished.AfterLast()&(r.f.baseRowGroupLength-1) == 0 {
287+
if len(r.finishedMaps) >= maxMapsPerBatch || r.f.mapGroupOffset(r.finished.AfterLast()) == 0 {
288288
if err := r.writeFinishedMaps(stopCb); err != nil {
289289
return false, err
290290
}

core/filtermaps/matcher.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -844,7 +844,7 @@ func (m *matchSequenceInstance) dropNext(mapIndex uint32) bool {
844844
// results at mapIndex and mapIndex+1. Note that acquiring nextNextRes may be
845845
// skipped and it can be substituted with an empty list if baseRes has no potential
846846
// matches that could be sequence matched with anything that could be in nextNextRes.
847-
func (params *Params) matchResults(mapIndex uint32, offset uint64, baseRes, nextRes potentialMatches) potentialMatches {
847+
func (p *Params) matchResults(mapIndex uint32, offset uint64, baseRes, nextRes potentialMatches) potentialMatches {
848848
if nextRes == nil || (baseRes != nil && len(baseRes) == 0) {
849849
// if nextRes is a wild card or baseRes is empty then the sequence matcher
850850
// result equals baseRes.
@@ -854,7 +854,7 @@ func (params *Params) matchResults(mapIndex uint32, offset uint64, baseRes, next
854854
// if baseRes is a wild card or nextRes is empty then the sequence matcher
855855
// result is the items of nextRes with a negative offset applied.
856856
result := make(potentialMatches, 0, len(nextRes))
857-
min := (uint64(mapIndex) << params.logValuesPerMap) + offset
857+
min := (uint64(mapIndex) << p.logValuesPerMap) + offset
858858
for _, v := range nextRes {
859859
if v >= min {
860860
result = append(result, v-offset)

0 commit comments

Comments
 (0)