From af5ed0be0db806af0a3f525867cd889ff31a9d0f Mon Sep 17 00:00:00 2001 From: zhi Date: Wed, 2 Jul 2025 11:37:24 +0800 Subject: [PATCH 1/7] Revert "cow cache for staking view (#4655)" This reverts commit 7d5c499f2b27793dd940e7ab6e92e3d2c63a8308. --- action/protocol/staking/bucket_pool_test.go | 1 - .../protocol/staking/candidate_statereader.go | 5 +- action/protocol/staking/viewdata.go | 17 +-- action/protocol/staking/viewdata_test.go | 10 +- blockindex/contractstaking/indexer.go | 2 +- blockindex/contractstaking/stakeview.go | 63 +++-------- systemcontractindex/stakingindex/cache.go | 2 +- systemcontractindex/stakingindex/cowcache.go | 104 ------------------ .../stakingindex/cowcache_test.go | 48 -------- .../stakingindex/event_handler.go | 6 +- systemcontractindex/stakingindex/index.go | 6 +- systemcontractindex/stakingindex/stakeview.go | 14 +-- 12 files changed, 29 insertions(+), 249 deletions(-) delete mode 100644 systemcontractindex/stakingindex/cowcache.go delete mode 100644 systemcontractindex/stakingindex/cowcache_test.go diff --git a/action/protocol/staking/bucket_pool_test.go b/action/protocol/staking/bucket_pool_test.go index 744a590052..2e6e330347 100644 --- a/action/protocol/staking/bucket_pool_test.go +++ b/action/protocol/staking/bucket_pool_test.go @@ -79,7 +79,6 @@ func TestBucketPool(t *testing.T) { } view, _, err := CreateBaseView(sm, false) - view.contractsStake = &contractStakeView{} r.NoError(err) r.NoError(sm.WriteView(_protocolID, view)) pool = view.bucketPool diff --git a/action/protocol/staking/candidate_statereader.go b/action/protocol/staking/candidate_statereader.go index 728dee254c..d41e7d6613 100644 --- a/action/protocol/staking/candidate_statereader.go +++ b/action/protocol/staking/candidate_statereader.go @@ -176,9 +176,8 @@ func CreateBaseView(sr protocol.StateReader, enableSMStorage bool) (*ViewData, u } return &ViewData{ - candCenter: center, - bucketPool: pool, - contractsStake: &contractStakeView{}, + candCenter: center, + bucketPool: pool, }, height, nil } diff --git a/action/protocol/staking/viewdata.go b/action/protocol/staking/viewdata.go index e1ddee51b1..2f0fc9fba2 100644 --- a/action/protocol/staking/viewdata.go +++ b/action/protocol/staking/viewdata.go @@ -10,10 +10,9 @@ import ( "math/big" "github.com/iotexproject/iotex-address/address" - "github.com/pkg/errors" - "github.com/iotexproject/iotex-core/v2/action" "github.com/iotexproject/iotex-core/v2/action/protocol" + "github.com/pkg/errors" ) type ( @@ -22,7 +21,6 @@ type ( Clone() ContractStakeView CreatePreStates(ctx context.Context) error Handle(ctx context.Context, receipt *action.Receipt) error - Commit() BucketsByCandidate(ownerAddr address.Address) ([]*VoteBucket, error) } // ViewData is the data that need to be stored in protocol's view @@ -81,7 +79,6 @@ func (v *ViewData) Commit(ctx context.Context, sr protocol.StateReader) error { if err := v.bucketPool.Commit(sr); err != nil { return err } - v.contractsStake.Commit() v.snapshots = []Snapshot{} return nil @@ -175,15 +172,3 @@ func (csv *contractStakeView) Handle(ctx context.Context, receipt *action.Receip } return nil } - -func (csv *contractStakeView) Commit() { - if csv.v1 != nil { - csv.v1.Commit() - } - if csv.v2 != nil { - csv.v2.Commit() - } - if csv.v3 != nil { - csv.v3.Commit() - } -} diff --git a/action/protocol/staking/viewdata_test.go b/action/protocol/staking/viewdata_test.go index 716e290888..17517d319a 100644 --- a/action/protocol/staking/viewdata_test.go +++ b/action/protocol/staking/viewdata_test.go @@ -6,10 +6,9 @@ import ( "testing" "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" - "github.com/iotexproject/iotex-core/v2/test/identityset" "github.com/iotexproject/iotex-core/v2/test/mock/mock_chainmanager" + "github.com/stretchr/testify/require" ) func TestViewData_Clone(t *testing.T) { @@ -62,10 +61,9 @@ func prepareViewData(t *testing.T) (*ViewData, int) { }, } viewData := &ViewData{ - candCenter: candCenter, - bucketPool: bucketPool, - snapshots: []Snapshot{}, - contractsStake: &contractStakeView{}, + candCenter: candCenter, + bucketPool: bucketPool, + snapshots: []Snapshot{}, } return viewData, viewData.Snapshot() } diff --git a/blockindex/contractstaking/indexer.go b/blockindex/contractstaking/indexer.go index d012d87602..26600dc08f 100644 --- a/blockindex/contractstaking/indexer.go +++ b/blockindex/contractstaking/indexer.go @@ -87,7 +87,7 @@ func (s *Indexer) StartView(ctx context.Context) (staking.ContractStakeView, err } return &stakeView{ helper: s, - clean: s.cache.Clone(), + cache: s.cache.Clone(), height: s.cache.Height(), }, nil } diff --git a/blockindex/contractstaking/stakeview.go b/blockindex/contractstaking/stakeview.go index 6ba7e87431..de3130613b 100644 --- a/blockindex/contractstaking/stakeview.go +++ b/blockindex/contractstaking/stakeview.go @@ -2,96 +2,59 @@ package contractstaking import ( "context" - "sync" "github.com/iotexproject/iotex-address/address" - "github.com/iotexproject/iotex-proto/golang/iotextypes" - "github.com/iotexproject/iotex-core/v2/action" "github.com/iotexproject/iotex-core/v2/action/protocol" "github.com/iotexproject/iotex-core/v2/action/protocol/staking" + "github.com/iotexproject/iotex-proto/golang/iotextypes" ) type stakeView struct { helper *Indexer - clean *contractStakingCache - dirty *contractStakingCache + cache *contractStakingCache height uint64 - mu sync.RWMutex } func (s *stakeView) Clone() staking.ContractStakeView { - s.mu.Lock() - defer s.mu.Unlock() - clone := &stakeView{ + return &stakeView{ helper: s.helper, - clean: s.clean, - dirty: nil, + cache: s.cache.Clone(), height: s.height, } - if s.dirty != nil { - clone.clean = s.dirty.Clone() - } - return clone } func (s *stakeView) BucketsByCandidate(candidate address.Address) ([]*Bucket, error) { - s.mu.RLock() - defer s.mu.RUnlock() - if s.dirty != nil { - return s.dirty.bucketsByCandidate(candidate, s.height) - } - return s.clean.bucketsByCandidate(candidate, s.height) + return s.cache.bucketsByCandidate(candidate, s.height) } func (s *stakeView) CreatePreStates(ctx context.Context) error { - s.mu.Lock() - defer s.mu.Unlock() blkCtx := protocol.MustGetBlockCtx(ctx) s.height = blkCtx.BlockHeight return nil } func (s *stakeView) Handle(ctx context.Context, receipt *action.Receipt) error { + blkCtx := protocol.MustGetBlockCtx(ctx) + // new event handler for this receipt + handler := newContractStakingEventHandler(s.cache) + + // handle events of receipt if receipt.Status != uint64(iotextypes.ReceiptStatus_Success) { return nil } - var ( - blkCtx = protocol.MustGetBlockCtx(ctx) - handler *contractStakingEventHandler - ) for _, log := range receipt.Logs() { if log.Address != s.helper.config.ContractAddress { continue } - if handler == nil { - s.mu.Lock() - // new event handler for this receipt - if s.dirty == nil { - s.dirty = s.clean.Clone() - } - handler = newContractStakingEventHandler(s.dirty) - s.mu.Unlock() - } if err := handler.HandleEvent(ctx, blkCtx.BlockHeight, log); err != nil { return err } } - if handler == nil { - return nil - } _, delta := handler.Result() // update cache - s.mu.Lock() - defer s.mu.Unlock() - return s.dirty.Merge(delta, blkCtx.BlockHeight) -} - -func (s *stakeView) Commit() { - s.mu.Lock() - defer s.mu.Unlock() - if s.dirty != nil { - s.clean = s.dirty - s.dirty = nil + if err := s.cache.Merge(delta, blkCtx.BlockHeight); err != nil { + return err } + return nil } diff --git a/systemcontractindex/stakingindex/cache.go b/systemcontractindex/stakingindex/cache.go index 5925806943..8587864e8c 100644 --- a/systemcontractindex/stakingindex/cache.go +++ b/systemcontractindex/stakingindex/cache.go @@ -56,7 +56,7 @@ func (s *cache) Load(kvstore db.KVStore) error { return nil } -func (s *cache) Copy() bucketCache { +func (s *cache) Copy() *cache { c := newCache(s.ns, s.bucketNS) for k, v := range s.buckets { c.buckets[k] = v.Clone() diff --git a/systemcontractindex/stakingindex/cowcache.go b/systemcontractindex/stakingindex/cowcache.go deleted file mode 100644 index 6ad1915cad..0000000000 --- a/systemcontractindex/stakingindex/cowcache.go +++ /dev/null @@ -1,104 +0,0 @@ -package stakingindex - -import ( - "errors" - "sync" - - "github.com/iotexproject/iotex-address/address" - - "github.com/iotexproject/iotex-core/v2/db" -) - -type ( - bucketCache interface { - Load(kvstore db.KVStore) error - Copy() bucketCache - PutBucket(id uint64, bkt *Bucket) - DeleteBucket(id uint64) - BucketIdxs() []uint64 - Bucket(id uint64) *Bucket - Buckets(indices []uint64) []*Bucket - BucketIdsByCandidate(candidate address.Address) []uint64 - TotalBucketCount() uint64 - } - - cowCache struct { - cache bucketCache - dirty bool - mu sync.Mutex - } -) - -func newCowCache(cache bucketCache) *cowCache { - return &cowCache{ - cache: cache, - dirty: false, - } -} - -func (cow *cowCache) Copy() bucketCache { - cow.mu.Lock() - defer cow.mu.Unlock() - if cow.dirty { - cow.dirty = false - } - return &cowCache{ - cache: cow.cache, - dirty: false, - } -} - -func (cow *cowCache) Load(kvstore db.KVStore) error { - return errors.New("not supported in cowCache") -} - -func (cow *cowCache) BucketIdsByCandidate(candidate address.Address) []uint64 { - cow.mu.Lock() - defer cow.mu.Unlock() - return cow.cache.BucketIdsByCandidate(candidate) -} - -func (cow *cowCache) PutBucket(id uint64, bkt *Bucket) { - cow.mu.Lock() - defer cow.mu.Unlock() - cow.ensureCopied() - cow.cache.PutBucket(id, bkt) -} - -func (cow *cowCache) DeleteBucket(id uint64) { - cow.mu.Lock() - defer cow.mu.Unlock() - cow.ensureCopied() - cow.cache.DeleteBucket(id) -} - -func (cow *cowCache) BucketIdxs() []uint64 { - cow.mu.Lock() - defer cow.mu.Unlock() - return cow.cache.BucketIdxs() -} - -func (cow *cowCache) Bucket(id uint64) *Bucket { - cow.mu.Lock() - defer cow.mu.Unlock() - return cow.cache.Bucket(id) -} - -func (cow *cowCache) Buckets(indices []uint64) []*Bucket { - cow.mu.Lock() - defer cow.mu.Unlock() - return cow.cache.Buckets(indices) -} - -func (cow *cowCache) TotalBucketCount() uint64 { - cow.mu.Lock() - defer cow.mu.Unlock() - return cow.cache.TotalBucketCount() -} - -func (cow *cowCache) ensureCopied() { - if !cow.dirty { - cow.cache = cow.cache.Copy() - cow.dirty = true - } -} diff --git a/systemcontractindex/stakingindex/cowcache_test.go b/systemcontractindex/stakingindex/cowcache_test.go deleted file mode 100644 index 9a56d03dc9..0000000000 --- a/systemcontractindex/stakingindex/cowcache_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package stakingindex - -import ( - "math/big" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/iotexproject/iotex-core/v2/test/identityset" -) - -func TestCowCache(t *testing.T) { - r := require.New(t) - buckets := []*Bucket{ - {Candidate: identityset.Address(1), Owner: identityset.Address(2), StakedAmount: big.NewInt(1000), Timestamped: true, StakedDuration: 3600, CreatedAt: 1622548800, UnlockedAt: 1622552400, UnstakedAt: 1622556000, Muted: false}, - {Candidate: identityset.Address(3), Owner: identityset.Address(4), StakedAmount: big.NewInt(2000), Timestamped: false, StakedDuration: 7200, CreatedAt: 1622548801, UnlockedAt: 1622552401, UnstakedAt: 1622556001, Muted: true}, - {Candidate: identityset.Address(5), Owner: identityset.Address(6), StakedAmount: big.NewInt(3000), Timestamped: true, StakedDuration: 10800, CreatedAt: 1622548802, UnlockedAt: 1622552402, UnstakedAt: 1622556002, Muted: false}, - {Candidate: identityset.Address(7), Owner: identityset.Address(8), StakedAmount: big.NewInt(4000), Timestamped: true, StakedDuration: 10800, CreatedAt: 1622548802, UnlockedAt: 1622552402, UnstakedAt: 1622556002, Muted: false}, - } - original := newCache("testNS", "testBucketNS") - original.PutBucket(0, buckets[0]) - // case 1: read cowCache without modification - cow := newCowCache(original) - r.Equal(buckets[0], cow.Bucket(0)) - - // case 2: modify cowCache but not affect original cache - cow.PutBucket(1, buckets[1]) - r.Equal(buckets[1], cow.Bucket(1)) - r.Nil(original.Bucket(1)) - cow.DeleteBucket(0) - r.Nil(cow.Bucket(0)) - r.Equal(buckets[0], original.Bucket(0)) - - // case 3: not real copy before modification - copi := cow.Copy() - r.Equal(buckets[1], copi.Bucket(1)) - r.Equal(cow.cache, copi.(*cowCache).cache) - - // case 4: copied not affected by original modification - cow.PutBucket(2, buckets[2]) - r.Equal(buckets[2], cow.Bucket(2)) - r.Nil(copi.Bucket(2)) - - // case 5: original not affected by copied modification - copi.PutBucket(3, buckets[3]) - r.Equal(buckets[3], copi.Bucket(3)) - r.Nil(cow.Bucket(3)) -} diff --git a/systemcontractindex/stakingindex/event_handler.go b/systemcontractindex/stakingindex/event_handler.go index 797d07dc19..e9dd990b1f 100644 --- a/systemcontractindex/stakingindex/event_handler.go +++ b/systemcontractindex/stakingindex/event_handler.go @@ -32,7 +32,7 @@ var ( type eventHandler struct { stakingBucketNS string - dirty bucketCache // dirty cache, a view for current block + dirty *cache // dirty cache, a view for current block delta batch.KVStoreBatch // delta for db to store buckets of current block tokenOwner map[uint64]address.Address // context for event handler @@ -49,7 +49,7 @@ func init() { } } -func newEventHandler(bucketNS string, dirty bucketCache, blkCtx protocol.BlockCtx, timestamped, muted bool) *eventHandler { +func newEventHandler(bucketNS string, dirty *cache, blkCtx protocol.BlockCtx, timestamped, muted bool) *eventHandler { return &eventHandler{ stakingBucketNS: bucketNS, dirty: dirty, @@ -284,7 +284,7 @@ func (eh *eventHandler) HandleDonatedEvent(event *abiutil.EventParam) error { return nil } -func (eh *eventHandler) Finalize() (batch.KVStoreBatch, bucketCache) { +func (eh *eventHandler) Finalize() (batch.KVStoreBatch, *cache) { delta, dirty := eh.delta, eh.dirty eh.delta, eh.dirty = nil, nil return delta, dirty diff --git a/systemcontractindex/stakingindex/index.go b/systemcontractindex/stakingindex/index.go index 4ff289061f..30f651c737 100644 --- a/systemcontractindex/stakingindex/index.go +++ b/systemcontractindex/stakingindex/index.go @@ -59,12 +59,12 @@ type ( HandleMergedEvent(event *abiutil.EventParam) error HandleBucketExpandedEvent(event *abiutil.EventParam) error HandleDonatedEvent(event *abiutil.EventParam) error - Finalize() (batch.KVStoreBatch, bucketCache) + Finalize() (batch.KVStoreBatch, *cache) } // Indexer is the staking indexer Indexer struct { common *systemcontractindex.IndexerCommon - cache bucketCache // in-memory cache, used to query index data + cache *cache // in-memory cache, used to query index data mutex sync.RWMutex blocksToDuration blocksDurationAtFn // function to calculate duration from block range bucketNS string @@ -130,7 +130,7 @@ func (s *Indexer) StartView(ctx context.Context) (staking.ContractStakeView, err } return &stakeView{ helper: s, - cache: newCowCache(s.cache.Copy()), + cache: s.cache.Copy(), height: s.common.Height(), }, nil } diff --git a/systemcontractindex/stakingindex/stakeview.go b/systemcontractindex/stakingindex/stakeview.go index dcdb51eeea..45f434f3e6 100644 --- a/systemcontractindex/stakingindex/stakeview.go +++ b/systemcontractindex/stakingindex/stakeview.go @@ -2,7 +2,6 @@ package stakingindex import ( "context" - "sync" "github.com/iotexproject/iotex-address/address" @@ -13,14 +12,11 @@ import ( type stakeView struct { helper *Indexer - cache bucketCache + cache *cache height uint64 - mu sync.RWMutex } func (s *stakeView) Clone() staking.ContractStakeView { - s.mu.Lock() - defer s.mu.Unlock() return &stakeView{ helper: s.helper, cache: s.cache.Copy(), @@ -28,8 +24,6 @@ func (s *stakeView) Clone() staking.ContractStakeView { } } func (s *stakeView) BucketsByCandidate(candidate address.Address) ([]*VoteBucket, error) { - s.mu.RLock() - defer s.mu.RUnlock() idxs := s.cache.BucketIdsByCandidate(candidate) bkts := s.cache.Buckets(idxs) // filter out muted buckets @@ -46,20 +40,14 @@ func (s *stakeView) BucketsByCandidate(candidate address.Address) ([]*VoteBucket } func (s *stakeView) CreatePreStates(ctx context.Context) error { - s.mu.Lock() - defer s.mu.Unlock() blkCtx := protocol.MustGetBlockCtx(ctx) s.height = blkCtx.BlockHeight return nil } func (s *stakeView) Handle(ctx context.Context, receipt *action.Receipt) error { - s.mu.Lock() - defer s.mu.Unlock() blkCtx := protocol.MustGetBlockCtx(ctx) muted := s.helper.muteHeight > 0 && blkCtx.BlockHeight >= s.helper.muteHeight handler := newEventHandler(s.helper.bucketNS, s.cache, blkCtx, s.helper.timestamped, muted) return s.helper.handleReceipt(ctx, handler, receipt) } - -func (s *stakeView) Commit() {} From c3097a9251400b97ee7af448aa4eccb6cac49105 Mon Sep 17 00:00:00 2001 From: zhi Date: Tue, 17 Jun 2025 21:09:30 +0800 Subject: [PATCH 2/7] introduce wrapped cache --- blockindex/contractstaking/cache.go | 4 + blockindex/contractstaking/delta_cache.go | 4 - systemcontractindex/stakingindex/cache.go | 226 ++++++++++++++++-- .../stakingindex/event_handler.go | 6 +- systemcontractindex/stakingindex/index.go | 21 +- systemcontractindex/stakingindex/stakeview.go | 4 +- 6 files changed, 224 insertions(+), 41 deletions(-) diff --git a/blockindex/contractstaking/cache.go b/blockindex/contractstaking/cache.go index 9cf441d308..2403c1c51e 100644 --- a/blockindex/contractstaking/cache.go +++ b/blockindex/contractstaking/cache.go @@ -30,6 +30,10 @@ type ( mutex sync.RWMutex // a RW mutex for the cache to protect concurrent access config Config } + + wrappedCache struct { + cache contractStakingCache + } ) var ( diff --git a/blockindex/contractstaking/delta_cache.go b/blockindex/contractstaking/delta_cache.go index 293a9534e7..7e323854e2 100644 --- a/blockindex/contractstaking/delta_cache.go +++ b/blockindex/contractstaking/delta_cache.go @@ -154,10 +154,6 @@ func (s *contractStakingDelta) AddedBucketTypeCnt() uint64 { return cnt } -func (s *contractStakingDelta) isBucketDeleted(id uint64) bool { - return s.bucketInfoDeltaState[id] == deltaStateRemoved -} - func (s *contractStakingDelta) addBucketInfo(id uint64, bi *bucketInfo) error { var err error s.bucketInfoDeltaState[id], err = s.bucketInfoDeltaState[id].Transfer(deltaActionAdd) diff --git a/systemcontractindex/stakingindex/cache.go b/systemcontractindex/stakingindex/cache.go index 8587864e8c..f89bfdba2f 100644 --- a/systemcontractindex/stakingindex/cache.go +++ b/systemcontractindex/stakingindex/cache.go @@ -9,28 +9,46 @@ import ( "github.com/iotexproject/iotex-core/v2/pkg/util/byteutil" ) -// cache is the in-memory cache for staking index -// it is not thread-safe and should be protected by the caller -type cache struct { - buckets map[uint64]*Bucket - bucketsByCandidate map[string]map[uint64]struct{} - totalBucketCount uint64 - ns, bucketNS string -} +type ( + indexerCache interface { + PutBucket(id uint64, bkt *Bucket) + DeleteBucket(id uint64) + BucketIdxs() []uint64 + Bucket(id uint64) *Bucket + Buckets(indices []uint64) []*Bucket + BucketIdsByCandidate(candidate address.Address) []uint64 + TotalBucketCount() uint64 + Base() indexerCache + Commit() error + IsDirty() bool + } + // base is the in-memory base for staking index + // it is not thread-safe and should be protected by the caller + base struct { + buckets map[uint64]*Bucket + bucketsByCandidate map[string]map[uint64]struct{} + totalBucketCount uint64 + } + + wrappedCache struct { + cache indexerCache + bucketsByCandidate map[string]map[uint64]bool // buckets by candidate in current block + updatedBuckets map[uint64]*Bucket // updated buckets in current block + deletedBucketIds map[uint64]struct{} // deleted buckets in current block + } +) -func newCache(ns, bucketNS string) *cache { - return &cache{ +func newCache() *base { + return &base{ buckets: make(map[uint64]*Bucket), bucketsByCandidate: make(map[string]map[uint64]struct{}), - ns: ns, - bucketNS: bucketNS, } } -func (s *cache) Load(kvstore db.KVStore) error { +func (s *base) Load(kvstore db.KVStore, ns, bucketNS string) error { // load total bucket count var totalBucketCount uint64 - tbc, err := kvstore.Get(s.ns, stakingTotalBucketCountKey) + tbc, err := kvstore.Get(ns, stakingTotalBucketCountKey) if err != nil { if !errors.Is(err, db.ErrNotExist) { return err @@ -42,7 +60,7 @@ func (s *cache) Load(kvstore db.KVStore) error { s.totalBucketCount = totalBucketCount // load buckets - ks, vs, err := kvstore.Filter(s.bucketNS, func(k, v []byte) bool { return true }, nil, nil) + ks, vs, err := kvstore.Filter(bucketNS, func(k, v []byte) bool { return true }, nil, nil) if err != nil && !errors.Is(err, db.ErrBucketNotExist) { return err } @@ -56,8 +74,8 @@ func (s *cache) Load(kvstore db.KVStore) error { return nil } -func (s *cache) Copy() *cache { - c := newCache(s.ns, s.bucketNS) +func (s *base) DeepClone() indexerCache { + c := newCache() for k, v := range s.buckets { c.buckets[k] = v.Clone() } @@ -71,7 +89,7 @@ func (s *cache) Copy() *cache { return c } -func (s *cache) PutBucket(id uint64, bkt *Bucket) { +func (s *base) PutBucket(id uint64, bkt *Bucket) { cand := bkt.Candidate.String() if s.buckets[id] != nil { prevCand := s.buckets[id].Candidate.String() @@ -87,10 +105,9 @@ func (s *cache) PutBucket(id uint64, bkt *Bucket) { s.bucketsByCandidate[cand] = make(map[uint64]struct{}) } s.bucketsByCandidate[cand][id] = struct{}{} - return } -func (s *cache) DeleteBucket(id uint64) { +func (s *base) DeleteBucket(id uint64) { bkt, ok := s.buckets[id] if !ok { return @@ -103,7 +120,7 @@ func (s *cache) DeleteBucket(id uint64) { delete(s.buckets, id) } -func (s *cache) BucketIdxs() []uint64 { +func (s *base) BucketIdxs() []uint64 { idxs := make([]uint64, 0, len(s.buckets)) for id := range s.buckets { idxs = append(idxs, id) @@ -111,14 +128,14 @@ func (s *cache) BucketIdxs() []uint64 { return idxs } -func (s *cache) Bucket(id uint64) *Bucket { +func (s *base) Bucket(id uint64) *Bucket { if bkt, ok := s.buckets[id]; ok { return bkt.Clone() } return nil } -func (s *cache) Buckets(indices []uint64) []*Bucket { +func (s *base) Buckets(indices []uint64) []*Bucket { buckets := make([]*Bucket, 0, len(indices)) for _, idx := range indices { if bkt, ok := s.buckets[idx]; ok { @@ -128,7 +145,7 @@ func (s *cache) Buckets(indices []uint64) []*Bucket { return buckets } -func (s *cache) BucketIdsByCandidate(candidate address.Address) []uint64 { +func (s *base) BucketIdsByCandidate(candidate address.Address) []uint64 { cand := candidate.String() buckets := make([]uint64, 0, len(s.bucketsByCandidate[cand])) for idx := range s.bucketsByCandidate[cand] { @@ -137,6 +154,165 @@ func (s *cache) BucketIdsByCandidate(candidate address.Address) []uint64 { return buckets } -func (s *cache) TotalBucketCount() uint64 { +func (s *base) TotalBucketCount() uint64 { return s.totalBucketCount } + +func (s *base) Base() indexerCache { + return s +} + +func (s *base) IsDirty() bool { + return false +} + +func (s *base) Commit() error { + return nil +} + +func newWrappedCache(cache indexerCache) *wrappedCache { + return &wrappedCache{ + cache: cache, + bucketsByCandidate: make(map[string]map[uint64]bool), + updatedBuckets: make(map[uint64]*Bucket), + deletedBucketIds: make(map[uint64]struct{}), + } +} + +func (w *wrappedCache) PutBucket(id uint64, bkt *Bucket) { + oldBucket, ok := w.updatedBuckets[id] + if !ok { + oldBucket = w.cache.Bucket(id) + } + if oldBucket != nil { + oldCand := oldBucket.Candidate.String() + if w.bucketsByCandidate[oldCand] == nil { + w.bucketsByCandidate[oldCand] = make(map[uint64]bool) + } + w.bucketsByCandidate[oldCand][id] = false + } + w.updatedBuckets[id] = bkt + delete(w.deletedBucketIds, id) + cand := bkt.Candidate.String() + if w.bucketsByCandidate[cand] == nil { + w.bucketsByCandidate[cand] = make(map[uint64]bool) + } + w.bucketsByCandidate[cand][id] = true +} + +func (w *wrappedCache) DeleteBucket(id uint64) { + w.deletedBucketIds[id] = struct{}{} + delete(w.updatedBuckets, id) + for cand := range w.bucketsByCandidate { + delete(w.bucketsByCandidate[cand], id) + if len(w.bucketsByCandidate[cand]) == 0 { + delete(w.bucketsByCandidate, cand) + } + } +} + +func (w *wrappedCache) BucketIdxs() []uint64 { + idxMap := make(map[uint64]struct{}) + // Load from underlying cache + for _, id := range w.cache.BucketIdxs() { + if _, deleted := w.deletedBucketIds[id]; !deleted { + idxMap[id] = struct{}{} + } + } + // Add updatedBuckets + for id := range w.updatedBuckets { + if _, deleted := w.deletedBucketIds[id]; !deleted { + idxMap[id] = struct{}{} + } + } + idxs := make([]uint64, 0, len(idxMap)) + for id := range idxMap { + idxs = append(idxs, id) + } + return idxs +} + +func (w *wrappedCache) Bucket(id uint64) *Bucket { + if _, deleted := w.deletedBucketIds[id]; deleted { + return nil + } + if bkt, ok := w.updatedBuckets[id]; ok { + return bkt.Clone() + } + return w.cache.Bucket(id) +} + +func (w *wrappedCache) Buckets(indices []uint64) []*Bucket { + buckets := make([]*Bucket, 0, len(indices)) + for _, idx := range indices { + if _, deleted := w.deletedBucketIds[idx]; deleted { + continue + } + if bkt, ok := w.updatedBuckets[idx]; ok { + buckets = append(buckets, bkt.Clone()) + } else if bkt := w.cache.Bucket(idx); bkt != nil { + buckets = append(buckets, bkt.Clone()) + } + } + return buckets +} + +func (w *wrappedCache) BucketIdsByCandidate(candidate address.Address) []uint64 { + cand := candidate.String() + ids := make(map[uint64]struct{}) + // Read ids from cache first + for _, id := range w.cache.BucketIdsByCandidate(candidate) { + ids[id] = struct{}{} + } + // Update ids according to current block changes + if vals, ok := w.bucketsByCandidate[cand]; ok { + for id, keep := range vals { + if keep { + ids[id] = struct{}{} + } else { + delete(ids, id) + } + } + } + // Remove deleted ids + for id := range w.deletedBucketIds { + delete(ids, id) + } + result := make([]uint64, 0, len(ids)) + for id := range ids { + result = append(result, id) + } + return result +} + +func (w *wrappedCache) Base() indexerCache { + return w.cache.Base() +} + +func (w *wrappedCache) TotalBucketCount() uint64 { + // TODO: update total bucket count based on current block changes + return w.cache.TotalBucketCount() +} + +func (w *wrappedCache) Commit() error { + if w.isDirty() { + for id, bkt := range w.updatedBuckets { + w.cache.PutBucket(id, bkt) + } + for id := range w.deletedBucketIds { + w.cache.DeleteBucket(id) + } + w.updatedBuckets = make(map[uint64]*Bucket) + w.deletedBucketIds = make(map[uint64]struct{}) + w.bucketsByCandidate = make(map[string]map[uint64]bool) + } + return w.cache.Commit() +} + +func (w *wrappedCache) isDirty() bool { + return len(w.updatedBuckets) > 0 || len(w.deletedBucketIds) > 0 || len(w.bucketsByCandidate) > 0 +} + +func (w *wrappedCache) IsDirty() bool { + return w.cache.IsDirty() || w.isDirty() +} diff --git a/systemcontractindex/stakingindex/event_handler.go b/systemcontractindex/stakingindex/event_handler.go index e9dd990b1f..5b79dff2b4 100644 --- a/systemcontractindex/stakingindex/event_handler.go +++ b/systemcontractindex/stakingindex/event_handler.go @@ -32,7 +32,7 @@ var ( type eventHandler struct { stakingBucketNS string - dirty *cache // dirty cache, a view for current block + dirty indexerCache // dirty cache, a view for current block delta batch.KVStoreBatch // delta for db to store buckets of current block tokenOwner map[uint64]address.Address // context for event handler @@ -49,7 +49,7 @@ func init() { } } -func newEventHandler(bucketNS string, dirty *cache, blkCtx protocol.BlockCtx, timestamped, muted bool) *eventHandler { +func newEventHandler(bucketNS string, dirty indexerCache, blkCtx protocol.BlockCtx, timestamped, muted bool) *eventHandler { return &eventHandler{ stakingBucketNS: bucketNS, dirty: dirty, @@ -284,7 +284,7 @@ func (eh *eventHandler) HandleDonatedEvent(event *abiutil.EventParam) error { return nil } -func (eh *eventHandler) Finalize() (batch.KVStoreBatch, *cache) { +func (eh *eventHandler) Finalize() (batch.KVStoreBatch, indexerCache) { delta, dirty := eh.delta, eh.dirty eh.delta, eh.dirty = nil, nil return delta, dirty diff --git a/systemcontractindex/stakingindex/index.go b/systemcontractindex/stakingindex/index.go index 30f651c737..48b72fca7b 100644 --- a/systemcontractindex/stakingindex/index.go +++ b/systemcontractindex/stakingindex/index.go @@ -59,12 +59,12 @@ type ( HandleMergedEvent(event *abiutil.EventParam) error HandleBucketExpandedEvent(event *abiutil.EventParam) error HandleDonatedEvent(event *abiutil.EventParam) error - Finalize() (batch.KVStoreBatch, *cache) + Finalize() (batch.KVStoreBatch, indexerCache) } // Indexer is the staking indexer Indexer struct { common *systemcontractindex.IndexerCommon - cache *cache // in-memory cache, used to query index data + cache *base // in-memory cache, used to query index data mutex sync.RWMutex blocksToDuration blocksDurationAtFn // function to calculate duration from block range bucketNS string @@ -99,7 +99,7 @@ func NewIndexer(kvstore db.KVStore, contractAddr string, startHeight uint64, blo ns := contractAddr + "#" + stakingNS idx := &Indexer{ common: systemcontractindex.NewIndexerCommon(kvstore, ns, stakingHeightKey, contractAddr, startHeight), - cache: newCache(ns, bucketNS), + cache: newCache(), blocksToDuration: blocksToDurationFn, bucketNS: bucketNS, ns: ns, @@ -130,7 +130,7 @@ func (s *Indexer) StartView(ctx context.Context) (staking.ContractStakeView, err } return &stakeView{ helper: s, - cache: s.cache.Copy(), + cache: s.cache.DeepClone(), height: s.common.Height(), }, nil } @@ -139,7 +139,7 @@ func (s *Indexer) start(ctx context.Context) error { if err := s.common.Start(ctx); err != nil { return err } - return s.cache.Load(s.common.KVStore()) + return s.cache.Load(s.common.KVStore(), s.ns, s.bucketNS) } // Stop stops the indexer @@ -271,7 +271,7 @@ func (s *Indexer) PutBlock(ctx context.Context, blk *block.Block) error { } // handle events of block muted := s.muteHeight > 0 && blk.Height() >= s.muteHeight - handler := newEventHandler(s.bucketNS, s.cache.Copy(), protocol.MustGetBlockCtx(ctx), s.timestamped, muted) + handler := newEventHandler(s.bucketNS, s.cache.DeepClone(), protocol.MustGetBlockCtx(ctx), s.timestamped, muted) for _, receipt := range blk.Receipts { if err := s.handleReceipt(ctx, handler, receipt); err != nil { return errors.Wrapf(err, "handle receipt %x failed", receipt.ActionHash) @@ -346,8 +346,15 @@ func (s *Indexer) commit(handler stakingEventHandler, height uint64) error { if err := s.common.Commit(height, delta); err != nil { return err } + if err := dirty.Commit(); err != nil { + return errors.Wrapf(err, "commit dirty cache failed") + } + cache, ok := dirty.Base().(*base) + if !ok { + return errors.Errorf("unexpected cache type %T, expect *base", dirty) + } // update cache - s.cache = dirty + s.cache = cache return nil } diff --git a/systemcontractindex/stakingindex/stakeview.go b/systemcontractindex/stakingindex/stakeview.go index 45f434f3e6..9bc54902e2 100644 --- a/systemcontractindex/stakingindex/stakeview.go +++ b/systemcontractindex/stakingindex/stakeview.go @@ -12,14 +12,14 @@ import ( type stakeView struct { helper *Indexer - cache *cache + cache indexerCache height uint64 } func (s *stakeView) Clone() staking.ContractStakeView { return &stakeView{ helper: s.helper, - cache: s.cache.Copy(), + cache: newWrappedCache(s.cache), height: s.height, } } From 35607714165442e353a3473b9791c6d64b2a854a Mon Sep 17 00:00:00 2001 From: zhi Date: Wed, 18 Jun 2025 23:15:38 +0800 Subject: [PATCH 3/7] keep original in snapshot --- action/protocol/staking/viewdata.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/action/protocol/staking/viewdata.go b/action/protocol/staking/viewdata.go index 2f0fc9fba2..c42b395daf 100644 --- a/action/protocol/staking/viewdata.go +++ b/action/protocol/staking/viewdata.go @@ -90,13 +90,15 @@ func (v *ViewData) IsDirty() bool { func (v *ViewData) Snapshot() int { snapshot := len(v.snapshots) + clone := v.contractsStake.Clone() v.snapshots = append(v.snapshots, Snapshot{ size: v.candCenter.size, changes: v.candCenter.change.size(), amount: new(big.Int).Set(v.bucketPool.total.amount), count: v.bucketPool.total.count, - contractsStake: v.contractsStake.Clone(), + contractsStake: v.contractsStake, }) + v.contractsStake = clone return snapshot } From 4d947975d4f90525d95ad54bacbe8c1aca7520a0 Mon Sep 17 00:00:00 2001 From: zhi Date: Wed, 18 Jun 2025 23:49:47 +0800 Subject: [PATCH 4/7] refactor blockindex --- blockindex/contractstaking/bucket_info.go | 4 +- blockindex/contractstaking/cache.go | 260 +++------- blockindex/contractstaking/cache_test.go | 452 +++++++----------- blockindex/contractstaking/delta_action.go | 14 - blockindex/contractstaking/delta_cache.go | 165 ------- .../contractstaking/delta_cache_test.go | 269 ----------- blockindex/contractstaking/delta_state.go | 47 -- .../contractstaking/delta_state_test.go | 54 --- blockindex/contractstaking/dirty_cache.go | 75 ++- .../contractstaking/dirty_cache_test.go | 66 +-- blockindex/contractstaking/event_handler.go | 47 +- blockindex/contractstaking/indexer.go | 203 +++++++- blockindex/contractstaking/indexer_test.go | 43 +- blockindex/contractstaking/stakeview.go | 34 +- blockindex/contractstaking/util.go | 24 + blockindex/contractstaking/wrappedcache.go | 234 +++++++++ systemcontractindex/stakingindex/stakeview.go | 1 + 17 files changed, 813 insertions(+), 1179 deletions(-) delete mode 100644 blockindex/contractstaking/delta_action.go delete mode 100644 blockindex/contractstaking/delta_cache.go delete mode 100644 blockindex/contractstaking/delta_cache_test.go delete mode 100644 blockindex/contractstaking/delta_state.go delete mode 100644 blockindex/contractstaking/delta_state_test.go create mode 100644 blockindex/contractstaking/wrappedcache.go diff --git a/blockindex/contractstaking/bucket_info.go b/blockindex/contractstaking/bucket_info.go index 3737452f9d..0359050d5c 100644 --- a/blockindex/contractstaking/bucket_info.go +++ b/blockindex/contractstaking/bucket_info.go @@ -39,8 +39,8 @@ func (bi *bucketInfo) Deserialize(b []byte) error { return bi.loadProto(&m) } -// clone clones the bucket info -func (bi *bucketInfo) clone() *bucketInfo { +// Clone clones the bucket info +func (bi *bucketInfo) Clone() *bucketInfo { delegate := bi.Delegate if delegate != nil { delegate, _ = address.FromBytes(delegate.Bytes()) diff --git a/blockindex/contractstaking/cache.go b/blockindex/contractstaking/cache.go index 2403c1c51e..ed0ad90837 100644 --- a/blockindex/contractstaking/cache.go +++ b/blockindex/contractstaking/cache.go @@ -6,20 +6,34 @@ package contractstaking import ( - "context" "math/big" "sync" - "time" "github.com/iotexproject/iotex-address/address" "github.com/pkg/errors" - "github.com/iotexproject/iotex-core/v2/action/protocol" "github.com/iotexproject/iotex-core/v2/db" "github.com/iotexproject/iotex-core/v2/pkg/util/byteutil" ) type ( + stakingCache interface { + BucketInfo(id uint64) (*bucketInfo, bool) + MustGetBucketInfo(id uint64) *bucketInfo + MustGetBucketType(id uint64) *BucketType + MatchBucketType(amount *big.Int, duration uint64) (uint64, *BucketType, bool) + BucketType(id uint64) (*BucketType, bool) + BucketTypeCount() int + BucketsByCandidate(candidate address.Address) ([]uint64, []*BucketType, []*bucketInfo) + TotalBucketCount() uint64 + IsDirty() bool + Base() stakingCache + + PutBucketType(id uint64, bt *BucketType) + PutBucketInfo(id uint64, bi *bucketInfo) + DeleteBucketInfo(id uint64) + Commit() + } contractStakingCache struct { bucketInfoMap map[uint64]*bucketInfo // map[token]bucketInfo candidateBucketMap map[string]map[uint64]bool // map[candidate]bucket @@ -30,10 +44,6 @@ type ( mutex sync.RWMutex // a RW mutex for the cache to protect concurrent access config Config } - - wrappedCache struct { - cache contractStakingCache - } ) var ( @@ -53,70 +63,28 @@ func newContractStakingCache(config Config) *contractStakingCache { } } -func (s *contractStakingCache) Height() uint64 { - s.mutex.RLock() - defer s.mutex.RUnlock() - return s.height -} - -func (s *contractStakingCache) CandidateVotes(ctx context.Context, candidate address.Address, height uint64) (*big.Int, error) { - s.mutex.RLock() - defer s.mutex.RUnlock() - - if err := s.validateHeight(height); err != nil { - return nil, err - } - votes := big.NewInt(0) - m, ok := s.candidateBucketMap[candidate.String()] - if !ok { - return votes, nil - } - featureCtx := protocol.MustGetFeatureCtx(ctx) - for id, existed := range m { - if !existed { - continue - } - bi := s.mustGetBucketInfo(id) - // only count the bucket that is not unstaked - if bi.UnstakedAt != maxBlockNumber { - continue - } - bt := s.mustGetBucketType(bi.TypeIndex) - if featureCtx.FixContractStakingWeightedVotes { - votes.Add(votes, s.config.CalculateVoteWeight(assembleBucket(id, bi, bt, s.config.ContractAddress, s.genBlockDurationFn(height)))) - } else { - votes.Add(votes, bt.Amount) - } - } - return votes, nil -} - -func (s *contractStakingCache) Buckets(height uint64) ([]*Bucket, error) { +func (s *contractStakingCache) Buckets() ([]uint64, []*BucketType, []*bucketInfo) { s.mutex.RLock() defer s.mutex.RUnlock() - if err := s.validateHeight(height); err != nil { - return nil, err - } - - vbs := []*Bucket{} + ids := make([]uint64, 0, len(s.bucketInfoMap)) + ts := make([]*BucketType, 0, len(s.bucketInfoMap)) + infos := make([]*bucketInfo, 0, len(s.bucketInfoMap)) for id, bi := range s.bucketInfoMap { + ids = append(ids, id) bt := s.mustGetBucketType(bi.TypeIndex) - vb := assembleBucket(id, bi.clone(), bt, s.config.ContractAddress, s.genBlockDurationFn(height)) - vbs = append(vbs, vb) + ts = append(ts, bt) + infos = append(infos, bi.Clone()) } - return vbs, nil + + return sortByIds(ids, ts, infos) } -func (s *contractStakingCache) Bucket(id, height uint64) (*Bucket, bool, error) { +func (s *contractStakingCache) Bucket(id uint64) (*BucketType, *bucketInfo) { s.mutex.RLock() defer s.mutex.RUnlock() - if err := s.validateHeight(height); err != nil { - return nil, false, err - } - bt, ok := s.getBucket(id, height) - return bt, ok, nil + return s.getBucket(id) } func (s *contractStakingCache) BucketInfo(id uint64) (*bucketInfo, bool) { @@ -147,67 +115,56 @@ func (s *contractStakingCache) BucketType(id uint64) (*BucketType, bool) { return s.getBucketType(id) } -func (s *contractStakingCache) BucketsByCandidate(candidate address.Address, height uint64) ([]*Bucket, error) { +func (s *contractStakingCache) BucketsByCandidate(candidate address.Address) ([]uint64, []*BucketType, []*bucketInfo) { s.mutex.RLock() defer s.mutex.RUnlock() - - if err := s.validateHeight(height); err != nil { - return nil, err - } - return s.bucketsByCandidate(candidate, height) -} - -func (s *contractStakingCache) bucketsByCandidate(candidate address.Address, height uint64) ([]*Bucket, error) { bucketMap := s.candidateBucketMap[candidate.String()] - vbs := make([]*Bucket, 0, len(bucketMap)) + ids := make([]uint64, 0, len(bucketMap)) + ts := make([]*BucketType, 0, len(bucketMap)) + infos := make([]*bucketInfo, 0, len(bucketMap)) for id := range bucketMap { - vb := s.mustGetBucket(id, height) - vbs = append(vbs, vb) + info := s.mustGetBucketInfo(id) + t := s.mustGetBucketType(info.TypeIndex) + ids = append(ids, id) + ts = append(ts, t) + infos = append(infos, info) } - return vbs, nil + + return sortByIds(ids, ts, infos) } -func (s *contractStakingCache) BucketsByIndices(indices []uint64, height uint64) ([]*Bucket, error) { +func (s *contractStakingCache) BucketsByIndices(indices []uint64) ([]*BucketType, []*bucketInfo) { s.mutex.RLock() defer s.mutex.RUnlock() - if err := s.validateHeight(height); err != nil { - return nil, err - } - vbs := make([]*Bucket, 0, len(indices)) + vbs := make([]*BucketType, 0, len(indices)) + infos := make([]*bucketInfo, 0, len(indices)) for _, id := range indices { - vb, ok := s.getBucket(id, height) - if ok { - vbs = append(vbs, vb) - } + bt, info := s.getBucket(id) + vbs = append(vbs, bt) + infos = append(infos, info) } - return vbs, nil + return vbs, infos } -func (s *contractStakingCache) TotalBucketCount(height uint64) (uint64, error) { +func (s *contractStakingCache) TotalBucketCount() uint64 { s.mutex.RLock() defer s.mutex.RUnlock() - if err := s.validateHeight(height); err != nil { - return 0, err - } - return s.totalBucketCount, nil + return s.totalBucketCount } -func (s *contractStakingCache) ActiveBucketTypes(height uint64) (map[uint64]*BucketType, error) { +func (s *contractStakingCache) ActiveBucketTypes() map[uint64]*BucketType { s.mutex.RLock() defer s.mutex.RUnlock() - if err := s.validateHeight(height); err != nil { - return nil, err - } m := make(map[uint64]*BucketType) for k, v := range s.bucketTypeMap { if v.ActivatedAt != maxBlockNumber { m[k] = v.Clone() } } - return m, nil + return m } func (s *contractStakingCache) PutBucketType(id uint64, bt *BucketType) { @@ -231,18 +188,6 @@ func (s *contractStakingCache) DeleteBucketInfo(id uint64) { s.deleteBucketInfo(id) } -func (s *contractStakingCache) Merge(delta *contractStakingDelta, height uint64) error { - s.mutex.Lock() - defer s.mutex.Unlock() - - if err := s.mergeDelta(delta); err != nil { - return err - } - s.putHeight(height) - s.putTotalBucketCount(s.totalBucketCount + delta.AddedBucketCnt()) - return nil -} - func (s *contractStakingCache) MatchBucketType(amount *big.Int, duration uint64) (uint64, *BucketType, bool) { s.mutex.RLock() defer s.mutex.RUnlock() @@ -254,34 +199,10 @@ func (s *contractStakingCache) MatchBucketType(amount *big.Int, duration uint64) return id, s.mustGetBucketType(id), true } -func (s *contractStakingCache) BucketTypeCount(height uint64) (uint64, error) { - s.mutex.RLock() - defer s.mutex.RUnlock() - - if err := s.validateHeight(height); err != nil { - return 0, err - } - return uint64(len(s.bucketTypeMap)), nil -} - func (s *contractStakingCache) LoadFromDB(kvstore db.KVStore) error { s.mutex.Lock() defer s.mutex.Unlock() - // load height - var height uint64 - h, err := kvstore.Get(_StakingNS, _stakingHeightKey) - if err != nil { - if !errors.Is(err, db.ErrNotExist) { - return err - } - height = 0 - } else { - height = byteutil.BytesToUint64BigEndian(h) - - } - s.putHeight(height) - // load total bucket count var totalBucketCount uint64 tbc, err := kvstore.Get(_StakingNS, _stakingTotalBucketCountKey) @@ -293,7 +214,7 @@ func (s *contractStakingCache) LoadFromDB(kvstore db.KVStore) error { } else { totalBucketCount = byteutil.BytesToUint64BigEndian(tbc) } - s.putTotalBucketCount(totalBucketCount) + s.totalBucketCount = totalBucketCount // load bucket info ks, vs, err := kvstore.Filter(_StakingBucketInfoNS, func(k, v []byte) bool { return true }, nil, nil) @@ -330,11 +251,10 @@ func (s *contractStakingCache) Clone() *contractStakingCache { c := &contractStakingCache{ config: s.config, totalBucketCount: s.totalBucketCount, - height: s.height, } c.bucketInfoMap = make(map[uint64]*bucketInfo, len(s.bucketInfoMap)) for k, v := range s.bucketInfoMap { - c.bucketInfoMap[k] = v.clone() + c.bucketInfoMap[k] = v.Clone() } c.candidateBucketMap = make(map[string]map[uint64]bool, len(s.candidateBucketMap)) for k, v := range s.candidateBucketMap { @@ -387,7 +307,7 @@ func (s *contractStakingCache) getBucketInfo(id uint64) (*bucketInfo, bool) { if !ok { return nil, false } - return bi.clone(), ok + return bi.Clone(), ok } func (s *contractStakingCache) mustGetBucketInfo(id uint64) *bucketInfo { @@ -398,19 +318,12 @@ func (s *contractStakingCache) mustGetBucketInfo(id uint64) *bucketInfo { return bt } -func (s *contractStakingCache) mustGetBucket(id, at uint64) *Bucket { - bi := s.mustGetBucketInfo(id) - bt := s.mustGetBucketType(bi.TypeIndex) - return assembleBucket(id, bi, bt, s.config.ContractAddress, s.genBlockDurationFn(at)) -} - -func (s *contractStakingCache) getBucket(id, at uint64) (*Bucket, bool) { +func (s *contractStakingCache) getBucket(id uint64) (*BucketType, *bucketInfo) { bi, ok := s.getBucketInfo(id) if !ok { - return nil, false + return nil, nil } - bt := s.mustGetBucketType(bi.TypeIndex) - return assembleBucket(id, bi, bt, s.config.ContractAddress, s.genBlockDurationFn(at)), true + return s.mustGetBucketType(bi.TypeIndex), bi } func (s *contractStakingCache) putBucketType(id uint64, bt *BucketType) { @@ -438,6 +351,9 @@ func (s *contractStakingCache) putBucketType(id uint64, bt *BucketType) { func (s *contractStakingCache) putBucketInfo(id uint64, bi *bucketInfo) { oldBi := s.bucketInfoMap[id] s.bucketInfoMap[id] = bi + if id > s.totalBucketCount { + s.totalBucketCount = id + } // update candidate bucket map newDelegate := bi.Delegate.String() if _, ok := s.candidateBucketMap[newDelegate]; !ok { @@ -470,56 +386,26 @@ func (s *contractStakingCache) deleteBucketInfo(id uint64) { delete(s.candidateBucketMap[bi.Delegate.String()], id) } -func (s *contractStakingCache) putTotalBucketCount(count uint64) { - s.totalBucketCount = count +func (s *contractStakingCache) SetHeight(height uint64) { + s.mutex.Lock() + defer s.mutex.Unlock() + s.height = height } -func (s *contractStakingCache) putHeight(height uint64) { - s.height = height +func (s *contractStakingCache) BucketTypeCount() int { + s.mutex.RLock() + defer s.mutex.RUnlock() + + return len(s.bucketTypeMap) } -func (s *contractStakingCache) mergeDelta(delta *contractStakingDelta) error { - if delta == nil { - return errors.New("invalid contract staking delta") - } - for state, btMap := range delta.BucketTypeDelta() { - if state == deltaStateAdded || state == deltaStateModified { - for id, bt := range btMap { - s.putBucketType(id, bt) - } - } - } - for state, biMap := range delta.BucketInfoDelta() { - if state == deltaStateAdded || state == deltaStateModified { - for id, bi := range biMap { - s.putBucketInfo(id, bi) - } - } else if state == deltaStateRemoved { - for id := range biMap { - s.deleteBucketInfo(id) - } - } - } - return nil +func (s *contractStakingCache) Base() stakingCache { + return s } -func (s *contractStakingCache) validateHeight(height uint64) error { - // means latest height - if height == 0 { - return nil - } - // Currently, historical block data query is not supported. - // However, the latest data is actually returned when querying historical block data, for the following reasons: - // 1. to maintain compatibility with the current code's invocation of ActiveCandidate - // 2. to cause consensus errors when the indexer is lagging behind - if height > s.height { - return errors.Wrapf(ErrInvalidHeight, "expected %d, actual %d", s.height, height) - } - return nil +func (s *contractStakingCache) IsDirty() bool { + return false } -func (s *contractStakingCache) genBlockDurationFn(view uint64) blocksDurationFn { - return func(start, end uint64) time.Duration { - return s.config.BlocksToDuration(start, end, view) - } +func (s *contractStakingCache) Commit() { } diff --git a/blockindex/contractstaking/cache_test.go b/blockindex/contractstaking/cache_test.go index bf6fa39149..1391ac6cb2 100644 --- a/blockindex/contractstaking/cache_test.go +++ b/blockindex/contractstaking/cache_test.go @@ -21,11 +21,13 @@ import ( "github.com/iotexproject/iotex-core/v2/testutil" ) -func _checkCacheCandidateVotes(ctx context.Context, r *require.Assertions, cache *contractStakingCache, height uint64, addr address.Address, expectVotes int64) { - votes, err := cache.CandidateVotes(ctx, addr, height) +/* +func _checkCacheCandidateVotes(ctx context.Context, r *require.Assertions, cache *contractStakingCache, _ uint64, addr address.Address, expectVotes int64) { + votes, err := cache.CandidateVotes(ctx, addr) r.NoError(err) r.EqualValues(expectVotes, votes.Int64()) } +*/ func calculateVoteWeightGen(c genesis.VoteWeightCalConsts) calculateVoteWeightFunc { return func(v *Bucket) *big.Int { @@ -36,7 +38,7 @@ func calculateVoteWeightGen(c genesis.VoteWeightCalConsts) calculateVoteWeightFu func TestContractStakingCache_CandidateVotes(t *testing.T) { checkCacheCandidateVotesGen := func(ctx context.Context) func(r *require.Assertions, cache *contractStakingCache, height uint64, addr address.Address, expectVotes int64) { return func(r *require.Assertions, cache *contractStakingCache, height uint64, addr address.Address, expectVotes int64) { - _checkCacheCandidateVotes(ctx, r, cache, height, addr, expectVotes) + // _checkCacheCandidateVotes(ctx, r, cache, height, addr, expectVotes) } } require := require.New(t) @@ -126,73 +128,72 @@ func TestContractStakingCache_Buckets(t *testing.T) { contractAddr := identityset.Address(27).String() cache := newContractStakingCache(Config{ContractAddress: contractAddr, CalculateVoteWeight: calculateVoteWeightGen(genesis.TestDefault().VoteWeightCalConsts), BlocksToDuration: _blockDurationFn}) - height := uint64(0) // no bucket - bts, err := cache.Buckets(height) - require.NoError(err) - require.Empty(bts) + ids, _, _ := cache.Buckets() + require.Empty(ids) // add one bucket cache.PutBucketType(1, &BucketType{Amount: big.NewInt(100), Duration: 100, ActivatedAt: 1}) cache.PutBucketInfo(1, &bucketInfo{TypeIndex: 1, CreatedAt: 1, UnlockedAt: maxBlockNumber, UnstakedAt: maxBlockNumber, Delegate: identityset.Address(1), Owner: identityset.Address(2)}) - buckets, err := cache.Buckets(height) - require.NoError(err) - require.Len(buckets, 1) - checkVoteBucket(require, buckets[0], 1, identityset.Address(1).String(), identityset.Address(2).String(), 100, 100, 1, 1, maxBlockNumber, true, contractAddr) - bucket, ok, err := cache.Bucket(1, height) - require.NoError(err) - require.True(ok) - checkVoteBucket(require, bucket, 1, identityset.Address(1).String(), identityset.Address(2).String(), 100, 100, 1, 1, maxBlockNumber, true, contractAddr) + ids, bts, bis := cache.Buckets() + require.Len(ids, 1) + require.Len(bts, 1) + require.Len(bis, 1) + checkBucket(require, ids[0], bts[0], bis[0], 1, identityset.Address(1).String(), identityset.Address(2).String(), 100, 100, 1, 1, maxBlockNumber, true) + bt, bi := cache.Bucket(1) + require.NotNil(bt) + require.NotNil(bi) + checkBucket(require, 1, bt, bi, 1, identityset.Address(1).String(), identityset.Address(2).String(), 100, 100, 1, 1, maxBlockNumber, true) // add one bucket with different index cache.PutBucketType(2, &BucketType{Amount: big.NewInt(200), Duration: 200, ActivatedAt: 1}) cache.PutBucketInfo(2, &bucketInfo{TypeIndex: 2, CreatedAt: 1, UnlockedAt: maxBlockNumber, UnstakedAt: maxBlockNumber, Delegate: identityset.Address(3), Owner: identityset.Address(4)}) - bts, err = cache.Buckets(height) - require.NoError(err) - bucketMaps := bucketsToMap(bts) - require.Len(bucketMaps, 2) - checkVoteBucket(require, bucketMaps[1], 1, identityset.Address(1).String(), identityset.Address(2).String(), 100, 100, 1, 1, maxBlockNumber, true, contractAddr) - checkVoteBucket(require, bucketMaps[2], 2, identityset.Address(3).String(), identityset.Address(4).String(), 200, 200, 1, 1, maxBlockNumber, true, contractAddr) - bucket, ok, err = cache.Bucket(1, height) - require.NoError(err) - require.True(ok) - checkVoteBucket(require, bucket, 1, identityset.Address(1).String(), identityset.Address(2).String(), 100, 100, 1, 1, maxBlockNumber, true, contractAddr) - bucket, ok, err = cache.Bucket(2, height) - require.NoError(err) - require.True(ok) - checkVoteBucket(require, bucket, 2, identityset.Address(3).String(), identityset.Address(4).String(), 200, 200, 1, 1, maxBlockNumber, true, contractAddr) + ids, bts, bis = cache.Buckets() + require.Len(ids, 2) + require.Len(bts, 2) + require.Len(bis, 2) + checkBucket(require, ids[0], bts[0], bis[0], 1, identityset.Address(1).String(), identityset.Address(2).String(), 100, 100, 1, 1, maxBlockNumber, true) + checkBucket(require, ids[1], bts[1], bis[1], 2, identityset.Address(3).String(), identityset.Address(4).String(), 200, 200, 1, 1, maxBlockNumber, true) + bt, bi = cache.Bucket(1) + require.NotNil(bt) + require.NotNil(bi) + checkBucket(require, 1, bt, bi, 1, identityset.Address(1).String(), identityset.Address(2).String(), 100, 100, 1, 1, maxBlockNumber, true) + bt, bi = cache.Bucket(2) + require.NotNil(bt) + require.NotNil(bi) + checkBucket(require, 2, bt, bi, 2, identityset.Address(3).String(), identityset.Address(4).String(), 200, 200, 1, 1, maxBlockNumber, true) // update delegate of bucket 2 cache.PutBucketInfo(2, &bucketInfo{TypeIndex: 2, CreatedAt: 1, UnlockedAt: maxBlockNumber, UnstakedAt: maxBlockNumber, Delegate: identityset.Address(5), Owner: identityset.Address(4)}) - bts, err = cache.Buckets(height) - require.NoError(err) - bucketMaps = bucketsToMap(bts) - require.Len(bucketMaps, 2) - checkVoteBucket(require, bucketMaps[1], 1, identityset.Address(1).String(), identityset.Address(2).String(), 100, 100, 1, 1, maxBlockNumber, true, contractAddr) - checkVoteBucket(require, bucketMaps[2], 2, identityset.Address(5).String(), identityset.Address(4).String(), 200, 200, 1, 1, maxBlockNumber, true, contractAddr) - bucket, ok, err = cache.Bucket(1, height) - require.NoError(err) - require.True(ok) - checkVoteBucket(require, bucket, 1, identityset.Address(1).String(), identityset.Address(2).String(), 100, 100, 1, 1, maxBlockNumber, true, contractAddr) - bucket, ok, err = cache.Bucket(2, height) - require.NoError(err) - require.True(ok) - checkVoteBucket(require, bucket, 2, identityset.Address(5).String(), identityset.Address(4).String(), 200, 200, 1, 1, maxBlockNumber, true, contractAddr) + ids, bts, bis = cache.Buckets() + require.Len(ids, 2) + require.Len(bts, 2) + require.Len(bis, 2) + checkBucket(require, ids[0], bts[0], bis[0], 1, identityset.Address(1).String(), identityset.Address(2).String(), 100, 100, 1, 1, maxBlockNumber, true) + checkBucket(require, ids[1], bts[1], bis[1], 2, identityset.Address(5).String(), identityset.Address(4).String(), 200, 200, 1, 1, maxBlockNumber, true) + bt, bi = cache.Bucket(1) + require.NotNil(bt) + require.NotNil(bi) + checkBucket(require, 1, bt, bi, 1, identityset.Address(1).String(), identityset.Address(2).String(), 100, 100, 1, 1, maxBlockNumber, true) + bt, bi = cache.Bucket(2) + require.NotNil(bt) + require.NotNil(bi) + checkBucket(require, 2, bt, bi, 2, identityset.Address(5).String(), identityset.Address(4).String(), 200, 200, 1, 1, maxBlockNumber, true) // delete bucket 1 cache.DeleteBucketInfo(1) - bts, err = cache.Buckets(height) - require.NoError(err) - bucketMaps = bucketsToMap(bts) - require.Len(bucketMaps, 1) - checkVoteBucket(require, bucketMaps[2], 2, identityset.Address(5).String(), identityset.Address(4).String(), 200, 200, 1, 1, maxBlockNumber, true, contractAddr) - _, ok, err = cache.Bucket(1, height) - require.NoError(err) - require.False(ok) - bucket, ok, err = cache.Bucket(2, height) - require.NoError(err) - require.True(ok) - checkVoteBucket(require, bucket, 2, identityset.Address(5).String(), identityset.Address(4).String(), 200, 200, 1, 1, maxBlockNumber, true, contractAddr) + ids, bts, bis = cache.Buckets() + require.Len(ids, 1) + require.Len(bts, 1) + require.Len(bis, 1) + checkBucket(require, ids[0], bts[0], bis[0], 2, identityset.Address(5).String(), identityset.Address(4).String(), 200, 200, 1, 1, maxBlockNumber, true) + bt, bi = cache.Bucket(1) + require.Nil(bt) + require.Nil(bi) + bt, bi = cache.Bucket(2) + require.NotNil(bt) + require.NotNil(bi) + checkBucket(require, 2, bt, bi, 2, identityset.Address(5).String(), identityset.Address(4).String(), 200, 200, 1, 1, maxBlockNumber, true) } func TestContractStakingCache_BucketsByCandidate(t *testing.T) { @@ -200,70 +201,66 @@ func TestContractStakingCache_BucketsByCandidate(t *testing.T) { contractAddr := identityset.Address(27).String() cache := newContractStakingCache(Config{ContractAddress: contractAddr, CalculateVoteWeight: calculateVoteWeightGen(genesis.TestDefault().VoteWeightCalConsts), BlocksToDuration: _blockDurationFn}) - height := uint64(0) // no bucket - buckets, err := cache.BucketsByCandidate(identityset.Address(1), height) - require.NoError(err) - require.Len(buckets, 0) + ids, _, _ := cache.BucketsByCandidate(identityset.Address(1)) + require.Len(ids, 0) // one bucket cache.PutBucketType(1, &BucketType{Amount: big.NewInt(100), Duration: 100, ActivatedAt: 1}) cache.PutBucketInfo(1, &bucketInfo{TypeIndex: 1, CreatedAt: 1, UnlockedAt: maxBlockNumber, UnstakedAt: maxBlockNumber, Delegate: identityset.Address(1), Owner: identityset.Address(2)}) - bts, err := cache.BucketsByCandidate(identityset.Address(1), height) - require.NoError(err) - bucketMaps := bucketsToMap(bts) - require.Len(bucketMaps, 1) - checkVoteBucket(require, bucketMaps[1], 1, identityset.Address(1).String(), identityset.Address(2).String(), 100, 100, 1, 1, maxBlockNumber, true, contractAddr) + ids, bts, bis := cache.BucketsByCandidate(identityset.Address(1)) + require.Len(ids, 1) + require.Len(bts, 1) + require.Len(bis, 1) + checkBucket(require, ids[0], bts[0], bis[0], 1, identityset.Address(1).String(), identityset.Address(2).String(), 100, 100, 1, 1, maxBlockNumber, true) // two buckets cache.PutBucketInfo(2, &bucketInfo{TypeIndex: 1, CreatedAt: 1, UnlockedAt: maxBlockNumber, UnstakedAt: maxBlockNumber, Delegate: identityset.Address(1), Owner: identityset.Address(2)}) - bts, err = cache.BucketsByCandidate(identityset.Address(1), height) - require.NoError(err) - bucketMaps = bucketsToMap(bts) - require.Len(bucketMaps, 2) - checkVoteBucket(require, bucketMaps[1], 1, identityset.Address(1).String(), identityset.Address(2).String(), 100, 100, 1, 1, maxBlockNumber, true, contractAddr) - checkVoteBucket(require, bucketMaps[2], 2, identityset.Address(1).String(), identityset.Address(2).String(), 100, 100, 1, 1, maxBlockNumber, true, contractAddr) + ids, bts, bis = cache.BucketsByCandidate(identityset.Address(1)) + require.Len(ids, 2) + require.Len(bts, 2) + require.Len(bis, 2) + checkBucket(require, ids[0], bts[0], bis[0], 1, identityset.Address(1).String(), identityset.Address(2).String(), 100, 100, 1, 1, maxBlockNumber, true) + checkBucket(require, ids[1], bts[1], bis[0], 2, identityset.Address(1).String(), identityset.Address(2).String(), 100, 100, 1, 1, maxBlockNumber, true) // add one bucket with different delegate cache.PutBucketInfo(3, &bucketInfo{TypeIndex: 1, CreatedAt: 1, UnlockedAt: maxBlockNumber, UnstakedAt: maxBlockNumber, Delegate: identityset.Address(3), Owner: identityset.Address(2)}) - bts, err = cache.BucketsByCandidate(identityset.Address(1), height) - require.NoError(err) - bucketMaps = bucketsToMap(bts) - require.Len(bucketMaps, 2) - require.Nil(bucketMaps[3]) - bts, err = cache.BucketsByCandidate(identityset.Address(3), height) - require.NoError(err) - bucketMaps = bucketsToMap(bts) - require.Len(bucketMaps, 1) - checkVoteBucket(require, bucketMaps[3], 3, identityset.Address(3).String(), identityset.Address(2).String(), 100, 100, 1, 1, maxBlockNumber, true, contractAddr) + ids, bts, bis = cache.BucketsByCandidate(identityset.Address(1)) + require.Len(ids, 2) + require.Len(bts, 2) + require.Len(bis, 2) + ids, bts, bis = cache.BucketsByCandidate(identityset.Address(3)) + require.Len(ids, 1) + require.Len(bts, 1) + require.Len(bis, 1) + checkBucket(require, ids[0], bts[0], bis[0], 3, identityset.Address(3).String(), identityset.Address(2).String(), 100, 100, 1, 1, maxBlockNumber, true) // change delegate of bucket 1 cache.PutBucketInfo(1, &bucketInfo{TypeIndex: 1, CreatedAt: 1, UnlockedAt: maxBlockNumber, UnstakedAt: maxBlockNumber, Delegate: identityset.Address(3), Owner: identityset.Address(2)}) - bts, err = cache.BucketsByCandidate(identityset.Address(1), height) - require.NoError(err) - bucketMaps = bucketsToMap(bts) - require.Len(bucketMaps, 1) - require.Nil(bucketMaps[1]) - checkVoteBucket(require, bucketMaps[2], 2, identityset.Address(1).String(), identityset.Address(2).String(), 100, 100, 1, 1, maxBlockNumber, true, contractAddr) - bts, err = cache.BucketsByCandidate(identityset.Address(3), height) - require.NoError(err) - bucketMaps = bucketsToMap(bts) - require.Len(bucketMaps, 2) - checkVoteBucket(require, bucketMaps[1], 1, identityset.Address(3).String(), identityset.Address(2).String(), 100, 100, 1, 1, maxBlockNumber, true, contractAddr) - checkVoteBucket(require, bucketMaps[3], 3, identityset.Address(3).String(), identityset.Address(2).String(), 100, 100, 1, 1, maxBlockNumber, true, contractAddr) + ids, bts, bis = cache.BucketsByCandidate(identityset.Address(1)) + require.Len(ids, 1) + require.Len(bts, 1) + require.Len(bis, 1) + checkBucket(require, ids[0], bts[0], bis[0], 2, identityset.Address(1).String(), identityset.Address(2).String(), 100, 100, 1, 1, maxBlockNumber, true) + ids, bts, bis = cache.BucketsByCandidate(identityset.Address(3)) + require.Len(ids, 2) + require.Len(bts, 2) + require.Len(bis, 2) + checkBucket(require, ids[0], bts[0], bis[0], 1, identityset.Address(3).String(), identityset.Address(2).String(), 100, 100, 1, 1, maxBlockNumber, true) + checkBucket(require, ids[1], bts[1], bis[1], 3, identityset.Address(3).String(), identityset.Address(2).String(), 100, 100, 1, 1, maxBlockNumber, true) // delete bucket 2 cache.DeleteBucketInfo(2) - bts, err = cache.BucketsByCandidate(identityset.Address(1), height) - require.NoError(err) - bucketMaps = bucketsToMap(bts) - require.Len(bucketMaps, 0) - bts, err = cache.BucketsByCandidate(identityset.Address(3), height) - require.NoError(err) - bucketMaps = bucketsToMap(bts) - require.Len(bucketMaps, 2) - checkVoteBucket(require, bucketMaps[1], 1, identityset.Address(3).String(), identityset.Address(2).String(), 100, 100, 1, 1, maxBlockNumber, true, contractAddr) - checkVoteBucket(require, bucketMaps[3], 3, identityset.Address(3).String(), identityset.Address(2).String(), 100, 100, 1, 1, maxBlockNumber, true, contractAddr) + ids, bts, bis = cache.BucketsByCandidate(identityset.Address(1)) + require.Len(ids, 0) + require.Len(bts, 0) + require.Len(bis, 0) + ids, bts, bis = cache.BucketsByCandidate(identityset.Address(3)) + require.Len(ids, 2) + require.Len(bts, 2) + require.Len(bis, 2) + checkBucket(require, ids[0], bts[0], bis[0], 1, identityset.Address(3).String(), identityset.Address(2).String(), 100, 100, 1, 1, maxBlockNumber, true) + checkBucket(require, ids[1], bts[1], bis[1], 3, identityset.Address(3).String(), identityset.Address(2).String(), 100, 100, 1, 1, maxBlockNumber, true) } @@ -272,92 +269,64 @@ func TestContractStakingCache_BucketsByIndices(t *testing.T) { contractAddr := identityset.Address(27).String() cache := newContractStakingCache(Config{ContractAddress: contractAddr, CalculateVoteWeight: calculateVoteWeightGen(genesis.TestDefault().VoteWeightCalConsts), BlocksToDuration: _blockDurationFn}) - height := uint64(0) // no bucket - buckets, err := cache.BucketsByIndices([]uint64{1}, height) - require.NoError(err) - require.Len(buckets, 0) + bts, bis := cache.BucketsByIndices([]uint64{1}) + require.Len(bts, 1) + require.Len(bis, 1) + require.Nil(bts[0]) + require.Nil(bis[0]) // one bucket cache.PutBucketType(1, &BucketType{Amount: big.NewInt(100), Duration: 100, ActivatedAt: 1}) cache.PutBucketInfo(1, &bucketInfo{TypeIndex: 1, CreatedAt: 1, UnlockedAt: maxBlockNumber, UnstakedAt: maxBlockNumber, Delegate: identityset.Address(1), Owner: identityset.Address(2)}) - buckets, err = cache.BucketsByIndices([]uint64{1}, height) - require.NoError(err) - require.Len(buckets, 1) - bucketMaps := bucketsToMap(buckets) - checkVoteBucket(require, bucketMaps[1], 1, identityset.Address(1).String(), identityset.Address(2).String(), 100, 100, 1, 1, maxBlockNumber, true, contractAddr) + bts, bis = cache.BucketsByIndices([]uint64{1}) + require.Len(bts, 1) + require.Len(bis, 1) + checkBucket(require, 1, bts[0], bis[0], 1, identityset.Address(1).String(), identityset.Address(2).String(), 100, 100, 1, 1, maxBlockNumber, true) // two buckets cache.PutBucketInfo(2, &bucketInfo{TypeIndex: 1, CreatedAt: 1, UnlockedAt: maxBlockNumber, UnstakedAt: maxBlockNumber, Delegate: identityset.Address(1), Owner: identityset.Address(2)}) - buckets, err = cache.BucketsByIndices([]uint64{1, 2}, height) - require.NoError(err) - require.Len(buckets, 2) - bucketMaps = bucketsToMap(buckets) - checkVoteBucket(require, bucketMaps[1], 1, identityset.Address(1).String(), identityset.Address(2).String(), 100, 100, 1, 1, maxBlockNumber, true, contractAddr) - checkVoteBucket(require, bucketMaps[2], 2, identityset.Address(1).String(), identityset.Address(2).String(), 100, 100, 1, 1, maxBlockNumber, true, contractAddr) + bts, bis = cache.BucketsByIndices([]uint64{1, 2}) + require.Len(bts, 2) + require.Len(bis, 2) + checkBucket(require, 1, bts[0], bis[0], 1, identityset.Address(1).String(), identityset.Address(2).String(), 100, 100, 1, 1, maxBlockNumber, true) + checkBucket(require, 2, bts[1], bis[1], 2, identityset.Address(1).String(), identityset.Address(2).String(), 100, 100, 1, 1, maxBlockNumber, true) // one bucket not found - buckets, err = cache.BucketsByIndices([]uint64{3}, height) - require.NoError(err) - require.Len(buckets, 0) + bts, bis = cache.BucketsByIndices([]uint64{3}) + require.Len(bts, 1) + require.Len(bis, 1) + require.Nil(bts[0]) + require.Nil(bis[0]) // one bucket found, one not found - buckets, err = cache.BucketsByIndices([]uint64{1, 3}, height) - require.NoError(err) - require.Len(buckets, 1) - bucketMaps = bucketsToMap(buckets) - checkVoteBucket(require, bucketMaps[1], 1, identityset.Address(1).String(), identityset.Address(2).String(), 100, 100, 1, 1, maxBlockNumber, true, contractAddr) + bts, bis = cache.BucketsByIndices([]uint64{1, 3}) + require.Len(bts, 2) + require.Len(bis, 2) + require.Nil(bts[1]) + require.Nil(bis[1]) + checkBucket(require, 1, bts[0], bis[0], 1, identityset.Address(1).String(), identityset.Address(2).String(), 100, 100, 1, 1, maxBlockNumber, true) // delete bucket 1 cache.DeleteBucketInfo(1) - buckets, err = cache.BucketsByIndices([]uint64{1}, height) - require.NoError(err) - require.Len(buckets, 0) -} - -func TestContractStakingCache_TotalBucketCount(t *testing.T) { - require := require.New(t) - cache := newContractStakingCache(Config{ContractAddress: identityset.Address(27).String(), CalculateVoteWeight: calculateVoteWeightGen(genesis.TestDefault().VoteWeightCalConsts), BlocksToDuration: _blockDurationFn}) - - height := uint64(0) - // no bucket - tbc, err := cache.TotalBucketCount(height) - require.NoError(err) - require.EqualValues(0, tbc) - - // one bucket - cache.putTotalBucketCount(1) - tbc, err = cache.TotalBucketCount(height) - require.NoError(err) - require.EqualValues(1, tbc) - - // two buckets - cache.putTotalBucketCount(2) - tbc, err = cache.TotalBucketCount(height) - require.NoError(err) - require.EqualValues(2, tbc) - - // delete bucket 1 - cache.DeleteBucketInfo(1) - tbc, err = cache.TotalBucketCount(height) - require.NoError(err) - require.EqualValues(2, tbc) + bts, bis = cache.BucketsByIndices([]uint64{1}) + require.Len(bts, 1) + require.Len(bis, 1) + require.Nil(bts[0]) + require.Nil(bis[0]) } func TestContractStakingCache_ActiveBucketTypes(t *testing.T) { require := require.New(t) cache := newContractStakingCache(Config{ContractAddress: identityset.Address(27).String(), CalculateVoteWeight: calculateVoteWeightGen(genesis.TestDefault().VoteWeightCalConsts), BlocksToDuration: _blockDurationFn}) - height := uint64(0) // no bucket type - abt, err := cache.ActiveBucketTypes(height) - require.NoError(err) + abt := cache.ActiveBucketTypes() require.Empty(abt) // one bucket type cache.PutBucketType(1, &BucketType{Amount: big.NewInt(100), Duration: 100, ActivatedAt: 1}) - activeBucketTypes, err := cache.ActiveBucketTypes(height) - require.NoError(err) + activeBucketTypes := cache.ActiveBucketTypes() require.Len(activeBucketTypes, 1) require.EqualValues(100, activeBucketTypes[1].Amount.Int64()) require.EqualValues(100, activeBucketTypes[1].Duration) @@ -365,8 +334,7 @@ func TestContractStakingCache_ActiveBucketTypes(t *testing.T) { // two bucket types cache.PutBucketType(2, &BucketType{Amount: big.NewInt(200), Duration: 200, ActivatedAt: 2}) - activeBucketTypes, err = cache.ActiveBucketTypes(height) - require.NoError(err) + activeBucketTypes = cache.ActiveBucketTypes() require.Len(activeBucketTypes, 2) require.EqualValues(100, activeBucketTypes[1].Amount.Int64()) require.EqualValues(100, activeBucketTypes[1].Duration) @@ -377,8 +345,7 @@ func TestContractStakingCache_ActiveBucketTypes(t *testing.T) { // add one inactive bucket type cache.PutBucketType(3, &BucketType{Amount: big.NewInt(300), Duration: 300, ActivatedAt: maxBlockNumber}) - activeBucketTypes, err = cache.ActiveBucketTypes(height) - require.NoError(err) + activeBucketTypes = cache.ActiveBucketTypes() require.Len(activeBucketTypes, 2) require.EqualValues(100, activeBucketTypes[1].Amount.Int64()) require.EqualValues(100, activeBucketTypes[1].Duration) @@ -389,8 +356,7 @@ func TestContractStakingCache_ActiveBucketTypes(t *testing.T) { // deactivate bucket type 1 cache.PutBucketType(1, &BucketType{Amount: big.NewInt(100), Duration: 100, ActivatedAt: maxBlockNumber}) - activeBucketTypes, err = cache.ActiveBucketTypes(height) - require.NoError(err) + activeBucketTypes = cache.ActiveBucketTypes() require.Len(activeBucketTypes, 1) require.EqualValues(200, activeBucketTypes[2].Amount.Int64()) require.EqualValues(200, activeBucketTypes[2].Duration) @@ -398,8 +364,7 @@ func TestContractStakingCache_ActiveBucketTypes(t *testing.T) { // reactivate bucket type 1 cache.PutBucketType(1, &BucketType{Amount: big.NewInt(100), Duration: 100, ActivatedAt: 1}) - activeBucketTypes, err = cache.ActiveBucketTypes(height) - require.NoError(err) + activeBucketTypes = cache.ActiveBucketTypes() require.Len(activeBucketTypes, 2) require.EqualValues(100, activeBucketTypes[1].Amount.Int64()) require.EqualValues(100, activeBucketTypes[1].Duration) @@ -409,70 +374,6 @@ func TestContractStakingCache_ActiveBucketTypes(t *testing.T) { require.EqualValues(2, activeBucketTypes[2].ActivatedAt) } -func TestContractStakingCache_Merge(t *testing.T) { - require := require.New(t) - g := genesis.TestDefault() - cache := newContractStakingCache(Config{ContractAddress: identityset.Address(27).String(), CalculateVoteWeight: calculateVoteWeightGen(g.VoteWeightCalConsts), BlocksToDuration: _blockDurationFn}) - height := uint64(1) - ctx := protocol.WithFeatureCtx(protocol.WithBlockCtx(genesis.WithGenesisContext(context.Background(), g), protocol.BlockCtx{BlockHeight: height})) - - // create delta with one bucket type - delta := newContractStakingDelta() - delta.AddBucketType(1, &BucketType{Amount: big.NewInt(100), Duration: 100, ActivatedAt: 1}) - // merge delta into cache - err := cache.Merge(delta, height) - require.NoError(err) - // check that bucket type was added to cache - activeBucketTypes, err := cache.ActiveBucketTypes(height) - require.NoError(err) - require.Len(activeBucketTypes, 1) - require.EqualValues(100, activeBucketTypes[1].Amount.Int64()) - require.EqualValues(100, activeBucketTypes[1].Duration) - require.EqualValues(1, activeBucketTypes[1].ActivatedAt) - require.EqualValues(height, cache.Height()) - - // create delta with one bucket - delta = newContractStakingDelta() - delta.AddBucketInfo(1, &bucketInfo{TypeIndex: 1, CreatedAt: 1, UnlockedAt: maxBlockNumber, UnstakedAt: maxBlockNumber, Delegate: identityset.Address(1), Owner: identityset.Address(2)}) - // merge delta into cache - err = cache.Merge(delta, height) - require.NoError(err) - // check that bucket was added to cache and vote count is correct - votes, err := cache.CandidateVotes(ctx, identityset.Address(1), height) - require.NoError(err) - require.EqualValues(100, votes.Int64()) - - // create delta with updated bucket delegate - delta = newContractStakingDelta() - delta.UpdateBucketInfo(1, &bucketInfo{TypeIndex: 1, CreatedAt: 1, UnlockedAt: maxBlockNumber, UnstakedAt: maxBlockNumber, Delegate: identityset.Address(3), Owner: identityset.Address(2)}) - // merge delta into cache - err = cache.Merge(delta, height) - require.NoError(err) - // check that bucket delegate was updated and vote count is correct - votes, err = cache.CandidateVotes(ctx, identityset.Address(1), height) - require.NoError(err) - require.EqualValues(0, votes.Int64()) - votes, err = cache.CandidateVotes(ctx, identityset.Address(3), height) - require.NoError(err) - require.EqualValues(100, votes.Int64()) - - // create delta with deleted bucket - delta = newContractStakingDelta() - delta.DeleteBucketInfo(1) - // merge delta into cache - err = cache.Merge(delta, height) - require.NoError(err) - // check that bucket was deleted from cache and vote count is 0 - votes, err = cache.CandidateVotes(ctx, identityset.Address(3), height) - require.NoError(err) - require.EqualValues(0, votes.Int64()) - - // invalid delta - err = cache.Merge(nil, height) - require.Error(err) - require.Equal(err.Error(), "invalid contract staking delta") -} - func TestContractStakingCache_MatchBucketType(t *testing.T) { require := require.New(t) cache := newContractStakingCache(Config{ContractAddress: identityset.Address(27).String(), CalculateVoteWeight: calculateVoteWeightGen(genesis.TestDefault().VoteWeightCalConsts), BlocksToDuration: _blockDurationFn}) @@ -512,33 +413,24 @@ func TestContractStakingCache_BucketTypeCount(t *testing.T) { require := require.New(t) cache := newContractStakingCache(Config{ContractAddress: identityset.Address(27).String(), CalculateVoteWeight: calculateVoteWeightGen(genesis.TestDefault().VoteWeightCalConsts), BlocksToDuration: _blockDurationFn}) - height := uint64(0) // no bucket type - btc, err := cache.BucketTypeCount(height) - require.NoError(err) + btc := cache.BucketTypeCount() require.EqualValues(0, btc) // one bucket type cache.PutBucketType(1, &BucketType{Amount: big.NewInt(100), Duration: 100, ActivatedAt: 1}) - btc, err = cache.BucketTypeCount(height) - require.NoError(err) + btc = cache.BucketTypeCount() require.EqualValues(1, btc) // two bucket types cache.PutBucketType(2, &BucketType{Amount: big.NewInt(200), Duration: 200, ActivatedAt: 2}) - btc, err = cache.BucketTypeCount(height) - require.NoError(err) + btc = cache.BucketTypeCount() require.EqualValues(2, btc) // deactivate bucket type 1 cache.PutBucketType(1, &BucketType{Amount: big.NewInt(100), Duration: 100, ActivatedAt: maxBlockNumber}) - btc, err = cache.BucketTypeCount(height) - require.NoError(err) + btc = cache.BucketTypeCount() require.EqualValues(2, btc) - - btc, err = cache.BucketTypeCount(1) - require.Error(err) - require.Contains(err.Error(), "invalid height") } func TestContractStakingCache_LoadFromDB(t *testing.T) { @@ -556,34 +448,33 @@ func TestContractStakingCache_LoadFromDB(t *testing.T) { require.Equal(err.Error(), "err1") // kvstore exception at load total bucket count - mockKvStore.EXPECT().Get(gomock.Any(), gomock.Any()).Return(nil, db.ErrNotExist).Times(1) mockKvStore.EXPECT().Get(gomock.Any(), gomock.Any()).Return(nil, errors.New("err2")).Times(1) err = cache.LoadFromDB(mockKvStore) require.Error(err) require.Equal(err.Error(), "err2") // kvstore exception at load bucket info - mockKvStore.EXPECT().Get(gomock.Any(), gomock.Any()).Return(nil, db.ErrNotExist).Times(2) + mockKvStore.EXPECT().Get(gomock.Any(), gomock.Any()).Return(nil, db.ErrNotExist).Times(1) mockKvStore.EXPECT().Filter(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, errors.New("err3")).Times(1) err = cache.LoadFromDB(mockKvStore) require.Error(err) require.Equal(err.Error(), "err3") // mock bucketInfo Deserialize failed - mockKvStore.EXPECT().Get(gomock.Any(), gomock.Any()).Return(nil, db.ErrNotExist).Times(2) + mockKvStore.EXPECT().Get(gomock.Any(), gomock.Any()).Return(nil, db.ErrNotExist).Times(1) mockKvStore.EXPECT().Filter(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, [][]byte{nil}, nil).Times(1) err = cache.LoadFromDB(mockKvStore) require.Error(err) // kvstore exception at load bucket type - mockKvStore.EXPECT().Get(gomock.Any(), gomock.Any()).Return(nil, db.ErrNotExist).Times(2) + mockKvStore.EXPECT().Get(gomock.Any(), gomock.Any()).Return(nil, db.ErrNotExist).Times(1) mockKvStore.EXPECT().Filter(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, db.ErrBucketNotExist).Times(1) mockKvStore.EXPECT().Filter(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, errors.New("err4")).Times(1) err = cache.LoadFromDB(mockKvStore) require.Error(err) require.Equal(err.Error(), "err4") - mockKvStore.EXPECT().Get(gomock.Any(), gomock.Any()).Return(nil, db.ErrNotExist).Times(2) + mockKvStore.EXPECT().Get(gomock.Any(), gomock.Any()).Return(nil, db.ErrNotExist).Times(1) mockKvStore.EXPECT().Filter(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, db.ErrBucketNotExist).Times(1) mockKvStore.EXPECT().Filter(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, [][]byte{nil}, nil).Times(1) err = cache.LoadFromDB(mockKvStore) @@ -602,14 +493,12 @@ func TestContractStakingCache_LoadFromDB(t *testing.T) { height := uint64(0) err = cache.LoadFromDB(kvstore) require.NoError(err) - tbc, err := cache.TotalBucketCount(height) - require.NoError(err) - require.Equal(uint64(0), tbc) - bts, err := cache.Buckets(height) - require.NoError(err) + require.Equal(uint64(0), cache.TotalBucketCount()) + ids, bts, bis := cache.Buckets() + require.Equal(0, len(ids)) require.Equal(0, len(bts)) - btc, err := cache.BucketTypeCount(height) - require.NoError(err) + require.Equal(0, len(bis)) + btc := cache.BucketTypeCount() require.EqualValues(0, btc) // load from db with height and total bucket count @@ -618,14 +507,12 @@ func TestContractStakingCache_LoadFromDB(t *testing.T) { kvstore.Put(_StakingNS, _stakingTotalBucketCountKey, byteutil.Uint64ToBytesBigEndian(10)) err = cache.LoadFromDB(kvstore) require.NoError(err) - tbc, err = cache.TotalBucketCount(height) - require.NoError(err) - require.Equal(uint64(10), tbc) - bts, err = cache.Buckets(height) - require.NoError(err) + require.Equal(uint64(10), cache.TotalBucketCount()) + ids, bts, bis = cache.Buckets() + require.Equal(0, len(ids)) require.Equal(0, len(bts)) - btc, err = cache.BucketTypeCount(height) - require.NoError(err) + require.Equal(0, len(bis)) + btc = cache.BucketTypeCount() require.EqualValues(0, btc) // load from db with bucket @@ -635,17 +522,15 @@ func TestContractStakingCache_LoadFromDB(t *testing.T) { kvstore.Put(_StakingBucketTypeNS, byteutil.Uint64ToBytesBigEndian(1), bucketType.Serialize()) err = cache.LoadFromDB(kvstore) require.NoError(err) - tbc, err = cache.TotalBucketCount(height) - require.NoError(err) - require.Equal(uint64(10), tbc) + require.Equal(uint64(10), cache.TotalBucketCount()) bi, ok := cache.BucketInfo(1) require.True(ok) - bts, err = cache.Buckets(height) - require.NoError(err) + ids, bts, bis = cache.Buckets() + require.Equal(1, len(ids)) require.Equal(1, len(bts)) + require.Equal(1, len(bis)) require.Equal(bucketInfo, bi) - btc, err = cache.BucketTypeCount(height) - require.NoError(err) + btc = cache.BucketTypeCount() require.EqualValues(1, btc) id, bt, ok := cache.MatchBucketType(big.NewInt(100), 100) require.True(ok) @@ -655,7 +540,7 @@ func TestContractStakingCache_LoadFromDB(t *testing.T) { require.EqualValues(1, bt.ActivatedAt) } -func bucketsToMap(buckets []*staking.VoteBucket) map[uint64]*staking.VoteBucket { +func bucketsToMap(ids []uint64, buckets []*staking.VoteBucket) map[uint64]*staking.VoteBucket { m := make(map[uint64]*staking.VoteBucket) for _, bucket := range buckets { m[bucket.Index] = bucket @@ -663,17 +548,16 @@ func bucketsToMap(buckets []*staking.VoteBucket) map[uint64]*staking.VoteBucket return m } -func checkVoteBucket(r *require.Assertions, bucket *staking.VoteBucket, index uint64, candidate, owner string, amount, duration, createHeight, startHeight, unstakeHeight uint64, autoStake bool, contractAddr string) { - r.EqualValues(index, bucket.Index) - r.EqualValues(candidate, bucket.Candidate.String()) +func checkBucket(r *require.Assertions, id uint64, bt *BucketType, bucket *bucketInfo, index uint64, candidate, owner string, amount, duration, createHeight, startHeight, unstakeHeight uint64, autoStake bool) { + r.EqualValues(index, id) + r.EqualValues(candidate, bucket.Delegate.String()) r.EqualValues(owner, bucket.Owner.String()) - r.EqualValues(amount, bucket.StakedAmount.Int64()) - r.EqualValues(duration, bucket.StakedDurationBlockNumber) - r.EqualValues(createHeight, bucket.CreateBlockHeight) - r.EqualValues(startHeight, bucket.StakeStartBlockHeight) - r.EqualValues(unstakeHeight, bucket.UnstakeStartBlockHeight) - r.EqualValues(autoStake, bucket.AutoStake) - r.EqualValues(contractAddr, bucket.ContractAddress) + r.EqualValues(amount, bt.Amount.Int64()) + r.EqualValues(duration, bt.Duration) + r.EqualValues(createHeight, bucket.CreatedAt) + r.EqualValues(startHeight, bucket.CreatedAt) + r.EqualValues(unstakeHeight, bucket.UnstakedAt) + r.EqualValues(autoStake, bucket.UnlockedAt == maxBlockNumber) } func TestContractStakingCache_MustGetBucketInfo(t *testing.T) { diff --git a/blockindex/contractstaking/delta_action.go b/blockindex/contractstaking/delta_action.go deleted file mode 100644 index b843ae34f7..0000000000 --- a/blockindex/contractstaking/delta_action.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (c) 2023 IoTeX Foundation -// This source code is provided 'as is' and no warranties are given as to title or non-infringement, merchantability -// or fitness for purpose and, to the extent permitted by law, all liability for your use of the code is disclaimed. -// This source code is governed by Apache License 2.0 that can be found in the LICENSE file. - -package contractstaking - -const ( - deltaActionAdd deltaAction = iota - deltaActionRemove - deltaActionModify -) - -type deltaAction int diff --git a/blockindex/contractstaking/delta_cache.go b/blockindex/contractstaking/delta_cache.go deleted file mode 100644 index 7e323854e2..0000000000 --- a/blockindex/contractstaking/delta_cache.go +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright (c) 2023 IoTeX Foundation -// This source code is provided 'as is' and no warranties are given as to title or non-infringement, merchantability -// or fitness for purpose and, to the extent permitted by law, all liability for your use of the code is disclaimed. -// This source code is governed by Apache License 2.0 that can be found in the LICENSE file. - -package contractstaking - -import ( - "math/big" -) - -type ( - contractStakingDelta struct { - cache *contractStakingCache // easy to query buckets - - bucketTypeDeltaState map[uint64]deltaState - bucketInfoDeltaState map[uint64]deltaState - } -) - -func newContractStakingDelta() *contractStakingDelta { - return &contractStakingDelta{ - cache: newContractStakingCache(Config{}), - bucketTypeDeltaState: make(map[uint64]deltaState), - bucketInfoDeltaState: make(map[uint64]deltaState), - } -} - -func (s *contractStakingDelta) BucketInfoDelta() map[deltaState]map[uint64]*bucketInfo { - delta := map[deltaState]map[uint64]*bucketInfo{ - deltaStateAdded: make(map[uint64]*bucketInfo), - deltaStateRemoved: make(map[uint64]*bucketInfo), - deltaStateModified: make(map[uint64]*bucketInfo), - } - for id, state := range s.bucketInfoDeltaState { - switch state { - case deltaStateAdded: - delta[state][id] = s.cache.MustGetBucketInfo(id) - case deltaStateRemoved: - delta[state][id] = nil - case deltaStateModified: - delta[state][id] = s.cache.MustGetBucketInfo(id) - } - } - return delta -} - -func (s *contractStakingDelta) BucketTypeDelta() map[deltaState]map[uint64]*BucketType { - delta := map[deltaState]map[uint64]*BucketType{ - deltaStateAdded: make(map[uint64]*BucketType), - deltaStateModified: make(map[uint64]*BucketType), - } - for id, state := range s.bucketTypeDeltaState { - switch state { - case deltaStateAdded: - delta[state][id] = s.cache.MustGetBucketType(id) - case deltaStateModified: - delta[state][id] = s.cache.MustGetBucketType(id) - } - } - return delta -} - -func (s *contractStakingDelta) MustGetBucketType(id uint64) *BucketType { - return s.cache.MustGetBucketType(id) -} - -func (s *contractStakingDelta) MatchBucketType(amount *big.Int, duration uint64) (uint64, *BucketType, bool) { - return s.cache.MatchBucketType(amount, duration) -} - -func (s *contractStakingDelta) GetBucketInfo(id uint64) (*bucketInfo, deltaState) { - state := s.bucketInfoDeltaState[id] - switch state { - case deltaStateAdded, deltaStateModified: - return s.cache.MustGetBucketInfo(id), state - default: // deltaStateRemoved, deltaStateUnchanged - return nil, state - } -} - -func (s *contractStakingDelta) GetBucketType(id uint64) (*BucketType, deltaState) { - state := s.bucketTypeDeltaState[id] - switch state { - case deltaStateAdded, deltaStateModified: - return s.cache.MustGetBucketType(id), state - default: // deltaStateUnchanged - return nil, state - } -} - -func (s *contractStakingDelta) AddBucketInfo(id uint64, bi *bucketInfo) error { - return s.addBucketInfo(id, bi) -} - -func (s *contractStakingDelta) AddBucketType(id uint64, bt *BucketType) error { - var err error - s.bucketTypeDeltaState[id], err = s.bucketTypeDeltaState[id].Transfer(deltaActionAdd) - if err != nil { - return err - } - - s.cache.PutBucketType(id, bt) - return nil -} - -func (s *contractStakingDelta) UpdateBucketType(id uint64, bt *BucketType) error { - var err error - s.bucketTypeDeltaState[id], err = s.bucketTypeDeltaState[id].Transfer(deltaActionModify) - if err != nil { - return err - } - s.cache.PutBucketType(id, bt) - return nil -} - -func (s *contractStakingDelta) UpdateBucketInfo(id uint64, bi *bucketInfo) error { - var err error - s.bucketInfoDeltaState[id], err = s.bucketInfoDeltaState[id].Transfer(deltaActionModify) - if err != nil { - return err - } - s.cache.PutBucketInfo(id, bi) - return nil -} - -func (s *contractStakingDelta) DeleteBucketInfo(id uint64) error { - var err error - s.bucketInfoDeltaState[id], err = s.bucketInfoDeltaState[id].Transfer(deltaActionRemove) - if err != nil { - return err - } - s.cache.DeleteBucketInfo(id) - return nil -} - -func (s *contractStakingDelta) AddedBucketCnt() uint64 { - addedBucketCnt := uint64(0) - for _, state := range s.bucketInfoDeltaState { - if state == deltaStateAdded { - addedBucketCnt++ - } - } - return addedBucketCnt -} - -func (s *contractStakingDelta) AddedBucketTypeCnt() uint64 { - cnt := uint64(0) - for _, state := range s.bucketTypeDeltaState { - if state == deltaStateAdded { - cnt++ - } - } - return cnt -} - -func (s *contractStakingDelta) addBucketInfo(id uint64, bi *bucketInfo) error { - var err error - s.bucketInfoDeltaState[id], err = s.bucketInfoDeltaState[id].Transfer(deltaActionAdd) - if err != nil { - return err - } - s.cache.PutBucketInfo(id, bi) - return nil -} diff --git a/blockindex/contractstaking/delta_cache_test.go b/blockindex/contractstaking/delta_cache_test.go deleted file mode 100644 index d9964874a4..0000000000 --- a/blockindex/contractstaking/delta_cache_test.go +++ /dev/null @@ -1,269 +0,0 @@ -package contractstaking - -import ( - "math/big" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/iotexproject/iotex-core/v2/test/identityset" -) - -func TestContractStakingDelta_BucketInfoDelta(t *testing.T) { - require := require.New(t) - - // create a new delta cache - cache := newContractStakingDelta() - - // add bucket info - bi := &bucketInfo{TypeIndex: 1, CreatedAt: 1, UnlockedAt: maxBlockNumber, UnstakedAt: maxBlockNumber, Delegate: identityset.Address(1), Owner: identityset.Address(2)} - require.NoError(cache.AddBucketInfo(1, bi)) - - // modify bucket info - bi = &bucketInfo{TypeIndex: 2, CreatedAt: 2, UnlockedAt: maxBlockNumber, UnstakedAt: maxBlockNumber, Delegate: identityset.Address(3), Owner: identityset.Address(4)} - require.NoError(cache.UpdateBucketInfo(2, bi)) - - // remove bucket info - require.NoError(cache.DeleteBucketInfo(3)) - - // get bucket info delta - delta := cache.BucketInfoDelta() - - // check added bucket info - require.Len(delta[deltaStateAdded], 1) - added, ok := delta[deltaStateAdded][1] - require.True(ok) - require.NotNil(added) - require.EqualValues(1, added.TypeIndex) - require.EqualValues(1, added.CreatedAt) - require.EqualValues(maxBlockNumber, added.UnlockedAt) - require.EqualValues(maxBlockNumber, added.UnstakedAt) - require.EqualValues(identityset.Address(1), added.Delegate) - require.EqualValues(identityset.Address(2), added.Owner) - - // check modified bucket info - require.Len(delta[deltaStateModified], 1) - modified, ok := delta[deltaStateModified][2] - require.True(ok) - require.NotNil(modified) - require.EqualValues(2, modified.TypeIndex) - require.EqualValues(2, modified.CreatedAt) - require.EqualValues(maxBlockNumber, modified.UnlockedAt) - require.EqualValues(maxBlockNumber, modified.UnstakedAt) - require.EqualValues(identityset.Address(3), modified.Delegate) - require.EqualValues(identityset.Address(4), modified.Owner) - - // check removed bucket info - require.Len(delta[deltaStateRemoved], 1) - removed, ok := delta[deltaStateRemoved][3] - require.True(ok) - require.Nil(removed) - -} - -func TestContractStakingDelta_BucketTypeDelta(t *testing.T) { - require := require.New(t) - - // create a new delta cache - cache := newContractStakingDelta() - - // add bucket type - require.NoError(cache.AddBucketType(1, &BucketType{Amount: big.NewInt(100), Duration: 100, ActivatedAt: 1})) - require.NoError(cache.AddBucketType(2, &BucketType{Amount: big.NewInt(200), Duration: 100, ActivatedAt: 1})) - - // modify bucket type 1 & 3 - require.NoError(cache.UpdateBucketType(1, &BucketType{Amount: big.NewInt(100), Duration: 100, ActivatedAt: 3})) - require.NoError(cache.UpdateBucketType(3, &BucketType{Amount: big.NewInt(100), Duration: 100, ActivatedAt: 4})) - - delta := cache.BucketTypeDelta() - // check added bucket type - require.Len(delta[deltaStateAdded], 2) - added, ok := delta[deltaStateAdded][1] - require.True(ok) - require.NotNil(added) - require.EqualValues(100, added.Amount.Int64()) - require.EqualValues(100, added.Duration) - require.EqualValues(3, added.ActivatedAt) - // check modified bucket type - modified, ok := delta[deltaStateModified][3] - require.True(ok) - require.NotNil(modified) - require.EqualValues(100, modified.Amount.Int64()) - require.EqualValues(100, modified.Duration) - require.EqualValues(4, modified.ActivatedAt) - -} - -func TestContractStakingDelta_MatchBucketType(t *testing.T) { - require := require.New(t) - - // create a new delta cache - cache := newContractStakingDelta() - - // test with empty bucket type - index, bucketType, ok := cache.MatchBucketType(big.NewInt(100), 100) - require.False(ok) - require.EqualValues(0, index) - require.Nil(bucketType) - - // add bucket types - require.NoError(cache.AddBucketType(1, &BucketType{Amount: big.NewInt(100), Duration: 100, ActivatedAt: 1})) - require.NoError(cache.AddBucketType(2, &BucketType{Amount: big.NewInt(200), Duration: 100, ActivatedAt: 1})) - - // test with amount and duration that match bucket type 1 - amount := big.NewInt(100) - duration := uint64(100) - index, bucketType, ok = cache.MatchBucketType(amount, duration) - require.True(ok) - require.EqualValues(1, index) - require.NotNil(bucketType) - require.EqualValues(big.NewInt(100), bucketType.Amount) - require.EqualValues(uint64(100), bucketType.Duration) - require.EqualValues(uint64(1), bucketType.ActivatedAt) - - // test with amount and duration that match bucket type 2 - amount = big.NewInt(200) - duration = uint64(100) - index, bucketType, ok = cache.MatchBucketType(amount, duration) - require.True(ok) - require.EqualValues(2, index) - require.NotNil(bucketType) - require.EqualValues(big.NewInt(200), bucketType.Amount) - require.EqualValues(uint64(100), bucketType.Duration) - require.EqualValues(uint64(1), bucketType.ActivatedAt) - - // test with amount and duration that do not match any bucket type - amount = big.NewInt(300) - duration = uint64(100) - index, bucketType, ok = cache.MatchBucketType(amount, duration) - require.False(ok) - require.EqualValues(0, index) - require.Nil(bucketType) -} - -func TestContractStakingDelta_GetBucketInfo(t *testing.T) { - require := require.New(t) - - // create a new delta cache - cache := newContractStakingDelta() - - // add bucket info - bi := &bucketInfo{TypeIndex: 1, CreatedAt: 1, UnlockedAt: maxBlockNumber, UnstakedAt: maxBlockNumber, Delegate: identityset.Address(1), Owner: identityset.Address(2)} - require.NoError(cache.AddBucketInfo(1, bi)) - - // get added bucket info - info, state := cache.GetBucketInfo(1) - require.NotNil(info) - require.EqualValues(1, info.TypeIndex) - require.EqualValues(1, info.CreatedAt) - require.EqualValues(maxBlockNumber, info.UnlockedAt) - require.EqualValues(maxBlockNumber, info.UnstakedAt) - require.EqualValues(identityset.Address(1), info.Delegate) - require.EqualValues(identityset.Address(2), info.Owner) - require.EqualValues(deltaStateAdded, state) - - // modify bucket info 2 - bi = &bucketInfo{TypeIndex: 2, CreatedAt: 2, UnlockedAt: maxBlockNumber, UnstakedAt: maxBlockNumber, Delegate: identityset.Address(3), Owner: identityset.Address(4)} - require.NoError(cache.UpdateBucketInfo(2, bi)) - // get modified bucket info - info, state = cache.GetBucketInfo(2) - require.NotNil(info) - require.EqualValues(2, info.TypeIndex) - require.EqualValues(2, info.CreatedAt) - require.EqualValues(maxBlockNumber, info.UnlockedAt) - require.EqualValues(maxBlockNumber, info.UnstakedAt) - require.EqualValues(identityset.Address(3), info.Delegate) - require.EqualValues(identityset.Address(4), info.Owner) - require.EqualValues(deltaStateModified, state) - - // remove bucket info 2 - require.NoError(cache.DeleteBucketInfo(2)) - // get removed bucket info - info, state = cache.GetBucketInfo(2) - require.Nil(info) - require.EqualValues(deltaStateRemoved, state) -} - -func TestContractStakingDelta_GetBucketType(t *testing.T) { - require := require.New(t) - - // create a new delta cache - cache := newContractStakingDelta() - - // add bucket type - bt := &BucketType{Amount: big.NewInt(100), Duration: 100, ActivatedAt: 1} - require.NoError(cache.AddBucketType(1, bt)) - - // get added bucket type - bucketType, state := cache.GetBucketType(1) - require.NotNil(bucketType) - require.EqualValues(big.NewInt(100), bucketType.Amount) - require.EqualValues(100, bucketType.Duration) - require.EqualValues(1, bucketType.ActivatedAt) - require.EqualValues(deltaStateAdded, state) - - // modify bucket type - bt = &BucketType{Amount: big.NewInt(200), Duration: 200, ActivatedAt: 2} - require.NoError(cache.UpdateBucketType(2, bt)) - // get modified bucket type - bucketType, state = cache.GetBucketType(2) - require.NotNil(bucketType) - require.EqualValues(big.NewInt(200), bucketType.Amount) - require.EqualValues(200, bucketType.Duration) - require.EqualValues(2, bucketType.ActivatedAt) - require.EqualValues(deltaStateModified, state) - -} - -func TestContractStakingDelta_AddedBucketCnt(t *testing.T) { - require := require.New(t) - - // create a new delta cache - cache := newContractStakingDelta() - - // test with no added bucket info - addedBucketCnt := cache.AddedBucketCnt() - require.EqualValues(0, addedBucketCnt) - - // add bucket types - require.NoError(cache.AddBucketType(1, &BucketType{Amount: big.NewInt(100), Duration: 100, ActivatedAt: 1})) - require.NoError(cache.AddBucketType(2, &BucketType{Amount: big.NewInt(200), Duration: 100, ActivatedAt: 1})) - - // add bucket info - bi := &bucketInfo{TypeIndex: 1, CreatedAt: 1, UnlockedAt: maxBlockNumber, UnstakedAt: maxBlockNumber, Delegate: identityset.Address(1), Owner: identityset.Address(2)} - require.NoError(cache.AddBucketInfo(1, bi)) - // add bucket info - bi = &bucketInfo{TypeIndex: 2, CreatedAt: 1, UnlockedAt: maxBlockNumber, UnstakedAt: maxBlockNumber, Delegate: identityset.Address(1), Owner: identityset.Address(2)} - require.NoError(cache.AddBucketInfo(2, bi)) - - // test with added bucket info - addedBucketCnt = cache.AddedBucketCnt() - require.EqualValues(2, addedBucketCnt) - - // remove bucket info - require.NoError(cache.DeleteBucketInfo(3)) - - // test with removed bucket info - addedBucketCnt = cache.AddedBucketCnt() - require.EqualValues(2, addedBucketCnt) -} - -func TestContractStakingDelta_AddedBucketTypeCnt(t *testing.T) { - require := require.New(t) - - // create a new delta cache - cache := newContractStakingDelta() - - // test with no added bucket types - addedBucketTypeCnt := cache.AddedBucketTypeCnt() - require.EqualValues(0, addedBucketTypeCnt) - - // add bucket types - require.NoError(cache.AddBucketType(1, &BucketType{Amount: big.NewInt(100), Duration: 100, ActivatedAt: 1})) - require.NoError(cache.AddBucketType(2, &BucketType{Amount: big.NewInt(200), Duration: 100, ActivatedAt: 1})) - require.NoError(cache.AddBucketType(3, &BucketType{Amount: big.NewInt(300), Duration: 100, ActivatedAt: 1})) - - // test with added bucket type - addedBucketTypeCnt = cache.AddedBucketTypeCnt() - require.EqualValues(3, addedBucketTypeCnt) -} diff --git a/blockindex/contractstaking/delta_state.go b/blockindex/contractstaking/delta_state.go deleted file mode 100644 index 9e2357048b..0000000000 --- a/blockindex/contractstaking/delta_state.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (c) 2023 IoTeX Foundation -// This source code is provided 'as is' and no warranties are given as to title or non-infringement, merchantability -// or fitness for purpose and, to the extent permitted by law, all liability for your use of the code is disclaimed. -// This source code is governed by Apache License 2.0 that can be found in the LICENSE file. - -package contractstaking - -import "github.com/pkg/errors" - -const ( - // deltaState constants - // deltaStateUnchanged is the zero-value of the type deltaState - deltaStateUnchanged deltaState = iota - deltaStateAdded - deltaStateRemoved - deltaStateModified -) - -type deltaState int - -var ( - deltaStateTransferMap = map[deltaState]map[deltaAction]deltaState{ - deltaStateUnchanged: { - deltaActionAdd: deltaStateAdded, - deltaActionRemove: deltaStateRemoved, - deltaActionModify: deltaStateModified, - }, - deltaStateAdded: { - deltaActionModify: deltaStateAdded, - deltaActionRemove: deltaStateUnchanged, - }, - deltaStateModified: { - deltaActionModify: deltaStateModified, - deltaActionRemove: deltaStateRemoved, - }, - } -) - -func (s deltaState) Transfer(act deltaAction) (deltaState, error) { - if _, ok := deltaStateTransferMap[s]; !ok { - return s, errors.Errorf("invalid delta state %d", s) - } - if _, ok := deltaStateTransferMap[s][act]; !ok { - return s, errors.Errorf("invalid delta action %d on state %d", act, s) - } - return deltaStateTransferMap[s][act], nil -} diff --git a/blockindex/contractstaking/delta_state_test.go b/blockindex/contractstaking/delta_state_test.go deleted file mode 100644 index 55274129ec..0000000000 --- a/blockindex/contractstaking/delta_state_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package contractstaking - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestDeltaState_Transfer(t *testing.T) { - require := require.New(t) - - cases := []struct { - name string - state deltaState - action deltaAction - expected deltaState - err string - }{ - {"unchanged->add", deltaStateUnchanged, deltaActionAdd, deltaStateAdded, ""}, - {"unchanged->remove", deltaStateUnchanged, deltaActionRemove, deltaStateRemoved, ""}, - {"unchanged->modify", deltaStateUnchanged, deltaActionModify, deltaStateModified, ""}, - {"added->add", deltaStateAdded, deltaActionAdd, deltaStateUnchanged, "invalid delta action 0 on state 1"}, - {"added->remove", deltaStateAdded, deltaActionRemove, deltaStateUnchanged, ""}, - {"added->modify", deltaStateAdded, deltaActionModify, deltaStateAdded, ""}, - {"removed->add", deltaStateRemoved, deltaActionAdd, deltaStateUnchanged, "invalid delta state 2"}, - {"removed->remove", deltaStateRemoved, deltaActionRemove, deltaStateUnchanged, "invalid delta state 2"}, - {"removed->modify", deltaStateRemoved, deltaActionModify, deltaStateUnchanged, "invalid delta state 2"}, - {"modified->add", deltaStateModified, deltaActionAdd, deltaStateUnchanged, "invalid delta action 0 on state 3"}, - {"modified->remove", deltaStateModified, deltaActionRemove, deltaStateRemoved, ""}, - {"modified->modify", deltaStateModified, deltaActionModify, deltaStateModified, ""}, - {"invalid state", deltaState(100), deltaActionAdd, deltaState(100), "invalid delta state 100"}, - {"invalid action", deltaStateUnchanged, deltaAction(100), deltaStateUnchanged, "invalid delta action 100 on state 0"}, - } - - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - s, err := c.state.Transfer(c.action) - if len(c.err) > 0 { - require.Error(err) - require.Contains(err.Error(), c.err) - } else { - require.NoError(err) - require.Equal(c.expected, s) - } - }) - } -} - -func TestDeltaState_ZeroValue(t *testing.T) { - require := require.New(t) - - var state deltaState - require.Equal(deltaStateUnchanged, state) -} diff --git a/blockindex/contractstaking/dirty_cache.go b/blockindex/contractstaking/dirty_cache.go index ec7b3d44a8..eb4a35ab15 100644 --- a/blockindex/contractstaking/dirty_cache.go +++ b/blockindex/contractstaking/dirty_cache.go @@ -29,9 +29,8 @@ type ( // 2. get up-to-date bucket // 3. store delta to merge to clean cache contractStakingDirty struct { - clean *contractStakingCache // clean cache to get buckets of last block - delta *contractStakingDelta // delta for cache to store buckets of current block - batch batch.KVStoreBatch // batch for db to store buckets of current block + cache stakingCache // clean cache to get buckets of last block + batch batch.KVStoreBatch // batch for db to store buckets of current block once sync.Once } ) @@ -43,94 +42,74 @@ var ( errBucketTypeNotExist = errors.New("bucket type does not exist") ) -func newContractStakingDirty(clean *contractStakingCache) *contractStakingDirty { +func newContractStakingDirty(clean stakingCache) *contractStakingDirty { return &contractStakingDirty{ - clean: clean, - delta: newContractStakingDelta(), + cache: clean, batch: batch.NewBatch(), } } -func (dirty *contractStakingDirty) addBucketInfo(id uint64, bi *bucketInfo) error { +func (dirty *contractStakingDirty) addBucketInfo(id uint64, bi *bucketInfo) { dirty.batch.Put(_StakingBucketInfoNS, byteutil.Uint64ToBytesBigEndian(id), bi.Serialize(), "failed to put bucket info") - return dirty.delta.AddBucketInfo(id, bi) + dirty.cache.PutBucketInfo(id, bi) } -func (dirty *contractStakingDirty) updateBucketInfo(id uint64, bi *bucketInfo) error { +func (dirty *contractStakingDirty) updateBucketInfo(id uint64, bi *bucketInfo) { dirty.batch.Put(_StakingBucketInfoNS, byteutil.Uint64ToBytesBigEndian(id), bi.Serialize(), "failed to put bucket info") - return dirty.delta.UpdateBucketInfo(id, bi) + dirty.cache.PutBucketInfo(id, bi) } -func (dirty *contractStakingDirty) deleteBucketInfo(id uint64) error { +func (dirty *contractStakingDirty) deleteBucketInfo(id uint64) { dirty.batch.Delete(_StakingBucketInfoNS, byteutil.Uint64ToBytesBigEndian(id), "failed to delete bucket info") - return dirty.delta.DeleteBucketInfo(id) + dirty.cache.DeleteBucketInfo(id) } -func (dirty *contractStakingDirty) putBucketType(bt *BucketType) error { +func (dirty *contractStakingDirty) putBucketType(bt *BucketType) { id, _, ok := dirty.matchBucketType(bt.Amount, bt.Duration) if !ok { id = dirty.getBucketTypeCount() - if err := dirty.addBucketType(id, bt); err != nil { - return err - } + dirty.addBucketType(id, bt) } - return dirty.updateBucketType(id, bt) + dirty.updateBucketType(id, bt) } func (dirty *contractStakingDirty) getBucketType(id uint64) (*BucketType, bool) { - bt, state := dirty.delta.GetBucketType(id) - switch state { - case deltaStateAdded, deltaStateModified: - return bt, true - default: - return dirty.clean.BucketType(id) - } + return dirty.cache.BucketType(id) } func (dirty *contractStakingDirty) getBucketInfo(id uint64) (*bucketInfo, bool) { - bi, state := dirty.delta.GetBucketInfo(id) - switch state { - case deltaStateAdded, deltaStateModified: - return bi, true - case deltaStateRemoved: - return nil, false - default: - return dirty.clean.BucketInfo(id) - } + return dirty.cache.BucketInfo(id) } -func (dirty *contractStakingDirty) finalize() (batch.KVStoreBatch, *contractStakingDelta) { - return dirty.finalizeBatch(), dirty.delta +func (dirty *contractStakingDirty) finalize() (batch.KVStoreBatch, stakingCache) { + b := dirty.finalizeBatch() + dirty.cache.Commit() + + return b, dirty.cache } func (dirty *contractStakingDirty) finalizeBatch() batch.KVStoreBatch { dirty.once.Do(func() { - tbc, _ := dirty.clean.TotalBucketCount(0) - total := tbc + dirty.delta.AddedBucketCnt() + total := dirty.cache.TotalBucketCount() dirty.batch.Put(_StakingNS, _stakingTotalBucketCountKey, byteutil.Uint64ToBytesBigEndian(total), "failed to put total bucket count") }) return dirty.batch } -func (dirty *contractStakingDirty) addBucketType(id uint64, bt *BucketType) error { +func (dirty *contractStakingDirty) addBucketType(id uint64, bt *BucketType) { dirty.batch.Put(_StakingBucketTypeNS, byteutil.Uint64ToBytesBigEndian(id), bt.Serialize(), "failed to put bucket type") - return dirty.delta.AddBucketType(id, bt) + dirty.cache.PutBucketType(id, bt) } func (dirty *contractStakingDirty) matchBucketType(amount *big.Int, duration uint64) (uint64, *BucketType, bool) { - id, bt, ok := dirty.delta.MatchBucketType(amount, duration) - if ok { - return id, bt, true - } - return dirty.clean.MatchBucketType(amount, duration) + return dirty.cache.MatchBucketType(amount, duration) } func (dirty *contractStakingDirty) getBucketTypeCount() uint64 { - btc, _ := dirty.clean.BucketTypeCount(0) - return uint64(btc) + dirty.delta.AddedBucketTypeCnt() + return uint64(dirty.cache.BucketTypeCount()) } -func (dirty *contractStakingDirty) updateBucketType(id uint64, bt *BucketType) error { +func (dirty *contractStakingDirty) updateBucketType(id uint64, bt *BucketType) { dirty.batch.Put(_StakingBucketTypeNS, byteutil.Uint64ToBytesBigEndian(id), bt.Serialize(), "failed to put bucket type") - return dirty.delta.UpdateBucketType(id, bt) + dirty.cache.PutBucketType(id, bt) } diff --git a/blockindex/contractstaking/dirty_cache_test.go b/blockindex/contractstaking/dirty_cache_test.go index 4d5f401a2b..4b29d6d1b2 100644 --- a/blockindex/contractstaking/dirty_cache_test.go +++ b/blockindex/contractstaking/dirty_cache_test.go @@ -62,8 +62,8 @@ func TestContractStakingDirty_getBucketInfo(t *testing.T) { require.Equal(identityset.Address(2), bi.Owner) // added bucket info - require.NoError(dirty.addBucketType(2, &BucketType{Amount: big.NewInt(200), Duration: 200, ActivatedAt: 2})) - require.NoError(dirty.addBucketInfo(2, &bucketInfo{TypeIndex: 2, CreatedAt: 2, UnlockedAt: maxBlockNumber, UnstakedAt: maxBlockNumber, Delegate: identityset.Address(2), Owner: identityset.Address(3)})) + dirty.addBucketType(2, &BucketType{Amount: big.NewInt(200), Duration: 200, ActivatedAt: 2}) + dirty.addBucketInfo(2, &bucketInfo{TypeIndex: 2, CreatedAt: 2, UnlockedAt: maxBlockNumber, UnstakedAt: maxBlockNumber, Delegate: identityset.Address(2), Owner: identityset.Address(3)}) bi, ok = dirty.getBucketInfo(2) require.True(ok) require.EqualValues(2, bi.TypeIndex) @@ -74,7 +74,7 @@ func TestContractStakingDirty_getBucketInfo(t *testing.T) { require.Equal(identityset.Address(3), bi.Owner) // modified bucket info - require.NoError(dirty.updateBucketInfo(1, &bucketInfo{TypeIndex: 2, CreatedAt: 3, UnlockedAt: maxBlockNumber, UnstakedAt: maxBlockNumber, Delegate: identityset.Address(3), Owner: identityset.Address(4)})) + dirty.updateBucketInfo(1, &bucketInfo{TypeIndex: 2, CreatedAt: 3, UnlockedAt: maxBlockNumber, UnstakedAt: maxBlockNumber, Delegate: identityset.Address(3), Owner: identityset.Address(4)}) bi, ok = dirty.getBucketInfo(1) require.True(ok) require.EqualValues(2, bi.TypeIndex) @@ -85,7 +85,7 @@ func TestContractStakingDirty_getBucketInfo(t *testing.T) { require.Equal(identityset.Address(4), bi.Owner) // removed bucket info - require.NoError(dirty.deleteBucketInfo(1)) + dirty.deleteBucketInfo(1) bi, ok = dirty.getBucketInfo(1) require.False(ok) require.Nil(bi) @@ -112,7 +112,7 @@ func TestContractStakingDirty_matchBucketType(t *testing.T) { require.EqualValues(1, id) // added bucket type - require.NoError(dirty.addBucketType(2, &BucketType{Amount: big.NewInt(200), Duration: 200, ActivatedAt: 2})) + dirty.addBucketType(2, &BucketType{Amount: big.NewInt(200), Duration: 200, ActivatedAt: 2}) id, bt, ok = dirty.matchBucketType(big.NewInt(200), 200) require.True(ok) require.EqualValues(200, bt.Amount.Int64()) @@ -136,7 +136,7 @@ func TestContractStakingDirty_getBucketTypeCount(t *testing.T) { require.EqualValues(1, count) // added bucket type - require.NoError(dirty.addBucketType(2, &BucketType{Amount: big.NewInt(200), Duration: 200, ActivatedAt: 2})) + dirty.addBucketType(2, &BucketType{Amount: big.NewInt(200), Duration: 200, ActivatedAt: 2}) count = dirty.getBucketTypeCount() require.EqualValues(2, count) } @@ -144,10 +144,12 @@ func TestContractStakingDirty_getBucketTypeCount(t *testing.T) { func TestContractStakingDirty_finalize(t *testing.T) { require := require.New(t) clean := newContractStakingCache(Config{CalculateVoteWeight: calculateVoteWeightGen(genesis.TestDefault().VoteWeightCalConsts)}) - dirty := newContractStakingDirty(clean) + dirty := newContractStakingDirty(newWrappedCache(clean)) + totalCnt := clean.TotalBucketCount() + require.EqualValues(0, totalCnt) // no dirty data - batcher, delta := dirty.finalize() + batcher, cache := dirty.finalize() require.EqualValues(1, batcher.Size()) info, err := batcher.Entry(0) require.NoError(err) @@ -155,17 +157,12 @@ func TestContractStakingDirty_finalize(t *testing.T) { require.EqualValues(batch.Put, info.WriteType()) require.EqualValues(_stakingTotalBucketCountKey, info.Key()) require.EqualValues(byteutil.Uint64ToBytesBigEndian(0), info.Value()) - for _, d := range delta.BucketTypeDelta() { - require.Len(d, 0) - } - for _, d := range delta.BucketTypeDelta() { - require.Len(d, 0) - } + require.Equal(0, cache.BucketTypeCount()) // added bucket type bt := &BucketType{Amount: big.NewInt(100), Duration: 100, ActivatedAt: 1} - require.NoError(dirty.addBucketType(1, bt)) - batcher, delta = dirty.finalize() + dirty.addBucketType(1, bt) + batcher, cache = dirty.finalize() require.EqualValues(2, batcher.Size()) info, err = batcher.Entry(1) require.NoError(err) @@ -173,17 +170,12 @@ func TestContractStakingDirty_finalize(t *testing.T) { require.EqualValues(batch.Put, info.WriteType()) require.EqualValues(byteutil.Uint64ToBytesBigEndian(1), info.Key()) require.EqualValues(bt.Serialize(), info.Value()) - btDelta := delta.BucketTypeDelta() - require.NotNil(btDelta[deltaStateAdded]) - require.Len(btDelta[deltaStateAdded], 1) - require.EqualValues(100, btDelta[deltaStateAdded][1].Amount.Int64()) - require.EqualValues(100, btDelta[deltaStateAdded][1].Duration) - require.EqualValues(1, btDelta[deltaStateAdded][1].ActivatedAt) + require.Equal(1, cache.BucketTypeCount()) // add bucket info bi := &bucketInfo{TypeIndex: 1, CreatedAt: 2, UnlockedAt: 3, UnstakedAt: 4, Delegate: identityset.Address(1), Owner: identityset.Address(2)} - require.NoError(dirty.addBucketInfo(1, bi)) - batcher, delta = dirty.finalize() + dirty.addBucketInfo(1, bi) + batcher, cache = dirty.finalize() require.EqualValues(3, batcher.Size()) info, err = batcher.Entry(2) require.NoError(err) @@ -191,46 +183,38 @@ func TestContractStakingDirty_finalize(t *testing.T) { require.EqualValues(batch.Put, info.WriteType()) require.EqualValues(byteutil.Uint64ToBytesBigEndian(1), info.Key()) require.EqualValues(bi.Serialize(), info.Value()) - biDelta := delta.BucketInfoDelta() - require.NotNil(biDelta[deltaStateAdded]) - require.Len(biDelta[deltaStateAdded], 1) - require.EqualValues(1, biDelta[deltaStateAdded][1].TypeIndex) - require.EqualValues(2, biDelta[deltaStateAdded][1].CreatedAt) - require.EqualValues(3, biDelta[deltaStateAdded][1].UnlockedAt) - require.EqualValues(4, biDelta[deltaStateAdded][1].UnstakedAt) - require.EqualValues(identityset.Address(1).String(), biDelta[deltaStateAdded][1].Delegate.String()) - require.EqualValues(identityset.Address(2).String(), biDelta[deltaStateAdded][1].Owner.String()) - + totalCnt = cache.TotalBucketCount() + require.EqualValues(2, totalCnt) } func TestContractStakingDirty_noSideEffectOnClean(t *testing.T) { require := require.New(t) clean := newContractStakingCache(Config{CalculateVoteWeight: calculateVoteWeightGen(genesis.TestDefault().VoteWeightCalConsts)}) - dirty := newContractStakingDirty(clean) + dirty := newContractStakingDirty(newWrappedCache(clean)) // add bucket type to dirty cache - require.NoError(dirty.addBucketType(1, &BucketType{Amount: big.NewInt(100), Duration: 100, ActivatedAt: 1})) + dirty.addBucketType(1, &BucketType{Amount: big.NewInt(100), Duration: 100, ActivatedAt: 1}) // check that clean cache is not affected bt, ok := clean.getBucketType(1) require.False(ok) require.Nil(bt) // add bucket info to dirty cache - require.NoError(dirty.addBucketInfo(1, &bucketInfo{TypeIndex: 1, CreatedAt: 1, UnlockedAt: maxBlockNumber, UnstakedAt: maxBlockNumber, Delegate: identityset.Address(1), Owner: identityset.Address(2)})) + dirty.addBucketInfo(1, &bucketInfo{TypeIndex: 1, CreatedAt: 1, UnlockedAt: maxBlockNumber, UnstakedAt: maxBlockNumber, Delegate: identityset.Address(1), Owner: identityset.Address(2)}) // check that clean cache is not affected bi, ok := clean.getBucketInfo(1) require.False(ok) require.Nil(bi) // update bucket type in dirty cache - require.NoError(dirty.updateBucketType(1, &BucketType{Amount: big.NewInt(200), Duration: 200, ActivatedAt: 2})) + dirty.updateBucketType(1, &BucketType{Amount: big.NewInt(200), Duration: 200, ActivatedAt: 2}) // check that clean cache is not affected bt, ok = clean.getBucketType(1) require.False(ok) require.Nil(bt) // update bucket info in dirty cache - require.NoError(dirty.updateBucketInfo(1, &bucketInfo{TypeIndex: 2, CreatedAt: 3, UnlockedAt: maxBlockNumber, UnstakedAt: maxBlockNumber, Delegate: identityset.Address(3), Owner: identityset.Address(4)})) + dirty.updateBucketInfo(1, &bucketInfo{TypeIndex: 2, CreatedAt: 3, UnlockedAt: maxBlockNumber, UnstakedAt: maxBlockNumber, Delegate: identityset.Address(3), Owner: identityset.Address(4)}) // check that clean cache is not affected bi, ok = clean.getBucketInfo(1) require.False(ok) @@ -239,7 +223,7 @@ func TestContractStakingDirty_noSideEffectOnClean(t *testing.T) { // update bucket info existed in clean cache clean.PutBucketInfo(2, &bucketInfo{TypeIndex: 1, CreatedAt: 1, UnlockedAt: maxBlockNumber, UnstakedAt: maxBlockNumber, Delegate: identityset.Address(1), Owner: identityset.Address(2)}) // update bucket info in dirty cache - require.NoError(dirty.updateBucketInfo(2, &bucketInfo{TypeIndex: 1, CreatedAt: 3, UnlockedAt: maxBlockNumber, UnstakedAt: maxBlockNumber, Delegate: identityset.Address(3), Owner: identityset.Address(4)})) + dirty.updateBucketInfo(2, &bucketInfo{TypeIndex: 1, CreatedAt: 3, UnlockedAt: maxBlockNumber, UnstakedAt: maxBlockNumber, Delegate: identityset.Address(3), Owner: identityset.Address(4)}) // check that clean cache is not affected bi, ok = clean.getBucketInfo(2) require.True(ok) @@ -253,7 +237,7 @@ func TestContractStakingDirty_noSideEffectOnClean(t *testing.T) { // remove bucket info existed in clean cache clean.PutBucketInfo(3, &bucketInfo{TypeIndex: 1, CreatedAt: 1, UnlockedAt: maxBlockNumber, UnstakedAt: maxBlockNumber, Delegate: identityset.Address(1), Owner: identityset.Address(2)}) // remove bucket info from dirty cache - require.NoError(dirty.deleteBucketInfo(3)) + dirty.deleteBucketInfo(3) // check that clean cache is not affected bi, ok = clean.getBucketInfo(3) require.True(ok) diff --git a/blockindex/contractstaking/event_handler.go b/blockindex/contractstaking/event_handler.go index a34b758bd6..16cee07398 100644 --- a/blockindex/contractstaking/event_handler.go +++ b/blockindex/contractstaking/event_handler.go @@ -365,7 +365,7 @@ func init() { } } -func newContractStakingEventHandler(cache *contractStakingCache) *contractStakingEventHandler { +func newContractStakingEventHandler(cache stakingCache) *contractStakingEventHandler { dirty := newContractStakingDirty(cache) return &contractStakingEventHandler{ dirty: dirty, @@ -418,7 +418,7 @@ func (eh *contractStakingEventHandler) HandleEvent(ctx context.Context, height u } } -func (eh *contractStakingEventHandler) Result() (batch.KVStoreBatch, *contractStakingDelta) { +func (eh *contractStakingEventHandler) Result() (batch.KVStoreBatch, stakingCache) { return eh.dirty.finalize() } @@ -438,7 +438,7 @@ func (eh *contractStakingEventHandler) handleTransferEvent(event eventParam) err // update bucket owner if token exists if bi, ok := eh.dirty.getBucketInfo(tokenID); ok { bi.Owner = to - return eh.dirty.updateBucketInfo(tokenID, bi) + eh.dirty.updateBucketInfo(tokenID, bi) } return nil @@ -459,7 +459,8 @@ func (eh *contractStakingEventHandler) handleBucketTypeActivatedEvent(event even Duration: durationParam.Uint64(), ActivatedAt: height, } - return eh.dirty.putBucketType(&bt) + eh.dirty.putBucketType(&bt) + return nil } func (eh *contractStakingEventHandler) handleBucketTypeDeactivatedEvent(event eventParam, height uint64) error { @@ -477,7 +478,9 @@ func (eh *contractStakingEventHandler) handleBucketTypeDeactivatedEvent(event ev return errors.Wrapf(errBucketTypeNotExist, "amount %d, duration %d", amountParam.Int64(), durationParam.Uint64()) } bt.ActivatedAt = maxBlockNumber - return eh.dirty.updateBucketType(id, bt) + eh.dirty.updateBucketType(id, bt) + + return nil } func (eh *contractStakingEventHandler) handleStakedEvent(event eventParam, height uint64) error { @@ -514,7 +517,8 @@ func (eh *contractStakingEventHandler) handleStakedEvent(event eventParam, heigh UnlockedAt: maxBlockNumber, UnstakedAt: maxBlockNumber, } - return eh.dirty.addBucketInfo(tokenIDParam.Uint64(), &bucket) + eh.dirty.addBucketInfo(tokenIDParam.Uint64(), &bucket) + return nil } func (eh *contractStakingEventHandler) handleLockedEvent(event eventParam) error { @@ -541,7 +545,9 @@ func (eh *contractStakingEventHandler) handleLockedEvent(event eventParam) error } b.TypeIndex = newBtIdx b.UnlockedAt = maxBlockNumber - return eh.dirty.updateBucketInfo(tokenIDParam.Uint64(), b) + eh.dirty.updateBucketInfo(tokenIDParam.Uint64(), b) + + return nil } func (eh *contractStakingEventHandler) handleUnlockedEvent(event eventParam, height uint64) error { @@ -555,7 +561,9 @@ func (eh *contractStakingEventHandler) handleUnlockedEvent(event eventParam, hei return errors.Wrapf(ErrBucketNotExist, "token id %d", tokenIDParam.Uint64()) } b.UnlockedAt = height - return eh.dirty.updateBucketInfo(tokenIDParam.Uint64(), b) + eh.dirty.updateBucketInfo(tokenIDParam.Uint64(), b) + + return nil } func (eh *contractStakingEventHandler) handleUnstakedEvent(event eventParam, height uint64) error { @@ -569,7 +577,9 @@ func (eh *contractStakingEventHandler) handleUnstakedEvent(event eventParam, hei return errors.Wrapf(ErrBucketNotExist, "token id %d", tokenIDParam.Uint64()) } b.UnstakedAt = height - return eh.dirty.updateBucketInfo(tokenIDParam.Uint64(), b) + eh.dirty.updateBucketInfo(tokenIDParam.Uint64(), b) + + return nil } func (eh *contractStakingEventHandler) handleMergedEvent(event eventParam) error { @@ -598,11 +608,11 @@ func (eh *contractStakingEventHandler) handleMergedEvent(event eventParam) error b.TypeIndex = btIdx b.UnlockedAt = maxBlockNumber for i := 1; i < len(tokenIDsParam); i++ { - if err = eh.dirty.deleteBucketInfo(tokenIDsParam[i].Uint64()); err != nil { - return err - } + eh.dirty.deleteBucketInfo(tokenIDsParam[i].Uint64()) } - return eh.dirty.updateBucketInfo(tokenIDsParam[0].Uint64(), b) + eh.dirty.updateBucketInfo(tokenIDsParam[0].Uint64(), b) + + return nil } func (eh *contractStakingEventHandler) handleBucketExpandedEvent(event eventParam) error { @@ -628,7 +638,9 @@ func (eh *contractStakingEventHandler) handleBucketExpandedEvent(event eventPara return errors.Wrapf(errBucketTypeNotExist, "amount %d, duration %d", amountParam.Int64(), durationParam.Uint64()) } b.TypeIndex = newBtIdx - return eh.dirty.updateBucketInfo(tokenIDParam.Uint64(), b) + eh.dirty.updateBucketInfo(tokenIDParam.Uint64(), b) + + return nil } func (eh *contractStakingEventHandler) handleDelegateChangedEvent(event eventParam) error { @@ -646,7 +658,9 @@ func (eh *contractStakingEventHandler) handleDelegateChangedEvent(event eventPar return errors.Wrapf(ErrBucketNotExist, "token id %d", tokenIDParam.Uint64()) } b.Delegate = delegateParam - return eh.dirty.updateBucketInfo(tokenIDParam.Uint64(), b) + eh.dirty.updateBucketInfo(tokenIDParam.Uint64(), b) + + return nil } func (eh *contractStakingEventHandler) handleWithdrawalEvent(event eventParam) error { @@ -654,6 +668,7 @@ func (eh *contractStakingEventHandler) handleWithdrawalEvent(event eventParam) e if err != nil { return err } + eh.dirty.deleteBucketInfo(tokenIDParam.Uint64()) - return eh.dirty.deleteBucketInfo(tokenIDParam.Uint64()) + return nil } diff --git a/blockindex/contractstaking/indexer.go b/blockindex/contractstaking/indexer.go index 26600dc08f..382fd70282 100644 --- a/blockindex/contractstaking/indexer.go +++ b/blockindex/contractstaking/indexer.go @@ -8,6 +8,7 @@ package contractstaking import ( "context" "math/big" + "sync" "time" "github.com/ethereum/go-ethereum/common/math" @@ -15,6 +16,7 @@ import ( "github.com/iotexproject/iotex-proto/golang/iotextypes" "github.com/pkg/errors" + "github.com/iotexproject/iotex-core/v2/action/protocol" "github.com/iotexproject/iotex-core/v2/action/protocol/staking" "github.com/iotexproject/iotex-core/v2/blockchain/block" "github.com/iotexproject/iotex-core/v2/db" @@ -35,6 +37,8 @@ type ( kvstore db.KVStore // persistent storage, used to initialize index cache at startup cache *contractStakingCache // in-memory index for clean data, used to query index data config Config // indexer config + height uint64 + mu sync.RWMutex lifecycle.Readiness } @@ -88,7 +92,7 @@ func (s *Indexer) StartView(ctx context.Context) (staking.ContractStakeView, err return &stakeView{ helper: s, cache: s.cache.Clone(), - height: s.cache.Height(), + height: s.height, }, nil } @@ -96,6 +100,8 @@ func (s *Indexer) start(ctx context.Context) error { if err := s.kvstore.Start(ctx); err != nil { return err } + s.mu.Lock() + defer s.mu.Unlock() if err := s.loadFromDB(); err != nil { return err } @@ -115,7 +121,9 @@ func (s *Indexer) Stop(ctx context.Context) error { // Height returns the tip block height func (s *Indexer) Height() (uint64, error) { - return s.cache.Height(), nil + s.mu.RLock() + defer s.mu.RUnlock() + return s.height, nil } // StartHeight returns the start height of the indexer @@ -133,7 +141,43 @@ func (s *Indexer) CandidateVotes(ctx context.Context, candidate address.Address, if s.isIgnored(height) { return big.NewInt(0), nil } - return s.cache.CandidateVotes(ctx, candidate, height) + if err := s.validateHeight(height); err != nil { + return nil, err + } + fn := s.genBlockDurationFn() + s.mu.RLock() + ids, types, infos := s.cache.BucketsByCandidate(candidate) + s.mu.RUnlock() + if len(types) != len(infos) || len(types) != len(ids) { + return nil, errors.New("inconsistent bucket data") + } + if len(ids) == 0 { + return big.NewInt(0), nil + } + featureCtx := protocol.MustGetFeatureCtx(ctx) + votes := big.NewInt(0) + for i, id := range ids { + bi := infos[i] + if bi == nil || bi.UnstakedAt != maxBlockNumber { + continue + } + if featureCtx.FixContractStakingWeightedVotes { + votes.Add(votes, s.config.CalculateVoteWeight(assembleBucket(id, bi, types[i], s.config.ContractAddress, fn))) + } else { + votes.Add(votes, types[i].Amount) + } + } + + return votes, nil +} + +func (s *Indexer) genBlockDurationFn() func(start, end uint64) time.Duration { + s.mu.RLock() + height := s.height + s.mu.RUnlock() + return func(start, end uint64) time.Duration { + return s.config.BlocksToDuration(start, end, height) + } } // Buckets returns the buckets @@ -141,7 +185,29 @@ func (s *Indexer) Buckets(height uint64) ([]*Bucket, error) { if s.isIgnored(height) { return []*Bucket{}, nil } - return s.cache.Buckets(height) + if err := s.validateHeight(height); err != nil { + return nil, err + } + fn := s.genBlockDurationFn() + s.mu.RLock() + ids, types, infos := s.cache.Buckets() + s.mu.RUnlock() + if len(types) != len(infos) || len(types) != len(ids) { + return nil, errors.New("inconsistent bucket data") + } + if len(ids) == 0 { + return []*Bucket{}, nil + } + + buckets := make([]*Bucket, 0, len(ids)) + for i, id := range ids { + bucket := assembleBucket(id, infos[i], types[i], s.config.ContractAddress, fn) + if bucket != nil { + buckets = append(buckets, bucket) + } + } + + return buckets, nil } // Bucket returns the bucket @@ -149,7 +215,18 @@ func (s *Indexer) Bucket(id uint64, height uint64) (*Bucket, bool, error) { if s.isIgnored(height) { return nil, false, nil } - return s.cache.Bucket(id, height) + if err := s.validateHeight(height); err != nil { + return nil, false, err + } + fn := s.genBlockDurationFn() + s.mu.RLock() + bt, bi := s.cache.Bucket(id) + s.mu.RUnlock() + if bt == nil || bi == nil { + return nil, false, nil + } + + return assembleBucket(id, bi, bt, s.config.ContractAddress, fn), true, nil } // BucketsByIndices returns the buckets by indices @@ -157,7 +234,28 @@ func (s *Indexer) BucketsByIndices(indices []uint64, height uint64) ([]*Bucket, if s.isIgnored(height) { return []*Bucket{}, nil } - return s.cache.BucketsByIndices(indices, height) + if err := s.validateHeight(height); err != nil { + return nil, err + } + fn := s.genBlockDurationFn() + s.mu.RLock() + ts, infos := s.cache.BucketsByIndices(indices) + s.mu.RUnlock() + if len(ts) != len(infos) || len(ts) != len(indices) { + return nil, errors.New("inconsistent bucket data") + } + buckets := make([]*Bucket, 0, len(ts)) + for i, id := range indices { + if ts[i] == nil || infos[i] == nil { + continue + } + bucket := assembleBucket(id, infos[i], ts[i], s.config.ContractAddress, fn) + if bucket != nil { + buckets = append(buckets, bucket) + } + } + + return buckets, nil } // BucketsByCandidate returns the buckets by candidate @@ -165,7 +263,21 @@ func (s *Indexer) BucketsByCandidate(candidate address.Address, height uint64) ( if s.isIgnored(height) { return []*Bucket{}, nil } - return s.cache.BucketsByCandidate(candidate, height) + if err := s.validateHeight(height); err != nil { + return nil, err + } + fn := s.genBlockDurationFn() + s.mu.RLock() + ids, types, infos := s.cache.BucketsByCandidate(candidate) + s.mu.RUnlock() + buckets := make([]*Bucket, 0, len(infos)) + for i, id := range ids { + info := infos[i] + bucket := assembleBucket(id, info, types[i], s.config.ContractAddress, fn) + buckets = append(buckets, bucket) + } + + return buckets, nil } // TotalBucketCount returns the total bucket count including active and burnt buckets @@ -173,7 +285,12 @@ func (s *Indexer) TotalBucketCount(height uint64) (uint64, error) { if s.isIgnored(height) { return 0, nil } - return s.cache.TotalBucketCount(height) + if err := s.validateHeight(height); err != nil { + return 0, err + } + s.mu.RLock() + defer s.mu.RUnlock() + return s.cache.TotalBucketCount(), nil } // BucketTypes returns the active bucket types @@ -181,10 +298,12 @@ func (s *Indexer) BucketTypes(height uint64) ([]*BucketType, error) { if s.isIgnored(height) { return []*BucketType{}, nil } - btMap, err := s.cache.ActiveBucketTypes(height) - if err != nil { + if err := s.validateHeight(height); err != nil { return nil, err } + s.mu.RLock() + btMap := s.cache.ActiveBucketTypes() + s.mu.RUnlock() bts := make([]*BucketType, 0, len(btMap)) for _, bt := range btMap { bts = append(bts, bt) @@ -194,7 +313,10 @@ func (s *Indexer) BucketTypes(height uint64) ([]*BucketType, error) { // PutBlock puts a block into indexer func (s *Indexer) PutBlock(ctx context.Context, blk *block.Block) error { - expectHeight := s.cache.Height() + 1 + s.mu.RLock() + expectHeight := s.height + 1 + cache := newWrappedCache(s.cache) + s.mu.RUnlock() if expectHeight < s.config.ContractDeployHeight { expectHeight = s.config.ContractDeployHeight } @@ -205,7 +327,7 @@ func (s *Indexer) PutBlock(ctx context.Context, blk *block.Block) error { return errors.Errorf("invalid block height %d, expect %d", blk.Height(), expectHeight) } // new event handler for this block - handler := newContractStakingEventHandler(s.cache) + handler := newContractStakingEventHandler(cache) // handle events of block for _, receipt := range blk.Receipts { @@ -222,32 +344,48 @@ func (s *Indexer) PutBlock(ctx context.Context, blk *block.Block) error { } } + s.mu.Lock() + defer s.mu.Unlock() // commit the result - return s.commit(handler, blk.Height()) + if err := s.commit(handler, blk.Height()); err != nil { + return errors.Wrapf(err, "failed to commit block %d", blk.Height()) + } + return nil } func (s *Indexer) commit(handler *contractStakingEventHandler, height uint64) error { batch, delta := handler.Result() - // update cache - if err := s.cache.Merge(delta, height); err != nil { - s.reloadCache() - return err + delta.Commit() + cache := delta.Base() + base, ok := cache.(*contractStakingCache) + if !ok { + return errors.New("invalid cache type of base") } // update db batch.Put(_StakingNS, _stakingHeightKey, byteutil.Uint64ToBytesBigEndian(height), "failed to put height") if err := s.kvstore.WriteBatch(batch); err != nil { - s.reloadCache() - return err + s.cache = newContractStakingCache(s.config) + return s.loadFromDB() } + s.height = height + s.cache = base return nil } -func (s *Indexer) reloadCache() error { - s.cache = newContractStakingCache(s.config) - return s.loadFromDB() -} - func (s *Indexer) loadFromDB() error { + // load height + var height uint64 + h, err := s.kvstore.Get(_StakingNS, _stakingHeightKey) + if err != nil { + if !errors.Is(err, db.ErrNotExist) { + return err + } + height = 0 + } else { + height = byteutil.BytesToUint64BigEndian(h) + + } + s.height = height return s.cache.LoadFromDB(s.kvstore) } @@ -257,3 +395,20 @@ func (s *Indexer) loadFromDB() error { func (s *Indexer) isIgnored(height uint64) bool { return height < s.config.ContractDeployHeight } + +func (s *Indexer) validateHeight(height uint64) error { + s.mu.RLock() + defer s.mu.RUnlock() + // means latest height + if height == 0 { + return nil + } + // Currently, historical block data query is not supported. + // However, the latest data is actually returned when querying historical block data, for the following reasons: + // 1. to maintain compatibility with the current code's invocation of ActiveCandidate + // 2. to cause consensus errors when the indexer is lagging behind + if height > s.height { + return errors.Wrapf(ErrInvalidHeight, "expected %d, actual %d", s.height, height) + } + return nil +} diff --git a/blockindex/contractstaking/indexer_test.go b/blockindex/contractstaking/indexer_test.go index 9b450fc7d5..f0b846b784 100644 --- a/blockindex/contractstaking/indexer_test.go +++ b/blockindex/contractstaking/indexer_test.go @@ -215,7 +215,8 @@ func TestContractStakingIndexerThreadSafe(t *testing.T) { r.NoError(err) _, err = indexer.BucketsByCandidate(delegate, 0) r.NoError(err) - indexer.CandidateVotes(ctx, delegate, 0) + _, err = indexer.CandidateVotes(ctx, delegate, 0) + r.NoError(err) _, err = indexer.Height() r.NoError(err) indexer.TotalBucketCount(0) @@ -226,15 +227,19 @@ func TestContractStakingIndexerThreadSafe(t *testing.T) { go func() { defer wait.Done() // activate bucket type + indexer.mu.Lock() handler := newContractStakingEventHandler(indexer.cache) activateBucketType(r, handler, 10, 100, 1) r.NoError(indexer.commit(handler, 1)) + indexer.mu.Unlock() for i := 2; i < 1000; i++ { height := uint64(i) + indexer.mu.Lock() handler := newContractStakingEventHandler(indexer.cache) stake(r, handler, owner, delegate, int64(i), 10, 100, height) err := indexer.commit(handler, height) r.NoError(err) + indexer.mu.Unlock() } }() wait.Wait() @@ -411,7 +416,6 @@ func TestContractStakingIndexerBucketInfo(t *testing.T) { bucket, ok, err = indexer.Bucket(bucket.Index, height) r.NoError(err) r.True(ok) - r.EqualValues(1, bucket.Index) r.EqualValues(newOwner, bucket.Owner) r.EqualValues(delegate, bucket.Candidate) r.EqualValues(10, bucket.StakedAmount.Int64()) @@ -436,7 +440,6 @@ func TestContractStakingIndexerBucketInfo(t *testing.T) { bucket, ok, err = indexer.Bucket(bucket.Index, height) r.NoError(err) r.True(ok) - r.EqualValues(1, bucket.Index) r.EqualValues(newOwner, bucket.Owner) r.EqualValues(delegate, bucket.Candidate) r.EqualValues(10, bucket.StakedAmount.Int64()) @@ -457,12 +460,12 @@ func TestContractStakingIndexerBucketInfo(t *testing.T) { height++ handler = newContractStakingEventHandler(indexer.cache) unlock(r, handler, int64(bucket.Index), height) + t.Log("unstake bucket", bucket.Index, "at height", height) unstake(r, handler, int64(bucket.Index), height) r.NoError(indexer.commit(handler, height)) bucket, ok, err = indexer.Bucket(bucket.Index, height) r.NoError(err) r.True(ok) - r.EqualValues(1, bucket.Index) r.EqualValues(newOwner, bucket.Owner) r.EqualValues(delegate, bucket.Candidate) r.EqualValues(10, bucket.StakedAmount.Int64()) @@ -596,7 +599,7 @@ func TestContractStakingIndexerReadBuckets(t *testing.T) { height++ handler = newContractStakingEventHandler(indexer.cache) for i, data := range stakeData { - stake(r, handler, identityset.Address(data.owner), identityset.Address(data.delegate), int64(i), int64(data.amount), int64(data.duration), height) + stake(r, handler, identityset.Address(data.owner), identityset.Address(data.delegate), int64(i+1), int64(data.amount), int64(data.duration), height) } r.NoError(err) r.NoError(indexer.commit(handler, height)) @@ -673,7 +676,7 @@ func TestContractStakingIndexerCacheClean(t *testing.T) { // init bucket type height := uint64(1) - handler := newContractStakingEventHandler(indexer.cache) + handler := newContractStakingEventHandler(newWrappedCache(indexer.cache)) activateBucketType(r, handler, 10, 10, height) activateBucketType(r, handler, 20, 20, height) // create bucket @@ -684,22 +687,22 @@ func TestContractStakingIndexerCacheClean(t *testing.T) { stake(r, handler, owner, delegate1, 2, 20, 20, height) stake(r, handler, owner, delegate2, 3, 20, 20, height) stake(r, handler, owner, delegate2, 4, 20, 20, height) - abt, err := indexer.cache.ActiveBucketTypes(height - 1) - r.NoError(err) + abt := indexer.cache.ActiveBucketTypes() r.Len(abt, 0) - bts, err := indexer.cache.Buckets(height - 1) - r.NoError(err) + ids, bts, bis := indexer.cache.Buckets() + r.Len(ids, 0) r.Len(bts, 0) + r.Len(bis, 0) r.NoError(indexer.commit(handler, height)) - abt, err = indexer.cache.ActiveBucketTypes(height) - r.NoError(err) + abt = indexer.cache.ActiveBucketTypes() r.Len(abt, 2) - bts, err = indexer.cache.Buckets(height) - r.NoError(err) + ids, bts, bis = indexer.cache.Buckets() + r.Len(ids, 4) r.Len(bts, 4) + r.Len(bis, 4) height++ - handler = newContractStakingEventHandler(indexer.cache) + handler = newContractStakingEventHandler(newWrappedCache(indexer.cache)) changeDelegate(r, handler, delegate1, 3) transfer(r, handler, delegate1, 1) bt, ok, err := indexer.Bucket(3, height-1) @@ -797,10 +800,10 @@ func TestContractStakingIndexerVotes(t *testing.T) { r.NoError(indexer.commit(handler, height)) votes, err = indexer.CandidateVotes(ctx, delegate1, height) r.NoError(err) - r.EqualValues(40, votes.Uint64()) + r.EqualValues(uint64(40), votes.Uint64()) votes, err = indexer.CandidateVotes(ctx, delegate2, height) r.NoError(err) - r.EqualValues(20, votes.Uint64()) + r.EqualValues(uint64(20), votes.Uint64()) // expand bucket 2 height++ @@ -1055,7 +1058,7 @@ func TestIndexer_ReadHeightRestriction(t *testing.T) { r.NoError(indexer.Stop(context.Background())) testutil.CleanupPath(dbPath) }() - indexer.cache.putHeight(height) + indexer.height = height // check read api ctx := protocol.WithFeatureCtx(protocol.WithBlockCtx(genesis.WithGenesisContext(context.Background(), genesis.TestDefault()), protocol.BlockCtx{BlockHeight: 1})) h := c.readHeight @@ -1140,7 +1143,7 @@ func TestIndexer_PutBlock(t *testing.T) { r.NoError(indexer.Stop(context.Background())) testutil.CleanupPath(dbPath) }() - indexer.cache.putHeight(height) + indexer.height = height // Create a mock block builder := block.NewBuilder(block.NewRunnableActionsBuilder().Build()) builder.SetHeight(c.blockHeight) @@ -1154,7 +1157,7 @@ func TestIndexer_PutBlock(t *testing.T) { r.NoError(err) } // Check the block height - r.EqualValues(c.expectedHeight, indexer.cache.Height()) + r.EqualValues(c.expectedHeight, indexer.height) }) } diff --git a/blockindex/contractstaking/stakeview.go b/blockindex/contractstaking/stakeview.go index de3130613b..ec663b332e 100644 --- a/blockindex/contractstaking/stakeview.go +++ b/blockindex/contractstaking/stakeview.go @@ -2,6 +2,7 @@ package contractstaking import ( "context" + "time" "github.com/iotexproject/iotex-address/address" "github.com/iotexproject/iotex-core/v2/action" @@ -12,20 +13,39 @@ import ( type stakeView struct { helper *Indexer - cache *contractStakingCache + cache stakingCache height uint64 } func (s *stakeView) Clone() staking.ContractStakeView { return &stakeView{ helper: s.helper, - cache: s.cache.Clone(), + cache: newWrappedCache(s.cache), height: s.height, } } func (s *stakeView) BucketsByCandidate(candidate address.Address) ([]*Bucket, error) { - return s.cache.bucketsByCandidate(candidate, s.height) + ids, types, infos := s.cache.BucketsByCandidate(candidate) + vbs := make([]*Bucket, 0, len(ids)) + for i, id := range ids { + bt := types[i] + info := infos[i] + if bt != nil && info != nil { + vbs = append(vbs, s.assembleBucket(id, info, bt)) + } + } + return vbs, nil +} + +func (s *stakeView) assembleBucket(token uint64, bi *bucketInfo, bt *BucketType) *Bucket { + return assembleBucket(token, bi, bt, s.helper.config.ContractAddress, s.genBlockDurationFn(s.height)) +} + +func (s *stakeView) genBlockDurationFn(view uint64) blocksDurationFn { + return func(start, end uint64) time.Duration { + return s.helper.config.BlocksToDuration(start, end, view) + } } func (s *stakeView) CreatePreStates(ctx context.Context) error { @@ -37,7 +57,7 @@ func (s *stakeView) CreatePreStates(ctx context.Context) error { func (s *stakeView) Handle(ctx context.Context, receipt *action.Receipt) error { blkCtx := protocol.MustGetBlockCtx(ctx) // new event handler for this receipt - handler := newContractStakingEventHandler(s.cache) + handler := newContractStakingEventHandler(newWrappedCache(s.cache)) // handle events of receipt if receipt.Status != uint64(iotextypes.ReceiptStatus_Success) { @@ -52,9 +72,7 @@ func (s *stakeView) Handle(ctx context.Context, receipt *action.Receipt) error { } } _, delta := handler.Result() - // update cache - if err := s.cache.Merge(delta, blkCtx.BlockHeight); err != nil { - return err - } + s.cache = delta + return nil } diff --git a/blockindex/contractstaking/util.go b/blockindex/contractstaking/util.go index cdd0185d94..0d7d9cd4b3 100644 --- a/blockindex/contractstaking/util.go +++ b/blockindex/contractstaking/util.go @@ -7,6 +7,7 @@ package contractstaking import ( "math/big" + "sort" "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/common" @@ -97,3 +98,26 @@ func unpackEventParam(abiEvent *abi.Event, log *action.Log) (eventParam, error) } return event, nil } + +func sortByIds(ids []uint64, bts []*BucketType, bis []*bucketInfo) ([]uint64, []*BucketType, []*bucketInfo) { + if len(ids) == 0 { + return ids, bts, bis + } + if len(bts) != len(ids) || len(bis) != len(ids) { + panic("length of ids, bts and bis should be the same") + } + sorted := make([]int, len(ids)) + for i := range sorted { + sorted[i] = i + } + sort.Slice(sorted, func(i, j int) bool { return ids[sorted[i]] < ids[sorted[j]] }) + sortedIds := make([]uint64, len(ids)) + sortedBts := make([]*BucketType, len(bts)) + sortedBis := make([]*bucketInfo, len(bis)) + for i, idx := range sorted { + sortedIds[i] = ids[idx] + sortedBts[i] = bts[idx] + sortedBis[i] = bis[idx] + } + return sortedIds, sortedBts, sortedBis +} diff --git a/blockindex/contractstaking/wrappedcache.go b/blockindex/contractstaking/wrappedcache.go new file mode 100644 index 0000000000..827c0be432 --- /dev/null +++ b/blockindex/contractstaking/wrappedcache.go @@ -0,0 +1,234 @@ +// Copyright (c) 2023 IoTeX Foundation +// This source code is provided 'as is' and no warranties are given as to title or non-infringement, merchantability +// or fitness for purpose and, to the extent permitted by law, all liability for your use of the code is disclaimed. +// This source code is governed by Apache License 2.0 that can be found in the LICENSE file. + +package contractstaking + +import ( + "math/big" + "sort" + "sync" + + "github.com/iotexproject/iotex-address/address" +) + +type ( + wrappedCache struct { + totalBucketCount uint64 + updatedBucketInfos map[uint64]*bucketInfo + updatedBucketTypes map[uint64]*BucketType + updatedCandidates map[string]map[uint64]bool + propertyBucketTypeMap map[uint64]map[uint64]uint64 + + mu sync.RWMutex + cache stakingCache + } +) + +func newWrappedCache(cache stakingCache) *wrappedCache { + return &wrappedCache{ + updatedBucketInfos: make(map[uint64]*bucketInfo), + updatedBucketTypes: make(map[uint64]*BucketType), + updatedCandidates: make(map[string]map[uint64]bool), + propertyBucketTypeMap: make(map[uint64]map[uint64]uint64), + mu: sync.RWMutex{}, + cache: cache, + } +} + +func (wc *wrappedCache) BucketInfo(id uint64) (*bucketInfo, bool) { + info, ok := wc.updatedBucketInfos[id] + if !ok { + return wc.cache.BucketInfo(id) + } + if info == nil { + return nil, false + } + return info.Clone(), true +} + +func (wc *wrappedCache) MustGetBucketInfo(id uint64) *bucketInfo { + info, ok := wc.updatedBucketInfos[id] + if !ok { + return wc.cache.MustGetBucketInfo(id) + } + if info == nil { + panic("must get bucket info from wrapped cache") + } + + return info +} + +func (wc *wrappedCache) MustGetBucketType(id uint64) *BucketType { + return wc.mustGetBucketType(id) +} + +func (wc *wrappedCache) mustGetBucketType(id uint64) *BucketType { + bt, ok := wc.updatedBucketTypes[id] + if !ok { + return wc.cache.MustGetBucketType(id) + } + if bt == nil { + panic("must get bucket type from wrapped cache") + } + + return bt +} + +func (wc *wrappedCache) BucketType(id uint64) (*BucketType, bool) { + bt, ok := wc.updatedBucketTypes[id] + if !ok { + return wc.cache.BucketType(id) + } + return bt, ok +} + +func (wc *wrappedCache) BucketsByCandidate(candidate address.Address) ([]uint64, []*BucketType, []*bucketInfo) { + ids, types, infos := wc.cache.BucketsByCandidate(candidate) + bucketMap := make(map[uint64]*bucketInfo, len(ids)) + for i, id := range ids { + bucketMap[id] = infos[i] + } + for id := range wc.updatedCandidates[candidate.String()] { + info, ok := wc.updatedBucketInfos[id] + if !ok { + // TODO: should not be false, double check + panic("bucket should exist in updated bucket info") + } + if info == nil || info.Delegate.String() != candidate.String() { + delete(bucketMap, id) + } else { + bucketMap[id] = info.Clone() + } + } + sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] }) + for _, id := range ids { + info, ok := wc.updatedBucketInfos[id] + if !ok { + ids = append(ids, id) + info = bucketMap[id] + types = append(types, wc.cache.MustGetBucketType(info.TypeIndex)) + } else if info != nil { + ids = append(ids, id) + infos = append(infos, info.Clone()) + types = append(types, wc.mustGetBucketType(info.TypeIndex)) + } + } + + return ids, types, infos +} + +func (wc *wrappedCache) TotalBucketCount() uint64 { + wc.mu.RLock() + defer wc.mu.RUnlock() + total := wc.cache.TotalBucketCount() + return max(total, wc.totalBucketCount) +} + +func (wc *wrappedCache) PutBucketType(id uint64, bt *BucketType) { + wc.mu.Lock() + defer wc.mu.Unlock() + wc.updatedBucketTypes[id] = bt + if bt != nil { + if _, ok := wc.propertyBucketTypeMap[bt.Amount.Uint64()]; !ok { + wc.propertyBucketTypeMap[bt.Amount.Uint64()] = make(map[uint64]uint64) + } + wc.propertyBucketTypeMap[bt.Amount.Uint64()][bt.Duration] = id + } +} + +func (wc *wrappedCache) PutBucketInfo(id uint64, bi *bucketInfo) { + wc.mu.Lock() + defer wc.mu.Unlock() + if id >= wc.totalBucketCount { + wc.totalBucketCount = id + 1 + } + if _, ok := wc.updatedBucketInfos[id]; !ok { + if oldInfo, ok := wc.cache.BucketInfo(id); ok { + if _, ok := wc.updatedCandidates[oldInfo.Delegate.String()]; !ok { + wc.updatedCandidates[oldInfo.Delegate.String()] = make(map[uint64]bool) + } + wc.updatedCandidates[oldInfo.Delegate.String()][id] = true + } + } + wc.updatedBucketInfos[id] = bi + if _, ok := wc.updatedCandidates[bi.Delegate.String()]; !ok { + wc.updatedCandidates[bi.Delegate.String()] = make(map[uint64]bool) + } + wc.updatedCandidates[bi.Delegate.String()][id] = true +} + +func (wc *wrappedCache) Base() stakingCache { + wc.mu.RLock() + defer wc.mu.RUnlock() + return wc.cache +} + +func (wc *wrappedCache) Commit() { + wc.mu.Lock() + defer wc.mu.Unlock() + + for id, bt := range wc.updatedBucketTypes { + wc.cache.PutBucketType(id, bt) + } + + for id, bi := range wc.updatedBucketInfos { + if bi == nil { + wc.cache.DeleteBucketInfo(id) + } else { + wc.cache.PutBucketInfo(id, bi) + } + } +} + +func (wc *wrappedCache) IsDirty() bool { + wc.mu.RLock() + defer wc.mu.RUnlock() + return len(wc.updatedBucketInfos) > 0 || len(wc.updatedBucketTypes) > 0 +} + +func (wc *wrappedCache) DeleteBucketInfo(id uint64) { + wc.mu.Lock() + defer wc.mu.Unlock() + if _, ok := wc.updatedBucketInfos[id]; !ok { + oldInfo, ok := wc.cache.BucketInfo(id) + if ok { + wc.updatedCandidates[oldInfo.Delegate.String()][id] = true + } + } + wc.updatedBucketInfos[id] = nil +} + +func (wc *wrappedCache) MatchBucketType(amount *big.Int, duration uint64) (uint64, *BucketType, bool) { + wc.mu.RLock() + defer wc.mu.RUnlock() + if !amount.IsUint64() { + panic("amount must be uint64") + } + amountUint64 := amount.Uint64() + if amountMap, ok := wc.propertyBucketTypeMap[amountUint64]; ok { + if id, ok := amountMap[duration]; ok { + if bt, ok := wc.updatedBucketTypes[id]; ok { + if bt != nil { + return id, bt, true + } + return 0, nil, false + } + } + } + + return wc.cache.MatchBucketType(amount, duration) +} + +func (wc *wrappedCache) BucketTypeCount() int { + wc.mu.RLock() + defer wc.mu.RUnlock() + total := wc.cache.BucketTypeCount() + for id := range wc.updatedBucketTypes { + if _, exists := wc.cache.BucketType(id); !exists { + total += 1 + } + } + return total +} diff --git a/systemcontractindex/stakingindex/stakeview.go b/systemcontractindex/stakingindex/stakeview.go index 9bc54902e2..cbc8c41c01 100644 --- a/systemcontractindex/stakingindex/stakeview.go +++ b/systemcontractindex/stakingindex/stakeview.go @@ -23,6 +23,7 @@ func (s *stakeView) Clone() staking.ContractStakeView { height: s.height, } } + func (s *stakeView) BucketsByCandidate(candidate address.Address) ([]*VoteBucket, error) { idxs := s.cache.BucketIdsByCandidate(candidate) bkts := s.cache.Buckets(idxs) From 70776d39135cf625d17a7b43f80031854167e672 Mon Sep 17 00:00:00 2001 From: zhi Date: Wed, 2 Jul 2025 20:35:49 +0800 Subject: [PATCH 5/7] add unit tests and fix a few bugs --- blockindex/contractstaking/cache.go | 20 +- blockindex/contractstaking/cache_test.go | 25 ++- .../contractstaking/dirty_cache_test.go | 15 +- blockindex/contractstaking/indexer.go | 6 +- blockindex/contractstaking/wrappedcache.go | 136 +++++++++----- .../contractstaking/wrappedcache_test.go | 176 ++++++++++++++++++ 6 files changed, 297 insertions(+), 81 deletions(-) create mode 100644 blockindex/contractstaking/wrappedcache_test.go diff --git a/blockindex/contractstaking/cache.go b/blockindex/contractstaking/cache.go index ed0ad90837..cbedc4e93b 100644 --- a/blockindex/contractstaking/cache.go +++ b/blockindex/contractstaking/cache.go @@ -42,7 +42,6 @@ type ( totalBucketCount uint64 // total number of buckets including burned buckets height uint64 // current block height, it's put in cache for consistency on merge mutex sync.RWMutex // a RW mutex for the cache to protect concurrent access - config Config } ) @@ -53,13 +52,12 @@ var ( ErrInvalidHeight = errors.New("invalid height") ) -func newContractStakingCache(config Config) *contractStakingCache { +func newContractStakingCache() *contractStakingCache { return &contractStakingCache{ bucketInfoMap: make(map[uint64]*bucketInfo), bucketTypeMap: make(map[uint64]*BucketType), propertyBucketTypeMap: make(map[int64]map[uint64]uint64), candidateBucketMap: make(map[string]map[uint64]bool), - config: config, } } @@ -249,7 +247,6 @@ func (s *contractStakingCache) Clone() *contractStakingCache { defer s.mutex.RUnlock() c := &contractStakingCache{ - config: s.config, totalBucketCount: s.totalBucketCount, } c.bucketInfoMap = make(map[uint64]*bucketInfo, len(s.bucketInfoMap)) @@ -329,22 +326,23 @@ func (s *contractStakingCache) getBucket(id uint64) (*BucketType, *bucketInfo) { func (s *contractStakingCache) putBucketType(id uint64, bt *BucketType) { // remove old bucket map if oldBt, existed := s.bucketTypeMap[id]; existed { - amount := oldBt.Amount.Int64() - if _, existed := s.propertyBucketTypeMap[amount]; existed { - delete(s.propertyBucketTypeMap[amount], oldBt.Duration) - if len(s.propertyBucketTypeMap[amount]) == 0 { - delete(s.propertyBucketTypeMap, amount) - } + if oldBt.Amount.Cmp(bt.Amount) != 0 || oldBt.Duration != bt.Duration { + panic("bucket type amount or duration cannot be changed") } } // add new bucket map - s.bucketTypeMap[id] = bt amount := bt.Amount.Int64() m, ok := s.propertyBucketTypeMap[amount] if !ok { s.propertyBucketTypeMap[amount] = make(map[uint64]uint64) m = s.propertyBucketTypeMap[amount] + } else { + oldId, ok := m[bt.Duration] + if ok && oldId != id { + panic("bucket type with same amount and duration already exists") + } } + s.bucketTypeMap[id] = bt m[bt.Duration] = id } diff --git a/blockindex/contractstaking/cache_test.go b/blockindex/contractstaking/cache_test.go index 1391ac6cb2..564a0ddf26 100644 --- a/blockindex/contractstaking/cache_test.go +++ b/blockindex/contractstaking/cache_test.go @@ -43,7 +43,7 @@ func TestContractStakingCache_CandidateVotes(t *testing.T) { } require := require.New(t) g := genesis.TestDefault() - cache := newContractStakingCache(Config{ContractAddress: identityset.Address(1).String(), CalculateVoteWeight: calculateVoteWeightGen(g.VoteWeightCalConsts), BlocksToDuration: _blockDurationFn}) + cache := newContractStakingCache() checkCacheCandidateVotes := checkCacheCandidateVotesGen(protocol.WithFeatureCtx(protocol.WithBlockCtx(genesis.WithGenesisContext(context.Background(), g), protocol.BlockCtx{BlockHeight: 1}))) checkCacheCandidateVotesAfterRedsea := checkCacheCandidateVotesGen(protocol.WithFeatureCtx(protocol.WithBlockCtx(genesis.WithGenesisContext(context.Background(), g), protocol.BlockCtx{BlockHeight: g.RedseaBlockHeight}))) // no bucket @@ -125,8 +125,7 @@ func TestContractStakingCache_CandidateVotes(t *testing.T) { func TestContractStakingCache_Buckets(t *testing.T) { require := require.New(t) - contractAddr := identityset.Address(27).String() - cache := newContractStakingCache(Config{ContractAddress: contractAddr, CalculateVoteWeight: calculateVoteWeightGen(genesis.TestDefault().VoteWeightCalConsts), BlocksToDuration: _blockDurationFn}) + cache := newContractStakingCache() // no bucket ids, _, _ := cache.Buckets() @@ -198,8 +197,7 @@ func TestContractStakingCache_Buckets(t *testing.T) { func TestContractStakingCache_BucketsByCandidate(t *testing.T) { require := require.New(t) - contractAddr := identityset.Address(27).String() - cache := newContractStakingCache(Config{ContractAddress: contractAddr, CalculateVoteWeight: calculateVoteWeightGen(genesis.TestDefault().VoteWeightCalConsts), BlocksToDuration: _blockDurationFn}) + cache := newContractStakingCache() // no bucket ids, _, _ := cache.BucketsByCandidate(identityset.Address(1)) @@ -266,8 +264,7 @@ func TestContractStakingCache_BucketsByCandidate(t *testing.T) { func TestContractStakingCache_BucketsByIndices(t *testing.T) { require := require.New(t) - contractAddr := identityset.Address(27).String() - cache := newContractStakingCache(Config{ContractAddress: contractAddr, CalculateVoteWeight: calculateVoteWeightGen(genesis.TestDefault().VoteWeightCalConsts), BlocksToDuration: _blockDurationFn}) + cache := newContractStakingCache() // no bucket bts, bis := cache.BucketsByIndices([]uint64{1}) @@ -318,7 +315,7 @@ func TestContractStakingCache_BucketsByIndices(t *testing.T) { func TestContractStakingCache_ActiveBucketTypes(t *testing.T) { require := require.New(t) - cache := newContractStakingCache(Config{ContractAddress: identityset.Address(27).String(), CalculateVoteWeight: calculateVoteWeightGen(genesis.TestDefault().VoteWeightCalConsts), BlocksToDuration: _blockDurationFn}) + cache := newContractStakingCache() // no bucket type abt := cache.ActiveBucketTypes() @@ -376,7 +373,7 @@ func TestContractStakingCache_ActiveBucketTypes(t *testing.T) { func TestContractStakingCache_MatchBucketType(t *testing.T) { require := require.New(t) - cache := newContractStakingCache(Config{ContractAddress: identityset.Address(27).String(), CalculateVoteWeight: calculateVoteWeightGen(genesis.TestDefault().VoteWeightCalConsts), BlocksToDuration: _blockDurationFn}) + cache := newContractStakingCache() // no bucket types _, bucketType, ok := cache.MatchBucketType(big.NewInt(100), 100) @@ -411,7 +408,7 @@ func TestContractStakingCache_MatchBucketType(t *testing.T) { func TestContractStakingCache_BucketTypeCount(t *testing.T) { require := require.New(t) - cache := newContractStakingCache(Config{ContractAddress: identityset.Address(27).String(), CalculateVoteWeight: calculateVoteWeightGen(genesis.TestDefault().VoteWeightCalConsts), BlocksToDuration: _blockDurationFn}) + cache := newContractStakingCache() // no bucket type btc := cache.BucketTypeCount() @@ -435,7 +432,7 @@ func TestContractStakingCache_BucketTypeCount(t *testing.T) { func TestContractStakingCache_LoadFromDB(t *testing.T) { require := require.New(t) - cache := newContractStakingCache(Config{ContractAddress: identityset.Address(27).String(), CalculateVoteWeight: calculateVoteWeightGen(genesis.TestDefault().VoteWeightCalConsts), BlocksToDuration: _blockDurationFn}) + cache := newContractStakingCache() // mock kvstore exception ctrl := gomock.NewController(t) @@ -562,7 +559,7 @@ func checkBucket(r *require.Assertions, id uint64, bt *BucketType, bucket *bucke func TestContractStakingCache_MustGetBucketInfo(t *testing.T) { // build test condition to add a bucketInfo - cache := newContractStakingCache(Config{ContractAddress: identityset.Address(1).String(), CalculateVoteWeight: calculateVoteWeightGen(genesis.TestDefault().VoteWeightCalConsts), BlocksToDuration: _blockDurationFn}) + cache := newContractStakingCache() cache.PutBucketInfo(1, &bucketInfo{TypeIndex: 1, CreatedAt: 1, UnlockedAt: maxBlockNumber, UnstakedAt: maxBlockNumber, Delegate: identityset.Address(1), Owner: identityset.Address(2)}) tryCatchMustGetBucketInfo := func(i uint64) (v *bucketInfo, err error) { @@ -588,7 +585,7 @@ func TestContractStakingCache_MustGetBucketInfo(t *testing.T) { func TestContractStakingCache_MustGetBucketType(t *testing.T) { // build test condition to add a bucketType - cache := newContractStakingCache(Config{ContractAddress: identityset.Address(1).String(), CalculateVoteWeight: calculateVoteWeightGen(genesis.TestDefault().VoteWeightCalConsts), BlocksToDuration: _blockDurationFn}) + cache := newContractStakingCache() cache.PutBucketType(1, &BucketType{Amount: big.NewInt(100), Duration: 100, ActivatedAt: 1}) tryCatchMustGetBucketType := func(i uint64) (v *BucketType, err error) { @@ -614,7 +611,7 @@ func TestContractStakingCache_MustGetBucketType(t *testing.T) { func TestContractStakingCache_DeleteBucketInfo(t *testing.T) { // build test condition to add a bucketInfo - cache := newContractStakingCache(Config{ContractAddress: identityset.Address(1).String(), CalculateVoteWeight: calculateVoteWeightGen(genesis.TestDefault().VoteWeightCalConsts), BlocksToDuration: _blockDurationFn}) + cache := newContractStakingCache() bi1 := &bucketInfo{TypeIndex: 1, CreatedAt: 1, UnlockedAt: maxBlockNumber, UnstakedAt: maxBlockNumber, Delegate: identityset.Address(1), Owner: identityset.Address(1)} bi2 := &bucketInfo{TypeIndex: 2, CreatedAt: 1, UnlockedAt: maxBlockNumber, UnstakedAt: maxBlockNumber, Delegate: identityset.Address(1), Owner: identityset.Address(2)} cache.PutBucketInfo(1, bi1) diff --git a/blockindex/contractstaking/dirty_cache_test.go b/blockindex/contractstaking/dirty_cache_test.go index 4b29d6d1b2..2cc251e5c1 100644 --- a/blockindex/contractstaking/dirty_cache_test.go +++ b/blockindex/contractstaking/dirty_cache_test.go @@ -6,7 +6,6 @@ import ( "github.com/stretchr/testify/require" - "github.com/iotexproject/iotex-core/v2/blockchain/genesis" "github.com/iotexproject/iotex-core/v2/db/batch" "github.com/iotexproject/iotex-core/v2/pkg/util/byteutil" "github.com/iotexproject/iotex-core/v2/test/identityset" @@ -14,7 +13,7 @@ import ( func TestContractStakingDirty_getBucketType(t *testing.T) { require := require.New(t) - clean := newContractStakingCache(Config{CalculateVoteWeight: calculateVoteWeightGen(genesis.TestDefault().VoteWeightCalConsts)}) + clean := newContractStakingCache() dirty := newContractStakingDirty(clean) // no bucket type @@ -41,7 +40,7 @@ func TestContractStakingDirty_getBucketType(t *testing.T) { func TestContractStakingDirty_getBucketInfo(t *testing.T) { require := require.New(t) - clean := newContractStakingCache(Config{CalculateVoteWeight: calculateVoteWeightGen(genesis.TestDefault().VoteWeightCalConsts)}) + clean := newContractStakingCache() dirty := newContractStakingDirty(clean) // no bucket info @@ -93,7 +92,7 @@ func TestContractStakingDirty_getBucketInfo(t *testing.T) { func TestContractStakingDirty_matchBucketType(t *testing.T) { require := require.New(t) - clean := newContractStakingCache(Config{CalculateVoteWeight: calculateVoteWeightGen(genesis.TestDefault().VoteWeightCalConsts)}) + clean := newContractStakingCache() dirty := newContractStakingDirty(clean) // no bucket type @@ -123,7 +122,7 @@ func TestContractStakingDirty_matchBucketType(t *testing.T) { func TestContractStakingDirty_getBucketTypeCount(t *testing.T) { require := require.New(t) - clean := newContractStakingCache(Config{CalculateVoteWeight: calculateVoteWeightGen(genesis.TestDefault().VoteWeightCalConsts)}) + clean := newContractStakingCache() dirty := newContractStakingDirty(clean) // no bucket type @@ -143,7 +142,7 @@ func TestContractStakingDirty_getBucketTypeCount(t *testing.T) { func TestContractStakingDirty_finalize(t *testing.T) { require := require.New(t) - clean := newContractStakingCache(Config{CalculateVoteWeight: calculateVoteWeightGen(genesis.TestDefault().VoteWeightCalConsts)}) + clean := newContractStakingCache() dirty := newContractStakingDirty(newWrappedCache(clean)) totalCnt := clean.TotalBucketCount() require.EqualValues(0, totalCnt) @@ -184,12 +183,12 @@ func TestContractStakingDirty_finalize(t *testing.T) { require.EqualValues(byteutil.Uint64ToBytesBigEndian(1), info.Key()) require.EqualValues(bi.Serialize(), info.Value()) totalCnt = cache.TotalBucketCount() - require.EqualValues(2, totalCnt) + require.EqualValues(1, totalCnt) } func TestContractStakingDirty_noSideEffectOnClean(t *testing.T) { require := require.New(t) - clean := newContractStakingCache(Config{CalculateVoteWeight: calculateVoteWeightGen(genesis.TestDefault().VoteWeightCalConsts)}) + clean := newContractStakingCache() dirty := newContractStakingDirty(newWrappedCache(clean)) // add bucket type to dirty cache diff --git a/blockindex/contractstaking/indexer.go b/blockindex/contractstaking/indexer.go index 382fd70282..3c905427fa 100644 --- a/blockindex/contractstaking/indexer.go +++ b/blockindex/contractstaking/indexer.go @@ -69,7 +69,7 @@ func NewContractStakingIndexer(kvStore db.KVStore, config Config) (*Indexer, err } return &Indexer{ kvstore: kvStore, - cache: newContractStakingCache(config), + cache: newContractStakingCache(), config: config, }, nil } @@ -114,7 +114,7 @@ func (s *Indexer) Stop(ctx context.Context) error { if err := s.kvstore.Stop(ctx); err != nil { return err } - s.cache = newContractStakingCache(s.config) + s.cache = newContractStakingCache() s.TurnOff() return nil } @@ -364,7 +364,7 @@ func (s *Indexer) commit(handler *contractStakingEventHandler, height uint64) er // update db batch.Put(_StakingNS, _stakingHeightKey, byteutil.Uint64ToBytesBigEndian(height), "failed to put height") if err := s.kvstore.WriteBatch(batch); err != nil { - s.cache = newContractStakingCache(s.config) + s.cache = newContractStakingCache() return s.loadFromDB() } s.height = height diff --git a/blockindex/contractstaking/wrappedcache.go b/blockindex/contractstaking/wrappedcache.go index 827c0be432..bd90e2c180 100644 --- a/blockindex/contractstaking/wrappedcache.go +++ b/blockindex/contractstaking/wrappedcache.go @@ -21,26 +21,34 @@ type ( updatedCandidates map[string]map[uint64]bool propertyBucketTypeMap map[uint64]map[uint64]uint64 - mu sync.RWMutex - cache stakingCache + mu sync.RWMutex + base stakingCache } ) -func newWrappedCache(cache stakingCache) *wrappedCache { +func newWrappedCache(base stakingCache) *wrappedCache { + if base == nil { + panic("base staking cache cannot be nil") + } + totalBucketCount := base.TotalBucketCount() + return &wrappedCache{ + totalBucketCount: totalBucketCount, updatedBucketInfos: make(map[uint64]*bucketInfo), updatedBucketTypes: make(map[uint64]*BucketType), updatedCandidates: make(map[string]map[uint64]bool), propertyBucketTypeMap: make(map[uint64]map[uint64]uint64), mu: sync.RWMutex{}, - cache: cache, + base: base, } } func (wc *wrappedCache) BucketInfo(id uint64) (*bucketInfo, bool) { + wc.mu.RLock() + defer wc.mu.RUnlock() info, ok := wc.updatedBucketInfos[id] if !ok { - return wc.cache.BucketInfo(id) + return wc.base.BucketInfo(id) } if info == nil { return nil, false @@ -49,9 +57,11 @@ func (wc *wrappedCache) BucketInfo(id uint64) (*bucketInfo, bool) { } func (wc *wrappedCache) MustGetBucketInfo(id uint64) *bucketInfo { + wc.mu.RLock() + defer wc.mu.RUnlock() info, ok := wc.updatedBucketInfos[id] if !ok { - return wc.cache.MustGetBucketInfo(id) + return wc.base.MustGetBucketInfo(id) } if info == nil { panic("must get bucket info from wrapped cache") @@ -61,13 +71,15 @@ func (wc *wrappedCache) MustGetBucketInfo(id uint64) *bucketInfo { } func (wc *wrappedCache) MustGetBucketType(id uint64) *BucketType { + wc.mu.RLock() + defer wc.mu.RUnlock() return wc.mustGetBucketType(id) } func (wc *wrappedCache) mustGetBucketType(id uint64) *BucketType { bt, ok := wc.updatedBucketTypes[id] if !ok { - return wc.cache.MustGetBucketType(id) + return wc.base.MustGetBucketType(id) } if bt == nil { panic("must get bucket type from wrapped cache") @@ -77,18 +89,26 @@ func (wc *wrappedCache) mustGetBucketType(id uint64) *BucketType { } func (wc *wrappedCache) BucketType(id uint64) (*BucketType, bool) { + wc.mu.RLock() + defer wc.mu.RUnlock() + return wc.bucketType(id) +} + +func (wc *wrappedCache) bucketType(id uint64) (*BucketType, bool) { bt, ok := wc.updatedBucketTypes[id] if !ok { - return wc.cache.BucketType(id) + return wc.base.BucketType(id) } return bt, ok } func (wc *wrappedCache) BucketsByCandidate(candidate address.Address) ([]uint64, []*BucketType, []*bucketInfo) { - ids, types, infos := wc.cache.BucketsByCandidate(candidate) - bucketMap := make(map[uint64]*bucketInfo, len(ids)) + wc.mu.RLock() + defer wc.mu.RUnlock() + ids, _, infos := wc.base.BucketsByCandidate(candidate) + reverseMap := make(map[uint64]int, len(ids)) for i, id := range ids { - bucketMap[id] = infos[i] + reverseMap[id] = i } for id := range wc.updatedCandidates[candidate.String()] { info, ok := wc.updatedBucketInfos[id] @@ -97,55 +117,81 @@ func (wc *wrappedCache) BucketsByCandidate(candidate address.Address) ([]uint64, panic("bucket should exist in updated bucket info") } if info == nil || info.Delegate.String() != candidate.String() { - delete(bucketMap, id) + delete(reverseMap, id) } else { - bucketMap[id] = info.Clone() + if _, ok := reverseMap[id]; !ok { + reverseMap[id] = len(infos) + infos = append(infos, info.Clone()) + } else { + infos[reverseMap[id]] = info.Clone() + } } } - sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] }) - for _, id := range ids { + retIDs := make([]uint64, 0, len(reverseMap)) + for id := range reverseMap { + retIDs = append(retIDs, id) + } + retInfos := make([]*bucketInfo, 0, len(retIDs)) + retTypes := make([]*BucketType, 0, len(retIDs)) + sort.Slice(retIDs, func(i, j int) bool { return retIDs[i] < retIDs[j] }) + for _, id := range retIDs { info, ok := wc.updatedBucketInfos[id] if !ok { - ids = append(ids, id) - info = bucketMap[id] - types = append(types, wc.cache.MustGetBucketType(info.TypeIndex)) + info = infos[reverseMap[id]] + retInfos = append(retInfos, info) + retTypes = append(retTypes, wc.mustGetBucketType(info.TypeIndex)) } else if info != nil { - ids = append(ids, id) - infos = append(infos, info.Clone()) - types = append(types, wc.mustGetBucketType(info.TypeIndex)) + retInfos = append(retInfos, info.Clone()) + retTypes = append(retTypes, wc.mustGetBucketType(info.TypeIndex)) + } else { + panic("bucket info should not be nil in updated bucket infos") } } - - return ids, types, infos + return retIDs, retTypes, retInfos } func (wc *wrappedCache) TotalBucketCount() uint64 { wc.mu.RLock() defer wc.mu.RUnlock() - total := wc.cache.TotalBucketCount() - return max(total, wc.totalBucketCount) + return wc.totalBucketCount } func (wc *wrappedCache) PutBucketType(id uint64, bt *BucketType) { wc.mu.Lock() defer wc.mu.Unlock() - wc.updatedBucketTypes[id] = bt - if bt != nil { - if _, ok := wc.propertyBucketTypeMap[bt.Amount.Uint64()]; !ok { - wc.propertyBucketTypeMap[bt.Amount.Uint64()] = make(map[uint64]uint64) + if bt == nil { + panic("bucket type cannot be nil") + } + oldBt, existed := wc.bucketType(id) + if existed { + if oldBt.Amount.Cmp(bt.Amount) != 0 || oldBt.Duration != bt.Duration { + panic("bucket type amount or duration cannot be changed") + } + } + oldId, _, ok := wc.matchBucketType(bt.Amount, bt.Duration) + if ok && oldId != id { + panic("bucket type with same amount and duration already exists") + } + if _, ok := wc.propertyBucketTypeMap[bt.Amount.Uint64()]; !ok { + wc.propertyBucketTypeMap[bt.Amount.Uint64()] = make(map[uint64]uint64) + } else { + oldID, ok := wc.propertyBucketTypeMap[bt.Amount.Uint64()][bt.Duration] + if ok && oldID != id { + panic("bucket type with same amount and duration already exists") } - wc.propertyBucketTypeMap[bt.Amount.Uint64()][bt.Duration] = id } + wc.updatedBucketTypes[id] = bt + wc.propertyBucketTypeMap[bt.Amount.Uint64()][bt.Duration] = id } func (wc *wrappedCache) PutBucketInfo(id uint64, bi *bucketInfo) { wc.mu.Lock() defer wc.mu.Unlock() - if id >= wc.totalBucketCount { - wc.totalBucketCount = id + 1 + if id > wc.totalBucketCount { + wc.totalBucketCount = id } if _, ok := wc.updatedBucketInfos[id]; !ok { - if oldInfo, ok := wc.cache.BucketInfo(id); ok { + if oldInfo, ok := wc.base.BucketInfo(id); ok { if _, ok := wc.updatedCandidates[oldInfo.Delegate.String()]; !ok { wc.updatedCandidates[oldInfo.Delegate.String()] = make(map[uint64]bool) } @@ -162,7 +208,7 @@ func (wc *wrappedCache) PutBucketInfo(id uint64, bi *bucketInfo) { func (wc *wrappedCache) Base() stakingCache { wc.mu.RLock() defer wc.mu.RUnlock() - return wc.cache + return wc.base } func (wc *wrappedCache) Commit() { @@ -170,14 +216,13 @@ func (wc *wrappedCache) Commit() { defer wc.mu.Unlock() for id, bt := range wc.updatedBucketTypes { - wc.cache.PutBucketType(id, bt) + wc.base.PutBucketType(id, bt) } - for id, bi := range wc.updatedBucketInfos { if bi == nil { - wc.cache.DeleteBucketInfo(id) + wc.base.DeleteBucketInfo(id) } else { - wc.cache.PutBucketInfo(id, bi) + wc.base.PutBucketInfo(id, bi) } } } @@ -192,7 +237,7 @@ func (wc *wrappedCache) DeleteBucketInfo(id uint64) { wc.mu.Lock() defer wc.mu.Unlock() if _, ok := wc.updatedBucketInfos[id]; !ok { - oldInfo, ok := wc.cache.BucketInfo(id) + oldInfo, ok := wc.base.BucketInfo(id) if ok { wc.updatedCandidates[oldInfo.Delegate.String()][id] = true } @@ -203,9 +248,10 @@ func (wc *wrappedCache) DeleteBucketInfo(id uint64) { func (wc *wrappedCache) MatchBucketType(amount *big.Int, duration uint64) (uint64, *BucketType, bool) { wc.mu.RLock() defer wc.mu.RUnlock() - if !amount.IsUint64() { - panic("amount must be uint64") - } + return wc.matchBucketType(amount, duration) +} + +func (wc *wrappedCache) matchBucketType(amount *big.Int, duration uint64) (uint64, *BucketType, bool) { amountUint64 := amount.Uint64() if amountMap, ok := wc.propertyBucketTypeMap[amountUint64]; ok { if id, ok := amountMap[duration]; ok { @@ -218,15 +264,15 @@ func (wc *wrappedCache) MatchBucketType(amount *big.Int, duration uint64) (uint6 } } - return wc.cache.MatchBucketType(amount, duration) + return wc.base.MatchBucketType(amount, duration) } func (wc *wrappedCache) BucketTypeCount() int { wc.mu.RLock() defer wc.mu.RUnlock() - total := wc.cache.BucketTypeCount() + total := wc.base.BucketTypeCount() for id := range wc.updatedBucketTypes { - if _, exists := wc.cache.BucketType(id); !exists { + if _, exists := wc.base.BucketType(id); !exists { total += 1 } } diff --git a/blockindex/contractstaking/wrappedcache_test.go b/blockindex/contractstaking/wrappedcache_test.go new file mode 100644 index 0000000000..3f93e7f00c --- /dev/null +++ b/blockindex/contractstaking/wrappedcache_test.go @@ -0,0 +1,176 @@ +// Copyright (c) 2023 IoTeX Foundation +// This source code is provided 'as is' and no warranties are given as to title or non-infringement, merchantability +// or fitness for purpose and, to the extent permitted by law, all liability for your use of the code is disclaimed. +// This source code is governed by Apache License 2.0 that can be found in the LICENSE file. + +package contractstaking + +import ( + "math/big" + "testing" + + "github.com/iotexproject/iotex-core/v2/test/identityset" + "github.com/stretchr/testify/require" +) + +// TestNewWrappedCache tests the creation and functionality of the wrapped cache +func TestNewWrappedCache(t *testing.T) { + require := require.New(t) + t.Run("nil base cache", func(t *testing.T) { + require.Panics(func() { + newWrappedCache(nil) + }, "base staking cache cannot be nil") + }) + t.Run("non-nil base cache", func(t *testing.T) { + base := newContractStakingCache() + base.PutBucketType(1, &BucketType{ + Amount: big.NewInt(1000), + Duration: 100, + }) + base.PutBucketType(2, &BucketType{ + Amount: big.NewInt(2000), + Duration: 200, + }) + base.PutBucketInfo(1, &bucketInfo{ + TypeIndex: 1, + Delegate: identityset.Address(0), + Owner: identityset.Address(2), + CreatedAt: 1, + UnlockedAt: maxBlockNumber, + UnstakedAt: maxBlockNumber, + }) + base.PutBucketInfo(2, &bucketInfo{ + TypeIndex: 2, + Delegate: identityset.Address(0), + Owner: identityset.Address(3), + CreatedAt: 1, + UnlockedAt: maxBlockNumber, + UnstakedAt: maxBlockNumber, + }) + base.PutBucketInfo(3, &bucketInfo{ + TypeIndex: 1, + Delegate: identityset.Address(1), + Owner: identityset.Address(2), + CreatedAt: 1, + UnlockedAt: maxBlockNumber, + UnstakedAt: maxBlockNumber, + }) + base.PutBucketInfo(4, &bucketInfo{ + TypeIndex: 2, + Delegate: identityset.Address(1), + Owner: identityset.Address(3), + CreatedAt: 1, + UnlockedAt: maxBlockNumber, + UnstakedAt: maxBlockNumber, + }) + require.Equal(uint64(4), base.TotalBucketCount()) + require.Equal(2, base.BucketTypeCount()) + ids, types, infos := base.Buckets() + require.Equal([]uint64{1, 2, 3, 4}, ids) + require.Equal(4, len(types)) + require.Equal(4, len(infos)) + ids, types, infos = base.BucketsByCandidate(identityset.Address(0)) + require.Equal([]uint64{1, 2}, ids) + require.Equal(2, len(types)) + require.Equal(2, len(infos)) + ids, types, infos = base.BucketsByCandidate(identityset.Address(1)) + require.Equal([]uint64{3, 4}, ids) + require.Equal(2, len(types)) + require.Equal(2, len(infos)) + ids, types, infos = base.BucketsByCandidate(identityset.Address(2)) + require.Equal(0, len(ids)) + require.Equal(0, len(types)) + require.Equal(0, len(infos)) + wrapped := newWrappedCache(base) + t.Run("wrapped cache properties", func(t *testing.T) { + require.NotNil(wrapped) + require.Equal(uint64(4), wrapped.TotalBucketCount()) + require.Equal(2, wrapped.BucketTypeCount()) + }) + t.Run("put an existing bucket type", func(t *testing.T) { + existingType := &BucketType{ + Amount: big.NewInt(2000), + Duration: 100, + } + require.Panics(func() { + wrapped.PutBucketType(1, existingType) + }, "putting an existing bucket type should panic") + existingType.Amount = big.NewInt(1000) + require.Panics(func() { + wrapped.PutBucketType(2, existingType) + }, "putting an existing bucket type should panic") + require.Equal(2, wrapped.BucketTypeCount()) + }) + t.Run("put new bucket type", func(t *testing.T) { + newType := &BucketType{ + Amount: big.NewInt(3000), + Duration: 300, + } + wrapped.PutBucketType(3, newType) + require.Equal(3, wrapped.BucketTypeCount()) + require.Equal(newType, wrapped.MustGetBucketType(3)) + require.Equal(2, base.BucketTypeCount()) + require.Panics(func() { + base.MustGetBucketType(3) + }, "must get bucket type from wrapped cache") + }) + t.Run("put new bucket info", func(t *testing.T) { + newInfo := &bucketInfo{ + TypeIndex: 3, + Delegate: identityset.Address(1), + Owner: identityset.Address(5), + CreatedAt: 1, + UnlockedAt: maxBlockNumber, + UnstakedAt: maxBlockNumber, + } + wrapped.PutBucketInfo(5, newInfo) + require.Equal(newInfo, wrapped.MustGetBucketInfo(5)) + require.Equal(uint64(5), wrapped.TotalBucketCount()) + require.Equal(uint64(4), base.TotalBucketCount()) + ids, types, infos = wrapped.BucketsByCandidate(identityset.Address(1)) + require.Equal([]uint64{3, 4, 5}, ids) + require.Equal(3, len(types)) + require.Equal(3, len(infos)) + require.Panics(func() { + base.MustGetBucketInfo(5) + }, "must get bucket info from wrapped cache") + }) + t.Run("update existing bucket info", func(t *testing.T) { + ids, types, infos = wrapped.BucketsByCandidate(identityset.Address(6)) + require.Equal([]uint64{}, ids) + require.Equal(0, len(types)) + require.Equal(0, len(infos)) + existingInfo := wrapped.MustGetBucketInfo(1) + existingInfo.Delegate = identityset.Address(6) + wrapped.PutBucketInfo(1, existingInfo) + updatedInfo := wrapped.MustGetBucketInfo(1) + require.Equal(identityset.Address(6), updatedInfo.Delegate) + require.Equal(existingInfo, updatedInfo) + ids, types, infos = wrapped.BucketsByCandidate(identityset.Address(0)) + require.Equal([]uint64{2}, ids) + require.Equal(1, len(types)) + require.Equal(1, len(infos)) + ids, types, infos = wrapped.BucketsByCandidate(identityset.Address(6)) + require.Equal([]uint64{1}, ids) + require.Equal(1, len(types)) + require.Equal(1, len(infos)) + }) + t.Run("delete bucket info", func(t *testing.T) { + ids, types, infos = wrapped.BucketsByCandidate(identityset.Address(0)) + require.Equal([]uint64{2}, ids) + require.Equal(1, len(types)) + require.Equal(1, len(infos)) + wrapped.DeleteBucketInfo(2) + _, ok := wrapped.BucketInfo(2) + require.False(ok, "bucket info should be deleted") + require.Equal(uint64(5), wrapped.TotalBucketCount()) + require.Equal(uint64(4), base.TotalBucketCount()) + ids, types, infos = wrapped.BucketsByCandidate(identityset.Address(0)) + require.Equal([]uint64{}, ids) + require.Equal(0, len(types)) + require.Equal(0, len(infos)) + _, ok = base.BucketInfo(2) + require.True(ok, "base cache should still have the bucket info") + }) + }) +} From 788681d8b1758a8141320a7fd0cb1fff494353e7 Mon Sep 17 00:00:00 2001 From: zhi Date: Thu, 3 Jul 2025 08:47:14 +0800 Subject: [PATCH 6/7] fix unit test --- blockindex/contractstaking/dirty_cache_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/blockindex/contractstaking/dirty_cache_test.go b/blockindex/contractstaking/dirty_cache_test.go index 2cc251e5c1..4151263f0f 100644 --- a/blockindex/contractstaking/dirty_cache_test.go +++ b/blockindex/contractstaking/dirty_cache_test.go @@ -206,7 +206,7 @@ func TestContractStakingDirty_noSideEffectOnClean(t *testing.T) { require.Nil(bi) // update bucket type in dirty cache - dirty.updateBucketType(1, &BucketType{Amount: big.NewInt(200), Duration: 200, ActivatedAt: 2}) + dirty.updateBucketType(1, &BucketType{Amount: big.NewInt(100), Duration: 100, ActivatedAt: 3}) // check that clean cache is not affected bt, ok = clean.getBucketType(1) require.False(ok) From 53be820d2cf60fab96dad26f28fe8d8d62840033 Mon Sep 17 00:00:00 2001 From: zhi Date: Fri, 4 Jul 2025 15:48:09 +0800 Subject: [PATCH 7/7] address comments --- action/protocol/staking/viewdata.go | 16 +++++ blockindex/contractstaking/stakeview.go | 4 ++ systemcontractindex/stakingindex/cache.go | 64 +++++++++---------- systemcontractindex/stakingindex/index.go | 4 +- systemcontractindex/stakingindex/stakeview.go | 5 ++ 5 files changed, 55 insertions(+), 38 deletions(-) diff --git a/action/protocol/staking/viewdata.go b/action/protocol/staking/viewdata.go index c42b395daf..356ca66119 100644 --- a/action/protocol/staking/viewdata.go +++ b/action/protocol/staking/viewdata.go @@ -19,6 +19,7 @@ type ( // ContractStakeView is the interface for contract stake view ContractStakeView interface { Clone() ContractStakeView + Commit() CreatePreStates(ctx context.Context) error Handle(ctx context.Context, receipt *action.Receipt) error BucketsByCandidate(ownerAddr address.Address) ([]*VoteBucket, error) @@ -79,6 +80,9 @@ func (v *ViewData) Commit(ctx context.Context, sr protocol.StateReader) error { if err := v.bucketPool.Commit(sr); err != nil { return err } + if v.contractsStake != nil { + v.contractsStake.Commit() + } v.snapshots = []Snapshot{} return nil @@ -156,6 +160,18 @@ func (csv *contractStakeView) CreatePreStates(ctx context.Context) error { return nil } +func (csv *contractStakeView) Commit() { + if csv.v1 != nil { + csv.v1.Commit() + } + if csv.v2 != nil { + csv.v2.Commit() + } + if csv.v3 != nil { + csv.v3.Commit() + } +} + func (csv *contractStakeView) Handle(ctx context.Context, receipt *action.Receipt) error { if csv.v1 != nil { if err := csv.v1.Handle(ctx, receipt); err != nil { diff --git a/blockindex/contractstaking/stakeview.go b/blockindex/contractstaking/stakeview.go index ec663b332e..7bed381268 100644 --- a/blockindex/contractstaking/stakeview.go +++ b/blockindex/contractstaking/stakeview.go @@ -76,3 +76,7 @@ func (s *stakeView) Handle(ctx context.Context, receipt *action.Receipt) error { return nil } + +func (s *stakeView) Commit() { + s.cache.Commit() +} diff --git a/systemcontractindex/stakingindex/cache.go b/systemcontractindex/stakingindex/cache.go index f89bfdba2f..68ae163f3b 100644 --- a/systemcontractindex/stakingindex/cache.go +++ b/systemcontractindex/stakingindex/cache.go @@ -2,6 +2,7 @@ package stakingindex import ( "errors" + "slices" "github.com/iotexproject/iotex-address/address" @@ -19,7 +20,7 @@ type ( BucketIdsByCandidate(candidate address.Address) []uint64 TotalBucketCount() uint64 Base() indexerCache - Commit() error + Commit() IsDirty() bool } // base is the in-memory base for staking index @@ -34,7 +35,6 @@ type ( cache indexerCache bucketsByCandidate map[string]map[uint64]bool // buckets by candidate in current block updatedBuckets map[uint64]*Bucket // updated buckets in current block - deletedBucketIds map[uint64]struct{} // deleted buckets in current block } ) @@ -166,16 +166,13 @@ func (s *base) IsDirty() bool { return false } -func (s *base) Commit() error { - return nil -} +func (s *base) Commit() {} func newWrappedCache(cache indexerCache) *wrappedCache { return &wrappedCache{ cache: cache, bucketsByCandidate: make(map[string]map[uint64]bool), updatedBuckets: make(map[uint64]*Bucket), - deletedBucketIds: make(map[uint64]struct{}), } } @@ -192,7 +189,6 @@ func (w *wrappedCache) PutBucket(id uint64, bkt *Bucket) { w.bucketsByCandidate[oldCand][id] = false } w.updatedBuckets[id] = bkt - delete(w.deletedBucketIds, id) cand := bkt.Candidate.String() if w.bucketsByCandidate[cand] == nil { w.bucketsByCandidate[cand] = make(map[uint64]bool) @@ -201,27 +197,20 @@ func (w *wrappedCache) PutBucket(id uint64, bkt *Bucket) { } func (w *wrappedCache) DeleteBucket(id uint64) { - w.deletedBucketIds[id] = struct{}{} - delete(w.updatedBuckets, id) - for cand := range w.bucketsByCandidate { - delete(w.bucketsByCandidate[cand], id) - if len(w.bucketsByCandidate[cand]) == 0 { - delete(w.bucketsByCandidate, cand) - } - } + w.updatedBuckets[id] = nil } func (w *wrappedCache) BucketIdxs() []uint64 { idxMap := make(map[uint64]struct{}) // Load from underlying cache for _, id := range w.cache.BucketIdxs() { - if _, deleted := w.deletedBucketIds[id]; !deleted { + if bucket, exist := w.updatedBuckets[id]; !exist || bucket != nil { idxMap[id] = struct{}{} } } // Add updatedBuckets - for id := range w.updatedBuckets { - if _, deleted := w.deletedBucketIds[id]; !deleted { + for id, bucket := range w.updatedBuckets { + if bucket != nil { idxMap[id] = struct{}{} } } @@ -229,14 +218,15 @@ func (w *wrappedCache) BucketIdxs() []uint64 { for id := range idxMap { idxs = append(idxs, id) } + slices.Sort(idxs) return idxs } func (w *wrappedCache) Bucket(id uint64) *Bucket { - if _, deleted := w.deletedBucketIds[id]; deleted { - return nil - } if bkt, ok := w.updatedBuckets[id]; ok { + if bkt == nil { + return nil + } return bkt.Clone() } return w.cache.Bucket(id) @@ -245,11 +235,12 @@ func (w *wrappedCache) Bucket(id uint64) *Bucket { func (w *wrappedCache) Buckets(indices []uint64) []*Bucket { buckets := make([]*Bucket, 0, len(indices)) for _, idx := range indices { - if _, deleted := w.deletedBucketIds[idx]; deleted { - continue - } if bkt, ok := w.updatedBuckets[idx]; ok { - buckets = append(buckets, bkt.Clone()) + if bkt == nil { + buckets = append(buckets, nil) + } else { + buckets = append(buckets, bkt.Clone()) + } } else if bkt := w.cache.Bucket(idx); bkt != nil { buckets = append(buckets, bkt.Clone()) } @@ -275,13 +266,16 @@ func (w *wrappedCache) BucketIdsByCandidate(candidate address.Address) []uint64 } } // Remove deleted ids - for id := range w.deletedBucketIds { - delete(ids, id) + for id, bucket := range w.updatedBuckets { + if bucket == nil { + delete(ids, id) + } } result := make([]uint64, 0, len(ids)) for id := range ids { result = append(result, id) } + slices.Sort(result) return result } @@ -294,23 +288,23 @@ func (w *wrappedCache) TotalBucketCount() uint64 { return w.cache.TotalBucketCount() } -func (w *wrappedCache) Commit() error { +func (w *wrappedCache) Commit() { if w.isDirty() { for id, bkt := range w.updatedBuckets { - w.cache.PutBucket(id, bkt) - } - for id := range w.deletedBucketIds { - w.cache.DeleteBucket(id) + if bkt == nil { + w.cache.DeleteBucket(id) + } else { + w.cache.PutBucket(id, bkt) + } } w.updatedBuckets = make(map[uint64]*Bucket) - w.deletedBucketIds = make(map[uint64]struct{}) w.bucketsByCandidate = make(map[string]map[uint64]bool) } - return w.cache.Commit() + w.cache.Commit() } func (w *wrappedCache) isDirty() bool { - return len(w.updatedBuckets) > 0 || len(w.deletedBucketIds) > 0 || len(w.bucketsByCandidate) > 0 + return len(w.updatedBuckets) > 0 || len(w.bucketsByCandidate) > 0 } func (w *wrappedCache) IsDirty() bool { diff --git a/systemcontractindex/stakingindex/index.go b/systemcontractindex/stakingindex/index.go index 48b72fca7b..adc13e1d5c 100644 --- a/systemcontractindex/stakingindex/index.go +++ b/systemcontractindex/stakingindex/index.go @@ -346,9 +346,7 @@ func (s *Indexer) commit(handler stakingEventHandler, height uint64) error { if err := s.common.Commit(height, delta); err != nil { return err } - if err := dirty.Commit(); err != nil { - return errors.Wrapf(err, "commit dirty cache failed") - } + dirty.Commit() cache, ok := dirty.Base().(*base) if !ok { return errors.Errorf("unexpected cache type %T, expect *base", dirty) diff --git a/systemcontractindex/stakingindex/stakeview.go b/systemcontractindex/stakingindex/stakeview.go index cbc8c41c01..d8c78197d4 100644 --- a/systemcontractindex/stakingindex/stakeview.go +++ b/systemcontractindex/stakingindex/stakeview.go @@ -52,3 +52,8 @@ func (s *stakeView) Handle(ctx context.Context, receipt *action.Receipt) error { handler := newEventHandler(s.helper.bucketNS, s.cache, blkCtx, s.helper.timestamped, muted) return s.helper.handleReceipt(ctx, handler, receipt) } + +func (s *stakeView) Commit() { + s.cache.Commit() + s.cache = s.cache.Base() +}