@@ -848,21 +848,13 @@ fs::path BlockManager::GetBlockPosFilename(const FlatFilePos& pos) const
     return BlockFileSeq().FileName(pos);
 }

-bool BlockManager::FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime, bool fKnown)
+bool BlockManager::FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime)
 {
     LOCK(cs_LastBlockFile);

     const BlockfileType chain_type = BlockfileTypeForHeight(nHeight);
-    // Check that chain type is NORMAL if fKnown is true, because fKnown is only
-    // true during reindexing, and reindexing deletes snapshot chainstates, so
-    // chain_type will not be SNAPSHOT. Also check that cursor exists, because
-    // the normal cursor should never be null.
-    if (fKnown) {
-        Assume(chain_type == BlockfileType::NORMAL);
-        Assume(m_blockfile_cursors[chain_type]);
-    }

-    if (!fKnown && !m_blockfile_cursors[chain_type]) {
+    if (!m_blockfile_cursors[chain_type]) {
         // If a snapshot is loaded during runtime, we may not have initialized this cursor yet.
         assert(chain_type == BlockfileType::ASSUMED);
         const auto new_cursor = BlockfileCursor{this->MaxBlockfileNum() + 1};
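The `Assume(...)` checks that used to guard the `fKnown` path move into the new `UpdateBlockInfo()` further down; what remains here is only the lazy initialization of the snapshot (ASSUMED) cursor. A minimal standalone sketch of that pattern, using simplified stand-in types rather than the real `BlockfileCursor` and `m_blockfile_cursors` map:

```cpp
// Illustrative sketch only -- not part of the patch. Types are simplified
// stand-ins for BlockfileCursor and the per-chain-type cursor map.
#include <array>
#include <optional>

enum BlockfileTypeSketch { NORMAL = 0, ASSUMED = 1, NUM_TYPES = 2 };

struct CursorSketch {
    int file_num{0};
    int undo_height{0};
};

// Mirrors the branch kept in FindBlockPos(): if a snapshot chainstate is
// activated at runtime, its cursor may not exist yet, so point it at the
// first unused blockfile number.
void EnsureCursor(std::array<std::optional<CursorSketch>, NUM_TYPES>& cursors,
                  BlockfileTypeSketch type, int max_blockfile_num)
{
    if (!cursors[type]) {
        cursors[type] = CursorSketch{max_blockfile_num + 1, 0};
    }
}
```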
@@ -871,90 +863,107 @@ bool BlockManager::FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigne
     }
     const int last_blockfile = m_blockfile_cursors[chain_type]->file_num;

-    int nFile = fKnown ? pos.nFile : last_blockfile;
+    int nFile = last_blockfile;
     if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
         m_blockfile_info.resize(nFile + 1);
     }

     bool finalize_undo = false;
-    if (!fKnown) {
-        unsigned int max_blockfile_size{MAX_BLOCKFILE_SIZE};
-        // Use smaller blockfiles in test-only -fastprune mode - but avoid
-        // the possibility of having a block not fit into the block file.
-        if (m_opts.fast_prune) {
-            max_blockfile_size = 0x10000; // 64kiB
-            if (nAddSize >= max_blockfile_size) {
-                // dynamically adjust the blockfile size to be larger than the added size
-                max_blockfile_size = nAddSize + 1;
-            }
+    unsigned int max_blockfile_size{MAX_BLOCKFILE_SIZE};
+    // Use smaller blockfiles in test-only -fastprune mode - but avoid
+    // the possibility of having a block not fit into the block file.
+    if (m_opts.fast_prune) {
+        max_blockfile_size = 0x10000; // 64kiB
+        if (nAddSize >= max_blockfile_size) {
+            // dynamically adjust the blockfile size to be larger than the added size
+            max_blockfile_size = nAddSize + 1;
         }
-        assert(nAddSize < max_blockfile_size);
-
-        while (m_blockfile_info[nFile].nSize + nAddSize >= max_blockfile_size) {
-            // when the undo file is keeping up with the block file, we want to flush it explicitly
-            // when it is lagging behind (more blocks arrive than are being connected), we let the
-            // undo block write case handle it
-            finalize_undo = (static_cast<int>(m_blockfile_info[nFile].nHeightLast) ==
-                             Assert(m_blockfile_cursors[chain_type])->undo_height);
-
-            // Try the next unclaimed blockfile number
-            nFile = this->MaxBlockfileNum() + 1;
-            // Set to increment MaxBlockfileNum() for next iteration
-            m_blockfile_cursors[chain_type] = BlockfileCursor{nFile};
-
-            if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
-                m_blockfile_info.resize(nFile + 1);
-            }
+    }
+    assert(nAddSize < max_blockfile_size);
+
+    while (m_blockfile_info[nFile].nSize + nAddSize >= max_blockfile_size) {
+        // when the undo file is keeping up with the block file, we want to flush it explicitly
+        // when it is lagging behind (more blocks arrive than are being connected), we let the
+        // undo block write case handle it
+        finalize_undo = (static_cast<int>(m_blockfile_info[nFile].nHeightLast) ==
+                         Assert(m_blockfile_cursors[chain_type])->undo_height);
+
+        // Try the next unclaimed blockfile number
+        nFile = this->MaxBlockfileNum() + 1;
+        // Set to increment MaxBlockfileNum() for next iteration
+        m_blockfile_cursors[chain_type] = BlockfileCursor{nFile};
+
+        if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
+            m_blockfile_info.resize(nFile + 1);
         }
-        pos.nFile = nFile;
-        pos.nPos = m_blockfile_info[nFile].nSize;
     }
+    pos.nFile = nFile;
+    pos.nPos = m_blockfile_info[nFile].nSize;

     if (nFile != last_blockfile) {
-        if (!fKnown) {
-            LogPrint(BCLog::BLOCKSTORAGE, "Leaving block file %i: %s (onto %i) (height %i)\n",
-                     last_blockfile, m_blockfile_info[last_blockfile].ToString(), nFile, nHeight);
-
-            // Do not propagate the return code. The flush concerns a previous block
-            // and undo file that has already been written to. If a flush fails
-            // here, and we crash, there is no expected additional block data
-            // inconsistency arising from the flush failure here. However, the undo
-            // data may be inconsistent after a crash if the flush is called during
-            // a reindex. A flush error might also leave some of the data files
-            // untrimmed.
-            if (!FlushBlockFile(last_blockfile, !fKnown, finalize_undo)) {
-                LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning,
-                              "Failed to flush previous block file %05i (finalize=%i, finalize_undo=%i) before opening new block file %05i\n",
-                              last_blockfile, !fKnown, finalize_undo, nFile);
-            }
+        LogPrint(BCLog::BLOCKSTORAGE, "Leaving block file %i: %s (onto %i) (height %i)\n",
+                 last_blockfile, m_blockfile_info[last_blockfile].ToString(), nFile, nHeight);
+
+        // Do not propagate the return code. The flush concerns a previous block
+        // and undo file that has already been written to. If a flush fails
+        // here, and we crash, there is no expected additional block data
+        // inconsistency arising from the flush failure here. However, the undo
+        // data may be inconsistent after a crash if the flush is called during
+        // a reindex. A flush error might also leave some of the data files
+        // untrimmed.
+        if (!FlushBlockFile(last_blockfile, /*fFinalize=*/true, finalize_undo)) {
+            LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning,
+                          "Failed to flush previous block file %05i (finalize=1, finalize_undo=%i) before opening new block file %05i\n",
+                          last_blockfile, finalize_undo, nFile);
         }
         // No undo data yet in the new file, so reset our undo-height tracking.
         m_blockfile_cursors[chain_type] = BlockfileCursor{nFile};
     }

     m_blockfile_info[nFile].AddBlock(nHeight, nTime);
-    if (fKnown) {
-        m_blockfile_info[nFile].nSize = std::max(pos.nPos + nAddSize, m_blockfile_info[nFile].nSize);
-    } else {
-        m_blockfile_info[nFile].nSize += nAddSize;
-    }
+    m_blockfile_info[nFile].nSize += nAddSize;

-    if (!fKnown) {
-        bool out_of_space;
-        size_t bytes_allocated = BlockFileSeq().Allocate(pos, nAddSize, out_of_space);
-        if (out_of_space) {
-            m_opts.notifications.fatalError(_("Disk space is too low!"));
-            return false;
-        }
-        if (bytes_allocated != 0 && IsPruneMode()) {
-            m_check_for_pruning = true;
-        }
+    bool out_of_space;
+    size_t bytes_allocated = BlockFileSeq().Allocate(pos, nAddSize, out_of_space);
+    if (out_of_space) {
+        m_opts.notifications.fatalError(_("Disk space is too low!"));
+        return false;
+    }
+    if (bytes_allocated != 0 && IsPruneMode()) {
+        m_check_for_pruning = true;
     }

     m_dirty_fileinfo.insert(nFile);
     return true;
 }

+void BlockManager::UpdateBlockInfo(const CBlock& block, unsigned int nHeight, const FlatFilePos& pos)
+{
+    LOCK(cs_LastBlockFile);
+
+    const unsigned int added_size = ::GetSerializeSize(TX_WITH_WITNESS(block));
+    const BlockfileType chain_type = BlockfileTypeForHeight(nHeight);
+    // Check that chain type is NORMAL, because this function is only
+    // called during reindexing, and reindexing deletes snapshot chainstates, so
+    // chain_type will not be SNAPSHOT. Also check that cursor exists, because
+    // the normal cursor should never be null.
+    Assume(chain_type == BlockfileType::NORMAL);
+    Assume(m_blockfile_cursors[chain_type]);
+    const int nFile = pos.nFile;
+    if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
+        m_blockfile_info.resize(nFile + 1);
+    }
+
+    const int last_blockfile = m_blockfile_cursors[chain_type]->file_num;
+    if (nFile != last_blockfile) {
+        // No undo data yet in the new file, so reset our undo-height tracking.
+        m_blockfile_cursors[chain_type] = BlockfileCursor{nFile};
+    }
+    m_blockfile_info[nFile].AddBlock(nHeight, block.GetBlockTime());
+    m_blockfile_info[nFile].nSize = std::max(pos.nPos + added_size, m_blockfile_info[nFile].nSize);
+    m_dirty_fileinfo.insert(nFile);
+}
+
 bool BlockManager::FindUndoPos(BlockValidationState& state, int nFile, FlatFilePos& pos, unsigned int nAddSize)
 {
     pos.nFile = nFile;
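The in-code comments in this hunk describe two per-block decisions: whether the current blockfile still has room (and how the 64 KiB cap is relaxed for an oversized block in test-only `-fastprune` mode), and whether the undo file should be finalized together with the block file. A minimal standalone sketch of just the size rule follows; `FileInfoSketch` and the 128 MiB default are illustrative stand-ins for `CBlockFileInfo` and `MAX_BLOCKFILE_SIZE`, not code from the patch:

```cpp
// Illustrative sketch only -- not part of the patch. Mirrors the check
// `m_blockfile_info[nFile].nSize + nAddSize >= max_blockfile_size`
// that drives the roll-over to the next blockfile in FindBlockPos().
#include <cassert>

struct FileInfoSketch {
    unsigned int nSize{0}; // bytes already used in this blockfile
};

bool NeedsNewBlockfile(const FileInfoSketch& info, unsigned int add_size, bool fast_prune)
{
    unsigned int max_blockfile_size = 0x8000000; // stand-in for MAX_BLOCKFILE_SIZE (128 MiB)
    if (fast_prune) {
        max_blockfile_size = 0x10000; // 64 KiB, as in test-only -fastprune mode
        if (add_size >= max_blockfile_size) {
            // grow the cap so a single oversized block still fits
            max_blockfile_size = add_size + 1;
        }
    }
    assert(add_size < max_blockfile_size);
    return info.nSize + add_size >= max_blockfile_size;
}
```

When this condition holds, `FindBlockPos()` above advances the cursor to `MaxBlockfileNum() + 1` and, since the previous file is being left behind, flushes it with `fFinalize` set.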
@@ -1145,17 +1154,17 @@ FlatFilePos BlockManager::SaveBlockToDisk(const CBlock& block, int nHeight, cons
     const auto position_known{dbp != nullptr};
     if (position_known) {
         blockPos = *dbp;
+        // position_known is set iff performing a reindex. In this case, no blocks need to be written, only the blockfile info database needs to be rebuilt.
+        UpdateBlockInfo(block, nHeight, *dbp);
     } else {
         // when known, blockPos.nPos points at the offset of the block data in the blk file. that already accounts for
         // the serialization header present in the file (the 4 magic message start bytes + the 4 length bytes = 8 bytes = BLOCK_SERIALIZATION_HEADER_SIZE).
         // we add BLOCK_SERIALIZATION_HEADER_SIZE only for new blocks since they will have the serialization header added when written to disk.
         nBlockSize += static_cast<unsigned int>(BLOCK_SERIALIZATION_HEADER_SIZE);
-    }
-    if (!FindBlockPos(blockPos, nBlockSize, nHeight, block.GetBlockTime(), position_known)) {
-        LogError("%s: FindBlockPos failed\n", __func__);
-        return FlatFilePos();
-    }
-    if (!position_known) {
+        if (!FindBlockPos(blockPos, nBlockSize, nHeight, block.GetBlockTime())) {
+            LogError("%s: FindBlockPos failed\n", __func__);
+            return FlatFilePos();
+        }
         if (!WriteBlockToDisk(block, blockPos)) {
             m_opts.notifications.fatalError(_("Failed to write block."));
             return FlatFilePos();
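The comment in the else-branch explains the 8-byte on-disk prefix (4 network magic bytes plus a 4-byte length field, together BLOCK_SERIALIZATION_HEADER_SIZE) that only newly written blocks need to reserve; reindexed blocks already sit in a blk*.dat file, so SaveBlockToDisk now only calls UpdateBlockInfo for them. A small sketch of that size accounting, using a hypothetical helper name (`BytesToReserve`) purely for illustration:

```cpp
// Illustrative sketch only -- not part of the patch. The constant mirrors the
// comment above: 4 magic message-start bytes + 4 length bytes = 8 bytes.
static constexpr unsigned int kBlockSerializationHeaderSize = 4 + 4;

// Hypothetical helper: how many bytes FindBlockPos() would be asked to
// reserve for a block whose serialized payload is `block_size` bytes.
unsigned int BytesToReserve(unsigned int block_size, bool position_known)
{
    if (position_known) {
        // Reindex path: the block is already on disk, so nothing new is
        // written; only the blockfile metadata is updated.
        return 0;
    }
    return block_size + kBlockSerializationHeaderSize;
}
```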