Skip to content

Commit b1d05e4

Browse files
authored
Merge branch 'develop' into feat/fork-detection-state-machine
2 parents 0bad518 + 574e188 commit b1d05e4

File tree

7 files changed: +265 −23 lines changed

clarity/src/vm/database/sqlite.rs

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -254,16 +254,13 @@ impl SqliteConnection {
254254

255255
Ok(())
256256
}
257+
257258
pub fn memory() -> Result<Connection> {
258259
let contract_db = SqliteConnection::inner_open(":memory:")?;
259260
SqliteConnection::initialize_conn(&contract_db)?;
260261
Ok(contract_db)
261262
}
262-
pub fn open(filename: &str) -> Result<Connection> {
263-
let contract_db = SqliteConnection::inner_open(filename)?;
264-
SqliteConnection::check_schema(&contract_db)?;
265-
Ok(contract_db)
266-
}
263+
267264
pub fn check_schema(conn: &Connection) -> Result<()> {
268265
let sql = "SELECT sql FROM sqlite_master WHERE name=?";
269266
let _: String = conn
@@ -272,10 +269,13 @@ impl SqliteConnection {
272269
let _: String = conn
273270
.query_row(sql, params!["metadata_table"], |row| row.get(0))
274271
.map_err(|x| InterpreterError::SqliteError(IncomparableError { err: x }))?;
272+
let _: String = conn
273+
.query_row(sql, params!["md_blockhashes"], |row| row.get(0))
274+
.map_err(|x| InterpreterError::SqliteError(IncomparableError { err: x }))?;
275275
Ok(())
276276
}
277277

278-
pub fn inner_open(filename: &str) -> Result<Connection> {
278+
fn inner_open(filename: &str) -> Result<Connection> {
279279
let conn = Connection::open(filename)
280280
.map_err(|x| InterpreterError::SqliteError(IncomparableError { err: x }))?;
281281

stackslib/src/chainstate/burn/db/sortdb.rs

Lines changed: 83 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -14,13 +14,16 @@
1414
// You should have received a copy of the GNU General Public License
1515
// along with this program. If not, see <http://www.gnu.org/licenses/>.
1616

17+
use std::cell::RefCell;
1718
use std::cmp::Ordering;
1819
use std::collections::{HashMap, HashSet};
1920
use std::io::{ErrorKind, Write};
2021
use std::ops::{Deref, DerefMut};
2122
use std::str::FromStr;
23+
use std::sync::{Arc, LazyLock, Mutex, MutexGuard};
2224
use std::{cmp, fmt, fs};
2325

26+
use clarity::util::lru_cache::LruCache;
2427
use clarity::vm::ast::ASTRules;
2528
use clarity::vm::costs::ExecutionCost;
2629
use clarity::vm::representations::{ClarityName, ContractName};
@@ -95,6 +98,10 @@ pub const REWARD_WINDOW_END: u64 = 144 * 90 + REWARD_WINDOW_START;
9598

9699
pub type BlockHeaderCache = HashMap<ConsensusHash, (Option<BlockHeaderHash>, ConsensusHash)>;
97100

101+
const DESCENDANCY_CACHE_SIZE: usize = 2000;
102+
static DESCENDANCY_CACHE: LazyLock<Arc<Mutex<LruCache<(SortitionId, BlockHeaderHash), bool>>>> =
103+
LazyLock::new(|| Arc::new(Mutex::new(LruCache::new(DESCENDANCY_CACHE_SIZE))));
104+
98105
pub enum FindIter<R> {
99106
Found(R),
100107
Continue,
@@ -1085,6 +1092,38 @@ pub trait SortitionHandle {
10851092
Ok(Some(StacksBlockId::new(&ch, &bhh)))
10861093
}
10871094

1095+
/// Check if the descendancy cache has an entry for whether or not the winning block in `key.0`
1096+
/// descends from `key.1`
1097+
///
1098+
/// If it does, return the cached entry
1099+
fn descendancy_cache_get(
1100+
cache: &mut MutexGuard<'_, LruCache<(SortitionId, BlockHeaderHash), bool>>,
1101+
key: &(SortitionId, BlockHeaderHash),
1102+
) -> Option<bool> {
1103+
match cache.get(key) {
1104+
Ok(result) => result,
1105+
// cache is broken, create a new one
1106+
Err(e) => {
1107+
error!("SortitionDB's descendant cache errored. Will continue operation with cleared cache"; "err" => %e);
1108+
**cache = LruCache::new(DESCENDANCY_CACHE_SIZE);
1109+
None
1110+
}
1111+
}
1112+
}
1113+
1114+
/// Cache the result of the descendancy check on whether or not the winning block in `key.0`
1115+
/// descends from `key.1`
1116+
fn descendancy_cache_put(
1117+
cache: &mut MutexGuard<'_, LruCache<(SortitionId, BlockHeaderHash), bool>>,
1118+
key: (SortitionId, BlockHeaderHash),
1119+
is_descended: bool,
1120+
) {
1121+
if let Err(e) = cache.insert_clean(key, is_descended) {
1122+
error!("SortitionDB's descendant cache errored. Will continue operation with cleared cache"; "err" => %e);
1123+
**cache = LruCache::new(DESCENDANCY_CACHE_SIZE);
1124+
}
1125+
}
1126+
10881127
/// is the given block a descendant of `potential_ancestor`?
10891128
/// * block_at_burn_height: the burn height of the sortition that chose the stacks block to check
10901129
/// * potential_ancestor: the stacks block hash of the potential ancestor
@@ -1112,12 +1151,43 @@ pub trait SortitionHandle {
11121151
test_debug!("No snapshot at height {}", block_at_burn_height);
11131152
db_error::NotFoundError
11141153
})?;
1154+
let top_sortition_id = sn.sortition_id;
1155+
1156+
let mut cache = DESCENDANCY_CACHE
1157+
.lock()
1158+
.expect("FATAL: lock poisoned in SortitionDB");
11151159

11161160
while sn.block_height >= earliest_block_height {
1161+
let cache_check_key = (sn.sortition_id, potential_ancestor.clone());
1162+
match Self::descendancy_cache_get(&mut cache, &cache_check_key) {
1163+
Some(result) => {
1164+
if sn.sortition_id != top_sortition_id {
1165+
Self::descendancy_cache_put(
1166+
&mut cache,
1167+
(top_sortition_id, cache_check_key.1),
1168+
result,
1169+
);
1170+
}
1171+
return Ok(result);
1172+
}
1173+
// not cached, don't need to do anything.
1174+
None => {}
1175+
}
1176+
11171177
if !sn.sortition {
1178+
Self::descendancy_cache_put(
1179+
&mut cache,
1180+
(top_sortition_id, cache_check_key.1),
1181+
false,
1182+
);
11181183
return Ok(false);
11191184
}
11201185
if &sn.winning_stacks_block_hash == potential_ancestor {
1186+
Self::descendancy_cache_put(
1187+
&mut cache,
1188+
(top_sortition_id, cache_check_key.1),
1189+
true,
1190+
);
11211191
return Ok(true);
11221192
}
11231193

@@ -1153,6 +1223,11 @@ pub trait SortitionHandle {
11531223
}
11541224
}
11551225
}
1226+
Self::descendancy_cache_put(
1227+
&mut cache,
1228+
(top_sortition_id, potential_ancestor.clone()),
1229+
false,
1230+
);
11561231
return Ok(false);
11571232
}
11581233
}
@@ -2028,15 +2103,15 @@ impl<'a> SortitionHandleConn<'a> {
20282103
connection: &'a SortitionDBConn<'a>,
20292104
chain_tip: &SortitionId,
20302105
) -> Result<SortitionHandleConn<'a>, db_error> {
2031-
Ok(SortitionHandleConn {
2032-
context: SortitionHandleContext {
2106+
Ok(SortitionHandleConn::new(
2107+
&connection.index,
2108+
SortitionHandleContext {
20332109
chain_tip: chain_tip.clone(),
20342110
first_block_height: connection.context.first_block_height,
20352111
pox_constants: connection.context.pox_constants.clone(),
20362112
dryrun: connection.context.dryrun,
20372113
},
2038-
index: connection.index,
2039-
})
2114+
))
20402115
}
20412116

20422117
fn get_tip_indexed(&self, key: &str) -> Result<Option<String>, db_error> {
@@ -3723,15 +3798,15 @@ impl SortitionDBTx<'_> {
37233798

37243799
impl SortitionDBConn<'_> {
37253800
pub fn as_handle<'b>(&'b self, chain_tip: &SortitionId) -> SortitionHandleConn<'b> {
3726-
SortitionHandleConn {
3727-
index: self.index,
3728-
context: SortitionHandleContext {
3801+
SortitionHandleConn::new(
3802+
&self.index,
3803+
SortitionHandleContext {
37293804
first_block_height: self.context.first_block_height.clone(),
37303805
chain_tip: chain_tip.clone(),
37313806
pox_constants: self.context.pox_constants.clone(),
37323807
dryrun: self.context.dryrun,
37333808
},
3734-
}
3809+
)
37353810
}
37363811

37373812
/// Given a burnchain consensus hash,

stackslib/src/chainstate/stacks/index/marf.rs

Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,7 @@ use stacks_common::types::chainstate::{BlockHeaderHash, TrieHash, TRIEHASH_ENCOD
2525
use stacks_common::util::hash::Sha512Trunc256Sum;
2626
use stacks_common::util::log;
2727

28+
use super::storage::ReopenedTrieStorageConnection;
2829
use crate::chainstate::stacks::index::bits::{get_leaf_hash, get_node_hash, read_root_hash};
2930
use crate::chainstate::stacks::index::node::{
3031
clear_backptr, is_backptr, set_backptr, CursorError, TrieCursor, TrieNode, TrieNode16,
@@ -251,6 +252,20 @@ impl<T: MarfTrieId> MarfConnection<T> for MarfTransaction<'_, T> {
251252
}
252253
}
253254

255+
impl<T: MarfTrieId> MarfConnection<T> for ReopenedTrieStorageConnection<'_, T> {
256+
fn with_conn<F, R>(&mut self, exec: F) -> R
257+
where
258+
F: FnOnce(&mut TrieStorageConnection<T>) -> R,
259+
{
260+
let mut conn = self.connection();
261+
exec(&mut conn)
262+
}
263+
264+
fn sqlite_conn(&self) -> &Connection {
265+
self.db_conn()
266+
}
267+
}
268+
254269
impl<T: MarfTrieId> MarfConnection<T> for MARF<T> {
255270
fn with_conn<F, R>(&mut self, exec: F) -> R
256271
where
@@ -1620,6 +1635,21 @@ impl<T: MarfTrieId> MARF<T> {
16201635
})
16211636
}
16221637

1638+
/// Build a read-only storage connection which can be used for reads without modifying the
1639+
/// calling MARF struct (i.e., the tip pointer is only changed in the connection)
1640+
/// but reusing self's existing SQLite Connection (avoiding the overhead of
1641+
/// `reopen_readonly`).
1642+
pub fn reopen_connection(&self) -> Result<ReopenedTrieStorageConnection<'_, T>, Error> {
1643+
if self.open_chain_tip.is_some() {
1644+
error!(
1645+
"MARF at {} is already in the process of writing",
1646+
&self.storage.db_path
1647+
);
1648+
return Err(Error::InProgressError);
1649+
}
1650+
self.storage.reopen_connection()
1651+
}
1652+
16231653
/// Get the root trie hash at a particular block
16241654
pub fn get_root_hash_at(&mut self, block_hash: &T) -> Result<TrieHash, Error> {
16251655
self.storage.connection().get_root_hash_at(block_hash)

stackslib/src/chainstate/stacks/index/storage.rs

Lines changed: 92 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1310,6 +1310,50 @@ impl<T: MarfTrieId> TrieStorageTransientData<T> {
13101310
}
13111311
}
13121312

1313+
pub struct ReopenedTrieStorageConnection<'a, T: MarfTrieId> {
1314+
pub db_path: &'a str,
1315+
db: &'a Connection,
1316+
blobs: Option<TrieFile>,
1317+
data: TrieStorageTransientData<T>,
1318+
cache: TrieCache<T>,
1319+
bench: TrieBenchmark,
1320+
pub hash_calculation_mode: TrieHashCalculationMode,
1321+
1322+
/// row ID of a trie that represents unconfirmed state (i.e. trie state that will never become
1323+
/// part of the MARF, but nevertheless represents a persistent scratch space). If this field
1324+
/// is Some(..), then the storage connection here was used to (re-)open an unconfirmed trie
1325+
/// (via `open_unconfirmed()` or `open_block()` when `self.unconfirmed()` is `true`), or used
1326+
/// to create an unconfirmed trie (via `extend_to_unconfirmed_block()`).
1327+
unconfirmed_block_id: Option<u32>,
1328+
1329+
// used in testing in order to short-circuit block-height lookups
1330+
// when the trie struct is tested outside of marf.rs usage
1331+
#[cfg(test)]
1332+
pub test_genesis_block: Option<T>,
1333+
}
1334+
1335+
impl<'a, T: MarfTrieId> ReopenedTrieStorageConnection<'a, T> {
1336+
pub fn db_conn(&self) -> &Connection {
1337+
self.db
1338+
}
1339+
1340+
pub fn connection(&mut self) -> TrieStorageConnection<'_, T> {
1341+
TrieStorageConnection {
1342+
db: SqliteConnection::ConnRef(&self.db),
1343+
db_path: self.db_path,
1344+
data: &mut self.data,
1345+
blobs: self.blobs.as_mut(),
1346+
cache: &mut self.cache,
1347+
bench: &mut self.bench,
1348+
hash_calculation_mode: self.hash_calculation_mode,
1349+
unconfirmed_block_id: None,
1350+
1351+
#[cfg(test)]
1352+
test_genesis_block: &mut self.test_genesis_block,
1353+
}
1354+
}
1355+
}
1356+
13131357
impl<T: MarfTrieId> TrieFileStorage<T> {
13141358
pub fn connection(&mut self) -> TrieStorageConnection<'_, T> {
13151359
TrieStorageConnection {
@@ -1327,6 +1371,54 @@ impl<T: MarfTrieId> TrieFileStorage<T> {
13271371
}
13281372
}
13291373

1374+
/// Build a read-only storage connection which can be used for reads without modifying the
1375+
/// calling TrieFileStorage struct (i.e., the tip pointer is only changed in the connection)
1376+
/// but reusing the TrieFileStorage's existing SQLite Connection (avoiding the overhead of
1377+
/// `reopen_readonly`).
1378+
pub fn reopen_connection(&self) -> Result<ReopenedTrieStorageConnection<'_, T>, Error> {
1379+
let data = TrieStorageTransientData {
1380+
uncommitted_writes: self.data.uncommitted_writes.clone(),
1381+
cur_block: self.data.cur_block.clone(),
1382+
cur_block_id: self.data.cur_block_id.clone(),
1383+
1384+
read_count: 0,
1385+
read_backptr_count: 0,
1386+
read_node_count: 0,
1387+
read_leaf_count: 0,
1388+
1389+
write_count: 0,
1390+
write_node_count: 0,
1391+
write_leaf_count: 0,
1392+
1393+
trie_ancestor_hash_bytes_cache: None,
1394+
1395+
readonly: true,
1396+
unconfirmed: self.unconfirmed(),
1397+
};
1398+
// perf note: should we attempt to clone the cache instead of starting empty?
1399+
let cache = TrieCache::default();
1400+
let blobs = if self.blobs.is_some() {
1401+
Some(TrieFile::from_db_path(&self.db_path, true)?)
1402+
} else {
1403+
None
1404+
};
1405+
let bench = TrieBenchmark::new();
1406+
let hash_calculation_mode = self.hash_calculation_mode;
1407+
let unconfirmed_block_id = None;
1408+
Ok(ReopenedTrieStorageConnection {
1409+
db_path: &self.db_path,
1410+
db: &self.db,
1411+
blobs,
1412+
data,
1413+
cache,
1414+
bench,
1415+
hash_calculation_mode,
1416+
unconfirmed_block_id,
1417+
#[cfg(test)]
1418+
test_genesis_block: self.test_genesis_block.clone(),
1419+
})
1420+
}
1421+
13301422
pub fn transaction(&mut self) -> Result<TrieStorageTransaction<'_, T>, Error> {
13311423
if self.readonly() {
13321424
return Err(Error::ReadOnlyError);

Comments (0)