Skip to content

Commit dbbdbff

Browse files
committed
Merge branch 'develop' of https://github.com/stacks-network/stacks-core into chore/add-vm-error-to-transaction-event-payload
2 parents 436ea91 + a5b5137 commit dbbdbff

File tree

10 files changed

+466
-248
lines changed

10 files changed

+466
-248
lines changed

CONTRIBUTING.md

Lines changed: 18 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -361,17 +361,31 @@ A non-exhaustive list of examples of consensus-critical changes include:
361361

362362
- Every consensus-critical change needs an integration test to verify that the feature activates only when the hard fork activates.
363363

364-
PRs must include test coverage. However, if your PR includes large tests or tests which cannot run in parallel
364+
- PRs must include test coverage. However, if your PR includes large tests or tests which cannot run in parallel
365365
(which is the default operation of the `cargo test` command), these tests should be decorated with `#[ignore]`.
366-
367366
A test should be marked `#[ignore]` if:
368367

369-
1. It does not _always_ pass `cargo test` in a vanilla environment
368+
1. It does not _always_ pass `cargo test` in a vanilla environment
370369
(i.e., it does not need to run with `--test-threads 1`).
371370

372-
2. Or, it runs for over a minute via a normal `cargo test` execution
371+
2. Or, it runs for over a minute via a normal `cargo test` execution
373372
(the `cargo test` command will warn if this is not the case).
374373

374+
- **Integration tests need to be properly tagged** using [pinny-rs](https://github.com/BitcoinL2-Labs/pinny-rs/) crate. Tagging requires two fundamental steps:
375+
1. Define allowed tags in the package `Cargo.toml` file (if needed).
376+
2. Apply relevant tags to the tests, picking from the allowed set.
377+
378+
Then it will be possible to run tests filtered by these tags using `cargo test` and the `cargo nextest` runner.
379+
> For more information and examples on how tagging works, refer to the [pinny-rs](https://github.com/BitcoinL2-Labs/pinny-rs/) readme.
380+
381+
Below is the tag set currently defined, with the purpose of each tag:
382+
383+
| Tag | Description |
384+
|-----------------|----------------------------------------------|
385+
| `slow` | tests running over a minute |
386+
| `bitcoind` | tests requiring bitcoin daemon |
387+
| `flaky` | tests that exhibit flaky behavior |
388+
375389
## Formatting
376390

377391
PRs will be checked against `rustfmt` and will _fail_ if not properly formatted.

Cargo.lock

Lines changed: 14 additions & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

clarity/src/vm/database/sqlite.rs

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -254,16 +254,13 @@ impl SqliteConnection {
254254

255255
Ok(())
256256
}
257+
257258
pub fn memory() -> Result<Connection> {
258259
let contract_db = SqliteConnection::inner_open(":memory:")?;
259260
SqliteConnection::initialize_conn(&contract_db)?;
260261
Ok(contract_db)
261262
}
262-
pub fn open(filename: &str) -> Result<Connection> {
263-
let contract_db = SqliteConnection::inner_open(filename)?;
264-
SqliteConnection::check_schema(&contract_db)?;
265-
Ok(contract_db)
266-
}
263+
267264
pub fn check_schema(conn: &Connection) -> Result<()> {
268265
let sql = "SELECT sql FROM sqlite_master WHERE name=?";
269266
let _: String = conn
@@ -272,10 +269,13 @@ impl SqliteConnection {
272269
let _: String = conn
273270
.query_row(sql, params!["metadata_table"], |row| row.get(0))
274271
.map_err(|x| InterpreterError::SqliteError(IncomparableError { err: x }))?;
272+
let _: String = conn
273+
.query_row(sql, params!["md_blockhashes"], |row| row.get(0))
274+
.map_err(|x| InterpreterError::SqliteError(IncomparableError { err: x }))?;
275275
Ok(())
276276
}
277277

278-
pub fn inner_open(filename: &str) -> Result<Connection> {
278+
fn inner_open(filename: &str) -> Result<Connection> {
279279
let conn = Connection::open(filename)
280280
.map_err(|x| InterpreterError::SqliteError(IncomparableError { err: x }))?;
281281

stackslib/src/chainstate/burn/db/sortdb.rs

Lines changed: 59 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ use std::collections::{HashMap, HashSet};
2020
use std::io::{ErrorKind, Write};
2121
use std::ops::{Deref, DerefMut};
2222
use std::str::FromStr;
23-
use std::sync::{Arc, LazyLock, Mutex};
23+
use std::sync::{Arc, LazyLock, Mutex, MutexGuard};
2424
use std::{cmp, fmt, fs};
2525

2626
use clarity::util::lru_cache::LruCache;
@@ -98,8 +98,9 @@ pub const REWARD_WINDOW_END: u64 = 144 * 90 + REWARD_WINDOW_START;
9898

9999
pub type BlockHeaderCache = HashMap<ConsensusHash, (Option<BlockHeaderHash>, ConsensusHash)>;
100100

101+
const DESCENDANCY_CACHE_SIZE: usize = 2000;
101102
static DESCENDANCY_CACHE: LazyLock<Arc<Mutex<LruCache<(SortitionId, BlockHeaderHash), bool>>>> =
102-
LazyLock::new(|| Arc::new(Mutex::new(LruCache::new(2000))));
103+
LazyLock::new(|| Arc::new(Mutex::new(LruCache::new(DESCENDANCY_CACHE_SIZE))));
103104

104105
pub enum FindIter<R> {
105106
Found(R),
@@ -1091,6 +1092,38 @@ pub trait SortitionHandle {
10911092
Ok(Some(StacksBlockId::new(&ch, &bhh)))
10921093
}
10931094

1095+
/// Check if the descendancy cache has an entry for whether or not the winning block in `key.0`
1096+
/// descends from `key.1`
1097+
///
1098+
/// If it does, return the cached entry
1099+
fn descendancy_cache_get(
1100+
cache: &mut MutexGuard<'_, LruCache<(SortitionId, BlockHeaderHash), bool>>,
1101+
key: &(SortitionId, BlockHeaderHash),
1102+
) -> Option<bool> {
1103+
match cache.get(key) {
1104+
Ok(result) => result,
1105+
// cache is broken, create a new one
1106+
Err(e) => {
1107+
error!("SortitionDB's descendant cache errored. Will continue operation with cleared cache"; "err" => %e);
1108+
**cache = LruCache::new(DESCENDANCY_CACHE_SIZE);
1109+
None
1110+
}
1111+
}
1112+
}
1113+
1114+
/// Cache the result of the descendancy check on whether or not the winning block in `key.0`
1115+
/// descends from `key.1`
1116+
fn descendancy_cache_put(
1117+
cache: &mut MutexGuard<'_, LruCache<(SortitionId, BlockHeaderHash), bool>>,
1118+
key: (SortitionId, BlockHeaderHash),
1119+
is_descended: bool,
1120+
) {
1121+
if let Err(e) = cache.insert_clean(key, is_descended) {
1122+
error!("SortitionDB's descendant cache errored. Will continue operation with cleared cache"; "err" => %e);
1123+
**cache = LruCache::new(DESCENDANCY_CACHE_SIZE);
1124+
}
1125+
}
1126+
10941127
/// is the given block a descendant of `potential_ancestor`?
10951128
/// * block_at_burn_height: the burn height of the sortition that chose the stacks block to check
10961129
/// * potential_ancestor: the stacks block hash of the potential ancestor
@@ -1125,39 +1158,36 @@ pub trait SortitionHandle {
11251158
.expect("FATAL: lock poisoned in SortitionDB");
11261159

11271160
while sn.block_height >= earliest_block_height {
1128-
match cache.get(&(sn.sortition_id, potential_ancestor.clone())) {
1129-
Ok(Some(result)) => {
1161+
let cache_check_key = (sn.sortition_id, potential_ancestor.clone());
1162+
match Self::descendancy_cache_get(&mut cache, &cache_check_key) {
1163+
Some(result) => {
11301164
if sn.sortition_id != top_sortition_id {
1131-
if let Err(_) = cache
1132-
.insert_clean((top_sortition_id, potential_ancestor.clone()), result)
1133-
{
1134-
*cache = LruCache::new(2000);
1135-
}
1165+
Self::descendancy_cache_put(
1166+
&mut cache,
1167+
(top_sortition_id, cache_check_key.1),
1168+
result,
1169+
);
11361170
}
11371171
return Ok(result);
11381172
}
11391173
// not cached, don't need to do anything.
1140-
Ok(None) => {}
1141-
// cache is broken, create a new one
1142-
Err(_) => {
1143-
*cache = LruCache::new(2000);
1144-
}
1174+
None => {}
11451175
}
11461176

11471177
if !sn.sortition {
1148-
if let Err(_) =
1149-
cache.insert_clean((top_sortition_id, potential_ancestor.clone()), false)
1150-
{
1151-
*cache = LruCache::new(2000);
1152-
}
1178+
Self::descendancy_cache_put(
1179+
&mut cache,
1180+
(top_sortition_id, cache_check_key.1),
1181+
false,
1182+
);
11531183
return Ok(false);
11541184
}
11551185
if &sn.winning_stacks_block_hash == potential_ancestor {
1156-
if let Err(_) =
1157-
cache.insert_clean((top_sortition_id, potential_ancestor.clone()), true)
1158-
{
1159-
*cache = LruCache::new(2000);
1160-
}
1186+
Self::descendancy_cache_put(
1187+
&mut cache,
1188+
(top_sortition_id, cache_check_key.1),
1189+
true,
1190+
);
11611191
return Ok(true);
11621192
}
11631193

@@ -1193,9 +1223,11 @@ pub trait SortitionHandle {
11931223
}
11941224
}
11951225
}
1196-
if let Err(_) = cache.insert_clean((top_sortition_id, potential_ancestor.clone()), false) {
1197-
*cache = LruCache::new(2000);
1198-
}
1226+
Self::descendancy_cache_put(
1227+
&mut cache,
1228+
(top_sortition_id, potential_ancestor.clone()),
1229+
false,
1230+
);
11991231
return Ok(false);
12001232
}
12011233
}

stackslib/src/clarity_vm/clarity.rs

Lines changed: 48 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1940,6 +1940,7 @@ impl ClarityTransactionConnection<'_, '_> {
19401940
#[cfg(test)]
19411941
mod tests {
19421942
use std::fs;
1943+
use std::path::PathBuf;
19431944

19441945
use clarity::vm::analysis::errors::CheckErrors;
19451946
use clarity::vm::database::{ClarityBackingStore, STXBalance};
@@ -1950,10 +1951,57 @@ mod tests {
19501951
use stacks_common::types::sqlite::NO_PARAMS;
19511952

19521953
use super::*;
1954+
use crate::chainstate::stacks::index::marf::{MARFOpenOpts, MarfConnection as _};
19531955
use crate::chainstate::stacks::index::ClarityMarfTrieId;
19541956
use crate::clarity_vm::database::marf::MarfedKV;
19551957
use crate::core::{PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05};
19561958

1959+
#[test]
1960+
pub fn create_md_index() {
1961+
let path_db = "/tmp/stacks-node-tests/creat_md_index";
1962+
std::fs::remove_dir_all(path_db);
1963+
let mut path = PathBuf::from(path_db);
1964+
1965+
std::fs::create_dir_all(&path).unwrap();
1966+
1967+
path.push("marf.sqlite");
1968+
let marf_path = path.to_str().unwrap().to_string();
1969+
1970+
let mut marf_opts = MARFOpenOpts::default();
1971+
marf_opts.external_blobs = true;
1972+
1973+
let mut marf: MARF<StacksBlockId> = MARF::from_path(&marf_path, marf_opts).unwrap();
1974+
1975+
let tx = marf.storage_tx().unwrap();
1976+
1977+
tx.query_row("PRAGMA journal_mode = WAL;", NO_PARAMS, |_row| Ok(()))
1978+
.unwrap();
1979+
1980+
tx.execute(
1981+
"CREATE TABLE IF NOT EXISTS data_table
1982+
(key TEXT PRIMARY KEY, value TEXT)",
1983+
NO_PARAMS,
1984+
)
1985+
.unwrap();
1986+
1987+
tx.execute(
1988+
"CREATE TABLE IF NOT EXISTS metadata_table
1989+
(key TEXT NOT NULL, blockhash TEXT, value TEXT,
1990+
UNIQUE (key, blockhash))",
1991+
NO_PARAMS,
1992+
)
1993+
.unwrap();
1994+
1995+
tx.commit().unwrap();
1996+
1997+
assert!(SqliteConnection::check_schema(marf.sqlite_conn()).is_err());
1998+
1999+
MarfedKV::open(path_db, None, None).unwrap();
2000+
2001+
// schema should be good now
2002+
assert!(SqliteConnection::check_schema(marf.sqlite_conn()).is_ok());
2003+
}
2004+
19572005
#[test]
19582006
pub fn bad_syntax_test() {
19592007
let marf = MarfedKV::temporary();

stackslib/src/net/tests/convergence.rs

Lines changed: 40 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -52,7 +52,7 @@ fn stacker_db_id(i: usize) -> QualifiedContractIdentifier {
5252

5353
fn make_stacker_db_ids(i: usize) -> Vec<QualifiedContractIdentifier> {
5454
let mut dbs = vec![];
55-
for j in 0..i {
55+
for j in 0..i + 1 {
5656
dbs.push(stacker_db_id(j));
5757
}
5858
dbs
@@ -1053,6 +1053,45 @@ fn run_topology_test_ex<F>(
10531053
(100.0 * (peer_counts as f64)) / ((peer_count * peer_count) as f64),
10541054
);
10551055

1056+
// wait for stacker DBs to converge
1057+
for (i, peer) in peers.iter().enumerate() {
1058+
if i % 2 != 0 {
1059+
continue;
1060+
}
1061+
for (j, other_peer) in peers.iter().enumerate() {
1062+
if i == j {
1063+
continue;
1064+
}
1065+
1066+
let all_neighbors =
1067+
PeerDB::get_all_peers(other_peer.network.peerdb.conn()).unwrap();
1068+
1069+
if (all_neighbors.len() as u64) < ((peer_count - 1) as u64) {
1070+
// this is a simulated-NAT'ed node -- it won't learn about other NAT'ed nodes'
1071+
// DBs
1072+
continue;
1073+
}
1074+
1075+
if j % 2 != 0 {
1076+
continue; // this peer doesn't support Stacker DBs
1077+
}
1078+
let dbs = peer
1079+
.network
1080+
.peerdb
1081+
.get_peer_stacker_dbs(&other_peer.config.to_neighbor())
1082+
.unwrap();
1083+
if dbs.is_empty() {
1084+
test_debug!(
1085+
"waiting for peer {i} {} to learn about peer {j} {}'s stacker DBs",
1086+
&peer.config.to_neighbor(),
1087+
&other_peer.config.to_neighbor()
1088+
);
1089+
finished = false;
1090+
break;
1091+
}
1092+
}
1093+
}
1094+
10561095
if finished {
10571096
break;
10581097
}

testnet/stacks-node/Cargo.toml

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -51,6 +51,7 @@ http-types = "2.12"
5151
tempfile = "3.3"
5252
mockito = "1.5"
5353
serial_test = "3.2.0"
54+
pinny = { git = "https://github.com/BitcoinL2-Labs/pinny-rs.git", rev = "54ba9d533a7b84525a5e65a3eae1a3ae76b9ea49" } #v0.0.2
5455
madhouse = { git = "https://github.com/stacks-network/madhouse-rs.git", rev = "fc651ddcbaf85e888b06d4a87aa788c4b7ba9309" }
5556
proptest = { git = "https://github.com/proptest-rs/proptest.git", rev = "c9bdf18c232665b2b740c667c81866b598d06dc7" }
5657

@@ -68,3 +69,6 @@ slog_json = ["stacks/slog_json", "stacks-common/slog_json", "clarity/slog_json"]
6869
prod-genesis-chainstate = []
6970
default = []
7071
testing = []
72+
73+
[package.metadata.pinny]
74+
allowed = ["bitcoind", "flaky", "slow"]

0 commit comments

Comments
 (0)