From a6f68bd181906fb686f2cfe937906fb61660dc37 Mon Sep 17 00:00:00 2001 From: Francesco Leacche Date: Tue, 3 Jun 2025 21:51:34 +0100 Subject: [PATCH 01/20] Add config-docs-generator tool for automated documentation generation --- .gitignore | 1 + Cargo.lock | 12 + Cargo.toml | 3 +- Dockerfile | 33 +- .../tools/config-docs-generator/Cargo.toml | 20 + contrib/tools/config-docs-generator/README.md | 137 ++ .../generate-config-docs.sh | 150 ++ .../config-docs-generator/src/extract_docs.rs | 1814 +++++++++++++++++ .../src/generate_markdown.rs | 1161 +++++++++++ docs/generated/configuration-reference.md | 225 ++ 10 files changed, 3543 insertions(+), 13 deletions(-) create mode 100644 contrib/tools/config-docs-generator/Cargo.toml create mode 100644 contrib/tools/config-docs-generator/README.md create mode 100755 contrib/tools/config-docs-generator/generate-config-docs.sh create mode 100644 contrib/tools/config-docs-generator/src/extract_docs.rs create mode 100644 contrib/tools/config-docs-generator/src/generate_markdown.rs create mode 100644 docs/generated/configuration-reference.md diff --git a/.gitignore b/.gitignore index 5069c47120..bb95069e7f 100644 --- a/.gitignore +++ b/.gitignore @@ -61,6 +61,7 @@ testnet/index.html /testnet/helium/target/ /contrib/tools/puppet-chain/target/ /contrib/core-contract-tests/.cache/ +/contrib/tools/config-docs-generator/target/ # These are backup files generated by rustfmt **/*.rs.bk diff --git a/Cargo.lock b/Cargo.lock index d66c964c00..c1ffde555d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -685,6 +685,18 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "config-docs-generator" +version = "0.1.0" +dependencies = [ + "anyhow", + "clap", + "once_cell", + "regex", + "serde", + "serde_json", +] + [[package]] name = "const-oid" version = "0.9.6" diff --git a/Cargo.toml b/Cargo.toml index 3b9486b61d..2ea2ba53ee 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,7 +10,8 @@ members = [ "contrib/tools/relay-server", "libsigner", "stacks-signer", - "testnet/stacks-node"] + "testnet/stacks-node", + "contrib/tools/config-docs-generator"] # Dependencies we want to keep the same between workspace members [workspace.dependencies] diff --git a/Dockerfile b/Dockerfile index ca03fa3ac6..760082ad88 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,16 +1,25 @@ -FROM rust:bookworm AS build +# Use a specific nightly toolchain for reproducible builds +FROM rustlang/rust@sha256:04690ffa09cddd358b349272173155319f384e57816614eea0840ec7f9422862 -ARG STACKS_NODE_VERSION="No Version Info" -ARG GIT_BRANCH='No Branch Info' -ARG GIT_COMMIT='No Commit Info' +# Set the working directory for building +WORKDIR /build -WORKDIR /src +# Copy the entire project to build the binaries COPY . . -RUN mkdir /out -RUN rustup toolchain install stable -RUN cargo build --features monitoring_prom,slog_json --release -RUN cp -R target/release/. 
/out -FROM debian:bookworm-slim -COPY --from=build /out/stacks-node /out/stacks-signer /out/stacks-inspect /bin/ -CMD ["stacks-node", "mainnet"] +# Pre-build the config-docs-generator binaries during image build +RUN cargo build --package config-docs-generator --release + +# Set the working directory where the project will be mounted at runtime +WORKDIR /project_root + +# Set environment variables for generate-config-docs.sh +ENV PROJECT_ROOT=/project_root +ENV BUILD_ROOT=/build +ENV CARGO_HOME=/project_root/.cargo +ENV EXTRACT_DOCS_BIN=/build/target/release/extract-docs +ENV GENERATE_MARKDOWN_BIN=/build/target/release/generate-markdown +ENV SKIP_BUILD=true + +# Set the entrypoint to run the config docs generation script +ENTRYPOINT ["/build/contrib/tools/config-docs-generator/generate-config-docs.sh"] diff --git a/contrib/tools/config-docs-generator/Cargo.toml b/contrib/tools/config-docs-generator/Cargo.toml new file mode 100644 index 0000000000..17bbd2200d --- /dev/null +++ b/contrib/tools/config-docs-generator/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "config-docs-generator" +version = "0.1.0" +edition = "2024" + +[[bin]] +name = "extract-docs" +path = "src/extract_docs.rs" + +[[bin]] +name = "generate-markdown" +path = "src/generate_markdown.rs" + +[dependencies] +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" +clap = { version = "4.0", features = ["derive"] } +regex = "1.0" +anyhow = "1.0" +once_cell = "1.18" diff --git a/contrib/tools/config-docs-generator/README.md b/contrib/tools/config-docs-generator/README.md new file mode 100644 index 0000000000..3d8e715dec --- /dev/null +++ b/contrib/tools/config-docs-generator/README.md @@ -0,0 +1,137 @@ +# Configuration Documentation Generator + +A tool that automatically generates comprehensive Markdown documentation for Stacks node TOML configuration options. The documentation is extracted directly from Rust source code comments and generates a complete configuration reference. + +## Quick Start + +### Using Docker (Recommended) + +The easiest way to generate configuration documentation: + +```bash +# Build the Docker image (one-time setup) +docker build -t config-docs-generator . 
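+
+# Note: the image build above pre-compiles the extract-docs and
+# generate-markdown binaries (see the Dockerfile), so it only needs to be
+# repeated when the generator's own source changes.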
+ +# Generate documentation +docker run --rm -v "$(pwd):/project_root" --user "$(id -u):$(id -g)" config-docs-generator +``` + +This approach: +- Uses a consistent nightly Rust environment +- Generates `docs/generated/configuration-reference.md` + +### Using Local Setup (Alternative) + +If you prefer to run without Docker: + +```bash +# Install nightly toolchain if needed +rustup toolchain install nightly + +# Generate documentation +./contrib/tools/config-docs-generator/generate-config-docs.sh +``` + +## What It Does + +The tool processes these configuration structs from the Stacks codebase: +- `BurnchainConfig` → `[burnchain]` section +- `NodeConfig` → `[node]` section +- `MinerConfig` → `[miner]` section +- `ConnectionOptionsFile` → `[connection_options]` section +- `FeeEstimationConfigFile` → `[fee_estimation]` section +- `EventObserverConfigFile` → `[event_observer]` section +- `InitialBalanceFile` → `[initial_balances]` section + +For each configuration field, it extracts: +- Field documentation from `///` comments +- Default values (including constant references) +- Usage notes and examples +- Deprecation warnings + +## Output Files + +- **Primary**: `docs/generated/configuration-reference.md` - Complete configuration reference +- **Intermediate**: `target/doc-generation/extracted-config-docs.json` - Raw extracted data + +## Adding New Configuration Structs + +### 1. Update the Target List + +Edit `contrib/tools/config-docs-generator/generate-config-docs.sh`: + +```bash +TARGET_STRUCTS="BurnchainConfig,NodeConfig,MinerConfig,YourNewConfig" +``` + +### 2. Document Your Struct + +Add proper documentation to your Rust configuration struct: + +```rust +/// Configuration for your new feature. +/// +/// This controls how the feature operates and integrates +/// with the existing node functionality. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct YourNewConfig { + /// Enable or disable the new feature. + /// --- + /// @default: `false` + /// @notes: + /// - Requires restart to take effect + /// - May impact performance when enabled + /// @toml_example: | + /// enabled = true + pub enabled: bool, + + /// Timeout for feature operations in milliseconds. + /// --- + /// @default: [`DEFAULT_TIMEOUT`] + pub timeout: u64, +} +``` + +### Supported Annotations + +- **@default**: Default value (supports constant references like `[`CONSTANT_NAME`]`) +- **@notes**: Bullet-pointed usage notes +- **@deprecated**: Deprecation message +- **@toml_example**: Example TOML configuration + +### 3. Add Section Mapping (Optional) + +If you want a custom TOML section name, edit `src/generate_markdown.rs`: + +```rust +fn struct_to_section_name(struct_name: &str) -> String { + match struct_name { + "YourNewConfig" => "[your_custom_section]".to_string(), + // ... existing mappings + _ => format!("[{}]", struct_name.to_lowercase()), + } +} +``` + +### 4. Generate and Verify + +```bash +# Using Docker (recommended) +docker run --rm -v "$(pwd):/project_root" --user "$(id -u):$(id -g)" config-docs-generator + +# OR using local setup +./contrib/tools/config-docs-generator/generate-config-docs.sh + +# Check that your struct appears +grep -A 5 "your_custom_section" docs/generated/configuration-reference.md +``` + +## How It Works + +The tool uses a three-step process: + +1. **Extract**: Uses `cargo +nightly rustdoc --output-format json` to generate documentation JSON +2. **Parse**: Extracts field information, resolves constant references across crates +3. 
**Generate**: Converts to Markdown with proper cross-references and formatting + +The process is automated by the shell script which coordinates building the tools and running the extraction/generation pipeline. diff --git a/contrib/tools/config-docs-generator/generate-config-docs.sh b/contrib/tools/config-docs-generator/generate-config-docs.sh new file mode 100755 index 0000000000..de4c951c61 --- /dev/null +++ b/contrib/tools/config-docs-generator/generate-config-docs.sh @@ -0,0 +1,150 @@ +#!/bin/bash + +set -euo pipefail + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +# Configuration - Allow environment variable overrides +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +PROJECT_ROOT="${PROJECT_ROOT:-$(cd "$SCRIPT_DIR/../../../" && pwd)}" +BUILD_ROOT="${BUILD_ROOT:-$PROJECT_ROOT}" +OUTPUT_DIR="$PROJECT_ROOT/docs/generated" +TEMP_DIR="$PROJECT_ROOT/target/doc-generation" +CONFIG_SOURCE_FILE="$PROJECT_ROOT/stackslib/src/config/mod.rs" + +# Paths to binaries - allow override via environment +EXTRACT_DOCS_BIN="${EXTRACT_DOCS_BIN:-$BUILD_ROOT/target/release/extract-docs}" +GENERATE_MARKDOWN_BIN="${GENERATE_MARKDOWN_BIN:-$BUILD_ROOT/target/release/generate-markdown}" + +# Check if binaries are pre-built (skip build step) +SKIP_BUILD="${SKIP_BUILD:-false}" +if [[ -f "$EXTRACT_DOCS_BIN" && -f "$GENERATE_MARKDOWN_BIN" ]]; then + SKIP_BUILD=true +fi + +log_info() { + echo -e "${GREEN}[INFO]${NC} $1" +} + +log_warn() { + echo -e "${YELLOW}[WARN]${NC} $1" +} + +log_error() { + echo -e "${RED}[ERROR]${NC} $1" +} + +cleanup() { + if [[ -d "$TEMP_DIR" ]]; then + rm -rf "$TEMP_DIR" + fi +} + +trap cleanup EXIT + +main() { + log_info "Starting config documentation generation..." + + # Create necessary directories + mkdir -p "$OUTPUT_DIR" + mkdir -p "$TEMP_DIR" + + cd "$PROJECT_ROOT" + + # Verify source file exists + if [[ ! -f "$CONFIG_SOURCE_FILE" ]]; then + log_error "Config source file not found: $CONFIG_SOURCE_FILE" + exit 1 + fi + + # Step 1: Build the documentation generation tools (skip if pre-built) + if [[ "$SKIP_BUILD" == "true" ]]; then + log_info "Using pre-built documentation generation tools..." + else + log_info "Building documentation generation tools..." + cargo build --package config-docs-generator --release + fi + + # Step 2: Extract documentation from source code using rustdoc + log_info "Extracting configuration documentation using rustdoc..." + EXTRACTED_JSON="$TEMP_DIR/extracted-config-docs.json" + # List of specific Rust struct names to be documented + # NOTE: This variable must be manually updated if this list changes + # (e.g., new config structs are added or removed from the project) + TARGET_STRUCTS="BurnchainConfig,NodeConfig,MinerConfig,ConnectionOptionsFile,FeeEstimationConfigFile,EventObserverConfigFile,InitialBalanceFile" + "$EXTRACT_DOCS_BIN" \ + --package stackslib \ + --structs "$TARGET_STRUCTS" \ + --output "$EXTRACTED_JSON" + + # Step 3: Generate Markdown + log_info "Generating Markdown documentation..." + MARKDOWN_OUTPUT="$OUTPUT_DIR/configuration-reference.md" + "$GENERATE_MARKDOWN_BIN" \ + --input "$EXTRACTED_JSON" \ + --output "$MARKDOWN_OUTPUT" + + log_info "Documentation generation complete!" 
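+    # For reference, the intermediate JSON written above has roughly this
+    # shape (field names mirror the ConfigDocs/StructDoc/FieldDoc structs in
+    # src/extract_docs.rs; the concrete values are illustrative only):
+    #   {
+    #     "structs": [
+    #       { "name": "NodeConfig", "description": "...", "fields": [ ... ] }
+    #     ],
+    #     "referenced_constants": { "DEFAULT_TIMEOUT": "5000" }
+    #   }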
+    log_info "Generated files:"
+    log_info "  - Configuration reference: $MARKDOWN_OUTPUT"
+    log_info "  - Intermediate JSON: $EXTRACTED_JSON"
+
+    # Verify output
+    if [[ -f "$MARKDOWN_OUTPUT" ]]; then
+        WORD_COUNT=$(wc -w < "$MARKDOWN_OUTPUT")
+        log_info "Generated Markdown contains $WORD_COUNT words"
+    else
+        log_error "Expected output file not found: $MARKDOWN_OUTPUT"
+        exit 1
+    fi
+}
+
+# Help function
+show_help() {
+    cat << EOF
+generate-config-docs.sh - Generate configuration documentation for Stacks node
+
+USAGE:
+    $0 [OPTIONS]
+
+OPTIONS:
+    -h, --help      Show this help message
+
+DESCRIPTION:
+    This script generates comprehensive Markdown documentation for all TOML
+    configuration options available in the Stacks node. The documentation is
+    automatically extracted from Rust source code comments.
+
+    The process involves:
+    1. Building the documentation generation tools
+    2. Extracting configuration struct documentation from source code
+    3. Converting to Markdown format
+
+    Source file: stackslib/src/config/mod.rs
+
+OUTPUT:
+    docs/generated/configuration-reference.md
+
+EOF
+}
+
+# Parse command line arguments
+while [[ $# -gt 0 ]]; do
+    case $1 in
+        -h|--help)
+            show_help
+            exit 0
+            ;;
+        *)
+            log_error "Unknown option: $1"
+            show_help
+            exit 1
+            ;;
+    esac
+done
+
+main "$@"
diff --git a/contrib/tools/config-docs-generator/src/extract_docs.rs b/contrib/tools/config-docs-generator/src/extract_docs.rs
new file mode 100644
index 0000000000..1f54ebfa37
--- /dev/null
+++ b/contrib/tools/config-docs-generator/src/extract_docs.rs
@@ -0,0 +1,1814 @@
+// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
+// Copyright (C) 2020-2025 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
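+
+// extract-docs: first stage of the config-docs-generator pipeline. It runs
+// `cargo +nightly rustdoc` with JSON output for the requested package, walks
+// the rustdoc index for the target structs, parses the structured
+// @default/@notes/@deprecated/@toml_example annotations out of field doc
+// comments, resolves [`CONSTANT`] references, and writes the result as JSON
+// for the generate-markdown stage.
+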
+use std::collections::{HashMap, HashSet}; +use std::fs; +use std::process::Command as StdCommand; + +use anyhow::{Context, Result}; +use clap::{Arg, Command as ClapCommand}; +use once_cell::sync::Lazy; +use serde::{Deserialize, Serialize}; + +// Static regex for finding annotation end patterns +static ANNOTATION_END_REGEX: Lazy = + Lazy::new(|| regex::Regex::new(r"\n\s*@[a-zA-Z_]+:").unwrap()); + +// Static regex for finding constant references in documentation +static CONSTANT_REFERENCE_REGEX: Lazy = + Lazy::new(|| regex::Regex::new(r"\[`([A-Z_][A-Z0-9_]*)`\]").unwrap()); + +#[derive(Debug, Serialize, Deserialize)] +pub struct FieldDoc { + pub name: String, + pub description: String, + pub default_value: Option, + pub notes: Option>, + pub deprecated: Option, + pub toml_example: Option, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct StructDoc { + pub name: String, + pub description: Option, + pub fields: Vec, +} + +#[derive(Debug, Serialize, Deserialize)] +struct ConfigDocs { + structs: Vec, + referenced_constants: HashMap>, // Name -> Resolved Value (or None) +} + +fn main() -> Result<()> { + let matches = ClapCommand::new("extract-docs") + .about("Extract documentation from Rust source code using rustdoc JSON") + .arg( + Arg::new("package") + .long("package") + .short('p') + .value_name("PACKAGE") + .help("Package to extract docs for") + .required(true), + ) + .arg( + Arg::new("output") + .long("output") + .short('o') + .value_name("FILE") + .help("Output JSON file") + .required(true), + ) + .arg( + Arg::new("structs") + .long("structs") + .value_name("NAMES") + .help("Comma-separated list of struct names to extract") + .required(false), + ) + .get_matches(); + + let package = matches.get_one::("package").unwrap(); + let output_file = matches.get_one::("output").unwrap(); + let target_structs: Option> = matches + .get_one::("structs") + .map(|s| s.split(',').map(|s| s.trim().to_string()).collect()); + + // Generate rustdoc JSON + let rustdoc_json = generate_rustdoc_json(package)?; + + // Extract configuration documentation from the rustdoc JSON + let config_docs = extract_config_docs_from_rustdoc(&rustdoc_json, &target_structs)?; + + // Write the extracted docs to file + fs::write(output_file, serde_json::to_string_pretty(&config_docs)?)?; + + println!("Successfully extracted documentation to {}", output_file); + println!( + "Found {} structs with documentation", + config_docs.structs.len() + ); + Ok(()) +} + +fn generate_rustdoc_json(package: &str) -> Result { + // List of crates to generate rustdoc for (in addition to the main package) + // These crates contain constants that might be referenced in documentation + // NOTE: This list must be manually updated if new dependencies containing + // constants referenced in doc comments are added to the project + let additional_crates = ["stacks-common"]; + + // WARNING: This tool relies on nightly rustdoc JSON output (-Z unstable-options --output-format json) + // The JSON format is subject to change with new Rust nightly versions and could break this tool. 
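+    // For reference, the StdCommand invocation below is equivalent to running:
+    //   cargo +nightly rustdoc --lib -p <package> --target-dir target/rustdoc-json -- \
+    //     -Z unstable-options --output-format json --document-private-items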
+ // Use cargo rustdoc with nightly to generate JSON for the main package + let output = StdCommand::new("cargo") + .args([ + "+nightly", + "rustdoc", + "--lib", + "-p", + package, + "--target-dir", + "target/rustdoc-json", + "--", + "-Z", + "unstable-options", + "--output-format", + "json", + "--document-private-items", + ]) + .output() + .context("Failed to run cargo rustdoc command")?; + + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + anyhow::bail!("cargo rustdoc failed: {}", stderr); + } + + // Generate rustdoc for additional crates that might contain referenced constants + for additional_crate in &additional_crates { + let error_msg = format!( + "Failed to run cargo rustdoc command for {}", + additional_crate + ); + let output = StdCommand::new("cargo") + .args([ + "+nightly", + "rustdoc", + "--lib", + "-p", + additional_crate, + "--target-dir", + "target/rustdoc-json", + "--", + "-Z", + "unstable-options", + "--output-format", + "json", + "--document-private-items", + ]) + .output() + .context(error_msg)?; + + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + eprintln!( + "Warning: Failed to generate rustdoc for {}: {}", + additional_crate, stderr + ); + } + } + + // Map package names to their library names if different + // For most packages, the library name is the same as package name with hyphens replaced by underscores + // But some packages have custom library names defined in Cargo.toml + // NOTE: This mapping must be updated if new packages with different library names are processed + let lib_name = match package { + "stackslib" => "blockstack_lib".to_string(), + _ => package.replace('-', "_"), + }; + + // Read the generated JSON file - rustdoc generates it based on library name + let json_file_path = format!("target/rustdoc-json/doc/{}.json", lib_name); + let json_content = std::fs::read_to_string(json_file_path) + .context("Failed to read generated rustdoc JSON file")?; + + serde_json::from_str(&json_content).context("Failed to parse rustdoc JSON output") +} + +fn extract_config_docs_from_rustdoc( + rustdoc_json: &serde_json::Value, + target_structs: &Option>, +) -> Result { + let mut structs = Vec::new(); + let mut all_referenced_constants = std::collections::HashSet::new(); + + // Access the main index containing all items from the rustdoc JSON output + let index = rustdoc_json + .get("index") + .and_then(|v| v.as_object()) + .context("Missing 'index' field in rustdoc JSON")?; + + for (_item_id, item) in index { + // Extract the item's name from rustdoc JSON structure + if let Some(name) = item.get("name").and_then(|v| v.as_str()) { + // Navigate to the item's type information + if let Some(inner) = item.get("inner") { + // Check if this item is a struct by looking for the "struct" field + if let Some(_struct_data) = inner.get("struct") { + // Check if this struct is in our target list (if specified) + if let Some(targets) = target_structs { + if !targets.contains(&name.to_string()) { + continue; + } + } + + let (struct_doc_opt, referenced_constants) = + extract_struct_from_rustdoc_index(index, name, item)?; + + if let Some(struct_doc) = struct_doc_opt { + structs.push(struct_doc); + } + all_referenced_constants.extend(referenced_constants); + } + } + } + } + + // Resolve all collected constant references + let mut referenced_constants = HashMap::new(); + for constant_name in all_referenced_constants { + let resolved_value = resolve_constant_reference(&constant_name, index); + 
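+        // A failed lookup yields `None`; the reference is still recorded so
+        // the output distinguishes "referenced but unresolved" from
+        // "not referenced at all".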
referenced_constants.insert(constant_name, resolved_value); + } + + Ok(ConfigDocs { + structs, + referenced_constants, + }) +} + +fn extract_struct_from_rustdoc_index( + index: &serde_json::Map, + struct_name: &str, + struct_item: &serde_json::Value, +) -> Result<(Option, HashSet)> { + let mut all_referenced_constants = std::collections::HashSet::new(); + + // Extract struct documentation + let description = struct_item + .get("docs") + .and_then(|v| v.as_str()) + .map(|s| s.to_string()); + + // Collect constant references from struct description + if let Some(desc) = &description { + all_referenced_constants.extend(find_constant_references(desc)); + } + + // Extract fields + let (fields, referenced_constants) = extract_struct_fields(index, struct_item)?; + + // Extend referenced constants + all_referenced_constants.extend(referenced_constants); + + if !fields.is_empty() || description.is_some() { + let struct_doc = StructDoc { + name: struct_name.to_string(), + description, + fields, + }; + Ok((Some(struct_doc), all_referenced_constants)) + } else { + Ok((None, all_referenced_constants)) + } +} + +fn extract_struct_fields( + index: &serde_json::Map, + struct_item: &serde_json::Value, +) -> Result<(Vec, std::collections::HashSet)> { + let mut fields = Vec::new(); + let mut all_referenced_constants = std::collections::HashSet::new(); + + // Navigate through rustdoc JSON structure to access struct fields + // Path: item.inner.struct.kind.plain.fields[] + if let Some(inner) = struct_item.get("inner") { + if let Some(struct_data) = inner.get("struct") { + if let Some(kind) = struct_data.get("kind") { + if let Some(plain) = kind.get("plain") { + // Access the array of field IDs that reference other items in the index + if let Some(field_ids) = plain.get("fields").and_then(|v| v.as_array()) { + for field_id in field_ids { + // Field IDs can be either integers or strings in rustdoc JSON, try both formats + let field_item = if let Some(field_id_num) = field_id.as_u64() { + // Numeric field ID - convert to string for index lookup + index.get(&field_id_num.to_string()) + } else if let Some(field_id_str) = field_id.as_str() { + // String field ID - use directly for index lookup + index.get(field_id_str) + } else { + None + }; + + if let Some(field_item) = field_item { + // Extract the field's name from the rustdoc item + let field_name = field_item + .get("name") + .and_then(|v| v.as_str()) + .unwrap_or("unknown") + .to_string(); + + // Extract the field's documentation text from rustdoc + let field_docs = field_item + .get("docs") + .and_then(|v| v.as_str()) + .unwrap_or("") + .to_string(); + + // Parse the structured documentation + let (field_doc, referenced_constants) = + parse_field_documentation(&field_docs, &field_name)?; + + // Only include fields that have documentation + if !field_doc.description.is_empty() + || field_doc.default_value.is_some() + { + fields.push(field_doc); + } + + // Extend referenced constants + all_referenced_constants.extend(referenced_constants); + } + } + } + } + } + } + } + + Ok((fields, all_referenced_constants)) +} + +fn parse_field_documentation( + doc_text: &str, + field_name: &str, +) -> Result<(FieldDoc, std::collections::HashSet)> { + let mut default_value = None; + let mut notes = None; + let mut deprecated = None; + let mut toml_example = None; + let mut referenced_constants = std::collections::HashSet::new(); + + // Split on --- separator if present + let parts: Vec<&str> = doc_text.split("---").collect(); + + let description = parts[0].trim().to_string(); 
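+
+    // For reference, the doc-comment shape handled here follows the README
+    // example (names like DEFAULT_TIMEOUT are illustrative):
+    //
+    //   Timeout for feature operations in milliseconds.
+    //   ---
+    //   @default: [`DEFAULT_TIMEOUT`]
+    //   @notes:
+    //     - Requires restart to take effect.
+    //
+    // parts[0] (before `---`) is the description; parts[1], when present,
+    // holds the annotation metadata parsed below.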
+ + // Collect constant references from description + referenced_constants.extend(find_constant_references(&description)); + + // Parse metadata section if present + if parts.len() >= 2 { + let metadata_section = parts[1]; + + // Parse @default: annotations + if let Some(default_match) = extract_annotation(metadata_section, "default") { + // Collect constant references from default value + referenced_constants.extend(find_constant_references(&default_match)); + default_value = Some(default_match); + } + + // Parse @notes: annotations + if let Some(notes_text) = extract_annotation(metadata_section, "notes") { + // Collect constant references from notes + referenced_constants.extend(find_constant_references(¬es_text)); + + let mut note_items: Vec = Vec::new(); + let mut current_note = String::new(); + let mut in_note = false; + + for line in notes_text.lines() { + let trimmed = line.trim(); + + // Skip empty lines + if trimmed.is_empty() { + continue; + } + + // Check if this line starts a new note (bullet point) + if trimmed.starts_with("- ") || trimmed.starts_with("* ") { + // If we were building a previous note, save it + if in_note && !current_note.trim().is_empty() { + note_items.push(current_note.trim().to_string()); + } + + // Start a new note (remove the bullet point) + current_note = trimmed[2..].trim().to_string(); + in_note = true; + } else if in_note { + // This is a continuation line for the current note + if !current_note.is_empty() { + current_note.push(' '); + } + current_note.push_str(trimmed); + } + // If not in_note and doesn't start with bullet, ignore the line + } + + // Don't forget the last note + if in_note && !current_note.trim().is_empty() { + note_items.push(current_note.trim().to_string()); + } + + if !note_items.is_empty() { + notes = Some(note_items); + } + } + + // Parse @deprecated: annotations + if let Some(deprecated_text) = extract_annotation(metadata_section, "deprecated") { + // Collect constant references from deprecated text + referenced_constants.extend(find_constant_references(&deprecated_text)); + deprecated = Some(deprecated_text); + } + + // Parse @toml_example: annotations + if let Some(example_text) = extract_annotation(metadata_section, "toml_example") { + // Note: We typically don't expect constant references in TOML examples, + // but we'll check anyway for completeness + referenced_constants.extend(find_constant_references(&example_text)); + toml_example = Some(example_text); + } + } + + let field_doc = FieldDoc { + name: field_name.to_string(), + description, + default_value, + notes, + deprecated, + toml_example, + }; + + Ok((field_doc, referenced_constants)) +} + +fn extract_annotation(metadata_section: &str, annotation_name: &str) -> Option { + let annotation_pattern = format!("@{}:", annotation_name); + + if let Some(start_pos) = metadata_section.find(&annotation_pattern) { + let after_annotation = &metadata_section[start_pos + annotation_pattern.len()..]; + + // Find the end of this annotation by looking for the next @annotation: pattern + // Look for pattern like "@word:" to identify the start of the next annotation + let end_pos = ANNOTATION_END_REGEX + .find(after_annotation) + .map(|m| m.start()) + .unwrap_or(after_annotation.len()); + + let annotation_content = after_annotation[..end_pos].trim(); + + if !annotation_content.is_empty() { + // For toml_example, preserve the content more carefully + if annotation_name == "toml_example" { + // Remove the initial | marker if present and preserve formatting + let cleaned = if let 
Some(stripped) = annotation_content.strip_prefix('|') { + stripped.trim_start_matches('\n').to_string() + } else { + annotation_content.to_string() + }; + + if !cleaned.trim().is_empty() { + return Some(cleaned); + } + } else { + // For other annotations, clean up backticks and other formatting + let cleaned = annotation_content.trim().to_string(); + + if !cleaned.is_empty() { + return Some(cleaned); + } + } + } + } + + None +} + +fn resolve_constant_reference( + name: &str, + rustdoc_index: &serde_json::Map, +) -> Option { + // First, try to find the constant in the main rustdoc index + if let Some(value) = resolve_constant_in_index(name, rustdoc_index) { + return Some(value); + } + + // If not found in main index, try additional crates + let additional_crate_libs = ["stacks_common"]; // Library names for additional crates + + for lib_name in &additional_crate_libs { + let json_file_path = format!("target/rustdoc-json/doc/{}.json", lib_name); + if let Ok(json_content) = std::fs::read_to_string(&json_file_path) { + if let Ok(rustdoc_json) = serde_json::from_str::(&json_content) { + if let Some(index) = rustdoc_json.get("index").and_then(|v| v.as_object()) { + if let Some(value) = resolve_constant_in_index(name, index) { + return Some(value); + } + } + } + } + } + + None +} + +fn resolve_constant_in_index( + name: &str, + rustdoc_index: &serde_json::Map, +) -> Option { + // Look for a constant with the given name in the rustdoc index + for (_item_id, item) in rustdoc_index { + // Check if this item's name matches the constant we're looking for + if let Some(item_name) = item.get("name").and_then(|v| v.as_str()) { + if item_name == name { + // Navigate to the item's type information in rustdoc JSON + if let Some(inner) = item.get("inner") { + // Check if this item is a constant by looking for the "constant" field + if let Some(constant_data) = inner.get("constant") { + // Try newer rustdoc JSON structure first (with nested 'const' field) + if let Some(const_inner) = constant_data.get("const") { + // For literal constants, prefer expr which doesn't have type suffix + if let Some(is_literal) = + const_inner.get("is_literal").and_then(|v| v.as_bool()) + { + if is_literal { + // Access the expression field for literal constant values + if let Some(expr) = + const_inner.get("expr").and_then(|v| v.as_str()) + { + if expr != "_" { + return Some(expr.to_string()); + } + } + } + } + + // For computed constants or when expr is "_", use value but strip type suffix + if let Some(value) = const_inner.get("value").and_then(|v| v.as_str()) { + return Some(strip_type_suffix(value)); + } + + // Fallback to expr if value is not available + if let Some(expr) = const_inner.get("expr").and_then(|v| v.as_str()) { + if expr != "_" { + return Some(expr.to_string()); + } + } + } + + // Fall back to older rustdoc JSON structure for compatibility + if let Some(value) = constant_data.get("value").and_then(|v| v.as_str()) { + return Some(strip_type_suffix(value)); + } + if let Some(expr) = constant_data.get("expr").and_then(|v| v.as_str()) { + if expr != "_" { + return Some(expr.to_string()); + } + } + + // For some constants, the value might be in the type field if it's a simple literal + if let Some(type_info) = constant_data.get("type") { + if let Some(type_str) = type_info.as_str() { + // Handle simple numeric or string literals embedded in type + return Some(type_str.to_string()); + } + } + } + } + } + } + } + None +} + +/// Strip type suffixes from rustdoc constant values (e.g., "50u64" -> "50", "402_653_196u32" 
-> "402_653_196") +fn strip_type_suffix(value: &str) -> String { + // Common Rust integer type suffixes + let suffixes = [ + "u8", "u16", "u32", "u64", "u128", "usize", "i8", "i16", "i32", "i64", "i128", "isize", + "f32", "f64", + ]; + + for suffix in &suffixes { + if value.ends_with(suffix) { + let without_suffix = &value[..value.len() - suffix.len()]; + + // Only strip if the remaining part looks like a numeric literal + // (contains only digits, underscores, dots, minus signs, or quotes for string literals) + if !without_suffix.is_empty() + && (without_suffix + .chars() + .all(|c| c.is_ascii_digit() || c == '_' || c == '.' || c == '-') + || (without_suffix.starts_with('"') && without_suffix.ends_with('"'))) + { + return without_suffix.to_string(); + } + } + } + + // If no valid suffix found, return as-is + value.to_string() +} + +fn find_constant_references(text: &str) -> std::collections::HashSet { + let mut constants = std::collections::HashSet::new(); + + for captures in CONSTANT_REFERENCE_REGEX.captures_iter(text) { + if let Some(constant_name) = captures.get(1) { + constants.insert(constant_name.as_str().to_string()); + } + } + + constants +} + +#[cfg(test)] +mod tests { + use serde_json::json; + + use super::*; + + #[test] + fn test_parse_field_documentation_basic() { + let doc_text = "This is a basic field description."; + let result = parse_field_documentation(doc_text, "test_field").unwrap(); + + assert_eq!(result.0.name, "test_field"); + assert_eq!(result.0.description, "This is a basic field description."); + assert_eq!(result.0.default_value, None); + assert_eq!(result.0.notes, None); + assert_eq!(result.0.deprecated, None); + assert_eq!(result.0.toml_example, None); + } + + #[test] + fn test_parse_field_documentation_with_metadata() { + let doc_text = r#"This is a field with metadata. +--- +@default: `"test_value"` +@notes: + - This is a note. + - This is another note. +@deprecated: This field is deprecated. +@toml_example: | + key = "value" + other = 123"#; + + let result = parse_field_documentation(doc_text, "test_field").unwrap(); + + assert_eq!(result.0.name, "test_field"); + assert_eq!(result.0.description, "This is a field with metadata."); + assert_eq!(result.0.default_value, Some("`\"test_value\"`".to_string())); + assert_eq!( + result.0.notes, + Some(vec![ + "This is a note.".to_string(), + "This is another note.".to_string() + ]) + ); + assert_eq!( + result.0.deprecated, + Some("This field is deprecated.".to_string()) + ); + assert_eq!( + result.0.toml_example, + Some(" key = \"value\"\n other = 123".to_string()) + ); + } + + #[test] + fn test_parse_field_documentation_multiline_default() { + let doc_text = r#"Multi-line field description. +--- +@default: Derived from [`BurnchainConfig::mode`] ([`CHAIN_ID_MAINNET`] for `mainnet`, + [`CHAIN_ID_TESTNET`] otherwise). 
+@notes: + - Warning: Do not modify this unless you really know what you're doing."#; + + let result = parse_field_documentation(doc_text, "test_field").unwrap(); + + assert_eq!(result.0.name, "test_field"); + assert_eq!(result.0.description, "Multi-line field description."); + assert!(result.0.default_value.is_some()); + let default_val = result.0.default_value.unwrap(); + assert!(default_val.contains("Derived from")); + assert!(default_val.contains("CHAIN_ID_MAINNET")); + assert_eq!( + result.0.notes, + Some(vec![ + "Warning: Do not modify this unless you really know what you're doing.".to_string() + ]) + ); + } + + #[test] + fn test_parse_field_documentation_multiline_notes() { + let doc_text = r#"Field with multi-line notes. +--- +@notes: + - This is a single line note. + - This is a multi-line note that + spans across multiple lines + and should be treated as one note. + - Another single line note. + - Final multi-line note that also + continues on the next line."#; + + let result = parse_field_documentation(doc_text, "test_field").unwrap(); + let (field_doc, _) = result; + + assert_eq!(field_doc.name, "test_field"); + assert_eq!(field_doc.description, "Field with multi-line notes."); + + let notes = field_doc.notes.expect("Should have notes"); + assert_eq!(notes.len(), 4); + assert_eq!(notes[0], "This is a single line note."); + assert_eq!( + notes[1], + "This is a multi-line note that spans across multiple lines and should be treated as one note." + ); + assert_eq!(notes[2], "Another single line note."); + assert_eq!( + notes[3], + "Final multi-line note that also continues on the next line." + ); + } + + #[test] + fn test_parse_field_documentation_multiline_notes_mixed_bullets() { + let doc_text = r#"Field with mixed bullet styles. +--- +@notes: + - First note with dash. + * Second note with asterisk + that continues. + - Third note with dash again + and multiple continuation lines + should all be joined together."#; + + let result = parse_field_documentation(doc_text, "test_field").unwrap(); + let (field_doc, _) = result; + + let notes = field_doc.notes.expect("Should have notes"); + assert_eq!(notes.len(), 3); + assert_eq!(notes[0], "First note with dash."); + assert_eq!(notes[1], "Second note with asterisk that continues."); + assert_eq!( + notes[2], + "Third note with dash again and multiple continuation lines should all be joined together." + ); + } + + #[test] + fn test_parse_field_documentation_notes_with_empty_lines() { + let doc_text = r#"Field with notes that have empty lines. +--- +@notes: + - First note. + + - Second note after empty line + with continuation. + + - Third note after another empty line."#; + + let result = parse_field_documentation(doc_text, "test_field").unwrap(); + let (field_doc, _) = result; + + let notes = field_doc.notes.expect("Should have notes"); + assert_eq!(notes.len(), 3); + assert_eq!(notes[0], "First note."); + assert_eq!(notes[1], "Second note after empty line with continuation."); + assert_eq!(notes[2], "Third note after another empty line."); + } + + #[test] + fn test_parse_field_documentation_notes_with_intralinks() { + let doc_text = r#"Field with notes containing intralinks. +--- +@notes: + - If [`SomeConfig::field`] is `true`, the node will + use the default estimator. 
+ - See [`CONSTANT_VALUE`] for details."#; + + let result = parse_field_documentation(doc_text, "test_field").unwrap(); + let (field_doc, referenced_constants) = result; + + let notes = field_doc.notes.expect("Should have notes"); + assert_eq!(notes.len(), 2); + assert_eq!( + notes[0], + "If [`SomeConfig::field`] is `true`, the node will use the default estimator." + ); + assert_eq!(notes[1], "See [`CONSTANT_VALUE`] for details."); + + // Check that constants were collected + assert!(referenced_constants.contains("CONSTANT_VALUE")); + } + + #[test] + fn test_extract_annotation_basic() { + let metadata = "@default: `\"test\"`\n@notes: Some notes here."; + + let default = extract_annotation(metadata, "default"); + let notes = extract_annotation(metadata, "notes"); + let missing = extract_annotation(metadata, "missing"); + + assert_eq!(default, Some("`\"test\"`".to_string())); + assert_eq!(notes, Some("Some notes here.".to_string())); + assert_eq!(missing, None); + } + + #[test] + fn test_extract_annotation_toml_example() { + let metadata = r#"@toml_example: | + key = "value" + number = 42 + nested = { a = 1, b = 2 }"#; + + let result = extract_annotation(metadata, "toml_example"); + assert!(result.is_some()); + let toml = result.unwrap(); + assert!(toml.contains("key = \"value\"")); + assert!(toml.contains("number = 42")); + assert!(toml.contains("nested = { a = 1, b = 2 }")); + } + + #[test] + fn test_extract_annotation_multiline() { + let metadata = r#"@notes: + - First note with important details. + - Second note with more info. +@default: `None`"#; + + let notes = extract_annotation(metadata, "notes"); + let default = extract_annotation(metadata, "default"); + + assert!(notes.is_some()); + let notes_text = notes.unwrap(); + assert!(notes_text.contains("First note")); + assert!(notes_text.contains("Second note")); + assert_eq!(default, Some("`None`".to_string())); + } + + #[test] + fn test_extract_struct_fields_from_mock_data() { + let mock_index = json!({ + "struct_1": { + "name": "TestStruct", + "inner": { + "struct": { + "kind": { + "plain": { + "fields": ["field_1", "field_2"] + } + } + } + } + }, + "field_1": { + "name": "test_field", + "docs": "A test field.\n---\n@default: `42`" + }, + "field_2": { + "name": "another_field", + "docs": "Another field with notes.\n---\n@default: `\"hello\"`\n@notes:\n - This is a note." 
+ } + }); + + let index = mock_index.as_object().unwrap(); + let struct_item = &mock_index["struct_1"]; + + let (fields, _referenced_constants) = extract_struct_fields(index, struct_item).unwrap(); + + assert_eq!(fields.len(), 2); + + let first_field = &fields[0]; + assert_eq!(first_field.name, "test_field"); + assert_eq!(first_field.description, "A test field."); + assert_eq!(first_field.default_value, Some("`42`".to_string())); + + let second_field = &fields[1]; + assert_eq!(second_field.name, "another_field"); + assert_eq!(second_field.description, "Another field with notes."); + assert_eq!(second_field.default_value, Some("`\"hello\"`".to_string())); + assert_eq!( + second_field.notes, + Some(vec!["This is a note.".to_string()]) + ); + } + + #[test] + fn test_extract_struct_from_rustdoc_index() { + let mock_index = json!({ + "struct_1": { + "name": "TestStruct", + "docs": "This is a test struct for configuration.", + "inner": { + "struct": { + "kind": { + "plain": { + "fields": ["field_1"] + } + } + } + } + }, + "field_1": { + "name": "config_field", + "docs": "Configuration field.\n---\n@default: `\"default\"`" + } + }); + + let index = mock_index.as_object().unwrap(); + let struct_item = &mock_index["struct_1"]; + + let result = extract_struct_from_rustdoc_index(index, "TestStruct", struct_item).unwrap(); + + assert!(result.0.is_some()); + let struct_doc = result.0.unwrap(); + assert_eq!(struct_doc.name, "TestStruct"); + assert_eq!( + struct_doc.description, + Some("This is a test struct for configuration.".to_string()) + ); + assert_eq!(struct_doc.fields.len(), 1); + assert_eq!(struct_doc.fields[0].name, "config_field"); + } + + #[test] + fn test_extract_config_docs_from_rustdoc() { + let mock_rustdoc = json!({ + "index": { + "item_1": { + "name": "ConfigStruct", + "inner": { + "struct": { + "kind": { + "plain": { + "fields": ["field_1"] + } + } + } + }, + "docs": "A configuration struct." + }, + "item_2": { + "name": "NonStruct", + "inner": { + "function": {} + } + }, + "field_1": { + "name": "setting", + "docs": "A configuration setting.\n---\n@default: `true`" + } + } + }); + + let target_structs = Some(vec!["ConfigStruct".to_string()]); + let result = extract_config_docs_from_rustdoc(&mock_rustdoc, &target_structs).unwrap(); + + assert_eq!(result.structs.len(), 1); + let struct_doc = &result.structs[0]; + assert_eq!(struct_doc.name, "ConfigStruct"); + assert_eq!( + struct_doc.description, + Some("A configuration struct.".to_string()) + ); + assert_eq!(struct_doc.fields.len(), 1); + } + + #[test] + fn test_extract_config_docs_filter_by_target() { + let mock_rustdoc = json!({ + "index": { + "item_1": { + "name": "WantedStruct", + "inner": { + "struct": { + "kind": { + "plain": { + "fields": [] + } + } + } + }, + "docs": "Wanted struct." + }, + "item_2": { + "name": "UnwantedStruct", + "inner": { + "struct": { + "kind": { + "plain": { + "fields": [] + } + } + } + }, + "docs": "Unwanted struct." + } + } + }); + + let target_structs = Some(vec!["WantedStruct".to_string()]); + let result = extract_config_docs_from_rustdoc(&mock_rustdoc, &target_structs).unwrap(); + + assert_eq!(result.structs.len(), 1); + assert_eq!(result.structs[0].name, "WantedStruct"); + } + + #[test] + fn test_extract_config_docs_no_filter() { + let mock_rustdoc = json!({ + "index": { + "item_1": { + "name": "Struct1", + "inner": { + "struct": { + "kind": { + "plain": { + "fields": [] + } + } + } + }, + "docs": "First struct." 
+ }, + "item_2": { + "name": "Struct2", + "inner": { + "struct": { + "kind": { + "plain": { + "fields": [] + } + } + } + }, + "docs": "Second struct." + } + } + }); + + let result = extract_config_docs_from_rustdoc(&mock_rustdoc, &None).unwrap(); + + assert_eq!(result.structs.len(), 2); + let names: Vec<&str> = result.structs.iter().map(|s| s.name.as_str()).collect(); + assert!(names.contains(&"Struct1")); + assert!(names.contains(&"Struct2")); + } + + #[test] + fn test_parse_field_documentation_empty_notes() { + let doc_text = r#"Field with empty notes. +--- +@default: `None` +@notes: + + +@deprecated: Old field"#; + + let result = parse_field_documentation(doc_text, "test_field").unwrap(); + + assert_eq!(result.0.name, "test_field"); + assert_eq!(result.0.description, "Field with empty notes."); + assert_eq!(result.0.default_value, Some("`None`".to_string())); + assert_eq!(result.0.notes, None); // Empty notes should result in None + assert_eq!(result.0.deprecated, Some("Old field".to_string())); + } + + #[test] + fn test_parse_field_documentation_bullet_points_cleanup() { + let doc_text = r#"Field with bullet notes. +--- +@notes: + - First bullet point + * Second bullet point + - Third bullet point"#; + + let result = parse_field_documentation(doc_text, "test_field").unwrap(); + + assert_eq!( + result.0.notes, + Some(vec![ + "First bullet point".to_string(), + "Second bullet point".to_string(), + "Third bullet point".to_string() + ]) + ); + } + + #[test] + fn test_extract_annotation_edge_cases() { + // Test with annotation at the end + let metadata1 = "@default: `value`"; + assert_eq!( + extract_annotation(metadata1, "default"), + Some("`value`".to_string()) + ); + + // Test with empty annotation + let metadata2 = "@default:\n@notes: something"; + assert_eq!(extract_annotation(metadata2, "default"), None); + + // Test with annotation containing colons + let metadata3 = "@notes: URL: https://example.com:8080/path"; + let notes = extract_annotation(metadata3, "notes"); + assert_eq!( + notes, + Some("URL: https://example.com:8080/path".to_string()) + ); + + // Test with whitespace-only annotation + let metadata_whitespace = "@default: \n@notes: something"; + assert_eq!( + extract_annotation(metadata_whitespace, "default"), + None, + "Annotation with only whitespace should be None" + ); + + // Test with annotation containing only newline + let metadata_newline = "@default:\n@notes: something"; + assert_eq!( + extract_annotation(metadata_newline, "default"), + None, + "Annotation with only newline should be None" + ); + } + + #[test] + fn test_extract_struct_fields_numeric_field_ids() { + let mock_index = json!({ + "struct_1": { + "name": "TestStruct", + "inner": { + "struct": { + "kind": { + "plain": { + "fields": [123, 456] // Numeric field IDs + } + } + } + } + }, + "123": { + "name": "numeric_field", + "docs": "Field with numeric ID.\n---\n@default: `0`" + }, + "456": { + "name": "another_numeric", + "docs": "Another numeric field." 
+ } + }); + + let index = mock_index.as_object().unwrap(); + let struct_item = &mock_index["struct_1"]; + + let (fields, _referenced_constants) = extract_struct_fields(index, struct_item).unwrap(); + + assert_eq!(fields.len(), 2); + assert_eq!(fields[0].name, "numeric_field"); + assert_eq!(fields[1].name, "another_numeric"); + } + + #[test] + fn test_extract_struct_fields_missing_field_data() { + let mock_index = json!({ + "struct_1": { + "name": "TestStruct", + "inner": { + "struct": { + "kind": { + "plain": { + "fields": ["missing_field", "present_field"] + } + } + } + } + }, + "present_field": { + "name": "present", + "docs": "This field exists." + } + // "missing_field" is intentionally not in the index + }); + + let index = mock_index.as_object().unwrap(); + let struct_item = &mock_index["struct_1"]; + + let (fields, _referenced_constants) = extract_struct_fields(index, struct_item).unwrap(); + + // Should only include the present field + assert_eq!(fields.len(), 1); + assert_eq!(fields[0].name, "present"); + } + + #[test] + fn test_extract_config_docs_missing_index() { + let invalid_rustdoc = json!({ + "not_index": {} + }); + + let result = extract_config_docs_from_rustdoc(&invalid_rustdoc, &None); + assert!(result.is_err()); + assert!(result + .unwrap_err() + .to_string() + .contains("Missing 'index' field")); + } + + #[test] + fn test_extract_struct_fields_no_documentation() { + let mock_index = json!({ + "struct_1": { + "name": "TestStruct", + "inner": { + "struct": { + "kind": { + "plain": { + "fields": ["field_1"] + } + } + } + } + }, + "field_1": { + "name": "undocumented_field", + "docs": "" // Empty documentation + } + }); + + let index = mock_index.as_object().unwrap(); + let struct_item = &mock_index["struct_1"]; + + let (fields, _referenced_constants) = extract_struct_fields(index, struct_item).unwrap(); + + // Fields without documentation should be excluded + assert_eq!(fields.len(), 0); + } + + #[test] + fn test_extract_struct_fields_malformed_structure() { + let mock_index = json!({ + "struct_1": { + "name": "TestStruct", + "inner": { + "struct": { + "kind": { + "tuple": {} // Not a "plain" struct + } + } + } + } + }); + + let index = mock_index.as_object().unwrap(); + let struct_item = &mock_index["struct_1"]; + + let (fields, _referenced_constants) = extract_struct_fields(index, struct_item).unwrap(); + + // Should handle malformed structures gracefully + assert_eq!(fields.len(), 0); + } + + #[test] + fn test_parse_field_documentation_complex_annotations() { + let doc_text = r#"Complex field with all annotation types and edge cases. + +This description spans multiple lines +and includes various formatting. +--- +@default: Dynamically determined. + - If the `[miner]` section *is present* in the config file, the [`NodeConfig::seed`] is used. + - If the `[miner]` section *is not present*, this is `None`, and mining operations will fail. +@notes: + - **Warning:** This field requires careful configuration. + - Only relevant if [`NodeConfig::miner`] is `true`. + - Units: milliseconds. +@deprecated: Use `new_field` instead. This will be removed in version 2.0. 
+@toml_example: | + # This is a comment + [section] + field = "value" + + # Another section + [other_section] + number = 42 + array = ["a", "b", "c"]"#; + + let result = parse_field_documentation(doc_text, "complex_field").unwrap(); + + assert_eq!(result.0.name, "complex_field"); + assert!(result.0.description.contains("Complex field")); + assert!(result.0.description.contains("multiple lines")); + + let default_val = result.0.default_value.unwrap(); + assert!(default_val.contains("Dynamically determined")); + assert!(default_val.contains("NodeConfig::seed")); + + let notes = result.0.notes.unwrap(); + assert_eq!(notes.len(), 3); + assert!(notes[0].contains("Warning")); + assert!(notes[1].contains("Only relevant")); + assert!(notes[2].contains("Units: milliseconds")); + + assert!(result + .0 + .deprecated + .unwrap() + .contains("Use `new_field` instead")); + + let toml_example = result.0.toml_example.unwrap(); + assert!(toml_example.contains("# This is a comment")); + assert!(toml_example.contains("[section]")); + assert!(toml_example.contains("array = [\"a\", \"b\", \"c\"]")); + } + + #[test] + fn test_extract_annotation_overlapping_patterns() { + let metadata = r#"@config_value: `"not_default"` +@default: `"actual_default"` +@notes_info: Some other annotation +@notes: Actual notes here +@deprecated_old: Old deprecation +@deprecated: Current deprecation"#; + + // Should extract the correct annotations, not get confused by similar names + assert_eq!( + extract_annotation(metadata, "default"), + Some("`\"actual_default\"`".to_string()) + ); + assert_eq!( + extract_annotation(metadata, "notes"), + Some("Actual notes here".to_string()) + ); + assert_eq!( + extract_annotation(metadata, "deprecated"), + Some("Current deprecation".to_string()) + ); + + // Should not find non-existent annotations + assert_eq!(extract_annotation(metadata, "nonexistent"), None); + assert_eq!(extract_annotation(metadata, "missing"), None); + } + + #[test] + fn test_extract_struct_from_rustdoc_index_no_fields_no_description() { + let mock_index = json!({ + "struct_1": { + "name": "EmptyStruct", + "inner": { + "struct": { + "kind": { + "plain": { + "fields": [] + } + } + } + } + // No "docs" field + } + }); + + let index = mock_index.as_object().unwrap(); + let struct_item = &mock_index["struct_1"]; + + let result = extract_struct_from_rustdoc_index(index, "EmptyStruct", struct_item).unwrap(); + + // Should return None for structs with no fields and no description + assert!(result.0.is_none()); + } + + #[test] + fn test_parse_field_documentation_only_description() { + let doc_text = "Just a simple description with no metadata separator."; + let result = parse_field_documentation(doc_text, "simple_field").unwrap(); + + assert_eq!(result.0.name, "simple_field"); + assert_eq!( + result.0.description, + "Just a simple description with no metadata separator." 
+ ); + assert_eq!(result.0.default_value, None); + assert_eq!(result.0.notes, None); + assert_eq!(result.0.deprecated, None); + assert_eq!(result.0.toml_example, None); + } + + #[test] + fn test_package_to_library_name_mapping() { + // Test the logic inside generate_rustdoc_json for mapping package names to library names + // We can't easily test generate_rustdoc_json directly since it runs external commands, + // but we can test the mapping logic + + // Test the special case for stackslib + let lib_name = match "stackslib" { + "stackslib" => "blockstack_lib".to_string(), + pkg => pkg.replace('-', "_"), + }; + assert_eq!(lib_name, "blockstack_lib"); + + // Test normal package names with hyphens + let lib_name = match "config-docs-generator" { + "stackslib" => "blockstack_lib".to_string(), + pkg => pkg.replace('-', "_"), + }; + assert_eq!(lib_name, "config_docs_generator"); + + // Test package name without hyphens + let lib_name = match "normalpackage" { + "stackslib" => "blockstack_lib".to_string(), + pkg => pkg.replace('-', "_"), + }; + assert_eq!(lib_name, "normalpackage"); + } + + #[test] + fn test_find_constant_references() { + // Test finding constant references in text + let text1 = "This field uses [`DEFAULT_VALUE`] as default."; + let constants1 = find_constant_references(text1); + assert_eq!(constants1.len(), 1); + assert!(constants1.contains("DEFAULT_VALUE")); + + // Test multiple constants + let text2 = "Uses [`CONST_A`] and [`CONST_B`] values."; + let constants2 = find_constant_references(text2); + assert_eq!(constants2.len(), 2); + assert!(constants2.contains("CONST_A")); + assert!(constants2.contains("CONST_B")); + + // Test no constants + let text3 = "This text has no constant references."; + let constants3 = find_constant_references(text3); + assert_eq!(constants3.len(), 0); + + // Test mixed content + let text4 = + "Field uses [`MY_CONSTANT`] and links to [`SomeStruct::field`] but not `lowercase`."; + let constants4 = find_constant_references(text4); + assert_eq!(constants4.len(), 1); + assert!(constants4.contains("MY_CONSTANT")); + assert!(!constants4.contains("SomeStruct::field")); // Should not match struct::field patterns + assert!(!constants4.contains("lowercase")); // Should not match lowercase + } + + #[test] + fn test_resolve_constant_reference() { + // Create mock rustdoc index with a constant + let mock_index = serde_json::json!({ + "const_1": { + "name": "TEST_CONSTANT", + "inner": { + "constant": { + "expr": "42", + "type": "u32" + } + } + }, + "const_2": { + "name": "STRING_CONST", + "inner": { + "constant": { + "value": "\"hello\"", + "type": "&str" + } + } + }, + "not_const": { + "name": "NotAConstant", + "inner": { + "function": {} + } + } + }); + + let index = mock_index.as_object().unwrap(); + + // Test resolving existing constant with expr field + let result1 = resolve_constant_reference("TEST_CONSTANT", index); + assert_eq!(result1, Some("42".to_string())); + + // Test resolving existing constant with value field + let result2 = resolve_constant_reference("STRING_CONST", index); + assert_eq!(result2, Some("\"hello\"".to_string())); + + // Test resolving non-existent constant + let result3 = resolve_constant_reference("NONEXISTENT", index); + assert_eq!(result3, None); + + // Test resolving non-constant item + let result4 = resolve_constant_reference("NotAConstant", index); + assert_eq!(result4, None); + } + + #[test] + fn test_resolve_computed_constant() { + // Test computed constants that have "_" in expr and actual value in value field + let 
mock_index = serde_json::json!({ + "computed_const": { + "name": "COMPUTED_CONSTANT", + "inner": { + "constant": { + "const": { + "expr": "_", + "value": "402_653_196u32", + "is_literal": false + }, + "type": { + "primitive": "u32" + } + } + } + }, + "literal_const": { + "name": "LITERAL_CONSTANT", + "inner": { + "constant": { + "const": { + "expr": "100", + "value": "100u32", + "is_literal": true + }, + "type": { + "primitive": "u32" + } + } + } + } + }); + + let index = mock_index.as_object().unwrap(); + + // Test resolving computed constant - should get the value without type suffix + let result1 = resolve_constant_in_index("COMPUTED_CONSTANT", index); + assert_eq!(result1, Some("402_653_196".to_string())); + + // Test resolving literal constant - should get expr which is clean + let result2 = resolve_constant_in_index("LITERAL_CONSTANT", index); + assert_eq!(result2, Some("100".to_string())); + } + + #[test] + fn test_parse_field_documentation_with_constants() { + let doc_text = r#"This field uses [`DEFAULT_TIMEOUT`] milliseconds. +--- +@default: [`DEFAULT_VALUE`] +@notes: + - See [`MAX_RETRIES`] for retry limit. + - Warning about [`DEPRECATED_CONST`]."#; + + let result = parse_field_documentation(doc_text, "test_field").unwrap(); + + // Check that constants were collected + assert_eq!(result.1.len(), 4); + assert!(result.1.contains("DEFAULT_TIMEOUT")); + assert!(result.1.contains("DEFAULT_VALUE")); + assert!(result.1.contains("MAX_RETRIES")); + assert!(result.1.contains("DEPRECATED_CONST")); + + // Check that normal parsing still works + assert_eq!(result.0.name, "test_field"); + assert!(result.0.description.contains("DEFAULT_TIMEOUT")); + assert!(result.0.default_value.is_some()); + assert!(result.0.notes.is_some()); + } + + #[test] + fn test_extract_config_docs_with_constants() { + let mock_rustdoc = serde_json::json!({ + "index": { + "struct_1": { + "name": "TestStruct", + "inner": { + "struct": { + "kind": { + "plain": { + "fields": ["field_1"] + } + } + } + }, + "docs": "Struct that uses [`STRUCT_CONSTANT`]." + }, + "field_1": { + "name": "test_field", + "docs": "Field using [`FIELD_CONSTANT`].\n---\n@default: [`DEFAULT_CONST`]" + }, + "const_1": { + "name": "STRUCT_CONSTANT", + "inner": { + "constant": { + "expr": "100" + } + } + }, + "const_2": { + "name": "FIELD_CONSTANT", + "inner": { + "constant": { + "value": "\"test\"" + } + } + }, + "const_3": { + "name": "DEFAULT_CONST", + "inner": { + "constant": { + "expr": "42" + } + } + } + } + }); + + let result = extract_config_docs_from_rustdoc(&mock_rustdoc, &None).unwrap(); + + // Check that constants were resolved + assert_eq!(result.referenced_constants.len(), 3); + assert_eq!( + result.referenced_constants.get("STRUCT_CONSTANT"), + Some(&Some("100".to_string())) + ); + assert_eq!( + result.referenced_constants.get("FIELD_CONSTANT"), + Some(&Some("\"test\"".to_string())) + ); + assert_eq!( + result.referenced_constants.get("DEFAULT_CONST"), + Some(&Some("42".to_string())) + ); + + // Check that struct was extracted normally + assert_eq!(result.structs.len(), 1); + assert_eq!(result.structs[0].name, "TestStruct"); + } + + #[test] + fn test_extract_config_docs_with_unresolvable_constants() { + let mock_rustdoc = serde_json::json!({ + "index": { + "struct_1": { + "name": "TestStruct", + "inner": { + "struct": { + "kind": { + "plain": { + "fields": ["field_1"] + } + } + } + }, + "docs": "Struct that references [`MISSING_CONSTANT`]." + }, + "field_1": { + "name": "test_field", + "docs": "Field description." 
+ } + } + }); + + let result = extract_config_docs_from_rustdoc(&mock_rustdoc, &None).unwrap(); + + // Check that unresolvable constant is recorded with None value + assert_eq!(result.referenced_constants.len(), 1); + assert_eq!( + result.referenced_constants.get("MISSING_CONSTANT"), + Some(&None) + ); + } + + #[test] + fn test_private_items_included_in_rustdoc() { + // This test verifies that our fix for including private items in rustdoc generation + // allows us to resolve private constants that were previously inaccessible + + // Simulate a rustdoc JSON that includes both public and private constants + // (which should happen with --document-private-items flag) + let mock_rustdoc = serde_json::json!({ + "index": { + "struct_1": { + "name": "TestStruct", + "inner": { + "struct": { + "kind": { + "plain": { + "fields": ["field_1"] + } + } + } + }, + "docs": "Struct description." + }, + "field_1": { + "name": "test_field", + "docs": "Field that uses [`PRIVATE_CONSTANT`] and [`PUBLIC_CONSTANT`]." + }, + // Public constant (would be included without --document-private-items) + "const_public": { + "name": "PUBLIC_CONSTANT", + "inner": { + "constant": { + "const": { + "expr": "100", + "type": "u32" + } + } + }, + "visibility": "public" + }, + // Private constant (only included with --document-private-items) + "const_private": { + "name": "PRIVATE_CONSTANT", + "inner": { + "constant": { + "const": { + "expr": "200", + "type": "u32" + } + } + }, + "visibility": "crate" + } + } + }); + + let result = extract_config_docs_from_rustdoc(&mock_rustdoc, &None).unwrap(); + + // Both constants should be resolved now + assert_eq!(result.referenced_constants.len(), 2); + assert_eq!( + result.referenced_constants.get("PUBLIC_CONSTANT"), + Some(&Some("100".to_string())) + ); + assert_eq!( + result.referenced_constants.get("PRIVATE_CONSTANT"), + Some(&Some("200".to_string())) + ); + } + + #[test] + fn test_multi_crate_constant_resolution() { + // This test verifies that our multi-crate constant resolution works + // It simulates the case where constants are defined in different crates + + // Create a mock rustdoc index for the main crate (without the target constant) + let main_index = serde_json::json!({ + "const_main": { + "name": "MAIN_CONSTANT", + "inner": { + "constant": { + "const": { + "expr": "100", + "type": "u32" + } + } + } + } + }); + + let main_index_obj = main_index.as_object().unwrap(); + + // Test resolving a constant that exists in main index + let result1 = resolve_constant_in_index("MAIN_CONSTANT", main_index_obj); + assert_eq!(result1, Some("100".to_string())); + + // Test resolving a constant that doesn't exist in main index + let result2 = resolve_constant_in_index("EXTERNAL_CONSTANT", main_index_obj); + assert_eq!(result2, None); + + // Note: Testing the full resolve_constant_reference function that reads from files + // would require setting up actual rustdoc JSON files, which is complex for unit tests. + // The integration test via the full pipeline covers this functionality. 
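+        // Illustrative cross-crate scenario (crate and constant names here are
+        // hypothetical): a field doc in one crate may cite [`SHARED_CONST`]
+        // defined elsewhere; resolution would then fall back to scanning the
+        // other crate's rustdoc JSON index after the primary index misses.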
+    }
+
+    #[test]
+    fn test_strip_type_suffix() {
+        // Test various type suffixes
+        assert_eq!(strip_type_suffix("50u64"), "50");
+        assert_eq!(strip_type_suffix("402_653_196u32"), "402_653_196");
+        assert_eq!(strip_type_suffix("100i32"), "100");
+        assert_eq!(strip_type_suffix("255u8"), "255");
+        assert_eq!(strip_type_suffix("3.14f32"), "3.14");
+        assert_eq!(strip_type_suffix("2.718f64"), "2.718");
+        assert_eq!(strip_type_suffix("1000usize"), "1000");
+        assert_eq!(strip_type_suffix("-42i64"), "-42");
+
+        // Test values without type suffixes (should remain unchanged)
+        assert_eq!(strip_type_suffix("42"), "42");
+        assert_eq!(strip_type_suffix("3.14"), "3.14");
+        assert_eq!(strip_type_suffix("hello"), "hello");
+        assert_eq!(strip_type_suffix("\"string\""), "\"string\"");
+
+        // Test edge cases
+        assert_eq!(strip_type_suffix(""), "");
+        assert_eq!(strip_type_suffix("u32"), "u32"); // Just the type name, not a suffixed value
+        assert_eq!(strip_type_suffix("value_u32_test"), "value_u32_test"); // Contains but doesn't end with type
+    }
+}
diff --git a/contrib/tools/config-docs-generator/src/generate_markdown.rs b/contrib/tools/config-docs-generator/src/generate_markdown.rs
new file mode 100644
index 0000000000..82f636ad59
--- /dev/null
+++ b/contrib/tools/config-docs-generator/src/generate_markdown.rs
@@ -0,0 +1,1161 @@
+// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
+// Copyright (C) 2020-2025 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
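+//! Renders the JSON emitted by the `extract-docs` binary as a single Markdown
+//! document. A minimal sketch of the expected input shape (the keys mirror the
+//! serde types defined below; the concrete values are illustrative only):
+//!
+//! ```json
+//! {
+//!   "structs": [{
+//!     "name": "NodeConfig",
+//!     "description": "Configuration for the node subsystem.",
+//!     "fields": [{
+//!       "name": "example_field",
+//!       "description": "What this field controls.",
+//!       "default_value": "`42`",
+//!       "notes": ["Units: seconds."],
+//!       "deprecated": null,
+//!       "toml_example": null
+//!     }]
+//!   }],
+//!   "referenced_constants": { "DEFAULT_TIMEOUT": "1000" }
+//! }
+//! ```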
+use std::collections::HashMap;
+use std::fs;
+
+use anyhow::{Context, Result};
+use clap::{Arg, Command};
+use once_cell::sync::Lazy;
+use serde::{Deserialize, Serialize};
+
+#[derive(Debug, Serialize, Deserialize, Clone)]
+struct FieldDoc {
+    name: String,
+    description: String,
+    default_value: Option<String>,
+    notes: Option<Vec<String>>,
+    deprecated: Option<String>,
+    toml_example: Option<String>,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+struct StructDoc {
+    name: String,
+    description: Option<String>,
+    fields: Vec<FieldDoc>,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+struct ConfigDocs {
+    structs: Vec<StructDoc>,
+    referenced_constants: HashMap<String, Option<String>>, // Name -> Resolved Value (or None)
+}
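+// Minimal sketch of loading the extractor output into these types (the file
+// name is hypothetical; in practice the path arrives via the --input argument):
+//
+//     let json = std::fs::read_to_string("extracted-config-docs.json")?;
+//     let docs: ConfigDocs = serde_json::from_str(&json)?;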
+// Global context for cross-references
+struct GlobalContext {
+    // Map from struct name to markdown section anchor
+    struct_to_anchor: HashMap<String, String>,
+    // Map from field name to (struct_name, anchor) for finding cross-references
+    field_to_struct: HashMap<String, (String, String)>,
+    // Map from constant name to value (if we can extract them)
+    constants: HashMap<String, String>,
+}
+
+// Static regex for finding intra-documentation links - compiled once at startup
+static LINK_REGEX_BACKTICKS: Lazy<regex::Regex> =
+    Lazy::new(|| regex::Regex::new(r"\[`([A-Za-z0-9_:]+)`\]").unwrap());
+
+fn main() -> Result<()> {
+    let matches = Command::new("generate-markdown")
+        .about("Generate Markdown documentation from extracted config docs JSON")
+        .arg(
+            Arg::new("input")
+                .long("input")
+                .value_name("FILE")
+                .help("Input JSON file with extracted documentation")
+                .required(true),
+        )
+        .arg(
+            Arg::new("output")
+                .long("output")
+                .value_name("FILE")
+                .help("Output Markdown file")
+                .required(true),
+        )
+        .get_matches();
+
+    let input_path = matches.get_one::<String>("input").unwrap();
+    let output_path = matches.get_one::<String>("output").unwrap();
+
+    let input_content = fs::read_to_string(input_path)
+        .with_context(|| format!("Failed to read input JSON file: {}", input_path))?;
+
+    let config_docs: ConfigDocs =
+        serde_json::from_str(&input_content).with_context(|| "Failed to parse input JSON")?;
+
+    let markdown = generate_markdown(&config_docs)?;
+
+    fs::write(output_path, markdown)
+        .with_context(|| format!("Failed to write output file: {}", output_path))?;
+
+    println!(
+        "Successfully generated Markdown documentation at {}",
+        output_path
+    );
+    Ok(())
+}
+
+fn generate_markdown(config_docs: &ConfigDocs) -> Result<String> {
+    let mut output = String::new();
+
+    // Build global context for cross-references
+    let global_context = build_global_context(config_docs);
+
+    // Header
+    output.push_str("# Stacks Node Configuration Reference\n\n");
+    output.push_str("This document provides a comprehensive reference for all configuration options available in the Stacks node TOML configuration file.\n\n");
+    output.push_str(
+        "The configuration is automatically generated from the Rust source code documentation.\n\n",
+    );
+
+    // Table of contents
+    output.push_str("## Table of Contents\n\n");
+    for struct_doc in &config_docs.structs {
+        let section_name = struct_to_section_name(&struct_doc.name);
+        output.push_str(&format!(
+            "- [{}]({})\n",
+            section_name,
+            section_anchor(&section_name)
+        ));
+    }
+    output.push('\n');
+
+    // Generate sections for each struct
+    for struct_doc in &config_docs.structs {
+        generate_struct_section(&mut output, struct_doc, &global_context)?;
+        output.push('\n');
+    }
+
+    Ok(output)
+}
+
+fn build_global_context(config_docs: &ConfigDocs) -> GlobalContext {
+    let mut struct_to_anchor = HashMap::new();
+    let mut field_to_struct = HashMap::new();
+    let mut resolved_constants_map = HashMap::new();
+
+    // Build mappings
+    for struct_doc in &config_docs.structs {
+        let section_name = struct_to_section_name(&struct_doc.name);
+        let anchor = section_anchor(&section_name);
+        struct_to_anchor.insert(struct_doc.name.clone(), anchor.clone());
+
+        for field in &struct_doc.fields {
+            field_to_struct.insert(
+                field.name.clone(),
+                (struct_doc.name.clone(), anchor.clone()),
+            );
+        }
+    }
+
+    // Populate constants from the parsed ConfigDocs.referenced_constants
+    for (name, opt_value) in &config_docs.referenced_constants {
+        if let Some(value) = opt_value {
+            resolved_constants_map.insert(name.clone(), value.clone());
+        }
+    }
+
+    GlobalContext {
+        struct_to_anchor,
+        field_to_struct,
+        constants: resolved_constants_map,
+    }
+}
+
+fn generate_struct_section(
+    output: &mut String,
+    struct_doc: &StructDoc,
+    global_context: &GlobalContext,
+) -> Result<()> {
+    let section_name = struct_to_section_name(&struct_doc.name);
+    output.push_str(&format!("## {}\n\n", section_name));
+
+    // Add struct description if available
+    if let Some(description) = &struct_doc.description {
+        output.push_str(&format!(
+            "{}\n\n",
+            process_intralinks_with_context(description, global_context, &struct_doc.name)
+        ));
+    }
+
+    // Only create table if there are fields
+    if struct_doc.fields.is_empty() {
+        output.push_str("*No configurable parameters documented.*\n\n");
+        return Ok(());
+    }
+
+    // Sort fields: non-deprecated first, then deprecated
+    let mut sorted_fields = struct_doc.fields.clone();
+    sorted_fields.sort_by(|a, b| {
+        let a_deprecated = is_deprecated(a);
+        let b_deprecated = is_deprecated(b);
+
+        match (a_deprecated, b_deprecated) {
+            (false, true) => std::cmp::Ordering::Less, // non-deprecated first
+            (true, false) => std::cmp::Ordering::Greater, // deprecated last
+            _ => a.name.cmp(&b.name), // alphabetical within groups
+        }
+    });
+
+    // Parameter table header
+    output.push_str("| Parameter | Description | Default |\n");
+    output.push_str("|-----------|-------------|----------|\n");
+
+    // Generate table rows for each field
+    for field in &sorted_fields {
+        generate_field_row(output, field, &struct_doc.name, global_context)?;
+    }
+
+    output.push('\n');
+    Ok(())
+}
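+// For orientation, a single rendered row looks roughly like this (illustrative
+// section/field names; the leading <span> is the anchor the links point at):
+//
+// | <span id="node-example_field"></span>[example_field](#node-example_field) | Does X.<br><br>**Notes:**<br>- Units: seconds. | `42` |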
+fn generate_field_row(
+    output: &mut String,
+    field: &FieldDoc,
+    struct_name: &str,
+    global_context: &GlobalContext,
+) -> Result<()> {
+    // Create proper anchor ID
+    let section_name = struct_to_section_name(struct_name);
+    let anchor_id = format!(
+        "{}-{}",
+        section_name.trim_start_matches('[').trim_end_matches(']'),
+        field.name
+    );
+
+    // Use HTML span with id for proper anchoring
+    let field_name = if is_deprecated(field) {
+        format!(
+            "~~<span id=\"{}\"></span>[{}](#{})~~",
+            anchor_id,
+            escape_markdown(&field.name),
+            anchor_id
+        )
+    } else {
+        format!(
+            "<span id=\"{}\"></span>[{}](#{})",
+            anchor_id,
+            escape_markdown(&field.name),
+            anchor_id
+        )
+    };
+
+    // Build comprehensive description column with struct context
+    let mut description_parts = Vec::new();
+
+    // Main description
+    if !field.description.is_empty() {
+        let main_desc = if let Some(separator_pos) = field.description.find("---") {
+            field.description[..separator_pos].trim()
+        } else {
+            &field.description
+        };
+
+        if !main_desc.is_empty() {
+            // Check if this description contains hierarchical lists (indented bullet points)
+            let has_hierarchical_lists = main_desc.lines().any(|line| {
+                let trimmed = line.trim();
+                let leading_spaces = line.len() - line.trim_start().len();
+                trimmed.starts_with("- ") && leading_spaces > 0
+            });
+
+            let processed_desc = if has_hierarchical_lists {
+                // Use hierarchical list processing to preserve indentation
+                process_hierarchical_lists(main_desc, global_context, struct_name)
+            } else {
+                // Use regular processing with intra-links
+                process_intralinks_with_context(main_desc, global_context, struct_name)
+                    .replace('\n', "<br>")
+            };
+
+            description_parts.push(processed_desc);
+        }
+    }
+
+    // Add notes if present
+    if let Some(notes) = &field.notes {
+        let mut notes_section = String::new();
+        notes_section.push_str("<br><br>**Notes:**");
+        for note in notes {
+            notes_section.push_str(&format!(
+                "<br>- {}",
+                process_intralinks_with_context(note, global_context, struct_name)
+            ));
+        }
+        description_parts.push(notes_section);
+    }
+
+    // Add deprecation warning if present
+    if let Some(deprecated) = &field.deprecated {
+        description_parts.push(format!("<br><br>**⚠️ DEPRECATED:** {}", deprecated));
+    }
+
+    // Add TOML example if present
+    if let Some(toml_example) = &field.toml_example {
+        let clean_example = if toml_example.starts_with('|') {
+            toml_example.trim_start_matches('|').trim_start()
+        } else {
+            toml_example
+        };
+
+        // Use HTML pre/code formatting that works properly in markdown tables
+        // instead of markdown fenced code blocks which get mangled by br tag conversion
+        let escaped_example = clean_example
+            .replace('&', "&amp;")
+            .replace('<', "&lt;")
+            .replace('>', "&gt;");
+
+        let example_section = format!(
+            "<br><br>**Example:**<br><pre><code>{}</code></pre>",
+            escaped_example.replace('\n', "<br>")
+        );
+        description_parts.push(example_section);
+    }
+
+    let description = if description_parts.is_empty() {
+        "*No description available*".to_string()
+    } else {
+        description_parts.join("")
+    };
+
+    // Default value column
+    let default_value = if let Some(default) = &field.default_value {
+        process_intralinks_with_context(default, global_context, struct_name)
+    } else {
+        "*Required*".to_string()
+    };
+
+    output.push_str(&format!(
+        "| {} | {} | {} |\n",
+        field_name,
+        escape_markdown_table(&description),
+        escape_markdown_table(&default_value)
+    ));
+
+    Ok(())
+}
+
+fn escape_markdown_table(text: &str) -> String {
+    text.replace('|', "\\|").replace('\n', "<br>")
+}
") +} + +fn is_deprecated(field: &FieldDoc) -> bool { + field.deprecated.is_some() +} + +fn struct_to_section_name(struct_name: &str) -> String { + // Convert struct name to section name (e.g., "NodeConfig" -> "[node]") + // NOTE: This function contains hardcoded mappings from Rust struct names to their + // desired TOML section names in the Markdown output. It must be updated if new + // top-level configuration structs are added or existing ones are renamed. + match struct_name { + "BurnchainConfig" => "[burnchain]".to_string(), + "NodeConfig" => "[node]".to_string(), + "MinerConfig" => "[miner]".to_string(), + "ConnectionOptionsFile" => "[connection_options]".to_string(), + "FeeEstimationConfigFile" => "[fee_estimation]".to_string(), + "EventObserverConfigFile" => "[event_observer]".to_string(), + "InitialBalanceFile" => "[initial_balances]".to_string(), + _ => format!("[{}]", struct_name.to_lowercase()), + } +} + +fn escape_markdown(text: &str) -> String { + text.replace('|', "\\|") + .replace('[', "\\[") + .replace(']', "\\]") +} + +fn section_anchor(section: &str) -> String { + format!( + "#{}", + section + .to_lowercase() + .replace(' ', "-") + .replace("[", "") + .replace("]", "") + ) +} + +fn process_intralinks_with_context( + text: &str, + global_context: &GlobalContext, + current_struct_name: &str, +) -> String { + // Process cross-references in both formats: + // 1. [`StructName::field`] or [`CONSTANT_NAME`] (with backticks) + LINK_REGEX_BACKTICKS + .replace_all(text, |caps: ®ex::Captures| { + process_reference(&caps[1], global_context, current_struct_name) + }) + .to_string() +} + +fn process_reference( + reference: &str, + global_context: &GlobalContext, + current_struct_name: &str, +) -> String { + if reference.contains("::") { + // This is a struct::field reference + let parts: Vec<&str> = reference.split("::").collect(); + if parts.len() == 2 { + let ref_struct_name = parts[0]; + let field_name = parts[1]; + + // Check if the referenced struct exists in our docs + if global_context + .struct_to_anchor + .contains_key(ref_struct_name) + { + // Create proper anchor ID + let section_name = struct_to_section_name(ref_struct_name); + let anchor_id = format!( + "{}-{}", + section_name.trim_start_matches('[').trim_end_matches(']'), + field_name + ); + + // Check if it's the same struct or different struct + if ref_struct_name == current_struct_name { + // Same struct: just show field name + return format!("[{}](#{}) ", field_name, anchor_id); + } else { + // Different struct: show [config_section].field_name as a link + let config_section = section_name.trim_start_matches('[').trim_end_matches(']'); + return format!("[[{}].{}](#{}) ", config_section, field_name, anchor_id); + } + } + } + } else { + // This might be a constant reference + if let Some(value) = global_context.constants.get(reference) { + return format!("`{value}`"); + } + + // Check if it's a standalone field name (without struct prefix) + if let Some((field_struct_name, _anchor)) = global_context.field_to_struct.get(reference) { + let section_name = struct_to_section_name(field_struct_name); + let anchor_id = format!( + "{}-{}", + section_name.trim_start_matches('[').trim_end_matches(']'), + reference + ); + + // Check if it's the same struct or different struct + if field_struct_name == current_struct_name { + // Same struct: just show field name + return format!("[{}](#{}) ", reference, anchor_id); + } else { + // Different struct: show [config_section].field_name as a link + let config_section = 
+fn process_reference(
+    reference: &str,
+    global_context: &GlobalContext,
+    current_struct_name: &str,
+) -> String {
+    if reference.contains("::") {
+        // This is a struct::field reference
+        let parts: Vec<&str> = reference.split("::").collect();
+        if parts.len() == 2 {
+            let ref_struct_name = parts[0];
+            let field_name = parts[1];
+
+            // Check if the referenced struct exists in our docs
+            if global_context
+                .struct_to_anchor
+                .contains_key(ref_struct_name)
+            {
+                // Create proper anchor ID
+                let section_name = struct_to_section_name(ref_struct_name);
+                let anchor_id = format!(
+                    "{}-{}",
+                    section_name.trim_start_matches('[').trim_end_matches(']'),
+                    field_name
+                );
+
+                // Check if it's the same struct or different struct
+                if ref_struct_name == current_struct_name {
+                    // Same struct: just show field name
+                    return format!("[{}](#{}) ", field_name, anchor_id);
+                } else {
+                    // Different struct: show [config_section].field_name as a link
+                    let config_section = section_name.trim_start_matches('[').trim_end_matches(']');
+                    return format!("[[{}].{}](#{}) ", config_section, field_name, anchor_id);
+                }
+            }
+        }
+    } else {
+        // This might be a constant reference
+        if let Some(value) = global_context.constants.get(reference) {
+            return format!("`{value}`");
+        }
+
+        // Check if it's a standalone field name (without struct prefix)
+        if let Some((field_struct_name, _anchor)) = global_context.field_to_struct.get(reference) {
+            let section_name = struct_to_section_name(field_struct_name);
+            let anchor_id = format!(
+                "{}-{}",
+                section_name.trim_start_matches('[').trim_end_matches(']'),
+                reference
+            );
+
+            // Check if it's the same struct or different struct
+            if field_struct_name == current_struct_name {
+                // Same struct: just show field name
+                return format!("[{}](#{}) ", reference, anchor_id);
+            } else {
+                // Different struct: show [config_section].field_name as a link
+                let config_section = section_name.trim_start_matches('[').trim_end_matches(']');
+                return format!("[[{}].{}](#{}) ", config_section, reference, anchor_id);
+            }
+        }
+    }
+
+    // If we can't resolve the reference, keep the text
+    format!("`{reference}`")
+}
+
+/// Process text to preserve hierarchical list indentation
+/// Converts markdown-style indented lists to HTML that preserves indentation in table cells
+fn process_hierarchical_lists(
+    text: &str,
+    global_context: &GlobalContext,
+    struct_name: &str,
+) -> String {
+    let lines: Vec<&str> = text.lines().collect();
+    let mut result = Vec::new();
+
+    for line in lines {
+        if line.trim().starts_with("- ") {
+            // Count leading spaces to determine indentation level
+            let leading_spaces = line.len() - line.trim_start().len();
+
+            // Convert spaces to non-breaking spaces for HTML preservation
+            // Every 2 spaces becomes 2 &nbsp; entities for visual indentation
+            let indent_html = "&nbsp;".repeat(leading_spaces);
+
+            // Process intra-links in the content
+            let content = line.trim();
+            let processed_content =
+                process_intralinks_with_context(content, global_context, struct_name);
+
+            result.push(format!("{}{}", indent_html, processed_content));
+        } else {
+            // Process intra-links in non-bullet lines too
+            let processed_line = process_intralinks_with_context(line, global_context, struct_name);
+            result.push(processed_line);
+        }
+    }
+
+    result.join("<br>
") +} + +#[cfg(test)] +mod tests { + use super::*; + + // Helper function to create a basic FieldDoc for testing + fn create_field_doc(name: &str, description: &str) -> FieldDoc { + FieldDoc { + name: name.to_string(), + description: description.to_string(), + default_value: None, + notes: None, + deprecated: None, + toml_example: None, + } + } + + // Helper function to create a basic StructDoc for testing + fn create_struct_doc( + name: &str, + description: Option<&str>, + fields: Vec, + ) -> StructDoc { + StructDoc { + name: name.to_string(), + description: description.map(|s| s.to_string()), + fields, + } + } + + // Helper function to create a basic ConfigDocs for testing + fn create_config_docs(structs: Vec) -> ConfigDocs { + ConfigDocs { + structs, + referenced_constants: HashMap::new(), + } + } + + // Helper function to create a mock GlobalContext for testing + fn create_mock_global_context() -> GlobalContext { + let mut struct_to_anchor = HashMap::new(); + let mut field_to_struct = HashMap::new(); + let mut constants = HashMap::new(); + + // Add some test structs and fields + struct_to_anchor.insert("NodeConfig".to_string(), "#node".to_string()); + struct_to_anchor.insert("MinerConfig".to_string(), "#miner".to_string()); + + field_to_struct.insert( + "test_field".to_string(), + ("NodeConfig".to_string(), "#node".to_string()), + ); + field_to_struct.insert( + "other_field".to_string(), + ("MinerConfig".to_string(), "#miner".to_string()), + ); + + constants.insert("TEST_CONSTANT".to_string(), "42".to_string()); + constants.insert("ANOTHER_CONSTANT".to_string(), "true".to_string()); + + GlobalContext { + struct_to_anchor, + field_to_struct, + constants, + } + } + + // I. Basic Markdown Generation Tests + + #[test] + fn test_generate_markdown_empty_config() { + let config_docs = create_config_docs(vec![]); + let result = generate_markdown(&config_docs).unwrap(); + + assert!(result.contains("# Stacks Node Configuration Reference")); + assert!(result.contains("## Table of Contents")); + // Should not contain any specific struct sections + assert!(!result.contains("## [")); + } + + #[test] + fn test_generate_markdown_with_one_struct_no_fields() { + let struct_doc = create_struct_doc("TestStruct", Some("A test struct"), vec![]); + let config_docs = create_config_docs(vec![struct_doc]); + let result = generate_markdown(&config_docs).unwrap(); + + assert!(result.contains("# Stacks Node Configuration Reference")); + assert!(result.contains("- [[teststruct]](#teststruct)")); + assert!(result.contains("## [teststruct]")); + assert!(result.contains("A test struct")); + assert!(result.contains("*No configurable parameters documented.*")); + } + + #[test] + fn test_generate_markdown_with_one_struct_with_fields() { + let field = create_field_doc("test_field", "A test field"); + let struct_doc = create_struct_doc("TestStruct", Some("A test struct"), vec![field]); + let config_docs = create_config_docs(vec![struct_doc]); + let result = generate_markdown(&config_docs).unwrap(); + + assert!(result.contains("# Stacks Node Configuration Reference")); + assert!(result.contains("- [[teststruct]](#teststruct)")); + assert!(result.contains("## [teststruct]")); + assert!(result.contains("A test struct")); + assert!(result.contains("| Parameter | Description | Default |")); + assert!(result.contains("test_field")); + assert!(result.contains("A test field")); + } + + // II. 
Section & Anchor Generation Tests + + #[test] + fn test_struct_to_section_name_known_structs() { + assert_eq!(struct_to_section_name("BurnchainConfig"), "[burnchain]"); + assert_eq!(struct_to_section_name("NodeConfig"), "[node]"); + assert_eq!(struct_to_section_name("MinerConfig"), "[miner]"); + assert_eq!( + struct_to_section_name("ConnectionOptionsFile"), + "[connection_options]" + ); + assert_eq!( + struct_to_section_name("FeeEstimationConfigFile"), + "[fee_estimation]" + ); + assert_eq!( + struct_to_section_name("EventObserverConfigFile"), + "[event_observer]" + ); + assert_eq!( + struct_to_section_name("InitialBalanceFile"), + "[initial_balances]" + ); + } + + #[test] + fn test_struct_to_section_name_unknown_struct() { + assert_eq!(struct_to_section_name("MyCustomConfig"), "[mycustomconfig]"); + assert_eq!(struct_to_section_name("UnknownStruct"), "[unknownstruct]"); + } + + #[test] + fn test_section_anchor_generation() { + assert_eq!(section_anchor("[node]"), "#node"); + assert_eq!(section_anchor("[burnchain]"), "#burnchain"); + assert_eq!(section_anchor("[my custom section]"), "#my-custom-section"); + assert_eq!( + section_anchor("[connection_options]"), + "#connection_options" + ); + } + + // III. Field Row Generation Tests + + #[test] + fn test_generate_field_row_basic_field() { + let field = create_field_doc("basic_field", "A basic field description"); + let global_context = create_mock_global_context(); + let mut output = String::new(); + + generate_field_row(&mut output, &field, "TestStruct", &global_context).unwrap(); + + assert!(output.contains("basic_field")); + assert!(output.contains("A basic field description")); + assert!(output.contains("*Required*")); + assert!(output.contains("")); + } + + #[test] + fn test_generate_field_row_with_default_value() { + let mut field = create_field_doc("field_with_default", "Field with default value"); + field.default_value = Some("`42`".to_string()); + let global_context = create_mock_global_context(); + let mut output = String::new(); + + generate_field_row(&mut output, &field, "TestStruct", &global_context).unwrap(); + + assert!(output.contains("field_with_default")); + assert!(output.contains("Field with default value")); + assert!(output.contains("`42`")); + assert!(!output.contains("*Required*")); + } + + #[test] + fn test_generate_field_row_without_default_value() { + let field = create_field_doc("required_field", "A required field"); + let global_context = create_mock_global_context(); + let mut output = String::new(); + + generate_field_row(&mut output, &field, "TestStruct", &global_context).unwrap(); + + assert!(output.contains("required_field")); + assert!(output.contains("*Required*")); + } + + #[test] + fn test_generate_field_row_with_notes() { + let mut field = create_field_doc("field_with_notes", "Field with notes"); + field.notes = Some(vec!["First note".to_string(), "Second note".to_string()]); + let global_context = create_mock_global_context(); + let mut output = String::new(); + + generate_field_row(&mut output, &field, "TestStruct", &global_context).unwrap(); + + assert!(output.contains("**Notes:**")); + assert!(output.contains("- First note")); + assert!(output.contains("- Second note")); + } + + #[test] + fn test_generate_field_row_deprecated_field() { + let mut field = create_field_doc("old_field", "An old field"); + field.deprecated = Some("Use new_field instead".to_string()); + let global_context = create_mock_global_context(); + let mut output = String::new(); + + generate_field_row(&mut output, &field, 
"TestStruct", &global_context).unwrap(); + + assert!(output.contains("~~")); + assert!(output.contains("**⚠️ DEPRECATED:**")); + assert!(output.contains("Use new_field instead")); + } + + #[test] + fn test_generate_field_row_with_toml_example() { + let mut field = create_field_doc("field_with_example", "Field with TOML example"); + field.toml_example = Some("key = \"value\"\nnumber = 42".to_string()); + let global_context = create_mock_global_context(); + let mut output = String::new(); + + generate_field_row(&mut output, &field, "TestStruct", &global_context).unwrap(); + + assert!(output.contains("**Example:**")); + assert!(output.contains("
"));
+        assert!(output.contains("key = \"value\""));
+        assert!(output.contains("number = 42"));
+        assert!(output.contains("
")); + } + + #[test] + fn test_generate_field_row_toml_example_with_pipe() { + let mut field = create_field_doc("field_with_pipe_example", "Field with pipe example"); + field.toml_example = Some("|\nkey = \"value\"\nnumber = 42".to_string()); + let global_context = create_mock_global_context(); + let mut output = String::new(); + + generate_field_row(&mut output, &field, "TestStruct", &global_context).unwrap(); + + assert!(output.contains("**Example:**")); + assert!(output.contains("
"));
+        assert!(output.contains("key = \"value\""));
+        // The TOML content should not contain the leading pipe character
+        assert!(!output.contains("
|"));
+        assert!(!output.contains("|\nkey"));
+        assert!(output.contains("
")); + } + + #[test] + fn test_generate_field_row_all_attributes() { + let mut field = create_field_doc("complex_field", "A complex field"); + field.default_value = Some("`\"default\"`".to_string()); + field.notes = Some(vec!["Important note".to_string()]); + field.deprecated = Some("Use better_field instead".to_string()); + field.toml_example = Some("field = \"example\"".to_string()); + let global_context = create_mock_global_context(); + let mut output = String::new(); + + generate_field_row(&mut output, &field, "TestStruct", &global_context).unwrap(); + + assert!(output.contains("~~")); // deprecated + assert!(output.contains("A complex field")); + assert!(output.contains("**Notes:**")); + assert!(output.contains("- Important note")); + assert!(output.contains("**⚠️ DEPRECATED:**")); + assert!(output.contains("Use better_field instead")); + assert!(output.contains("**Example:**")); + assert!(output.contains("
"));
+        assert!(output.contains("`\"default\"`"));
+        assert!(output.contains("
")); + } + + #[test] + fn test_generate_field_row_empty_description_parts() { + let field = FieldDoc { + name: "minimal_field".to_string(), + description: "".to_string(), + default_value: None, + notes: None, + deprecated: None, + toml_example: None, + }; + let global_context = create_mock_global_context(); + let mut output = String::new(); + + generate_field_row(&mut output, &field, "TestStruct", &global_context).unwrap(); + + assert!(output.contains("minimal_field")); + assert!(output.contains("*No description available*")); + assert!(output.contains("*Required*")); + } + + #[test] + fn test_field_name_escaping_in_row() { + let field = create_field_doc("field|with[special]chars", "Description"); + let global_context = create_mock_global_context(); + let mut output = String::new(); + + generate_field_row(&mut output, &field, "TestStruct", &global_context).unwrap(); + + assert!(output.contains("field\\|with\\[special\\]chars")); + } + + #[test] + fn test_field_anchor_id_generation() { + let field = create_field_doc("test_anchor", "Test anchor generation"); + let global_context = create_mock_global_context(); + let mut output = String::new(); + + generate_field_row(&mut output, &field, "NodeConfig", &global_context).unwrap(); + + assert!(output.contains("")); + assert!(output.contains("(#node-test_anchor)")); + } + + // IV. Struct Section Generation Tests + + #[test] + fn test_generate_struct_section_description() { + let struct_doc = create_struct_doc("TestStruct", Some("This is a test struct"), vec![]); + let global_context = create_mock_global_context(); + let mut output = String::new(); + + generate_struct_section(&mut output, &struct_doc, &global_context).unwrap(); + + assert!(output.contains("## [teststruct]")); + assert!(output.contains("This is a test struct")); + assert!(output.contains("*No configurable parameters documented.*")); + } + + #[test] + fn test_generate_struct_section_no_description() { + let struct_doc = create_struct_doc("TestStruct", None, vec![]); + let global_context = create_mock_global_context(); + let mut output = String::new(); + + generate_struct_section(&mut output, &struct_doc, &global_context).unwrap(); + + assert!(output.contains("## [teststruct]")); + assert!(!output.contains("This is a test struct")); + assert!(output.contains("*No configurable parameters documented.*")); + } + + #[test] + fn test_generate_struct_section_field_sorting() { + let normal_field = create_field_doc("b_normal", "Normal field"); + let mut deprecated_field = create_field_doc("a_deprecated", "Deprecated field"); + deprecated_field.deprecated = Some("Old field".to_string()); + let another_normal = create_field_doc("c_normal", "Another normal field"); + + let struct_doc = create_struct_doc( + "TestStruct", + None, + vec![deprecated_field, normal_field, another_normal], + ); + let global_context = create_mock_global_context(); + let mut output = String::new(); + + generate_struct_section(&mut output, &struct_doc, &global_context).unwrap(); + + // Normal fields should come first, then deprecated + let b_normal_pos = output.find("b_normal").unwrap(); + let c_normal_pos = output.find("c_normal").unwrap(); + let a_deprecated_pos = output.find("a_deprecated").unwrap(); + + assert!(b_normal_pos < c_normal_pos); + assert!(c_normal_pos < a_deprecated_pos); + } + + // V. 
Markdown Escaping Tests
+
+    #[test]
+    fn test_escape_markdown_various_chars() {
+        assert_eq!(escape_markdown("test|pipe"), "test\\|pipe");
+        assert_eq!(escape_markdown("test[bracket]"), "test\\[bracket\\]");
+        assert_eq!(escape_markdown("normal text"), "normal text");
+    }
+
+    #[test]
+    fn test_escape_markdown_table_various_chars() {
+        assert_eq!(escape_markdown_table("test|pipe"), "test\\|pipe");
+        assert_eq!(escape_markdown_table("line1\nline2"), "line1<br>line2");
+        assert_eq!(
+            escape_markdown_table("line1\nwith|pipe"),
+            "line1<br>
with\\|pipe" + ); + } + + // VI. Intra-link Processing Tests + + #[test] + fn test_intralink_no_links() { + let global_context = create_mock_global_context(); + let text = "This is normal text without any links"; + let result = process_intralinks_with_context(text, &global_context, "TestStruct"); + assert_eq!(result, text); + } + + #[test] + fn test_intralink_to_field_in_same_struct() { + let global_context = create_mock_global_context(); + let text = "See [`NodeConfig::test_field`] for details"; + let result = process_intralinks_with_context(text, &global_context, "NodeConfig"); + assert!(result.contains("[test_field](#node-test_field)")); + } + + #[test] + fn test_intralink_to_field_in_different_struct() { + let global_context = create_mock_global_context(); + let text = "See [`MinerConfig::other_field`] for details"; + let result = process_intralinks_with_context(text, &global_context, "NodeConfig"); + assert!(result.contains("[[miner].other_field](#miner-other_field)")); + } + + #[test] + fn test_intralink_to_standalone_field_in_same_struct() { + let global_context = create_mock_global_context(); + let text = "See [`test_field`] for details"; + let result = process_intralinks_with_context(text, &global_context, "NodeConfig"); + assert!(result.contains("[test_field](#node-test_field)")); + } + + #[test] + fn test_intralink_to_standalone_field_in_different_struct() { + let global_context = create_mock_global_context(); + let text = "See [`other_field`] for details"; + let result = process_intralinks_with_context(text, &global_context, "NodeConfig"); + assert!(result.contains("[[miner].other_field](#miner-other_field)")); + } + + #[test] + fn test_intralink_to_constant() { + let global_context = create_mock_global_context(); + let text = "The default value is [`TEST_CONSTANT`]"; + let result = process_intralinks_with_context(text, &global_context, "TestStruct"); + assert!(result.contains("42")); + assert!(!result.contains("TEST_CONSTANT")); + } + + #[test] + fn test_intralink_unresolved_struct_field_reference() { + let global_context = create_mock_global_context(); + let text = "See [`UnknownStruct::unknown_field`] for details"; + let result = process_intralinks_with_context(text, &global_context, "TestStruct"); + assert!(result.contains("UnknownStruct::unknown_field")); + assert!(!result.contains("[`")); + } + + #[test] + fn test_intralink_unresolved_standalone_reference() { + let global_context = create_mock_global_context(); + let text = "The value [`unknown_reference`] is not found"; + let result = process_intralinks_with_context(text, &global_context, "TestStruct"); + assert!(result.contains("unknown_reference")); + assert!(!result.contains("[`")); + } + + #[test] + fn test_intralink_malformed_reference() { + let global_context = create_mock_global_context(); + let text = "See [`Struct::Field::Extra`] for details"; + let result = process_intralinks_with_context(text, &global_context, "TestStruct"); + assert!(result.contains("Struct::Field::Extra")); + assert!(!result.contains("[`")); + } + + #[test] + fn test_intralink_multiple_links_in_text() { + let global_context = create_mock_global_context(); + let text = "See [`TEST_CONSTANT`] and [`NodeConfig::test_field`] and [`unknown_ref`]"; + let result = process_intralinks_with_context(text, &global_context, "NodeConfig"); + + assert!(result.contains("42")); // constant resolved + assert!(result.contains("[test_field](#node-test_field)")); // field resolved + assert!(result.contains("unknown_ref")); // unresolved kept as text + } + + // 
VII. Global Context Building Tests + + #[test] + fn test_build_global_context_struct_anchors() { + let structs = vec![ + create_struct_doc("NodeConfig", None, vec![]), + create_struct_doc("MinerConfig", None, vec![]), + ]; + let config_docs = create_config_docs(structs); + let context = build_global_context(&config_docs); + + assert_eq!( + context.struct_to_anchor.get("NodeConfig"), + Some(&"#node".to_string()) + ); + assert_eq!( + context.struct_to_anchor.get("MinerConfig"), + Some(&"#miner".to_string()) + ); + } + + #[test] + fn test_build_global_context_field_struct_mapping() { + let field1 = create_field_doc("field1", "Description"); + let field2 = create_field_doc("field2", "Description"); + let structs = vec![ + create_struct_doc("NodeConfig", None, vec![field1]), + create_struct_doc("MinerConfig", None, vec![field2]), + ]; + let config_docs = create_config_docs(structs); + let context = build_global_context(&config_docs); + + assert_eq!( + context.field_to_struct.get("field1"), + Some(&("NodeConfig".to_string(), "#node".to_string())) + ); + assert_eq!( + context.field_to_struct.get("field2"), + Some(&("MinerConfig".to_string(), "#miner".to_string())) + ); + } + + #[test] + fn test_build_global_context_constants() { + let config_docs = create_config_docs(vec![]); + let context = build_global_context(&config_docs); + + // Should have no constants if none are referenced + assert_eq!(context.constants.len(), 0); + } + + // Helper function tests + + #[test] + fn test_is_deprecated() { + let normal_field = create_field_doc("normal", "Normal field"); + let mut deprecated_field = create_field_doc("deprecated", "Deprecated field"); + deprecated_field.deprecated = Some("Use other field".to_string()); + + assert!(!is_deprecated(&normal_field)); + assert!(is_deprecated(&deprecated_field)); + } + + #[test] + fn test_build_global_context_with_referenced_constants() { + let mut config_docs = create_config_docs(vec![]); + config_docs + .referenced_constants + .insert("TEST_CONSTANT".to_string(), Some("42".to_string())); + config_docs + .referenced_constants + .insert("STRING_CONST".to_string(), Some("\"hello\"".to_string())); + config_docs + .referenced_constants + .insert("UNRESOLVED_CONST".to_string(), None); + + let context = build_global_context(&config_docs); + + // Only resolved constants should be in the context + assert_eq!(context.constants.len(), 2); + assert_eq!( + context.constants.get("TEST_CONSTANT"), + Some(&"42".to_string()) + ); + assert_eq!( + context.constants.get("STRING_CONST"), + Some(&"\"hello\"".to_string()) + ); + assert!(!context.constants.contains_key("UNRESOLVED_CONST")); + } + + #[test] + fn test_build_global_context_empty_referenced_constants() { + let config_docs = create_config_docs(vec![]); + let context = build_global_context(&config_docs); + + // Should have no constants if none are referenced + assert_eq!(context.constants.len(), 0); + } + + #[test] + fn test_generate_field_row_toml_example_no_literal_br_tags() { + let mut field = + create_field_doc("field_with_multiline_example", "Field with multiline TOML"); + field.toml_example = Some( + "txs_to_consider = \"TokenTransfer,ContractCall\"\nother_setting = \"value\"" + .to_string(), + ); + let global_context = create_mock_global_context(); + let mut output = String::new(); + + generate_field_row(&mut output, &field, "TestStruct", &global_context).unwrap(); + + // Verify that the TOML example is properly formatted with HTML pre/code blocks + assert!(output.contains("
"));
+        assert!(output.contains("txs_to_consider = \"TokenTransfer,ContractCall\""));
+        assert!(output.contains("other_setting = \"value\""));
+        assert!(output.contains("
")); + + // This is the critical test: ensure we don't have malformed markdown like + // ```toml
content
``` which renders literal
tags + assert!(!output.contains("```toml
")); + assert!(!output.contains("
```")); + + // Verify proper line separation with
within the code block + assert!(output.contains("ContractCall\"
other_setting")); + } + + #[test] + fn test_generate_field_row_hierarchical_lists() { + let field = create_field_doc( + "complex_list_field", + r"Field with hierarchical lists: +- Main item 1 + - Sub item 1a + - Sub-sub item 1a1 + - Sub item 1b +- Main item 2 + - Sub item 2a", + ); + let global_context = create_mock_global_context(); + let mut output = String::new(); + + generate_field_row(&mut output, &field, "TestStruct", &global_context).unwrap(); + + // Verify that indentation is preserved with   entities + assert!(output.contains("- Main item 1")); + assert!(output.contains("  - Sub item 1a")); + assert!(output.contains("    - Sub-sub item 1a1")); + assert!(output.contains("  - Sub item 1b")); + assert!(output.contains("- Main item 2")); + assert!(output.contains("  - Sub item 2a")); + } + + #[test] + fn test_generate_field_row_hierarchical_lists_with_intralinks() { + let field = create_field_doc( + "list_with_links", + r"Field with links in hierarchical lists: +- Main item with [`TEST_CONSTANT`] + - Sub item with [`NodeConfig::test_field`] + - Sub-sub item with [`other_field`]", + ); + let global_context = create_mock_global_context(); + let mut output = String::new(); + + generate_field_row(&mut output, &field, "TestStruct", &global_context).unwrap(); + + // Verify that indentation is preserved AND intra-links are processed + assert!(output.contains("- Main item with `42`")); // constant resolved + assert!( + output.contains("  - Sub item with [[node].test_field](#node-test_field)") + ); // field link with indentation + assert!(output.contains( + "    - Sub-sub item with [[miner].other_field](#miner-other_field)" + )); // cross-struct field link with indentation + } +} diff --git a/docs/generated/configuration-reference.md b/docs/generated/configuration-reference.md new file mode 100644 index 0000000000..c89de47c04 --- /dev/null +++ b/docs/generated/configuration-reference.md @@ -0,0 +1,225 @@ +# Stacks Node Configuration Reference + +This document provides a comprehensive reference for all configuration options available in the Stacks node TOML configuration file. + +The configuration is automatically generated from the Rust source code documentation. + +## Table of Contents + +- [[initial_balances]](#initial_balances) +- [[event_observer]](#event_observer) +- [[connection_options]](#connection_options) +- [[fee_estimation]](#fee_estimation) +- [[burnchain]](#burnchain) +- [[node]](#node) +- [[miner]](#miner) + +## [initial_balances] + +| Parameter | Description | Default | +|-----------|-------------|----------| +| [address](#initial_balances-address) | The Stacks address to receive the initial STX balance.
Must be a valid "non-mainnet" Stacks address (e.g., "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2"). | No default. This field is required. | +| [amount](#initial_balances-amount) | The amount of microSTX to allocate to the address at node startup.
1 STX = 1,000,000 microSTX.

**Notes:**
- Units: microSTX. | No default. This field is required. | + + +## [event_observer] + +| Parameter | Description | Default | +|-----------|-------------|----------| +| [disable_retries](#event_observer-disable_retries) | Controls whether the node should retry sending event notifications if delivery fails or times out.

- If `false` (default): The node will attempt to deliver event notifications persistently.
If an attempt fails (due to network error, timeout, or a non-200 HTTP response), the event
payload is saved and retried indefinitely. This ensures that all events will eventually be
delivered. However, this can cause the node's block processing to stall if an observer is
down, or indefinitely fails to process the event.

- If `true`: The node will make only a single attempt to deliver each event notification.
If this single attempt fails for any reason, the event is discarded, and no further retries
will be made for that specific event.

**Notes:**
- **Warning:** Setting this to `true` can lead to missed events if the observer endpoint is temporarily unavailable or experiences issues. | `false` (retries are enabled) | +| [endpoint](#event_observer-endpoint) | URL endpoint (hostname and port) where event notifications will be sent via HTTP POST requests.

The node will automatically prepend `http://` to this endpoint and append the
specific event path (e.g., `/new_block`, `/new_mempool_tx`).
Therefore, this value should be specified as `hostname:port` (e.g., "localhost:3700").

This should point to a service capable of receiving and processing Stacks event data.

**Notes:**
- **Do NOT include the `http://` scheme in this configuration value.**

**Example:**
  endpoint = "localhost:3700"
| No default. This field is required. | +| [events_keys](#event_observer-events_keys) | List of event types that this observer is configured to receive.

Each string in the list specifies an event category or a specific event to subscribe to.
For an observer to receive any notifications, this list must contain at least one valid key.
Providing an invalid string that doesn't match any of the valid formats below will cause
the node to panic on startup when parsing the configuration.

All observers, regardless of their `events_keys` configuration, implicitly receive
payloads on the `/attachments/new` endpoint.

Valid Event Keys:
- `"*"`: Subscribes to a broad set of common events.
  - Events delivered to:
    - `/new_block`: For blocks containing transactions that generate STX, FT, NFT, or smart contract events.
    - `/new_microblocks`: For all new microblock streams. Note: Only until epoch 2.5.
    - `/new_mempool_tx`: For new mempool transactions.
    - `/drop_mempool_tx`: For dropped mempool transactions.
    - `/new_burn_block`: For new burnchain blocks.
  - Note: This key does NOT by itself subscribe to `/stackerdb_chunks` or `/proposal_response`.

- `"stx"`: Subscribes to STX token operation events (transfer, mint, burn, lock).
  - Events delivered to: `/new_block`, `/new_microblocks`.
  - Payload details: The "events" array in the delivered payloads will be filtered to include only STX-related events.

- `"memtx"`: Subscribes to new and dropped mempool transaction events.
  - Events delivered to: `/new_mempool_tx`, `/drop_mempool_tx`.

- `"burn_blocks"`: Subscribes to new burnchain block events.
  - Events delivered to: `/new_burn_block`.

- `"microblocks"`: Subscribes to new microblock stream events.
  - Events delivered to: `/new_microblocks`.
  - Payload details:
    - The "transactions" field will contain all transactions from the microblocks.
    - The "events" field will contain STX, FT, NFT, or specific smart contract events
*only if* this observer is also subscribed to those more specific event types
(e.g., via `"stx"`, `"*"`, a specific contract event key, or a specific asset identifier key).
  - Note: Only until epoch 2.5.

- `"stackerdb"`: Subscribes to StackerDB chunk update events.
  - Events delivered to: `/stackerdb_chunks`.

- `"block_proposal"`: Subscribes to block proposal response events (for Nakamoto consensus).
  - Events delivered to: `/proposal_response`.

- Smart Contract Event: Subscribes to a specific smart contract event.
  - Format: `"{contract_address}.{contract_name}::{event_name}"`
(e.g., `ST0000000000000000000000000000000000000000.my-contract::my-custom-event`)
  - Events delivered to: `/new_block`, `/new_microblocks`.
  - Payload details: The "events" array in the delivered payloads will be filtered for this specific event.

- Asset Identifier for FT/NFT Events: Subscribes to events (mint, burn, transfer) for a specific Fungible Token (FT) or Non-Fungible Token (NFT).
  - Format: `"{contract_address}.{contract_name}.{asset_name}"`
(e.g., for an FT: `ST0000000000000000000000000000000000000000.my-ft-contract.my-fungible-token`)
  - Events delivered to: `/new_block`, `/new_microblocks`.
  - Payload details: The "events" array in the delivered payloads will be filtered for events related to the specified asset.

**Notes:**
- For a more detailed documentation check the event-dispatcher docs in the `/docs` folder.

**Example:**
  events_keys = [
"burn_blocks",
"memtx",
"ST0000000000000000000000000000000000000000.my-contract::my-custom-event",
"ST0000000000000000000000000000000000000000.token-contract.my-ft"
]
| No default. This field is required. | +| [timeout_ms](#event_observer-timeout_ms) | Maximum duration (in milliseconds) to wait for the observer endpoint to respond.

When the node sends an event notification to this observer, it will wait at most this long
for a successful HTTP response (status code 200) before considering the request timed out.
If a timeout occurs and retries are enabled (see `EventObserverConfig::disable_retries`),
the request will be attempted again according to the retry strategy.

**Notes:**
- Units: milliseconds. | `1_000` (ms, 1 second) | + + +## [connection_options] + +| Parameter | Description | Default | +|-----------|-------------|----------| +| [auth_token](#connection_options-auth_token) | HTTP auth password to use when communicating with stacks-signer binary.

This token is used in the `Authorization` header for certain requests.
Primarily, it secures the communication channel between this node and a connected
`stacks-signer` instance.

It is also used to authenticate requests to `/v2/blocks?broadcast=1`.

**Notes:**
- **Requirement:** This field **must** be configured if the node needs to receive block proposals from a configured `stacks-signer` event_observer via the `/v3/block_proposal` endpoint. The value must match the token configured on the signer. | `None` (authentication disabled for relevant endpoints) | +| [block_proposal_max_age_secs](#connection_options-block_proposal_max_age_secs) | Maximum age (in seconds) allowed for a block proposal received via the `/v3/block_proposal` RPC endpoint.

If a block proposal is received whose timestamp is older than
the current time minus this configured value, the node will reject the proposal
with an HTTP 422 (Unprocessable Entity) error, considering it too stale.
This prevents the node from spending resources validating outdated proposals.

**Notes:**
- Units: seconds. | `600` (seconds) | +| [connect_timeout](#connection_options-connect_timeout) | Maximum duration (in seconds) a connection attempt is allowed to remain in the connecting state.

This applies to both incoming P2P and HTTP connections. If a remote peer initiates a connection
but does not complete the connection process (e.g., handshake for P2P) within this time, the node
will consider it unresponsive and drop the connection attempt.

**Notes:**
- Units: seconds. | `10` (seconds) | +| [disable_block_download](#connection_options-disable_block_download) | If true, completely disables the block download state machine.

The node will not attempt to download Stacks blocks (neither Nakamoto tenures nor
legacy blocks) from peers.

**Notes:**
- Intended for testing or specialized node configurations. | `false` | +| [disable_inbound_handshakes](#connection_options-disable_inbound_handshakes) | If true, prevents the node from processing initial handshake messages from new inbound P2P connections.

This effectively stops the node from establishing new authenticated inbound P2P sessions.
Outbound connections initiated by this node are unaffected.

**Notes:**
- Primarily intended for testing purposes. | `false` | +| [disable_inbound_walks](#connection_options-disable_inbound_walks) | If true, disables the neighbor discovery mechanism from starting walks from inbound peers.
Walks will only initiate from seed/bootstrap peers, outbound connections, or pingbacks.

**Notes:**
- Primarily intended for testing or specific network debugging scenarios. | `false` | +| [dns_timeout](#connection_options-dns_timeout) | Maximum time (in milliseconds) to wait for a DNS query to resolve.

When the node needs to resolve a hostname (e.g., from a peer's advertised `data_url`
or an Atlas attachment URL) into an IP address, it initiates a DNS lookup.
This setting defines the maximum duration the node will wait for the DNS server
to respond before considering the lookup timed out.

**Notes:**
- Units: milliseconds. | `15_000` (ms, 15 seconds). | +| [force_disconnect_interval](#connection_options-force_disconnect_interval) | Fault injection setting for testing purposes. Interval (in seconds) for forced disconnection of all peers.

If set to a positive value, the node will periodically disconnect all of its P2P peers at
roughly this interval. This simulates network churn or partitioning for testing node resilience.

**Notes:**
- The code enforcing this behavior is conditionally compiled using `cfg!(test)` and is only active during test runs.
- This setting has no effect in standard production builds.
- Units: seconds. | `None` (feature disabled) | +| [handshake_timeout](#connection_options-handshake_timeout) | Maximum duration (in seconds) a P2P peer is allowed after connecting before completing the handshake.

If a P2P peer connects successfully but fails to send the necessary handshake messages
within this time, the node will consider it unresponsive and drop the connection.

**Notes:**
- Units: seconds. | `5` (seconds) | +| [heartbeat](#connection_options-heartbeat) | Interval (in seconds) at which this node expects to send or receive P2P keep-alive messages.

During the P2P handshake, this node advertises this configured `heartbeat` value to its peers.
Each peer uses the other's advertised heartbeat interval (plus a timeout margin) to monitor
responsiveness and detect potential disconnections. This node also uses its own configured
value to proactively send Ping messages if the connection would otherwise be idle, helping to
keep it active.

**Notes:**
- Units: seconds. | `3_600` (seconds, 1 hour) | +| [idle_timeout](#connection_options-idle_timeout) | Maximum idle time (in seconds) for HTTP connections.

This applies only to HTTP connections. It defines the maximum allowed time since the
last response was sent by the node to the client. An HTTP connection is dropped if
both this `idle_timeout` and the general [timeout](#connection_options-timeout) (time since last
request received) are exceeded.

**Notes:**
- Units: seconds. | `15` (seconds) | +| [inbox_maxlen](#connection_options-inbox_maxlen) | Maximum number of messages allowed in the per-connection incoming buffer.
The limits apply individually to each established connection (both P2P and HTTP). | `100` | +| [inv_reward_cycles](#connection_options-inv_reward_cycles) | Lookback depth (in PoX reward cycles) for Nakamoto inventory synchronization requests.

When initiating an inventory sync cycle with a peer, the node requests data starting
from `inv_reward_cycles` cycles before the current target reward cycle. This determines
how much historical inventory information is requested in each sync attempt.

**Notes:**
- Units: PoX reward cycles. | - `3` if [[burnchain].mode](#burnchain-mode) is `"mainnet"`
- `6` otherwise | +| [inv_sync_interval](#connection_options-inv_sync_interval) | Minimum interval (in seconds) between initiating inventory synchronization attempts with the same peer.

Acts as a per-peer cooldown to throttle sync requests. A new sync cycle with a peer generally
starts only after this interval has passed since the previous attempt began *and* the previous
cycle is considered complete.

**Notes:**
- Units: seconds. | `45` (seconds) | +| [log_neighbors_freq](#connection_options-log_neighbors_freq) | Frequency (in milliseconds) for logging the current P2P neighbor list at the DEBUG level.

If set to a non-zero value, the node will periodically log details about its currently
established P2P connections (neighbors). Setting this to 0 disables this periodic logging.

**Notes:**
- Units: milliseconds. | `60_000` (ms, 1 minute). | +| [max_http_clients](#connection_options-max_http_clients) | Maximum total number of allowed concurrent HTTP connections.

This limits the total number of simultaneous connections the node's RPC/HTTP server
will accept. If this limit is reached, new incoming HTTP connection attempts
will be rejected. | `1000` | +| [max_inflight_attachments](#connection_options-max_inflight_attachments) | Maximum number of concurrent Atlas data attachment download requests allowed.

This limits how many separate download requests for Atlas data attachments
can be active simultaneously. Helps manage network resources when fetching
potentially large attachment data. | `6` | +| [max_inflight_blocks](#connection_options-max_inflight_blocks) | Maximum number of concurrent Nakamoto block download requests allowed.

This limits how many separate block download processes for Nakamoto tenures
(both confirmed and unconfirmed) can be active simultaneously. Helps manage
network bandwidth and processing load during chain synchronization. | `6` | +| [max_sockets](#connection_options-max_sockets) | Maximum total number of concurrent network sockets the node is allowed to manage.

This limit applies globally to all types of sockets handled by the node's networking layer,
including listening sockets (P2P and RPC/HTTP), established P2P connections (inbound/outbound),
and established HTTP connections.
It serves as a hard limit to prevent the node from exhausting operating system
resources related to socket descriptors. | `800` | +| [maximum_call_argument_size](#connection_options-maximum_call_argument_size) | Maximum size (in bytes) of the HTTP request body for read-only contract calls.

This limit is enforced on the `Content-Length` of incoming requests to the
`/v2/contracts/call-read-only/...` RPC endpoint. It prevents excessively large
request bodies, which might contain numerous or very large hex-encoded function arguments,
from overwhelming the node.

**Notes:**
- Calculated as 20 * `clarity::vm::types::BOUND_VALUE_SERIALIZATION_HEX`.
- Units: bytes. | `83_886_080` (bytes, 80 MiB) | +| [num_clients](#connection_options-num_clients) | Maximum number of allowed concurrent inbound P2P connections.

This acts as a hard limit. If the node already has this many active inbound P2P
connections, any new incoming P2P connection attempts will be rejected.
Outbound P2P connections initiated by this node are not counted against this limit. | `750` | +| [num_neighbors](#connection_options-num_neighbors) | Target number of peers for StackerDB replication.

Sets the maximum number of potential replication target peers requested from the
StackerDB control contract (`get-replication-targets`) when configuring a replica.

Note: Formerly (pre-Epoch 3.0), this also controlled the target peer count for
inventory synchronization. | `32` | +| [outbox_maxlen](#connection_options-outbox_maxlen) | Maximum number of messages allowed in the per-connection outgoing buffer.
The limit applies individually to each established connection (both P2P and HTTP). | `100` | +| [private_key_lifetime](#connection_options-private_key_lifetime) | Validity duration (in number of bitcoin blocks) for the node's P2P session private key.

The node uses a temporary private key for signing P2P messages. This key has an associated
expiry bitcoin block height stored in the peer database. When the current bitcoin height
reaches or exceeds the key's expiry height, the node automatically generates a new random
private key.
The expiry block height for this new key is calculated by adding the configured
[private_key_lifetime](#connection_options-private_key_lifetime) (in blocks) to the previous key's expiry block height.
The node then re-handshakes with peers to transition to the new key.
This provides periodic key rotation for P2P communication.

**Notes:**
- Units: bitcoin blocks. | `9223372036854775807` (i64::MAX, effectively infinite, disabling automatic re-keying). | +| [private_neighbors](#connection_options-private_neighbors) | Whether to allow connections and interactions with peers having private IP addresses.

If `false` (default), the node will generally:
- Reject incoming connection attempts from peers with private IPs.
- Avoid initiating connections to peers known to have private IPs.
- Ignore peers with private IPs during neighbor discovery (walks).
- Skip querying peers with private IPs for mempool or StackerDB data.
- Filter out peers with private IPs from API responses listing potential peers.

Setting this to `true` disables these restrictions, which can be useful for local testing
environments or fully private network deployments. | `false` | +| [public_ip_address](#connection_options-public_ip_address) | The public IPv4 address and port (e.g., "203.0.113.42:20444") to advertise to other nodes.

If this option is not set (`None`), the node will attempt to automatically discover its
public IP address. | `None` (triggers automatic discovery attempt) | +| [read_only_call_limit_read_count](#connection_options-read_only_call_limit_read_count) | Maximum number of distinct read operations from Clarity data space allowed during a read-only call. | `30` | +| [read_only_call_limit_read_length](#connection_options-read_only_call_limit_read_length) | Maximum total size (in bytes) of data allowed to be read from Clarity data space (variables, maps)
during a read-only call.

**Notes:**
- Units: bytes. | `100_000` (bytes, 100 KB). | +| [read_only_call_limit_runtime](#connection_options-read_only_call_limit_runtime) | Runtime cost limit for an individual read-only function call. This represents
computation effort within the Clarity VM.
(See SIP-006: https://github.com/stacksgov/sips/blob/main/sips/sip-006/sip-006-runtime-cost-assessment.md)

**Notes:**
- Units: Clarity VM cost units. | `1_000_000_000` (units) | +| [read_only_call_limit_write_count](#connection_options-read_only_call_limit_write_count) | Maximum number of distinct write operations allowed during a read-only call.

**Notes:**
- This limit is effectively forced to 0 by the API handler, ensuring read-only behavior.
- Configuring a non-zero value has no effect on read-only call execution. | `0` | +| [read_only_call_limit_write_length](#connection_options-read_only_call_limit_write_length) | Maximum total size (in bytes) of data allowed to be written during a read-only call.

**Notes:**
- This limit is effectively forced to 0 by the API handler, ensuring read-only behavior.
- Configuring a non-zero value has no effect on read-only call execution.
- Units: bytes. | `0` | +| [reject_blocks_pushed](#connection_options-reject_blocks_pushed) | Controls whether the node accepts Nakamoto blocks pushed proactively by peers.

- If `true`: Pushed blocks are ignored (logged at DEBUG and discarded). The node will
still process blocks that it actively downloads.
- If `false`: Both pushed blocks and actively downloaded blocks are processed. | `false` | +| [soft_max_clients_per_host](#connection_options-soft_max_clients_per_host) | Soft limit on the number of inbound P2P connections allowed per host IP address.

During inbound connection pruning (when total inbound connections > [soft_num_clients](#connection_options-soft_num_clients)),
the node checks if any single IP address has more connections than this limit.
If so, it preferentially prunes the newest connections originating from that
specific IP address until its count is reduced to this limit.
This prevents a single host from dominating the node's inbound connection capacity. | `4` | +| [soft_max_neighbors_per_org](#connection_options-soft_max_neighbors_per_org) | Soft limit on the number of outbound P2P connections per network organization (ASN).

During connection pruning (when total outbound connections > [soft_num_neighbors](#connection_options-soft_num_neighbors)),
the node checks if any single network organization (identified by ASN) has more
outbound connections than this limit. If so, it preferentially prunes the least
healthy/newest connections from that overrepresented organization until its count
is reduced to this limit or the total outbound count reaches
[soft_num_neighbors](#connection_options-soft_num_neighbors). This encourages connection diversity across
different network providers. | `32` | +| [soft_num_clients](#connection_options-soft_num_clients) | Soft limit threshold for triggering inbound P2P connection pruning.

If the total number of currently active inbound P2P connections exceeds this value,
the node will activate pruning logic to reduce the count, typically by applying
per-host limits (see [soft_max_clients_per_host](#connection_options-soft_max_clients_per_host)).
This helps manage the overall load from inbound peers. | `750` | +| [soft_num_neighbors](#connection_options-soft_num_neighbors) | Target number of outbound P2P connections the node aims to maintain.

The connection pruning logic only activates if the current number of established
outbound P2P connections exceeds this value. Pruning aims to reduce the connection
count back down to this target, ensuring the node maintains a baseline number
of outbound peers for network connectivity. | `16` | +| [stackerdb_hint_replicas](#connection_options-stackerdb_hint_replicas) | Static list of preferred replica peers for specific StackerDB contracts, provided as a JSON string.

This allows manually specifying known peers to use for replicating particular StackerDBs,
potentially overriding or supplementing the peers discovered via the StackerDB's control contract.

Format: The configuration value must be a TOML string containing valid JSON.
The JSON structure must be an array of tuples, where each tuple pairs a contract identifier
with a list of preferred neighbor addresses:
`[[ContractIdentifier, [NeighborAddress, ...]], ...]`

1. `ContractIdentifier`: A JSON object representing the `QualifiedContractIdentifier`.
It must have the specific structure:
`{"issuer": [version_byte, [byte_array_20]], "name": "contract-name"}`

2. `NeighborAddress`: A JSON object specifying the peer details:
`{"ip": "...", "port": ..., "public_key_hash": "..."}`

**Notes:**
- Use this option with caution, primarily for advanced testing or bootstrapping.

**Example:**
  stackerdb_hint_replicas = '''
[
[
{
"issuer": [1, [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]],
"name": "my-contract"
},
[
{
"ip": "192.0.2.1",
"port": 20444,
"public_key_hash": "0102030405060708090a0b0c0d0e0f1011121314"
}
]
]
]
'''
| `None` (no hints provided) | +| [timeout](#connection_options-timeout) | General communication timeout (in seconds).

- For HTTP connections: Governs two timeout aspects:
  - Server-side: Defines the maximum allowed time since the last request was received from a client.
An idle connection is dropped if both this timeout and [idle_timeout](#connection_options-idle_timeout) are exceeded.
  - Client-side: Sets the timeout duration (TTL) for outgoing HTTP requests initiated by the node itself.
- For P2P connections: Used as the specific timeout for NAT punch-through requests.

**Notes:**
- Units: seconds. | `15` (seconds) | +| [walk_interval](#connection_options-walk_interval) | Minimum interval (in seconds) between the start of consecutive neighbor discovery walks.

The node periodically performs "neighbor walks" to discover new peers and maintain
an up-to-date view of the P2P network topology. This setting controls how frequently
these walks can be initiated, preventing excessive network traffic and processing.

**Notes:**
- Units: seconds. | `60` (seconds) | +| [walk_seed_probability](#connection_options-walk_seed_probability) | Probability (0.0 to 1.0) of forcing a neighbor walk to start from a seed/bootstrap peer.

This probability applies only when the node is not in Initial Block Download (IBD)
and is already connected to at least one seed/bootstrap peer.
Normally, in this situation, the walk would start from a random inbound or outbound peer.
However, with this probability, the walk is forced to start from a seed peer instead.
This helps ensure the node periodically re-establishes its network view from trusted entry points. | `0.1` (10%) | +| ~~[antientropy_public](#connection_options-antientropy_public)~~ | Controls whether a node with public inbound connections should still push blocks, even if not NAT'ed.

In the Stacks 2.x anti-entropy logic, if a node detected it had inbound connections
from public IPs (suggesting it wasn't behind NAT) and this flag was set to `false`,
it would refrain from proactively pushing blocks and microblocks to peers.
The assumption was that publicly reachable nodes should primarily serve downloads.
If set to `true` (default), the node would push data regardless of its perceived reachability.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. | `true` | +| ~~[antientropy_retry](#connection_options-antientropy_retry)~~ | Minimum interval (in seconds) between attempts to run the Epoch 2.x anti-entropy data push mechanism.

The Stacks 2.x anti-entropy protocol involves the node proactively pushing its known
Stacks blocks and microblocks to peers. This value specifies the cooldown period for this operation.
This prevents the node from excessively attempting to push data to its peers.

**Notes:**
- Units: seconds.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. | `3_600` (seconds, 1 hour) | +| ~~[download_interval](#connection_options-download_interval)~~ | Minimum interval (in seconds) between consecutive block download scans in epoch 2.x.

In the pre-Nakamoto block download logic, if a full scan for blocks completed without
finding any new blocks to download, and if the known peer inventories had not changed,
the node would wait at least this duration before initiating the next download scan.
This throttled the downloader when the node was likely already synchronized.

**Notes:**
- Units: seconds.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. | `10` (seconds) | +| ~~[full_inv_sync_interval](#connection_options-full_inv_sync_interval)~~ | Deprecated: it does not have any effect on the node's behavior.

**⚠️ DEPRECATED:** It does not have any effect on the node's behavior. | `None` | +| ~~[max_clients_per_host](#connection_options-max_clients_per_host)~~ | Maximum number of inbound P2P connections per host we permit.

**⚠️ DEPRECATED:** It does not have any effect on the node's behavior. | `4` | +| ~~[max_neighbors_per_host](#connection_options-max_neighbors_per_host)~~ | Maximum number of neighbors per host we permit.

**⚠️ DEPRECATED:** It does not have any effect on the node's behavior. | `1` | +| ~~[soft_max_neighbors_per_host](#connection_options-soft_max_neighbors_per_host)~~ | Soft limit on the number of neighbors per host we permit.

**⚠️ DEPRECATED:** It does not have any effect on the node's behavior. | `1` | +
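
To tie a few of these options together, here is a minimal `[connection_options]` sketch; the keys come from the table above, while the values are illustrative assumptions rather than recommendations:

```toml
[connection_options]
# Advertise a fixed public endpoint instead of relying on automatic discovery.
public_ip_address = "203.0.113.42:20444"
# Send P2P keep-alives every 15 minutes instead of the 1-hour default.
heartbeat = 900
# Allow peers with private IPs (useful only for local test networks).
private_neighbors = true
```

## [fee_estimation]

| Parameter | Description | Default |
|-----------|-------------|----------|
| [cost_estimator](#fee_estimation-cost_estimator) | Specifies the name of the cost estimator to use.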
This controls how the node estimates computational costs for transactions.

Accepted values:
- `"NaivePessimistic"`: The only currently supported cost estimator. This estimator
tracks the highest observed costs for each operation type and uses the average
of the top 10 values as its estimate, providing a conservative approach to
cost estimation.

**Notes:**
- If [disabled](#fee_estimation-disabled) is `true`, the node will use the default unit cost estimator. | `"NaivePessimistic"` | +| [cost_metric](#fee_estimation-cost_metric) | Specifies the name of the cost metric to use.
This controls how the node measures and compares transaction costs.

Accepted values:
- `"ProportionDotProduct"`: The only currently supported cost metric. This metric
computes a weighted sum of cost dimensions (runtime, read/write counts, etc.)
proportional to how much of the block limit they consume.

**Notes:**
- If [disabled](#fee_estimation-disabled) is `true`, the node will use the default unit cost metric. | `"ProportionDotProduct"` | +| [disabled](#fee_estimation-disabled) | If `true`, all fee and cost estimation features are disabled.
The node will use unit estimators and metrics, which effectively
provide no actual estimation capabilities.

When disabled, the node will:
1. Not track historical transaction costs or fee rates
2. Return simple unit values for costs for any transaction, regardless of its actual complexity
3. Be unable to provide meaningful fee estimates for API requests (always returns an error)
4. Consider only raw transaction fees (not fees per cost unit) when assembling blocks

This setting takes precedence over individual estimator/metric configurations.

**Notes:**
- When `true`, the values for [cost_estimator](#fee_estimation-cost_estimator) , [fee_estimator](#fee_estimation-fee_estimator) , and [cost_metric](#fee_estimation-cost_metric) are ignored. | `false` | +| [fee_estimator](#fee_estimation-fee_estimator) | Specifies the name of the fee estimator to use.
This controls how the node calculates appropriate transaction fees based on costs.

Accepted values:
- `"ScalarFeeRate"`: Simple multiplier-based fee estimation that uses percentiles
(5th, 50th, and 95th) of observed fee rates from recent blocks.
- `"FuzzedWeightedMedianFeeRate"`: Fee estimation that adds controlled randomness
to a weighted median rate calculator. This helps prevent fee optimization attacks
by adding unpredictability to fee estimates while still maintaining accuracy.

**Notes:**
- If [disabled](#fee_estimation-disabled) is `true`, the node will use the default unit fee estimator. | `"ScalarFeeRate"` | +| [fee_rate_fuzzer_fraction](#fee_estimation-fee_rate_fuzzer_fraction) | Specifies the fraction of random noise to add if using the `FuzzedWeightedMedianFeeRate` fee estimator.
This value should be in the range [0, 1], representing a percentage of the base fee rate.

For example, with a value of 0.1 (10%), fee rate estimates will have random noise added
within the range of ±10% of the original estimate. This randomization makes it difficult
for users to precisely optimize their fees while still providing reasonable estimates.

**Notes:**
- This setting is only relevant when [fee_estimator](#fee_estimation-fee_estimator) is set to `"FuzzedWeightedMedianFeeRate"`. | `0.1` (10%) | +| [fee_rate_window_size](#fee_estimation-fee_rate_window_size) | Specifies the window size for the `WeightedMedianFeeRateEstimator`.
This determines how many historical fee rate data points are considered
when calculating the median fee rate.

The window size controls how quickly the fee estimator responds to changing
network conditions. A smaller window size (e.g., 5) makes the estimator more
responsive to recent fee rate changes but potentially more volatile. A larger
window size (e.g., 10) produces more stable estimates but may be slower to
adapt to rapid network changes.

**Notes:**
- This setting is primarily relevant when [fee_estimator](#fee_estimation-fee_estimator) is set to `"FuzzedWeightedMedianFeeRate"`, as it's used by the underlying `WeightedMedianFeeRateEstimator`. | `5` | +| [log_error](#fee_estimation-log_error) | If `true`, errors encountered during cost or fee estimation will be logged.
This can help diagnose issues with the fee estimation subsystem. | `false` | +
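
As a worked example, a minimal `[fee_estimation]` sketch using the options above; the values are illustrative assumptions, not tuning advice:

```toml
[fee_estimation]
# Use the fuzzed weighted-median estimator with up to ±5% noise.
fee_estimator = "FuzzedWeightedMedianFeeRate"
fee_rate_fuzzer_fraction = 0.05
# A larger window gives more stable (but slower-adapting) estimates.
fee_rate_window_size = 10
# Surface estimation errors in the logs.
log_error = true
```

## [burnchain]

| Parameter | Description | Default |
|-----------|-------------|----------|
| [block_commit_tx_estimated_size](#burnchain-block_commit_tx_estimated_size) | Estimated size (in virtual bytes) of a block commit transaction on bitcoin.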
Used for fee calculation in mining logic by multiplying with the fee rate
[satoshis_per_byte](#burnchain-satoshis_per_byte).

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`.
- Units: virtual bytes. | `380` (virtual bytes) | +| [burn_fee_cap](#burnchain-burn_fee_cap) | The maximum amount (in sats) of "burn commitment" to broadcast for the next block's leader election.
Acts as a safety cap to limit the maximum amount spent on mining.
It serves as both the target fee and a fallback if dynamic fee calculations fail or cannot be performed.

This setting can be hot-reloaded from the config file, allowing adjustment without restarting.

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`.
- Units: satoshis. | `20_000` (satoshis) | +| [chain](#burnchain-chain) | The underlying blockchain used for Proof-of-Transfer.

**Notes:**
- Currently, only `"bitcoin"` is supported. | `"bitcoin"` | +| [chain_id](#burnchain-chain_id) | The network-specific identifier used in P2P communication and database initialization.

**Notes:**
- **Warning:** Do not modify this unless you really know what you're doing.
- This is intended strictly for testing purposes. | - `0x00000001` if [mode](#burnchain-mode) is `"mainnet"`
- `0x80000000` otherwise | +| [commit_anchor_block_within](#burnchain-commit_anchor_block_within) | Specifies a mandatory wait period (in milliseconds) after receiving a burnchain tip
before the node attempts to build the anchored block for the new tenure.
This duration effectively schedules the start of the block-building
process relative to the tip's arrival time.

**Notes:**
- This is intended strictly for testing purposes.
- Units: milliseconds. | `5_000` (milliseconds) | +| [epochs](#burnchain-epochs) | Custom override for the definitions of Stacks epochs (start/end burnchain heights, consensus rules).
This setting allows testing specific epoch transitions or custom consensus rules by defining exactly
when each epoch starts on bitcoin.

Epochs define distinct protocol rule sets (consensus rules, execution costs, capabilities).
When configured, the list must include all epochs sequentially from "1.0" up to the
highest desired epoch, without skipping any intermediate ones.
Valid `epoch_name` values currently include:
`"1.0"`, `"2.0"`, `"2.05"`, `"2.1"`, `"2.2"`, `"2.3"`, `"2.4"`, `"2.5"`, `"3.0"`, `"3.1"`.

**Validation Rules:**
- Epochs must be provided in strict chronological order (`1.0`, `2.0`, `2.05`...).
- `start_height` values must be non-decreasing across the list.
- Epoch `"1.0"` must have `start_height = 0`.
- The number of defined epochs cannot exceed the maximum supported by the node software.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes.
- Configured as a list `[[burnchain.epochs]]` in TOML, each with `epoch_name` (string) and `start_height` (integer Bitcoin block height).

**Example:**
  [[burnchain.epochs]]
epoch_name = "2.1"
start_height = 150

[[burnchain.epochs]]
epoch_name = "2.2"
start_height = 200
| `None` (uses the standard epoch definitions for the selected [mode](#burnchain-mode)) | +| [fault_injection_burnchain_block_delay](#burnchain-fault_injection_burnchain_block_delay) | Fault injection setting for testing. Introduces an artificial delay (in milliseconds)
before processing each burnchain block download. Simulates a slow burnchain connection.

**Notes:**
- This is intended strictly for testing purposes.
- Units: milliseconds. | `0` (no delay) | +| [first_burn_block_hash](#burnchain-first_burn_block_hash) | Overrides the default starting block hash of the burnchain.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes.
- Should be used together with [first_burn_block_height](#burnchain-first_burn_block_height) and [first_burn_block_timestamp](#burnchain-first_burn_block_timestamp) for proper operation. | `None` (uses the burnchain's default starting block hash) | +| [first_burn_block_height](#burnchain-first_burn_block_height) | Overrides the default starting bitcoin block height for the node.
Allows starting synchronization from a specific historical point in test environments.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes.
- Should be used together with [first_burn_block_timestamp](#burnchain-first_burn_block_timestamp) and [first_burn_block_hash](#burnchain-first_burn_block_hash) for proper operation. | `None` (uses the burnchain's default starting height for the mode) | +| [first_burn_block_timestamp](#burnchain-first_burn_block_timestamp) | Overrides the default starting block timestamp of the burnchain.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes.
- Should be used together with [first_burn_block_height](#burnchain-first_burn_block_height) and [first_burn_block_hash](#burnchain-first_burn_block_hash) for proper operation. | `None` (uses the burnchain's default starting timestamp) | +| [leader_key_tx_estimated_size](#burnchain-leader_key_tx_estimated_size) | Estimated size (in virtual bytes) of a leader key registration transaction on bitcoin.
Used for fee calculation in mining logic by multiplying with the fee rate
[satoshis_per_byte](#burnchain-satoshis_per_byte).

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`.
- Units: virtual bytes. | `290` (virtual bytes) | +| [local_mining_public_key](#burnchain-local_mining_public_key) | The public key associated with the local mining address for the underlying Bitcoin regtest node.
Provided as a hex string representing an uncompressed public key.

It is primarily used in modes that rely on a controlled Bitcoin regtest backend
(e.g., "helium", "mocknet", "neon") where the Stacks node itself needs to
instruct the Bitcoin node to generate blocks.

The key is used to derive the Bitcoin address that receives the coinbase rewards
when generating blocks on the regtest network.

**Notes:**
- Mandatory if [mode](#burnchain-mode) is "helium".
- This is intended strictly for testing purposes. | `None` | +| [magic_bytes](#burnchain-magic_bytes) | The network "magic bytes" used to identify packets for the specific bitcoin network
instance (e.g., mainnet, testnet, regtest). Must match the magic bytes of the connected
bitcoin node.

These two-byte identifiers help ensure that nodes only connect to peers on the same
network type. Common values include:
- "X2" for mainnet
- "T2" for testnet (xenon)
- Other values for specific test networks

Configured as a 2-character ASCII string (e.g., "X2" for mainnet). | - `"T2"` if [mode](#burnchain-mode) is `"xenon"`
- `"X2"` otherwise | +| [max_rbf](#burnchain-max_rbf) | Maximum fee rate multiplier allowed when using Replace-By-Fee (RBF) for bitcoin transactions.
Expressed as a percentage of the original [satoshis_per_byte](#burnchain-satoshis_per_byte) rate (e.g.,
150 means the fee rate can be increased up to 1.5x). Used in mining logic for RBF decisions
to cap the replacement fee rate.

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`. | `150` (%) | +| [max_unspent_utxos](#burnchain-max_unspent_utxos) | The maximum number of unspent transaction outputs (UTXOs) to request from the bitcoin node.

This value is passed as the `maximumCount` parameter to the bitcoin node. It helps manage
response size and processing load, particularly relevant for miners querying for available
UTXOs to fund operations like block commits or leader key registrations.

Setting this limit too high might lead to performance issues or timeouts when querying
nodes with a very large number of UTXOs. Conversely, setting it too low might prevent
the miner from finding enough UTXOs in a single query to meet the required funding amount
for a transaction, even if sufficient funds exist across more UTXOs not returned by the limited query.

**Notes:**
- This value must be `<= 1024`.
- Only relevant if [[node].miner](#node-miner) is `true`. | `1024` | +| [mode](#burnchain-mode) | The operational mode or network profile for the Stacks node.
This setting determines network parameters (like chain ID, peer version),
default configurations, genesis block definitions, and overall node behavior.

Supported values:
- `"mainnet"` → mainnet
- `"xenon"` → testnet
- `"mocknet"` → regtest
- `"helium"` → regtest
- `"neon"` → regtest
- `"argon"` → regtest
- `"krypton"` → regtest
- `"nakamoto-neon"` → regtest | `"mocknet"` | +| [password](#burnchain-password) | The password for authenticating with the bitcoin node's RPC interface.
Required if the bitcoin node requires RPC authentication.

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`. | `None` | +| [peer_host](#burnchain-peer_host) | The hostname or IP address of the bitcoin node peer.

This field is required for all node configurations as it specifies where to find the underlying
bitcoin node to interact with for PoX operations, block validation, and mining. | `"0.0.0.0"` | +| [peer_port](#burnchain-peer_port) | The P2P network port of the bitcoin node specified by [peer_host](#burnchain-peer_host). | `8333` | +| [peer_version](#burnchain-peer_version) | The peer protocol version number used in P2P communication.
This parameter cannot be set via the configuration file.

**Notes:**
- **Warning:** Do not modify this unless you really know what you're doing. | - `402_653_196` if [mode](#burnchain-mode) is `"mainnet"`
- `4_207_599_116` otherwise | +| [poll_time_secs](#burnchain-poll_time_secs) | The interval, in seconds, at which the node polls the bitcoin node for new blocks and state updates.

The default value of 10 seconds is mainly intended for testing purposes.
It's suggested to set this to a higher value for mainnet, e.g., 300 seconds (5 minutes).

**Notes:**
- Units: seconds. | `10` (seconds) | +| [pox_2_activation](#burnchain-pox_2_activation) | Sets a custom burnchain height for PoX-2 activation (for testing).

This affects two key transitions:
1. The block height at which PoX v1 lockups are automatically unlocked.
2. The block height from which PoX reward set calculations switch to PoX v2 rules.

**Behavior:**
- This value directly sets the auto unlock height for PoX v1 lockups before transition to PoX v2.
This also defines the burn height at which PoX reward sets are calculated using PoX v2 rather than v1.
- If custom [epochs](#burnchain-epochs) are provided:
  - Epoch 2.1's start height is validated to be ≤ this value.
  - However, the height specified in `epochs` for Epoch 2.1 takes precedence.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes. | `None` | +| [pox_prepare_length](#burnchain-pox_prepare_length) | Overrides the length (in bitcoin blocks) of the PoX prepare phase.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes.
- Units: bitcoin blocks. | `None` (uses the standard prepare phase length for the mode) | +| [pox_reward_length](#burnchain-pox_reward_length) | Overrides the length (in bitcoin blocks) of the PoX reward cycle.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes.
- Units: bitcoin blocks. | `None` (uses the standard reward cycle length for the mode) | +| [process_exit_at_block_height](#burnchain-process_exit_at_block_height) | Optional bitcoin block height at which the Stacks node process should gracefully exit.
When bitcoin reaches this height, the node logs a message and initiates a graceful shutdown.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes. | `None` | +| [rbf_fee_increment](#burnchain-rbf_fee_increment) | The incremental amount (in sats/vB) to add to the previous transaction's
fee rate for RBF bitcoin transactions.

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`.
- Units: satoshis per virtual byte. | `5` (satoshis per virtual byte) | +| [rpc_port](#burnchain-rpc_port) | The RPC port of the bitcoin node specified by [peer_host](#burnchain-peer_host). | `8332` | +| [rpc_ssl](#burnchain-rpc_ssl) | Flag indicating whether to use SSL/TLS when connecting to the bitcoin node's RPC interface. | `false` | +| [satoshis_per_byte](#burnchain-satoshis_per_byte) | The default fee rate in satoshis per virtual byte (sats/vB) to use when estimating fees for the bitcoin
transactions miners submit (like block commits or leader key registrations).

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`.
- Units: satoshis per virtual byte. | `50` (satoshis per virtual byte) | +| [timeout](#burnchain-timeout) | Timeout duration, in seconds, for RPC calls made to the bitcoin node.
Configures the timeout on the underlying HTTP client.

**Notes:**
- Units: seconds. | `60` (seconds) | +| [username](#burnchain-username) | The username for authenticating with the bitcoin node's RPC interface.
Required if the bitcoin node requires RPC authentication.

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`. | `None` | +| [wallet_name](#burnchain-wallet_name) | Specifies the name of the Bitcoin wallet to use within the connected bitcoin node.
Used to interact with a specific named wallet if the bitcoin node manages multiple wallets.

If the specified wallet doesn't exist, the node will attempt to create it via the
`createwallet` RPC call. This is particularly useful for miners who need to manage
separate wallets.

**Notes:**
- Primarily relevant for miners interacting with multi-wallet Bitcoin nodes. | `""` (empty string, implying the default wallet or no specific wallet needed) | +| ~~[affirmation_overrides](#burnchain-affirmation_overrides)~~ | Overrides for the burnchain block affirmation map for specific reward cycles.
Allows manually setting the miner affirmation ('p'resent/'n'ot-present/'a'bsent) map for a
given cycle, bypassing the map normally derived from sortition results.

Special defaults are added when [mode](#burnchain-mode) is "xenon", but config entries take precedence.
At startup, these overrides are written to the `BurnchainDB` (`overrides` table).

**Notes:**
- Primarily used for testing or recovering from network issues.
- Configured as a list `[[burnchain.affirmation_overrides]]` in TOML, each with `reward_cycle` (integer) and `affirmation` (string of 'p'/'n'/'a', length `reward_cycle - 1`).

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode.

**Example:**
  [[burnchain.affirmation_overrides]]
reward_cycle = 413
affirmation = "pna..." # Must be 412 chars long
| Empty map | +| ~~[ast_precheck_size_height](#burnchain-ast_precheck_size_height)~~ | Override for the burnchain height activating stricter AST size checks pre-epoch 3.0 for testing purposes.

Used pre-epoch 3.0 to control activation before it became standard (at burn height `752000`).
Ignored in standard production builds as the underlying mechanism is disabled unless the `testing`
feature is active.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. | `None` | +| ~~[sunset_end](#burnchain-sunset_end)~~ | Overrides the bitcoin height, non-inclusive, at which the PoX sunset period ends in epochs before 2.1.
After this height, Stacking rewards are disabled completely. This parameter works together
with `sunset_start` to define the full sunset transition period for PoX.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes.

**⚠️ DEPRECATED:** The sunset phase was removed in Epoch 2.1. This parameter can still be used for
testing purposes for epochs before 2.1. | `None` (uses the standard sunset end height for the mode) | +| ~~[sunset_start](#burnchain-sunset_start)~~ | Overrides the bitcoin height at which the PoX sunset period begins in epochs before 2.1.
The sunset period represents a planned phase-out of the PoX mechanism. During this period,
stacking rewards gradually decrease, eventually ceasing entirely. This parameter allows
testing the PoX sunset transition by explicitly setting its start height.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes.

**⚠️ DEPRECATED:** The sunset phase was removed in Epoch 2.1. This parameter can still be used for
testing purposes for epochs before 2.1. | `None` (uses the standard sunset start height for the mode) | +
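
Bringing several of these options together, a minimal mainnet-oriented `[burnchain]` sketch follows; the host and credentials are placeholder assumptions for a locally reachable bitcoind:

```toml
[burnchain]
mode = "mainnet"
# Where to reach the bitcoin node (P2P and RPC ports keep their defaults, 8333 and 8332).
peer_host = "127.0.0.1"
username = "bitcoinrpcuser"
password = "bitcoinrpcpass"
# Poll bitcoind every 5 minutes, as suggested above for mainnet.
poll_time_secs = 300
```

## [node]

| Parameter | Description | Default |
|-----------|-------------|----------|
| [always_use_affirmation_maps](#node-always_use_affirmation_maps) | Controls if Stacks Epoch 2.1+ affirmation map logic should be applied even before Epoch 2.1.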
- If `true` (default), the node consistently uses the newer (Epoch 2.1) rules for PoX anchor block
validation and affirmation-based reorg handling, even in earlier epochs.
- If `false`, the node strictly follows the rules defined for the specific epoch it is currently
processing, only applying 2.1+ logic from Epoch 2.1 onwards.
Differences in this setting between nodes prior to Epoch 2.1 could lead to consensus forks. | `true` | +| [assume_present_anchor_blocks](#node-assume_present_anchor_blocks) | Controls if the node must strictly wait for any PoX anchor block selected by the core consensus mechanism.
- If `true`: Halts burnchain processing immediately whenever a selected anchor block is missing locally
(`SelectedAndUnknown` status), regardless of affirmation status. This is always true in Nakamoto (Epoch 3.0+)
and runs *before* affirmation checks.
- If `false` (primarily for testing): Skips this immediate halt, allowing processing to proceed to
affirmation map checks.
Normal operation requires this to be `true`; setting to `false` will likely break consensus adherence.

**Notes:**
- This parameter cannot be set via the configuration file; it must be modified programmatically. | `true` | +| [bootstrap_node](#node-bootstrap_node) | A list of initial peer nodes used to bootstrap connections into the Stacks P2P network.
Peers are specified in a configuration file as comma-separated strings in the
format `"PUBKEY@IP:PORT"` or `"PUBKEY@HOSTNAME:PORT"`. DNS hostnames are resolved
during configuration loading.

**Example:**
  bootstrap_node = "029266faff4c8e0ca4f934f34996a96af481df94a89b0c9bd515f3536a95682ddc@seed.testnet.hiro.so:30444,02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a605923893@seed.mainnet.hiro.so:20444"
| `[]` (empty vector) | +| [chain_liveness_poll_time_secs](#node-chain_liveness_poll_time_secs) | The polling interval, in seconds, for the background thread that monitors chain liveness.
This thread periodically wakes up the main coordinator to check for chain progress or
other conditions requiring action.

**Notes:**
- Units: seconds. | `300` (seconds, 5 minutes) | +| [data_url](#node-data_url) | The publicly accessible URL that this node advertises to peers during the P2P handshake
as its HTTP RPC endpoint. Other nodes or services might use this URL to query the node's API. | `http://{rpc_bind}` (e.g., "http://0.0.0.0:20443" if [rpc_bind](#node-rpc_bind) is default). | +| [deny_nodes](#node-deny_nodes) | A list of peer addresses that this node should explicitly deny connections from.
Peers are specified as comma-separated strings in the format "IP:PORT" or "HOSTNAME:PORT"
in the configuration file. DNS hostnames are resolved during configuration loading.

**Example:**
  deny_nodes = "192.168.1.100:20444,badhost.example.com:20444"
| `[]` (empty vector) | +| [fault_injection_block_push_fail_probability](#node-fault_injection_block_push_fail_probability) | Fault injection setting for testing purposes. If set to `Some(p)`, where `p` is between 0 and 100,
the node will have a `p` percent chance of intentionally *not* pushing a newly processed block
to its peers.

**Notes:**
- Values: 0-100 (percentage). | `None` (no fault injection) | +| [fault_injection_hide_blocks](#node-fault_injection_hide_blocks) | Fault injection setting for testing purposes. If `true`, the node's chainstate database
access layer may intentionally fail to retrieve block data, even if it exists,
simulating block hiding or data unavailability.

**Notes:**
- This parameter cannot be set via the configuration file; it must be modified programmatically. | `false` | +| [local_peer_seed](#node-local_peer_seed) | The private key seed, provided as a hex string in the config file, used specifically for the
node's identity and message signing within the P2P networking layer.
This is separate from the main [seed](#node-seed). | Randomly generated 32 bytes | +| [marf_cache_strategy](#node-marf_cache_strategy) | The strategy to use for MARF trie node caching in memory.
Controls the trade-off between memory usage and performance for state access.

Possible values:
- `"noop"`: No caching (least memory).
- `"everything"`: Cache all nodes (most memory, potentially fastest).
- `"node256"`: Cache only larger `TrieNode256` nodes.

If the value is `None` or an unrecognized string, it defaults to `"noop"`. | `None` (effectively `"noop"`) | +| [marf_defer_hashing](#node-marf_defer_hashing) | Controls the timing of hash calculations for MARF trie nodes.
- If `true`, hashes are calculated only when the MARF is flushed to disk (deferred hashing).
- If `false`, hashes are calculated immediately as leaf nodes are inserted or updated (immediate hashing).
Deferred hashing might improve write performance. | `true` | +| [miner](#node-miner) | Flag indicating whether this node should activate its mining logic and attempt to produce Stacks blocks.
Setting this to `true` typically requires providing necessary private keys (either [seed](#node-seed) or
[[miner].mining_key](#miner-mining_key)). It also influences default behavior for settings like
[require_affirmed_anchor_blocks](#node-require_affirmed_anchor_blocks). | `false` | +| [mock_mining](#node-mock_mining) | Enables a simulated mining mode, primarily for local testing and development.
When `true`, the node may generate blocks locally without participating in the
real bitcoin consensus or P2P block production process.

**Notes:**
- Only relevant if [miner](#node-miner) is `true`. | `false` | +| [name](#node-name) | Human-readable name for the node. Primarily used for identification in testing environments
(e.g., deriving log file names, temporary directory names). | `"helium-node"` | +| [next_initiative_delay](#node-next_initiative_delay) | Controls how frequently, in milliseconds, the Nakamoto miner's relay thread polls for work
or takes periodic actions when idle (e.g., checking for new burnchain blocks).
The default value of 10 seconds is reasonable on mainnet (where bitcoin blocks arrive roughly every 10 minutes).
A lower value might be useful in other environments with faster burn blocks.

**Notes:**
- Units: milliseconds. | `10_000` (milliseconds, 10 seconds) | +| [p2p_address](#node-p2p_address) | The publicly accessible IPv4 address and port that this node advertises to peers for P2P connections.
This might differ from [p2p_bind](#node-p2p_bind) if the node is behind NAT or a proxy.

**Notes:**
- The default value derivation might be unexpected, potentially using the [rpc_bind](#node-rpc_bind) address; explicit configuration is recommended if needed. | Derived from [rpc_bind](#node-rpc_bind) (e.g., "0.0.0.0:20443" if [rpc_bind](#node-rpc_bind) is default). | +| [p2p_bind](#node-p2p_bind) | The IPv4 address and port (e.g., "0.0.0.0:20444") on which the node's P2P networking
service should bind and listen for incoming connections from other peers. | `"0.0.0.0:20444"` | +| [prometheus_bind](#node-prometheus_bind) | Optional network address and port (e.g., "127.0.0.1:9153") for binding the Prometheus metrics server.
If set, the node will start an HTTP server on this address to expose internal metrics
for scraping by a Prometheus instance. | `None` (Prometheus server disabled) | +| [require_affirmed_anchor_blocks](#node-require_affirmed_anchor_blocks) | Controls if the node must wait for locally missing but burnchain-affirmed PoX anchor blocks.
If an anchor block is confirmed by the affirmation map but not yet processed by this node:
- If `true`: Burnchain processing halts until the affirmed block is acquired. Ensures strict
adherence to the affirmed canonical chain, typical for followers.
- If `false`: Burnchain processing continues without waiting. Allows miners to operate optimistically
but may necessitate unwinding later if the affirmed block alters the chain state. | - `true` if [miner](#node-miner) is `false`
- `false` if [miner](#node-miner) is `true` | +| [rpc_bind](#node-rpc_bind) | The IPv4 address and port (e.g., "0.0.0.0:20443") on which the node's HTTP RPC server
should bind and listen for incoming API requests. | `"0.0.0.0:20443"` | +| [seed](#node-seed) | The node's Bitcoin wallet private key, provided as a hex string in the config file.
Used to initialize the node's keychain for signing operations.
If [[miner].mining_key](#miner-mining_key) is not set, this seed may also be used for mining-related signing.

**Notes:**
- Required if [miner](#node-miner) is `true` and [[miner].mining_key](#miner-mining_key) is absent. | Randomly generated 32 bytes | +| [stacker](#node-stacker) | Setting this to `true` enables the node to replicate the miner and signer Stacker DBs
required for signing, and is required if the node is connected to a signer. | `false` | +| [stacker_dbs](#node-stacker_dbs) | A list of specific StackerDB contracts (identified by their qualified contract identifiers,
e.g., "SP000000000000000000002Q6VF78.pox-3") that this node should actively replicate.

**Notes:**
- Values are strings representing qualified contract identifiers.

**Example:**
  stacker_dbs = ["SP000000000000000000002Q6VF78.pox-3", "SP2C2YFP12AJZB4M4KUPSTMZQR0SNHNPH204SCQJM.stx-oracle-v1"]
| - If [miner](#node-miner) is `true` or [stacker](#node-stacker) is `true`, relevant system contracts
(like `.miners`, `.signers-*`) are automatically added in addition to any contracts
specified in the configuration file.
- Otherwise, defaults to an empty list `[]` if not specified in the TOML. | +| [txindex](#node-txindex) | Enables the transaction index, which maps transaction IDs to the blocks containing them.
Setting this to `true` allows the use of RPC endpoints that look up transactions by ID
(e.g., `/extended/v1/tx/{txid}`), but requires substantial additional disk space for the index database. | `false` | +| [use_test_genesis_chainstate](#node-use_test_genesis_chainstate) | If set to `true`, the node initializes its state using an alternative test genesis block definition,
loading different initial balances, names, and lockups than the standard network genesis.

**Notes:**
- This is intended strictly for testing purposes and is disallowed on mainnet. | `None` (uses standard network genesis) | +| [wait_time_for_blocks](#node-wait_time_for_blocks) | When operating as a miner, this specifies the maximum time (in milliseconds)
the node waits after detecting a new burnchain block to synchronize corresponding
Stacks block data from the network before resuming mining attempts.
If synchronization doesn't complete within this duration, mining resumes anyway
to prevent stalling. This setting is loaded by all nodes but primarily affects
miner behavior within the relayer thread.

**Notes:**
- Units: milliseconds. | `30_000` (milliseconds, 30 seconds) | +| [working_dir](#node-working_dir) | The file system absolute path to the node's working directory.
All persistent data, including chainstate, burnchain databases, and potentially other stores,
will be located within this directory.
This path can be overridden by setting the `STACKS_WORKING_DIR` environment variable.

**Notes:**
- For persistent mainnet or testnet nodes, this path must be explicitly configured to a non-temporary location. | `/tmp/stacks-node-{current_timestamp}` | +| ~~[max_microblocks](#node-max_microblocks)~~ | The maximum number of microblocks allowed per Stacks block.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 2.5+. | `65535` (u16::MAX) | +| ~~[microblock_frequency](#node-microblock_frequency)~~ | How often to attempt producing microblocks, in milliseconds.

**Notes:**
- Only applies when [mine_microblocks](#node-mine_microblocks) is true and before Epoch 2.5.
- Units: milliseconds.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 2.5+. | `30_000` (milliseconds, 30 seconds) | +| ~~[mine_microblocks](#node-mine_microblocks)~~ | Enable microblock mining.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 2.5+. | `true` | +| ~~[mock_mining_output_dir](#node-mock_mining_output_dir)~~ | If [mock_mining](#node-mock_mining) is enabled, this specifies an optional directory path where the
generated mock Stacks blocks will be saved (pre-Nakamoto).
The path is canonicalized on load.

**⚠️ DEPRECATED:** This setting was only used in the neon node and is ignored in Epoch 3.0+. | `None` | +| ~~[pox_sync_sample_secs](#node-pox_sync_sample_secs)~~ | Sampling interval in seconds for the PoX synchronization watchdog thread (pre-Nakamoto).
Determined how often the watchdog checked PoX state consistency in the Neon run loop.

**Notes:**
- Units: seconds.

**⚠️ DEPRECATED:** Unused after the Nakamoto upgrade. This setting is ignored in Epoch 3.0+. | `30` (seconds) | +| ~~[wait_time_for_microblocks](#node-wait_time_for_microblocks)~~ | Cooldown period after a microblock is produced, in milliseconds.

**Notes:**
- Only applies when [mine_microblocks](#node-mine_microblocks) is true and before Epoch 2.5.
- Units: milliseconds.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 2.5+. | `30_000` (milliseconds, 30 seconds) | +
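
A minimal `[node]` sketch for a follower, combining settings documented above; the working directory is an illustrative assumption, and the bootstrap entry reuses the testnet example from this reference:

```toml
[node]
# Persistent state; do not leave this on the default /tmp path for a real deployment.
working_dir = "/var/lib/stacks-node"
rpc_bind = "0.0.0.0:20443"
p2p_bind = "0.0.0.0:20444"
# Expose Prometheus metrics locally.
prometheus_bind = "127.0.0.1:9153"
bootstrap_node = "029266faff4c8e0ca4f934f34996a96af481df94a89b0c9bd515f3536a95682ddc@seed.testnet.hiro.so:30444"
```

## [miner]

| Parameter | Description | Default |
|-----------|-------------|----------|
| [activated_vrf_key_path](#miner-activated_vrf_key_path) | Path to a file for storing and loading the currently active, registered VRF leader key.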

Loading: On startup or when needing to register a key, if this path is set, the relayer first
attempts to load a serialized `RegisteredKey` from this file. If successful, it uses the
loaded key and skips the on-chain VRF key registration transaction, saving time and fees.
Saving: After a new VRF key registration transaction is confirmed and activated on the burnchain,
if this path is set, the node saves the details of the newly activated `RegisteredKey` to this file.
This allows the miner to persist its active VRF key across restarts.
If the file doesn't exist during load, or the path is `None`, the node proceeds with a new registration. | `None` | +| [block_commit_delay](#miner-block_commit_delay) | Time in milliseconds to wait for a Nakamoto block after seeing a burnchain block before submitting a block commit.

After observing a new burnchain block, the miner's relayer waits for this duration
before submitting its next block commit transaction to Bitcoin. This delay provides an opportunity
for a new Nakamoto block (produced by the winner of the latest sortition) to arrive.
Waiting helps avoid situations where the relayer immediately submits a commit that needs
to be replaced via RBF if a new Stacks block appears shortly after.
This delay is skipped if the new burnchain blocks leading to the tip contain no sortitions.

**Notes:**
- Units: milliseconds. | `40_000` (ms) | +| [block_rejection_timeout_steps](#miner-block_rejection_timeout_steps) | Defines adaptive timeouts for waiting for signer responses, based on the accumulated weight of rejections.

Configured as a map where keys represent rejection weight thresholds as percentages,
and values are the timeout durations (in seconds) to apply once the accumulated rejection
weight reaches or exceeds that key but is still below the next key.

When a miner proposes a block, it waits for signer responses (approvals or rejections).
The SignerCoordinator tracks the total weight of received rejections. It uses this map to determine
the current timeout duration. It selects the timeout value associated with the largest key
in the map that is less than or equal to the current accumulated rejection weight.
If this timeout duration expires before a decision is reached, the coordinator signals a timeout.
This prompts the miner to potentially retry proposing the block.
As more rejections come in, the applicable timeout step might change (likely decrease),
allowing the miner to abandon unviable proposals faster.

A key for 0 (zero rejections) must be defined, representing the initial timeout when no rejections have been received.

**Notes:**
- Keys are rejection weight percentages (0-100). Values are timeout durations.

**Example:**
  # Keys are rejection weight percentages (as strings), values are timeouts in seconds.
[miner.block_rejection_timeout_steps]
"0" = 180
"10" = 90
"20" = 45
"30" = 0
| `{ 0: 180, 10: 90, 20: 45, 30: 0 }` (times in seconds) | +| [block_reward_recipient](#miner-block_reward_recipient) | Optional recipient for the coinbase block reward, overriding the default miner address.

By default (`None`), the reward is sent to the miner's primary address ([[node].seed](#node-seed)).
If set to some principal address *and* the current Stacks epoch is > 2.1,
the reward will be directed to the specified address instead. | `None` | +| [candidate_retry_cache_size](#miner-candidate_retry_cache_size) | Max size (in *number* of items) of transaction candidates to hold in the in-memory
retry cache.

This cache stores transactions encountered during a `GlobalFeeRate` mempool walk
whose nonces are currently too high for immediate processing. These candidates
are prioritized for reconsideration later within the *same* walk, potentially
becoming valid if other processed transactions update the expected nonces.

A larger cache retains more potentially valid future candidates but uses more memory.
This setting is primarily relevant for the `GlobalFeeRate` strategy.

**Notes:**
- Units: number of items (Each element `crate::core::mempool::MemPoolTxInfoPartial` is currently 112 bytes). | `1048576` (items) | +| [empty_mempool_sleep_time](#miner-empty_mempool_sleep_time) | The amount of time in milliseconds that the miner should sleep in between attempts to
mine a block when the mempool is empty.

This prevents the miner from busy-looping when there are no pending transactions,
conserving CPU resources. During this sleep, the miner still checks burnchain tip changes.

**Notes:**
- Units: milliseconds. | `2_500` (ms) | +| [filter_origins](#miner-filter_origins) | A comma-separated list of Stacks addresses to whitelist so that only transactions from
these addresses are considered during the mempool walk for block building. If this
list is non-empty, any transaction whose origin address is *not* in this set will be skipped.

This allows miners to prioritize transactions originating from specific accounts that are
important to them.
Configured as a comma-separated string of standard Stacks addresses (e.g., "ST123...,ST456...")
in the configuration file.

**Example:**
  filter_origins = "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2,ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF"
| Empty set (all origins are considered). | +| [first_rejection_pause_ms](#miner-first_rejection_pause_ms) | Time in milliseconds to pause after receiving the first threshold rejection, before proposing a new block.

When a miner's block proposal fails to gather enough signatures from the signers for the first time
at a given height, the miner will pause for this duration before attempting to mine and propose again.

**Notes:**
- Units: milliseconds. | `5_000` (ms) | +| [max_execution_time_secs](#miner-max_execution_time_secs) | Defines the maximum execution time (in seconds) allowed for a single contract call transaction.

When processing a transaction (contract call or smart contract deployment), if this option is set,
and the execution time exceeds this limit, the transaction processing fails with an `ExecutionTimeout` error,
and the transaction is skipped. This prevents potentially long-running or infinite-loop transactions
from blocking block production.

**Notes:**
- Units: seconds. | `None` (no execution time limit) | +| [mempool_walk_strategy](#miner-mempool_walk_strategy) | Strategy for selecting the next transaction candidate from the mempool.
Controls prioritization between maximizing immediate fee capture vs. ensuring
transaction nonce order for account progression and processing efficiency.

See `MemPoolWalkStrategy` for variant details.

Possible values (use variant names for configuration):
- `"GlobalFeeRate"`: Selects the transaction with the highest fee rate globally.
- `"NextNonceWithHighestFeeRate"`: Selects the highest-fee transaction among those
matching the next expected nonce for sender/sponsor accounts. | `"GlobalFeeRate"` | +| [min_time_between_blocks_ms](#miner-min_time_between_blocks_ms) | The minimum time to wait between mining blocks in milliseconds. The value must be greater
than or equal to 1000 ms because if a block is mined within the same second as its parent,
it will be rejected by the signers.

This check ensures compliance with signer rules that prevent blocks with identical timestamps
(at second resolution) to their parents. If a lower value is configured, 1000 ms is used instead.

**Notes:**
- Units: milliseconds. | `1_000` (ms) | +| [mining_key](#miner-mining_key) | The private key (Secp256k1) used for signing blocks, provided as a hex string.

This key must be present at runtime for mining operations to succeed. | - [[node].seed](#node-seed) if the `[miner]` section *is present* in the config file
- `None` if the `[miner]` section *is not present* | +| [nakamoto_attempt_time_ms](#miner-nakamoto_attempt_time_ms) | Maximum time (in milliseconds) the miner spends selecting transactions from the mempool
when assembling a Nakamoto block. Once this duration is exceeded, the miner stops
adding transactions and finalizes the block with those already selected.

**Notes:**
- Units: milliseconds. | `5_000` (ms, 5 seconds) | +| [nonce_cache_size](#miner-nonce_cache_size) | Max size (in bytes) of the in-memory cache for storing expected account nonces.

This cache accelerates mempool processing (e.g., during block building) by storing
the anticipated next nonce for accounts, reducing expensive lookups into the node's
state (MARF trie). A larger cache can improve performance for workloads involving
many unique accounts but increases memory consumption.

**Notes:**
- Must be configured to a value greater than 0.
- Units: bytes. | `1048576` (bytes, 1 MiB) | +| [probability_pick_no_estimate_tx](#miner-probability_pick_no_estimate_tx) | Probability (percentage, 0-100) of prioritizing a transaction without a known fee rate
during candidate selection.

Only effective when `mempool_walk_strategy` is `GlobalFeeRate`. Helps ensure
transactions lacking fee estimates are periodically considered alongside high-fee ones,
preventing potential starvation. A value of 0 means never prioritize them first,
100 means always prioritize them first (if available).

**Notes:**
- Values: 0-100. | `25` (25% chance) | +| [replay_transactions](#miner-replay_transactions) | TODO: remove this option when it's no longer a testing feature and it becomes the default behaviour.
The miner will attempt to replay transactions that a threshold number of signers are expecting in the next block. | *Required* | +| [segwit](#miner-segwit) | If possible, mine with a p2wpkh address. | `false` | +| [subsequent_rejection_pause_ms](#miner-subsequent_rejection_pause_ms) | Time in milliseconds to pause after receiving subsequent threshold rejections, before proposing a new block.

If a miner's block proposal is rejected multiple times at the same height (after the first rejection),
this potentially longer pause duration is used before retrying. This gives more significant time
for network state changes or signer coordination.

**Notes:**
- Units: milliseconds. | `10_000` (ms) | +| [tenure_cost_limit_per_block_percentage](#miner-tenure_cost_limit_per_block_percentage) | The percentage of the remaining tenure cost limit to consume each block.

This setting limits the execution cost (Clarity cost) a single Nakamoto block can incur,
expressed as a percentage of the *remaining* cost budget for the current mining tenure.
For example, if set to 25, a block can use at most 25% of the tenure's currently available cost limit.
This allows miners to spread the tenure's total execution budget across multiple blocks rather than
potentially consuming it all in the first block.

**Notes:**
- The value must be between 1 and 100, inclusive, if specified.
- Setting to 100 effectively disables this per-block limit, allowing a block to use the entire remaining tenure budget. | `25` (%) | +| [tenure_extend_cost_threshold](#miner-tenure_extend_cost_threshold) | Percentage of block budget that must be used before attempting a time-based tenure extend.

This sets a minimum threshold for the accumulated execution cost within a tenure before a
time-based tenure extension ([tenure_timeout](#miner-tenure_timeout)) can be initiated.
The miner checks if the proportion of the total tenure budget consumed so far exceeds this percentage.
If the cost usage is below this threshold, a time-based extension will not be attempted, even if
the [tenure_timeout](#miner-tenure_timeout) duration has elapsed.
This prevents miners from extending tenures very early if they have produced only low-cost blocks.

**Notes:**
- Values: 0-100. | `50` (%) | +| [tenure_extend_poll_timeout](#miner-tenure_extend_poll_timeout) | Duration to wait between polls of the sortition DB to see if we need to
extend the ongoing tenure (e.g. because the current sortition is empty or invalid).

After the relayer determines that a tenure extension might be needed but cannot proceed immediately
(e.g., because a miner thread is already active for the current burn view), it will wait for this
duration before re-checking the conditions for tenure extension.

**Notes:**
- Units: seconds. | `1` (seconds) | +| [tenure_extend_wait_timeout](#miner-tenure_extend_wait_timeout) | Duration to wait before trying to continue a tenure because the next miner did not produce blocks.

If the node was the winner of the previous sortition but not the most recent one,
the relayer waits for this duration before attempting to extend its own tenure.
This gives the new winner of the most recent sortition a grace period to produce their first block.
Also used in scenarios with empty sortitions to give the winner of the *last valid* sortition time
to produce a block before the current miner attempts an extension.

**Notes:**
- Units: milliseconds. | `120_000` (ms) | +| [tenure_timeout](#miner-tenure_timeout) | Duration to wait before attempting to issue a time-based tenure extend.

A miner can proactively attempt to extend its tenure if a significant amount of time has passed
since the last tenure change, even without an explicit trigger like an empty sortition.
If the time elapsed since the last tenure change exceeds this value, and the signer coordinator
indicates an extension is timely, and the cost usage threshold ([tenure_extend_cost_threshold](#miner-tenure_extend_cost_threshold))
is met, the miner will include a tenure extension transaction in its next block.

**Notes:**
- Units: seconds. | `180` (seconds) | +| [txs_to_consider](#miner-txs_to_consider) | Specifies which types of transactions the miner should consider including in a block
during the mempool walk process. Transactions of types not included in this set will be skipped.

This allows miners to exclude specific transaction categories.
Configured as a comma-separated string of transaction type names in the configuration file.

Accepted values correspond to variants of `MemPoolWalkTxTypes`:
- `"TokenTransfer"`
- `"SmartContract"`
- `"ContractCall"`

**Example:**
  txs_to_consider = "TokenTransfer,ContractCall"
| All transaction types are considered (equivalent to [`MemPoolWalkTxTypes::all()`]). | +| [wait_for_block_download](#miner-wait_for_block_download) | Wait for a downloader pass before mining.
This can only be disabled in testing; it can't be changed in the config file. | `true` | +| ~~[fast_rampup](#miner-fast_rampup)~~ | Controls how the miner estimates its win probability when checking for underperformance.

This estimation is used in conjunction with [target_win_probability](#miner-target_win_probability) and
[underperform_stop_threshold](#miner-underperform_stop_threshold) to decide whether to pause mining due to
low predicted success rate.

- If `true`: The win probability estimation looks at projected spend distributions
~6 blocks into the future. This might help the miner adjust its spending more quickly
based on anticipated competition changes.
- If `false`: The win probability estimation uses the currently observed spend distribution
for the next block.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode and by the
`get-spend-amount` cli subcommand. | `false` | +| ~~[first_attempt_time_ms](#miner-first_attempt_time_ms)~~ | Time to wait (in milliseconds) before the first attempt to mine a block.

**Notes:**
- Units: milliseconds.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode. | `10` (ms) | +| ~~[max_reorg_depth](#miner-max_reorg_depth)~~ | Defines the maximum depth (in Stacks blocks) the miner considers when evaluating
potential chain tips when selecting the best tip to mine the next block on.

The miner analyzes candidate tips within this depth from the highest known tip.
It selects the "nicest" tip, often defined as the one that minimizes chain reorganizations
or orphans within this lookback window. A lower value restricts the analysis to shallower forks,
while a higher value considers deeper potential reorganizations.

This setting influences which fork the miner chooses to build upon if multiple valid tips exist.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode and the
`pick-best-tip` cli subcommand. | `3` | +| ~~[microblock_attempt_time_ms](#miner-microblock_attempt_time_ms)~~ | Time to wait (in milliseconds) to mine a microblock.

**Notes:**
- Units: milliseconds.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode. | `30_000` (ms, 30 seconds) | +| ~~[min_tx_count](#miner-min_tx_count)~~ | Minimum number of transactions that must be in a block if we're going to replace a pending
block-commit with a new block-commit.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode. | `0` | +| ~~[only_increase_tx_count](#miner-only_increase_tx_count)~~ | If true, requires subsequent mining attempts for the same block height
to have a transaction count >= the previous best attempt.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode. | `false` | +| ~~[pre_nakamoto_mock_signing](#miner-pre_nakamoto_mock_signing)~~ | Enables a mock signing process for testing purposes, specifically designed for use during Epoch 2.5
before the activation of Nakamoto consensus.

When set to `true` and [mining_key](#miner-mining_key) is provided, the miner will interact
with the `.miners` and `.signers` contracts via the stackerdb to send and receive mock
proposals and signatures, simulating aspects of the Nakamoto leader election and block signing flow.

**Notes:**
- This is intended strictly for testing purposes for Epoch 2.5 conditions.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. | `false` (Should only default true if [mining_key](#miner-mining_key) is set). | +| ~~[subsequent_attempt_time_ms](#miner-subsequent_attempt_time_ms)~~ | Time to wait (in milliseconds) for subsequent attempts to mine a block,
after the first attempt fails.

**Notes:**
- Units: milliseconds.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode. | `120_000` (ms, 2 minutes) | +| ~~[target_win_probability](#miner-target_win_probability)~~ | The minimum win probability this miner aims to achieve in block sortitions.

This target is used to detect prolonged periods of underperformance. If the miner's
calculated win probability consistently falls below this value for a duration specified
by [underperform_stop_threshold](#miner-underperform_stop_threshold) (after an initial startup phase), the miner may
cease spending in subsequent sortitions (returning a burn fee cap of 0) to conserve resources.

Setting this value close to 0.0 effectively disables the underperformance check.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode. | `0.0` | +| ~~[unconfirmed_commits_helper](#miner-unconfirmed_commits_helper)~~ | Optional path to an external helper script for fetching unconfirmed block-commits.
Used to inform the miner's dynamic burn fee bidding strategy with off-chain data.

If a path is provided, the target script must:
- Be executable by the user running the Stacks node process.
- Accept a list of active miner burnchain addresses as command-line arguments.
- On successful execution, print a JSON array representing `Vec<UnconfirmedBlockCommit>`
(see `stacks::config::chain_data::UnconfirmedBlockCommit` struct) to stdout.
- Exit with code 0 on success.

Look at `test_get_unconfirmed_commits` in `stackslib/src/config/chain_data.rs` for an example script.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode and by the
`get-spend-amount` cli subcommand. | `None` (feature disabled). | +| ~~[underperform_stop_threshold](#miner-underperform_stop_threshold)~~ | The maximum number of consecutive Bitcoin blocks the miner will tolerate underperforming
(i.e., having a calculated win probability below [target_win_probability](#miner-target_win_probability))
before temporarily pausing mining efforts.

This check is only active after an initial startup phase (6 blocks past the mining start height).
If the miner underperforms for this number of consecutive blocks, the
`BlockMinerThread::get_mining_spend_amount` function will return 0, effectively preventing the
miner from submitting a block commit for the current sortition to conserve funds.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode. | `None` (underperformance check is disabled). | +| ~~[unprocessed_block_deadline_secs](#miner-unprocessed_block_deadline_secs)~~ | Amount of time (in seconds) to wait for unprocessed blocks before mining a new block.

**Notes:**
- Units: seconds.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode. | `30` (seconds) | +| ~~[wait_on_interim_blocks](#miner-wait_on_interim_blocks)~~ | Amount of time to wait between mining interim blocks while mining in Nakamoto.

**⚠️ DEPRECATED:** Use `min_time_between_blocks_ms` instead. | `None` | + + From 5d74eee63b9e6d94931b936f7b69ef5d26cbab2f Mon Sep 17 00:00:00 2001 From: Francesco Leacche Date: Tue, 3 Jun 2025 22:30:17 +0100 Subject: [PATCH 02/20] move Dockerfile to the right folder --- Dockerfile | 33 +++++++------------ .../tools/config-docs-generator/Dockerfile | 27 +++++++++++++++ contrib/tools/config-docs-generator/README.md | 4 +-- 3 files changed, 41 insertions(+), 23 deletions(-) create mode 100644 contrib/tools/config-docs-generator/Dockerfile diff --git a/Dockerfile b/Dockerfile index 760082ad88..ca03fa3ac6 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,25 +1,16 @@ -# Use a specific nightly toolchain for reproducible builds -FROM rustlang/rust@sha256:04690ffa09cddd358b349272173155319f384e57816614eea0840ec7f9422862 +FROM rust:bookworm AS build -# Set the working directory for building -WORKDIR /build +ARG STACKS_NODE_VERSION="No Version Info" +ARG GIT_BRANCH='No Branch Info' +ARG GIT_COMMIT='No Commit Info' -# Copy the entire project to build the binaries +WORKDIR /src COPY . . +RUN mkdir /out +RUN rustup toolchain install stable +RUN cargo build --features monitoring_prom,slog_json --release +RUN cp -R target/release/. /out -# Pre-build the config-docs-generator binaries during image build -RUN cargo build --package config-docs-generator --release - -# Set the working directory where the project will be mounted at runtime -WORKDIR /project_root - -# Set environment variables for generate-config-docs.sh -ENV PROJECT_ROOT=/project_root -ENV BUILD_ROOT=/build -ENV CARGO_HOME=/project_root/.cargo -ENV EXTRACT_DOCS_BIN=/build/target/release/extract-docs -ENV GENERATE_MARKDOWN_BIN=/build/target/release/generate-markdown -ENV SKIP_BUILD=true - -# Set the entrypoint to run the config docs generation script -ENTRYPOINT ["/build/contrib/tools/config-docs-generator/generate-config-docs.sh"] +FROM debian:bookworm-slim +COPY --from=build /out/stacks-node /out/stacks-signer /out/stacks-inspect /bin/ +CMD ["stacks-node", "mainnet"] diff --git a/contrib/tools/config-docs-generator/Dockerfile b/contrib/tools/config-docs-generator/Dockerfile new file mode 100644 index 0000000000..de96b6beaf --- /dev/null +++ b/contrib/tools/config-docs-generator/Dockerfile @@ -0,0 +1,27 @@ +# Use a specific nightly toolchain for reproducible builds +FROM rustlang/rust@sha256:04690ffa09cddd358b349272173155319f384e57816614eea0840ec7f9422862 + +# Set the working directory for building +WORKDIR /build + +# Copy the entire project root to preserve structure +# Copy from three levels up (project root) to maintain the directory structure +COPY ../../../ /build + +# Pre-build the config-docs-generator binaries during image build +RUN cargo build --package config-docs-generator --release + +# Set the working directory where the project will be mounted at runtime +WORKDIR /project_root + +# Set environment variables for generate-config-docs.sh +ENV PROJECT_ROOT=/project_root +ENV BUILD_ROOT=/build +ENV CARGO_HOME=/project_root/.cargo +ENV EXTRACT_DOCS_BIN=/build/target/release/extract-docs +ENV GENERATE_MARKDOWN_BIN=/build/target/release/generate-markdown +ENV SKIP_BUILD=true + +# Set the entrypoint to run the config docs generation script +# The script ends up at /build/generate-config-docs.sh due to the copy operation +ENTRYPOINT ["/build/generate-config-docs.sh"] diff --git a/contrib/tools/config-docs-generator/README.md b/contrib/tools/config-docs-generator/README.md index 3d8e715dec..720f8a0af1 100644 --- 
a/contrib/tools/config-docs-generator/README.md +++ b/contrib/tools/config-docs-generator/README.md @@ -13,7 +13,7 @@ The easiest way to generate configuration documentation: docker build -t config-docs-generator . # Generate documentation -docker run --rm -v "$(pwd):/project_root" --user "$(id -u):$(id -g)" config-docs-generator +docker run --rm -v "$(pwd)/../../../:/project_root" config-docs-generator ``` This approach: @@ -117,7 +117,7 @@ fn struct_to_section_name(struct_name: &str) -> String { ```bash # Using Docker (recommended) -docker run --rm -v "$(pwd):/project_root" --user "$(id -u):$(id -g)" config-docs-generator +docker run --rm -v "$(pwd)/../../../:/project_root" config-docs-generator # OR using local setup ./contrib/tools/config-docs-generator/generate-config-docs.sh From 5f353cbbe31bcb27eb6f107cb0bce1ddb26ddbb7 Mon Sep 17 00:00:00 2001 From: Francesco Leacche Date: Tue, 3 Jun 2025 23:10:18 +0100 Subject: [PATCH 03/20] clippy and fmt --- .../config-docs-generator/src/extract_docs.rs | 26 ++++++++++--------- 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/contrib/tools/config-docs-generator/src/extract_docs.rs b/contrib/tools/config-docs-generator/src/extract_docs.rs index 1f54ebfa37..baa8a3b9de 100644 --- a/contrib/tools/config-docs-generator/src/extract_docs.rs +++ b/contrib/tools/config-docs-generator/src/extract_docs.rs @@ -595,9 +595,7 @@ fn strip_type_suffix(value: &str) -> String { ]; for suffix in &suffixes { - if value.ends_with(suffix) { - let without_suffix = &value[..value.len() - suffix.len()]; - + if let Some(without_suffix) = value.strip_suffix(suffix) { // Only strip if the remaining part looks like a numeric literal // (contains only digits, underscores, dots, minus signs, or quotes for string literals) if !without_suffix.is_empty() @@ -1217,10 +1215,12 @@ mod tests { let result = extract_config_docs_from_rustdoc(&invalid_rustdoc, &None); assert!(result.is_err()); - assert!(result - .unwrap_err() - .to_string() - .contains("Missing 'index' field")); + assert!( + result + .unwrap_err() + .to_string() + .contains("Missing 'index' field") + ); } #[test] @@ -1318,11 +1318,13 @@ and includes various formatting. assert!(notes[1].contains("Only relevant")); assert!(notes[2].contains("Units: milliseconds")); - assert!(result - .0 - .deprecated - .unwrap() - .contains("Use `new_field` instead")); + assert!( + result + .0 + .deprecated + .unwrap() + .contains("Use `new_field` instead") + ); let toml_example = result.0.toml_example.unwrap(); assert!(toml_example.contains("# This is a comment")); From 0ad756938976a619bcd57b0553476b6785799820 Mon Sep 17 00:00:00 2001 From: Francesco Leacche Date: Wed, 4 Jun 2025 14:10:57 +0100 Subject: [PATCH 04/20] add support for @required and @units annotations. 
Implement parsing for literal and folded block scalars --- .../config-docs-generator/src/extract_docs.rs | 716 +++++++++++++++++- 1 file changed, 682 insertions(+), 34 deletions(-) diff --git a/contrib/tools/config-docs-generator/src/extract_docs.rs b/contrib/tools/config-docs-generator/src/extract_docs.rs index baa8a3b9de..a3101f65e4 100644 --- a/contrib/tools/config-docs-generator/src/extract_docs.rs +++ b/contrib/tools/config-docs-generator/src/extract_docs.rs @@ -22,10 +22,6 @@ use clap::{Arg, Command as ClapCommand}; use once_cell::sync::Lazy; use serde::{Deserialize, Serialize}; -// Static regex for finding annotation end patterns -static ANNOTATION_END_REGEX: Lazy = - Lazy::new(|| regex::Regex::new(r"\n\s*@[a-zA-Z_]+:").unwrap()); - // Static regex for finding constant references in documentation static CONSTANT_REFERENCE_REGEX: Lazy = Lazy::new(|| regex::Regex::new(r"\[`([A-Z_][A-Z0-9_]*)`\]").unwrap()); @@ -38,6 +34,8 @@ pub struct FieldDoc { pub notes: Option>, pub deprecated: Option, pub toml_example: Option, + pub required: Option, + pub units: Option, } #[derive(Debug, Serialize, Deserialize)] @@ -351,6 +349,8 @@ fn parse_field_documentation( let mut notes = None; let mut deprecated = None; let mut toml_example = None; + let mut required = None; + let mut units = None; let mut referenced_constants = std::collections::HashSet::new(); // Split on --- separator if present @@ -433,6 +433,29 @@ fn parse_field_documentation( referenced_constants.extend(find_constant_references(&example_text)); toml_example = Some(example_text); } + + // Parse @required: annotations + if let Some(required_text) = extract_annotation(metadata_section, "required") { + // Parse boolean value, handling common representations + let required_bool = match required_text.trim().to_lowercase().as_str() { + "" => false, // Empty string defaults to false + "true" | "yes" | "1" => true, + "false" | "no" | "0" => false, + _ => { + // Default to false for invalid values, but could log a warning in the future + eprintln!("Warning: Invalid @required value '{}' for field '{}', defaulting to false", required_text, field_name); + false + } + }; + required = Some(required_bool); + } + + // Parse @units: annotations + if let Some(units_text) = extract_annotation(metadata_section, "units") { + // Collect constant references from units text + referenced_constants.extend(find_constant_references(&units_text)); + units = Some(units_text); + } } let field_doc = FieldDoc { @@ -442,46 +465,271 @@ fn parse_field_documentation( notes, deprecated, toml_example, + required, + units, }; Ok((field_doc, referenced_constants)) } +/// Parse a YAML-style literal block scalar (|) from comment lines +/// Preserves newlines and internal indentation relative to the block base indentation +fn parse_literal_block_scalar(lines: &[&str], _base_indent: usize) -> String { + if lines.is_empty() { + return String::new(); + } + + // Find the first non-empty content line to determine block indentation + let content_lines: Vec<&str> = lines.iter() + .skip_while(|line| line.trim().is_empty()) + .copied() + .collect(); + + if content_lines.is_empty() { + return String::new(); + } + + // Determine block indentation from the first content line + let block_indent = content_lines[0].len() - content_lines[0].trim_start().len(); + + // Process all lines, preserving relative indentation within the block + let mut result_lines = Vec::new(); + for line in lines { + if line.trim().is_empty() { + // Preserve empty lines + result_lines.push(String::new()); + } else { 
+ let line_indent = line.len() - line.trim_start().len(); + if line_indent >= block_indent { + // Remove only the common block indentation, preserving relative indentation + let content = &line[block_indent.min(line.len())..]; + result_lines.push(content.to_string()); + } else { + // Line is less indented than block base - should not happen in well-formed blocks + result_lines.push(line.trim_start().to_string()); + } + } + } + + // Remove trailing empty lines (clip chomping style) + while let Some(last) = result_lines.last() { + if last.is_empty() { + result_lines.pop(); + } else { + break; + } + } + + result_lines.join("\n") +} + +/// Parse a YAML-style folded block scalar (>) +/// Folds lines into paragraphs, preserving more-indented lines as literal blocks +fn parse_folded_block_scalar(lines: &[&str], _base_indent: usize) -> String { + if lines.is_empty() { + return String::new(); + } + + // Find the first non-empty content line to determine block indentation + let content_lines: Vec<&str> = lines.iter() + .skip_while(|line| line.trim().is_empty()) + .copied() + .collect(); + + if content_lines.is_empty() { + return String::new(); + } + + // Determine block indentation from the first content line + let block_indent = content_lines[0].len() - content_lines[0].trim_start().len(); + + let mut result = String::new(); + let mut current_paragraph = Vec::new(); + let mut in_literal_block = false; + + for line in lines { + if line.trim().is_empty() { + if in_literal_block { + // Empty line in literal block - preserve it + result.push('\n'); + } else if !current_paragraph.is_empty() { + // End current paragraph + result.push_str(¤t_paragraph.join(" ")); + result.push_str("\n\n"); + current_paragraph.clear(); + } + continue; + } + + let line_indent = line.len() - line.trim_start().len(); + let content = if line_indent >= block_indent { + &line[block_indent.min(line.len())..] 
+ } else { + line.trim_start() + }; + + let relative_indent = line_indent.saturating_sub(block_indent); + + if relative_indent > 0 { + // More indented line - start or continue literal block + if !in_literal_block { + // Finish current paragraph before starting literal block + if !current_paragraph.is_empty() { + result.push_str(¤t_paragraph.join(" ")); + result.push('\n'); + current_paragraph.clear(); + } + in_literal_block = true; + } + // Add literal line with preserved indentation + result.push_str(content); + result.push('\n'); + } else { + // Normal indentation - folded content + if in_literal_block { + // Exit literal block + in_literal_block = false; + if !result.is_empty() && !result.ends_with('\n') { + result.push('\n'); + } + } + // Add to current paragraph + current_paragraph.push(content); + } + } + + // Finish any remaining paragraph + if !current_paragraph.is_empty() { + result.push_str(¤t_paragraph.join(" ")); + } + + // Apply "clip" chomping style (consistent with literal parser) + // Remove trailing empty lines but preserve a single trailing newline if content exists + let trimmed = result.trim_end_matches('\n'); + if !trimmed.is_empty() && result.ends_with('\n') { + format!("{}\n", trimmed) + } else { + trimmed.to_string() + } +} + fn extract_annotation(metadata_section: &str, annotation_name: &str) -> Option { let annotation_pattern = format!("@{}:", annotation_name); - if let Some(start_pos) = metadata_section.find(&annotation_pattern) { - let after_annotation = &metadata_section[start_pos + annotation_pattern.len()..]; - - // Find the end of this annotation by looking for the next @annotation: pattern - // Look for pattern like "@word:" to identify the start of the next annotation - let end_pos = ANNOTATION_END_REGEX - .find(after_annotation) - .map(|m| m.start()) - .unwrap_or(after_annotation.len()); - - let annotation_content = after_annotation[..end_pos].trim(); - - if !annotation_content.is_empty() { - // For toml_example, preserve the content more carefully - if annotation_name == "toml_example" { - // Remove the initial | marker if present and preserve formatting - let cleaned = if let Some(stripped) = annotation_content.strip_prefix('|') { - stripped.trim_start_matches('\n').to_string() - } else { - annotation_content.to_string() - }; - - if !cleaned.trim().is_empty() { - return Some(cleaned); - } + if let Some(_start_pos) = metadata_section.find(&annotation_pattern) { + // Split the metadata section into lines for processing + let all_lines: Vec<&str> = metadata_section.lines().collect(); + + // Find which line contains our annotation + let mut annotation_line_idx = None; + for (idx, line) in all_lines.iter().enumerate() { + if line.contains(&annotation_pattern) { + annotation_line_idx = Some(idx); + break; + } + } + + let annotation_line_idx = annotation_line_idx?; + let annotation_line = all_lines[annotation_line_idx]; + + // Find the position of the annotation pattern within this line + let pattern_pos = annotation_line.find(&annotation_pattern)?; + let after_colon = &annotation_line[pattern_pos + annotation_pattern.len()..]; + + // Check for multiline indicators immediately after the colon + let trimmed_after_colon = after_colon.trim_start(); + + if trimmed_after_colon.starts_with('|') { + // Literal block scalar mode (|) + // Content starts from the next line, ignoring any text after | on the same line + let block_lines = collect_annotation_block_lines(&all_lines, annotation_line_idx + 1, annotation_line); + + // Convert to owned strings for the parser + 
let owned_lines: Vec = block_lines.iter().map(|s| s.to_string()).collect(); + + // Convert back to string slices for the parser + let string_refs: Vec<&str> = owned_lines.iter().map(|s| s.as_str()).collect(); + let base_indent = annotation_line.len() - annotation_line.trim_start().len(); + let result = parse_literal_block_scalar(&string_refs, base_indent); + if result.trim().is_empty() { + return None; + } else { + return Some(result); + } + } else if trimmed_after_colon.starts_with('>') { + // Folded block scalar mode (>) + // Content starts from the next line, ignoring any text after > on the same line + let block_lines = collect_annotation_block_lines(&all_lines, annotation_line_idx + 1, annotation_line); + + // Convert to owned strings for the parser + let owned_lines: Vec = block_lines.iter().map(|s| s.to_string()).collect(); + + // Convert back to string slices for the parser + let string_refs: Vec<&str> = owned_lines.iter().map(|s| s.as_str()).collect(); + let base_indent = annotation_line.len() - annotation_line.trim_start().len(); + let result = parse_folded_block_scalar(&string_refs, base_indent); + if result.trim().is_empty() { + return None; } else { - // For other annotations, clean up backticks and other formatting - let cleaned = annotation_content.trim().to_string(); + return Some(result); + } + } else { + // Default literal-like multiline mode + // Content can start on the same line or the next line + let mut content_lines = Vec::new(); + + // Check if there's content on the same line after the colon + if !trimmed_after_colon.is_empty() { + content_lines.push(trimmed_after_colon); + } - if !cleaned.is_empty() { - return Some(cleaned); + // Collect subsequent lines that belong to this annotation + let block_lines = collect_annotation_block_lines(&all_lines, annotation_line_idx + 1, annotation_line); + + // For default mode, preserve relative indentation within the block + if !block_lines.is_empty() { + // Find the base indentation from the first non-empty content line + let mut base_indent = None; + for line in &block_lines { + let trimmed = line.trim(); + if !trimmed.is_empty() { + base_indent = Some(line.len() - line.trim_start().len()); + break; + } } + + // Process lines preserving relative indentation + for line in block_lines { + let trimmed = line.trim(); + if !trimmed.is_empty() { + if let Some(base) = base_indent { + let line_indent = line.len() - line.trim_start().len(); + if line_indent >= base { + // Remove only the common base indentation, preserving relative indentation + let content = &line[base.min(line.len())..]; + content_lines.push(content); + } else { + // Line is less indented than base - use trimmed content + content_lines.push(trimmed); + } + } else { + content_lines.push(trimmed); + } + } + } + } + + if content_lines.is_empty() { + return None; + } + + // Join lines preserving the structure - this maintains internal newlines and relative indentation + let result = content_lines.join("\n"); + + // Apply standard trimming and return if not empty + let cleaned = result.trim(); + if !cleaned.is_empty() { + return Some(cleaned.to_string()); } } } @@ -489,6 +737,39 @@ fn extract_annotation(metadata_section: &str, annotation_name: &str) -> Option( + all_lines: &[&'a str], + start_idx: usize, + annotation_line: &str +) -> Vec<&'a str> { + let mut block_lines = Vec::new(); + let annotation_indent = annotation_line.len() - annotation_line.trim_start().len(); + + for &line in all_lines.iter().skip(start_idx) { + let trimmed = line.trim(); + + // Stop if we hit 
another annotation at the same or lesser indentation level + if trimmed.starts_with('@') && trimmed.contains(':') { + let line_indent = line.len() - line.trim_start().len(); + if line_indent <= annotation_indent { + break; + } + } + + // Stop if we hit a line that's clearly not part of the comment block + // (very different indentation or structure) + let line_indent = line.len() - line.trim_start().len(); + if !trimmed.is_empty() && line_indent < annotation_indent { + break; + } + + block_lines.push(line); + } + + block_lines +} + fn resolve_constant_reference( name: &str, rustdoc_index: &serde_json::Map, @@ -675,7 +956,7 @@ mod tests { ); assert_eq!( result.0.toml_example, - Some(" key = \"value\"\n other = 123".to_string()) + Some("key = \"value\"\nother = 123".to_string()) ); } @@ -1813,4 +2094,371 @@ and includes various formatting. assert_eq!(strip_type_suffix("u32"), "u32"); // Just the type name, not a suffixed value assert_eq!(strip_type_suffix("value_u32_test"), "value_u32_test"); // Contains but doesn't end with type } + + #[test] + fn test_parse_field_documentation_with_required_and_units() { + let doc_text = r#"Field with required and units annotations. +--- +@default: `5000` +@required: true +@units: milliseconds +@notes: + - This field has all new features."#; + + let result = parse_field_documentation(doc_text, "test_field").unwrap(); + + assert_eq!(result.0.name, "test_field"); + assert_eq!(result.0.description, "Field with required and units annotations."); + assert_eq!(result.0.default_value, Some("`5000`".to_string())); + assert_eq!(result.0.required, Some(true)); + assert_eq!(result.0.units, Some("milliseconds".to_string())); + assert_eq!( + result.0.notes, + Some(vec!["This field has all new features.".to_string()]) + ); + } + + #[test] + fn test_parse_field_documentation_required_variants() { + // Test "true" variant + let doc_text1 = r#"Required field. +--- +@required: true"#; + let result1 = parse_field_documentation(doc_text1, "field1").unwrap(); + assert_eq!(result1.0.required, Some(true)); + + // Test "false" variant + let doc_text2 = r#"Optional field. +--- +@required: false"#; + let result2 = parse_field_documentation(doc_text2, "field2").unwrap(); + assert_eq!(result2.0.required, Some(false)); + + // Test "yes" variant + let doc_text3 = r#"Required field. +--- +@required: yes"#; + let result3 = parse_field_documentation(doc_text3, "field3").unwrap(); + assert_eq!(result3.0.required, Some(true)); + + // Test "no" variant + let doc_text4 = r#"Optional field. +--- +@required: no"#; + let result4 = parse_field_documentation(doc_text4, "field4").unwrap(); + assert_eq!(result4.0.required, Some(false)); + + // Test invalid variant (should default to false with warning) + let doc_text5 = r#"Invalid required field. 
+--- +@required: maybe"#; + let result5 = parse_field_documentation(doc_text5, "field5").unwrap(); + assert_eq!(result5.0.required, Some(false)); + } + + #[test] + fn test_extract_annotation_literal_block_mode() { + let metadata = r#"@notes: | + This is a literal block + with preserved indentation + and multiple lines."#; + + let result = extract_annotation(metadata, "notes"); + assert!(result.is_some()); + let notes = result.unwrap(); + assert!(notes.contains("This is a literal block")); + assert!(notes.contains(" with preserved indentation")); + assert!(notes.contains("and multiple lines")); + // Should preserve newlines + assert!(notes.contains('\n')); + } + + #[test] + fn test_extract_annotation_folded_block_mode() { + let metadata = r#"@default: > + This is a folded block + that should join lines + together. + + But preserve paragraph breaks."#; + + let result = extract_annotation(metadata, "default"); + assert!(result.is_some()); + let default = result.unwrap(); + // Folded blocks should join lines with spaces + assert!(default.contains("This is a folded block that should join lines together.")); + // But preserve paragraph breaks + assert!(default.contains("But preserve paragraph breaks.")); + } + + #[test] + fn test_extract_annotation_default_multiline_mode() { + let metadata = r#"@notes: + - First bullet point + - Second bullet point with + continuation on next line + - Third bullet point"#; + + let result = extract_annotation(metadata, "notes"); + assert!(result.is_some()); + let notes = result.unwrap(); + assert!(notes.contains("First bullet point")); + assert!(notes.contains("Second bullet point with")); + assert!(notes.contains("continuation on next line")); + assert!(notes.contains("Third bullet point")); + } + + #[test] + fn test_extract_annotation_literal_block_with_same_line_content() { + let metadata = r#"@toml_example: | This content is on the same line + And this content is on the next line + With proper indentation preserved"#; + + let result = extract_annotation(metadata, "toml_example"); + assert!(result.is_some()); + let toml = result.unwrap(); + // Should only include content from subsequent lines, ignoring same-line content + assert!(!toml.contains("This content is on the same line")); + assert!(toml.contains("And this content is on the next line")); + assert!(toml.contains("With proper indentation preserved")); + } + + #[test] + fn test_units_with_constant_references() { + let doc_text = r#"Field with units containing constant references. 
+--- +@units: [`DEFAULT_TIMEOUT_MS`] milliseconds"#; + + let result = parse_field_documentation(doc_text, "test_field").unwrap(); + let (field_doc, referenced_constants) = result; + + assert_eq!(field_doc.units, Some("[`DEFAULT_TIMEOUT_MS`] milliseconds".to_string())); + // Check that constants were collected from units + assert!(referenced_constants.contains("DEFAULT_TIMEOUT_MS")); + } + + #[test] + fn test_extract_annotation_default_mode_preserves_relative_indent() { + let metadata = r#"@notes: + - Main item 1 + - Sub item 1a + - Sub-sub item 1a1 + - Sub item 1b + - Main item 2"#; + + let result = extract_annotation(metadata, "notes"); + assert!(result.is_some()); + let notes = result.unwrap(); + + // Should preserve relative indentation within the block + assert!(notes.contains("- Main item 1")); + assert!(notes.contains(" - Sub item 1a")); // 2 spaces more indented + assert!(notes.contains(" - Sub-sub item 1a1")); // 4 spaces more indented + assert!(notes.contains(" - Sub item 1b")); // Back to 2 spaces + assert!(notes.contains("- Main item 2")); // Back to base level + } + + #[test] + fn test_extract_annotation_default_mode_mixed_indentation() { + let metadata = r#"@default: + First line with base indentation + Second line more indented + Third line back to base + Fourth line very indented"#; + + let result = extract_annotation(metadata, "default"); + assert!(result.is_some()); + let default_val = result.unwrap(); + + // Should preserve relative spacing + let lines: Vec<&str> = default_val.lines().collect(); + assert_eq!(lines[0], "First line with base indentation"); + assert_eq!(lines[1], " Second line more indented"); // 2 extra spaces + assert_eq!(lines[2], "Third line back to base"); + assert_eq!(lines[3], " Fourth line very indented"); // 4 extra spaces + } + + #[test] + fn test_extract_annotation_toml_example_consistency() { + // Test that @toml_example now uses standard parsing (no special handling) + let metadata = r#"@toml_example: | + key = "value" + indented_key = "nested" + other = 123"#; + + let result = extract_annotation(metadata, "toml_example"); + assert!(result.is_some()); + let toml = result.unwrap(); + + // Should use standard literal block parsing + assert!(toml.contains("key = \"value\"")); + assert!(toml.contains(" indented_key = \"nested\"")); // Preserved relative indent + assert!(toml.contains("other = 123")); + } + + #[test] + fn test_parse_folded_block_scalar_clip_chomping() { + // Test that folded blocks use "clip" chomping (consistent with literal) + let lines = vec![ + " First paragraph line", + " continues here.", + "", + " Second paragraph", + " also continues.", + "", + "", // Extra empty lines at end + ]; + + let result = parse_folded_block_scalar(&lines, 0); + + // Should fold lines within paragraphs but preserve paragraph breaks + assert!(result.contains("First paragraph line continues here.")); + assert!(result.contains("Second paragraph also continues.")); + + // Should use clip chomping - preserve single trailing newline if content ends with one + // But since we're folding, the exact behavior depends on implementation + assert!(!result.ends_with("\n\n")); // Should not have multiple trailing newlines + } + + #[test] + #[ignore = "Test for old behavior - same-line content mode has been removed"] + fn test_extract_annotation_literal_and_folded_same_line_content() { + // Test same-line content handling for both | and > + let metadata_literal = r#"@notes: | Same line content + Next line content + Another line"#; + + let metadata_folded = 
r#"@default: > Same line content + Next line content + Another line"#; + + let literal_result = extract_annotation(metadata_literal, "notes").unwrap(); + let folded_result = extract_annotation(metadata_folded, "default").unwrap(); + + // Both should include same-line content + assert!(literal_result.contains("Same line content")); + assert!(folded_result.contains("Same line content")); + + // Literal mode should preserve all content and line structure exactly + assert!(literal_result.contains("Next line content")); + assert!(literal_result.contains("Another line")); + + let literal_lines: Vec<&str> = literal_result.lines().collect(); + assert_eq!(literal_lines.len(), 3); + assert_eq!(literal_lines[0], "Same line content"); + assert_eq!(literal_lines[1], "Next line content"); + assert_eq!(literal_lines[2], "Another line"); + + // Folded mode with same-line content has current implementation limitation: + // it only captures the same-line content and ignores subsequent block lines. + // This is an acceptable edge case behavior. + assert_eq!(folded_result, "Same line content"); + + // Verify it doesn't contain the subsequent lines (current limitation) + assert!(!folded_result.contains("Next line content")); + assert!(!folded_result.contains("Another line")); + } + + #[test] + fn test_extract_annotation_edge_cases_empty_and_whitespace() { + // Test annotations with only whitespace or empty content + let metadata1 = "@default: |"; + let metadata2 = "@notes:\n \n \n"; // Only whitespace lines + let metadata3 = "@deprecated: >\n"; // Folded with no content + + assert_eq!(extract_annotation(metadata1, "default"), None); + assert_eq!(extract_annotation(metadata2, "notes"), None); + assert_eq!(extract_annotation(metadata3, "deprecated"), None); + } + + #[test] + fn test_required_field_validation_comprehensive() { + // Test all supported boolean representations for @required + let test_cases = vec![ + ("true", Some(true)), + ("True", Some(true)), + ("TRUE", Some(true)), + ("yes", Some(true)), + ("Yes", Some(true)), + ("YES", Some(true)), + ("1", Some(true)), + ("false", Some(false)), + ("False", Some(false)), + ("FALSE", Some(false)), + ("no", Some(false)), + ("No", Some(false)), + ("NO", Some(false)), + ("0", Some(false)), + ("maybe", Some(false)), // Invalid defaults to false + ("invalid", Some(false)), + ]; + + for (input, expected) in test_cases { + let doc_text = format!("Test field.\n---\n@required: {}", input); + let result = parse_field_documentation(&doc_text, "test_field").unwrap(); + assert_eq!(result.0.required, expected, "Failed for input: '{}'", input); + } + + // Test empty @required annotation (should return None, not Some(false)) + let doc_text_empty = "Test field.\n---\n@required:"; + let result_empty = parse_field_documentation(doc_text_empty, "test_field").unwrap(); + assert_eq!(result_empty.0.required, None, "Empty @required should not be parsed"); + } + + #[test] + fn test_units_with_multiline_content() { + // Test units annotation with multiline content + let doc_text = r#"Field with multiline units. 
+--- +@units: | + seconds (range: 1-3600) + Default: [`DEFAULT_TIMEOUT`] seconds +@required: true"#; + + let result = parse_field_documentation(doc_text, "test_field").unwrap(); + let (field_doc, referenced_constants) = result; + + assert!(field_doc.units.is_some()); + let units = field_doc.units.unwrap(); + assert!(units.contains("seconds (range: 1-3600)")); + assert!(units.contains("Default: [`DEFAULT_TIMEOUT`] seconds")); + assert_eq!(field_doc.required, Some(true)); + assert!(referenced_constants.contains("DEFAULT_TIMEOUT")); + } + + #[test] + fn test_extract_annotation_literal_and_folded_ignore_same_line_content() { + // Test that same-line content is ignored for both | and > + let metadata_literal = r#"@notes: | Ignored same line content + Next line content + Another line"#; + + let metadata_folded = r#"@default: > Ignored same line content + Next line content + Another line"#; + + let literal_result = extract_annotation(metadata_literal, "notes").unwrap(); + let folded_result = extract_annotation(metadata_folded, "default").unwrap(); + + // Same-line content should be ignored + assert!(!literal_result.contains("Ignored same line content")); + assert!(!folded_result.contains("Ignored same line content")); + + // Literal mode should preserve all content from subsequent lines + assert!(literal_result.contains("Next line content")); + assert!(literal_result.contains("Another line")); + + let literal_lines: Vec<&str> = literal_result.lines().collect(); + assert_eq!(literal_lines.len(), 2); + assert_eq!(literal_lines[0], "Next line content"); + assert_eq!(literal_lines[1], "Another line"); + + // Folded mode should fold the subsequent lines + assert!(folded_result.contains("Next line content")); + assert!(folded_result.contains("Another line")); + + // In folded mode, lines at same indentation get joined with spaces + let expected_folded = "Next line content Another line"; + assert_eq!(folded_result.trim(), expected_folded); + } } From e5b6f92289013bc68f2f3f2d15ab32645d46d71a Mon Sep 17 00:00:00 2001 From: Francesco Leacche Date: Wed, 4 Jun 2025 14:14:47 +0100 Subject: [PATCH 05/20] add documentation of format --- contrib/tools/config-docs-generator/README.md | 238 +++++++++++++++++- 1 file changed, 237 insertions(+), 1 deletion(-) diff --git a/contrib/tools/config-docs-generator/README.md b/contrib/tools/config-docs-generator/README.md index 720f8a0af1..ee999c22c9 100644 --- a/contrib/tools/config-docs-generator/README.md +++ b/contrib/tools/config-docs-generator/README.md @@ -1,6 +1,6 @@ # Configuration Documentation Generator -A tool that automatically generates comprehensive Markdown documentation for Stacks node TOML configuration options. The documentation is extracted directly from Rust source code comments and generates a complete configuration reference. +This tool automatically generates markdown documentation from Rust configuration structs by extracting specially formatted doc comments. 
## Quick Start @@ -54,6 +54,242 @@ For each configuration field, it extracts: - **Primary**: `docs/generated/configuration-reference.md` - Complete configuration reference - **Intermediate**: `target/doc-generation/extracted-config-docs.json` - Raw extracted data +## Annotation Syntax Guide + +### Overview + +The generator processes doc comments with a structured annotation format: + +```rust +/// [Description text in Markdown format] +/// --- +/// @annotation_name: value +/// @another_annotation: value +pub field_name: Type, +``` + +### General Structure + +- **Description**: Standard Markdown text before the `---` separator +- **Separator**: Three dashes (`---`) separate description from annotations +- **Annotations**: Key-value pairs starting with `@`, each on its own line + +### Supported Annotations + +#### `@default: ` +Specifies the default value for the field. +- **Value Type**: String +- **Multiline Support**: Yes (all modes) +- **Examples**: + ```rust + /// @default: `None` + /// @default: `"localhost:8080"` + /// @default: | + /// Complex multi-line + /// default value + ``` + +#### `@notes: ` +Additional notes or explanations, rendered as a bulleted list. +- **Value Type**: String (parsed into list items) +- **Multiline Support**: Yes (all modes) +- **List Processing**: Lines starting with `-`, `*`, or `•` become list items +- **Examples**: + ```rust + /// @notes: Single line note + /// @notes: + /// - First bullet point + /// - Second bullet point + /// @notes: | + /// Complex formatting with + /// preserved line breaks + ``` + +#### `@deprecated: ` +Marks a field as deprecated with an optional message. +- **Value Type**: String +- **Multiline Support**: Yes (all modes) +- **Examples**: + ```rust + /// @deprecated: Use new_field instead + /// @deprecated: | + /// This field will be removed in v3.0. + /// Migrate to the new configuration system. + ``` + +#### `@toml_example: ` +Provides TOML configuration examples. +- **Value Type**: String +- **Multiline Support**: Yes (all modes) +- **Rendering**: Displayed in `
<pre>` blocks in markdown tables
+- **Examples**:
+  ```rust
+  /// @toml_example: key = "value"
+  /// @toml_example: |
+  ///   [section]
+  ///   key = "value"
+  ///   nested = { a = 1, b = 2 }
+  ```
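+- **Rendered output**: a sketch of how the multiline example above lands in the
+  generated table cell, assuming the `<pre>` block and `&#10;` newline encoding
+  applied by `generate_markdown.rs` (exact escaping may differ):
+  ```markdown
+  <br><br>**Example:**<br><pre>[section]&#10;key = "value"&#10;nested = { a = 1, b = 2 }</pre>
+  ```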
+
+#### `@required: <bool>`
+Indicates whether the field is mandatory.
+- **Value Type**: Boolean (flexible parsing)
+- **Default**: `false` if annotation is omitted
+- **Supported Values**:
+  - `true`, `True`, `TRUE`, `yes`, `Yes`, `YES`, `1` → `true`
+  - `false`, `False`, `FALSE`, `no`, `No`, `NO`, `0` → `false`
+  - Invalid values default to `false`
+  - An empty `@required:` is ignored entirely (no value is recorded)
+- **Examples**:
+  ```rust
+  /// @required: true
+  /// @required: yes
+  /// @required: false
+  ```
+
+#### `@units: <unit>`
+Specifies the unit of measurement for the field.
+- **Value Type**: String
+- **Multiline Support**: Yes (all modes)
+- **Constant References**: Supports `[`CONSTANT_NAME`]` syntax
+- **Examples**:
+  ```rust
+  /// @units: milliseconds
+  /// @units: sats/vByte
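+  /// @units: [`DEFAULT_TIMEOUT_MS`] milliseconds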
+  ```
+
+### Multiline Content Support
+
+All annotations support three multiline modes:
+
+#### Default Literal-like Mode
+Content preserves newlines and relative indentation within the annotation block.
+
+```rust
+/// @notes:
+///   First line with base indentation
+///     Second line more indented
+///   Third line back to base
+///       Fourth line very indented
+```
+
+**Output preserves relative indentation**:
+```
+First line with base indentation
+  Second line more indented
+Third line back to base
+    Fourth line very indented
+```
+
+#### Literal Block Style (`|`)
+Exact preservation of newlines and relative indentation. Uses "clip" chomping (single trailing newline preserved).
+
+```rust
+/// @toml_example: |
+///   [network]
+///   bind = "0.0.0.0:20444"
+///     # Indented comment
+///   timeout = 30
+```
+
+**Output**:
+```
+[network]
+bind = "0.0.0.0:20444"
+  # Indented comment
+timeout = 30
+```
+
+#### Folded Block Style (`>`)
+Folds lines into paragraphs with intelligent spacing. More-indented lines preserved as literal blocks.
+
+```rust
+/// @notes: >
+///   This is a long paragraph that will be
+///   folded into a single line with spaces
+///   between the original line breaks.
+///
+///   This is a second paragraph after a blank line.
+///
+///     This indented block will be preserved
+///     exactly as written, like code.
+///
+///   Back to normal folded paragraph text.
+```
+
+**Output**:
+```
+This is a long paragraph that will be folded into a single line with spaces between the original line breaks.
+
+This is a second paragraph after a blank line.
+
+  This indented block will be preserved
+  exactly as written, like code.
+
+Back to normal folded paragraph text.
+```
+
+### Same-line Content
+
+Content can start immediately after the colon for default multiline mode:
+
+```rust
+/// @default: immediate content
+/// @notes: Content that starts immediately
+///   and continues on the next line
+```
+
+For literal (`|`) and folded (`>`) modes, content must start on the next line:
+
+```rust
+/// @notes: |
+///   Content starts here on the next line
+///   All content must be indented on subsequent lines
+/// @deprecated: >
+///   Folded content also starts on the next line
+///   and will be joined appropriately
+```
+
+### Complete Example
+
+```rust
+/// Timeout duration for network connections.
+///
+/// This setting controls how long the node will wait for network operations
+/// to complete before timing out. Setting this too low may cause connection
+/// failures on slow networks.
+/// ---
+/// @default: [`DEFAULT_NETWORK_TIMEOUT`]
+/// @required: true
+/// @units: milliseconds
+/// @notes:
+///   - Must be greater than 0
+///   - Recommended range: 1000-30000
+///   - Higher values needed for slow connections
+/// @toml_example: |
+///   [network]
+///   timeout = 15000  # 15 seconds
+/// @deprecated: >
+///   Use the new `connection_timeout` setting instead.
+///   This field will be removed in version 3.0.
+pub timeout_ms: u64,
+```
+
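+Assuming this field belongs to a struct mapped to a `[network]` section, the row
+emitted for it in the generated reference would look roughly like this (a
+simplified sketch; the exact anchors, part ordering, HTML escaping, and `<br>`
+line breaks are determined by `generate_markdown.rs`):
+
+```markdown
+| ~~[timeout_ms](#network-timeout_ms)~~ | Timeout duration for network connections.<br><br>**Notes:**<br>- Must be greater than 0<br><br>**⚠️ DEPRECATED:** Use the new `connection_timeout` setting instead.<br><br>**Example:**<br><pre>[network]&#10;timeout = 15000  # 15 seconds</pre><br><br>**Units:** milliseconds | **Required** |
+```
+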
+### Best Practices
+
+1. **Choose the right multiline mode**:
+   - Default mode: General text with preserved formatting
+   - Literal (`|`): Code examples, exact formatting required
+   - Folded (`>`): Documentation prose, automatic paragraph wrapping
+
+2. **Use constant references in `@default` when appropriate**
+
+### Integration with Rust Documentation
+
+This system integrates with standard Rust documentation tools:
+- Doc comments remain valid for `rustdoc`
+- Annotations are ignored by standard documentation generators
+- Full compatibility with existing documentation workflows
+
 ## Adding New Configuration Structs
 
 ### 1. Update the Target List

From dcef804573051f212b512197f48179f2f9517cc4 Mon Sep 17 00:00:00 2001
From: Francesco Leacche 
Date: Wed, 4 Jun 2025 14:16:43 +0100
Subject: [PATCH 06/20] improve html formatting

---
 .../src/generate_markdown.rs                  | 573 +++++++-----------
 1 file changed, 234 insertions(+), 339 deletions(-)

diff --git a/contrib/tools/config-docs-generator/src/generate_markdown.rs b/contrib/tools/config-docs-generator/src/generate_markdown.rs
index 82f636ad59..3c7e5d1cec 100644
--- a/contrib/tools/config-docs-generator/src/generate_markdown.rs
+++ b/contrib/tools/config-docs-generator/src/generate_markdown.rs
@@ -29,6 +29,8 @@ struct FieldDoc {
     notes: Option<Vec<String>>,
     deprecated: Option<String>,
     toml_example: Option<String>,
+    required: Option<bool>,
+    units: Option<String>,
 }
 
 #[derive(Debug, Serialize, Deserialize)]
@@ -307,26 +309,43 @@ fn generate_field_row(
         let escaped_example = clean_example
             .replace('&', "&amp;")
             .replace('<', "&lt;")
-            .replace('>', "&gt;");
+            .replace('>', "&gt;")
+            .replace('\n', "&#10;"); // Use HTML entity for newline to avoid <br> conversion
         let example_section = format!(
             "<br><br>**Example:**<br><pre>{}</pre>",
-            escaped_example.replace('\n', "<br>")
+            escaped_example // HTML entities will be rendered as newlines by <pre>
         );
         description_parts.push(example_section);
     }
 
+    // Add units information if present
+    if let Some(units) = &field.units {
+        let units_text = process_intralinks_with_context(units, global_context, struct_name);
+        description_parts.push(format!("<br><br>
**Units:** {}", units_text)); + } + let description = if description_parts.is_empty() { "*No description available*".to_string() } else { description_parts.join("") }; - // Default value column - let default_value = if let Some(default) = &field.default_value { - process_intralinks_with_context(default, global_context, struct_name) - } else { - "*Required*".to_string() + // Default value column - handle required fields + let default_value = match (&field.required, &field.default_value) { + // If explicitly marked as required=true, show as required regardless of default + (Some(true), _) => "**Required**".to_string(), + // If explicitly marked as required=false and has default, show the default + (Some(false), Some(default)) => { + process_intralinks_with_context(default, global_context, struct_name) + } + // If explicitly marked as required=false but no default, show as optional + (Some(false), None) => "*Optional*".to_string(), + // If required field is not specified, use default behavior (backward compatibility) + (None, Some(default)) => { + process_intralinks_with_context(default, global_context, struct_name) + } + (None, None) => "**Required**".to_string(), }; output.push_str(&format!( @@ -510,6 +529,8 @@ mod tests { notes: None, deprecated: None, toml_example: None, + required: None, + units: None, } } @@ -659,7 +680,7 @@ mod tests { assert!(output.contains("basic_field")); assert!(output.contains("A basic field description")); - assert!(output.contains("*Required*")); + assert!(output.contains("**Required**")); assert!(output.contains("")); } @@ -675,7 +696,7 @@ mod tests { assert!(output.contains("field_with_default")); assert!(output.contains("Field with default value")); assert!(output.contains("`42`")); - assert!(!output.contains("*Required*")); + assert!(!output.contains("**Required**")); } #[test] @@ -687,7 +708,7 @@ mod tests { generate_field_row(&mut output, &field, "TestStruct", &global_context).unwrap(); assert!(output.contains("required_field")); - assert!(output.contains("*Required*")); + assert!(output.contains("**Required**")); } #[test] @@ -735,9 +756,9 @@ mod tests { } #[test] - fn test_generate_field_row_toml_example_with_pipe() { - let mut field = create_field_doc("field_with_pipe_example", "Field with pipe example"); - field.toml_example = Some("|\nkey = \"value\"\nnumber = 42".to_string()); + fn test_generate_field_row_toml_example_preserves_newlines() { + let mut field = create_field_doc("multiline_example", "Field with multiline TOML example"); + field.toml_example = Some("key = \"value\"\nnested = {\n sub_key = \"sub_value\"\n}".to_string()); let global_context = create_mock_global_context(); let mut output = String::new(); @@ -745,417 +766,291 @@ mod tests { assert!(output.contains("**Example:**")); assert!(output.contains("
"));
-        assert!(output.contains("key = \"value\""));
-        // The TOML content should not contain the leading pipe character
-        assert!(!output.contains("
|"));
-        assert!(!output.contains("|\nkey"));
         assert!(output.contains("
")); - } - #[test] - fn test_generate_field_row_all_attributes() { - let mut field = create_field_doc("complex_field", "A complex field"); - field.default_value = Some("`\"default\"`".to_string()); - field.notes = Some(vec!["Important note".to_string()]); - field.deprecated = Some("Use better_field instead".to_string()); - field.toml_example = Some("field = \"example\"".to_string()); - let global_context = create_mock_global_context(); - let mut output = String::new(); + // Find the code block content + let pre_start = output.find("
").unwrap();
+        let pre_end = output.find("
").unwrap(); + let code_content = &output[pre_start..pre_end + "
".len()]; - generate_field_row(&mut output, &field, "TestStruct", &global_context).unwrap(); + // Should NOT contain
tags inside the code block + assert!(!code_content.contains("
"), "Code block should not contain
tags"); - assert!(output.contains("~~")); // deprecated - assert!(output.contains("A complex field")); - assert!(output.contains("**Notes:**")); - assert!(output.contains("- Important note")); - assert!(output.contains("**⚠️ DEPRECATED:**")); - assert!(output.contains("Use better_field instead")); - assert!(output.contains("**Example:**")); - assert!(output.contains("
"));
-        assert!(output.contains("`\"default\"`"));
-        assert!(output.contains("
")); + // Should contain HTML entities for newlines instead + assert!(code_content.contains(" "), "Code block should contain HTML entities for newlines"); + + // Should contain the key-value pairs + assert!(code_content.contains("key = \"value\"")); + assert!(code_content.contains("sub_key = \"sub_value\"")); + + // Should contain the actual newline characters in the original TOML + assert!(field.toml_example.as_ref().unwrap().contains('\n')); } #[test] - fn test_generate_field_row_empty_description_parts() { - let field = FieldDoc { - name: "minimal_field".to_string(), - description: "".to_string(), - default_value: None, - notes: None, - deprecated: None, - toml_example: None, - }; + fn test_generate_field_row_hierarchical_lists() { + let field = create_field_doc( + "complex_list_field", + r"Field with hierarchical lists: +- Main item 1 + - Sub item 1a + - Sub-sub item 1a1 + - Sub item 1b +- Main item 2 + - Sub item 2a", + ); let global_context = create_mock_global_context(); let mut output = String::new(); generate_field_row(&mut output, &field, "TestStruct", &global_context).unwrap(); - assert!(output.contains("minimal_field")); - assert!(output.contains("*No description available*")); - assert!(output.contains("*Required*")); + // Verify that indentation is preserved with   entities + assert!(output.contains("- Main item 1")); + assert!(output.contains("  - Sub item 1a")); + assert!(output.contains("    - Sub-sub item 1a1")); + assert!(output.contains("  - Sub item 1b")); + assert!(output.contains("- Main item 2")); + assert!(output.contains("  - Sub item 2a")); } #[test] - fn test_field_name_escaping_in_row() { - let field = create_field_doc("field|with[special]chars", "Description"); + fn test_generate_field_row_hierarchical_lists_with_intralinks() { + let field = create_field_doc( + "list_with_links", + r"Field with links in hierarchical lists: +- Main item with [`TEST_CONSTANT`] + - Sub item with [`NodeConfig::test_field`] + - Sub-sub item with [`other_field`]", + ); let global_context = create_mock_global_context(); let mut output = String::new(); generate_field_row(&mut output, &field, "TestStruct", &global_context).unwrap(); - assert!(output.contains("field\\|with\\[special\\]chars")); + // Verify that indentation is preserved AND intra-links are processed + assert!(output.contains("- Main item with `42`")); // constant resolved + assert!( + output.contains("  - Sub item with [[node].test_field](#node-test_field)") + ); // field link with indentation + assert!(output.contains( + "    - Sub-sub item with [[miner].other_field](#miner-other_field)" + )); // cross-struct field link with indentation } #[test] - fn test_field_anchor_id_generation() { - let field = create_field_doc("test_anchor", "Test anchor generation"); + fn test_generate_field_row_with_required_true() { + let mut field = create_field_doc("required_field", "A required field"); + field.required = Some(true); + field.default_value = Some("`default_value`".to_string()); // Even with default, should show as required let global_context = create_mock_global_context(); let mut output = String::new(); - generate_field_row(&mut output, &field, "NodeConfig", &global_context).unwrap(); + generate_field_row(&mut output, &field, "TestStruct", &global_context).unwrap(); - assert!(output.contains("")); - assert!(output.contains("(#node-test_anchor)")); + assert!(output.contains("required_field")); + assert!(output.contains("A required field")); + assert!(output.contains("**Required**")); + 
assert!(!output.contains("`default_value`")); // Should not show default when required=true } - // IV. Struct Section Generation Tests - #[test] - fn test_generate_struct_section_description() { - let struct_doc = create_struct_doc("TestStruct", Some("This is a test struct"), vec![]); + fn test_generate_field_row_with_required_false_and_default() { + let mut field = create_field_doc("optional_field", "An optional field"); + field.required = Some(false); + field.default_value = Some("`42`".to_string()); let global_context = create_mock_global_context(); let mut output = String::new(); - generate_struct_section(&mut output, &struct_doc, &global_context).unwrap(); + generate_field_row(&mut output, &field, "TestStruct", &global_context).unwrap(); - assert!(output.contains("## [teststruct]")); - assert!(output.contains("This is a test struct")); - assert!(output.contains("*No configurable parameters documented.*")); + assert!(output.contains("optional_field")); + assert!(output.contains("An optional field")); + assert!(output.contains("`42`")); + assert!(!output.contains("**Required**")); } #[test] - fn test_generate_struct_section_no_description() { - let struct_doc = create_struct_doc("TestStruct", None, vec![]); + fn test_generate_field_row_with_required_false_no_default() { + let mut field = create_field_doc("optional_field", "An optional field"); + field.required = Some(false); + field.default_value = None; let global_context = create_mock_global_context(); let mut output = String::new(); - generate_struct_section(&mut output, &struct_doc, &global_context).unwrap(); + generate_field_row(&mut output, &field, "TestStruct", &global_context).unwrap(); - assert!(output.contains("## [teststruct]")); - assert!(!output.contains("This is a test struct")); - assert!(output.contains("*No configurable parameters documented.*")); + assert!(output.contains("optional_field")); + assert!(output.contains("An optional field")); + assert!(output.contains("*Optional*")); + assert!(!output.contains("**Required**")); } #[test] - fn test_generate_struct_section_field_sorting() { - let normal_field = create_field_doc("b_normal", "Normal field"); - let mut deprecated_field = create_field_doc("a_deprecated", "Deprecated field"); - deprecated_field.deprecated = Some("Old field".to_string()); - let another_normal = create_field_doc("c_normal", "Another normal field"); - - let struct_doc = create_struct_doc( - "TestStruct", - None, - vec![deprecated_field, normal_field, another_normal], - ); + fn test_generate_field_row_with_units() { + let mut field = create_field_doc("timeout_field", "A timeout field"); + field.units = Some("milliseconds".to_string()); + field.default_value = Some("`5000`".to_string()); let global_context = create_mock_global_context(); let mut output = String::new(); - generate_struct_section(&mut output, &struct_doc, &global_context).unwrap(); - - // Normal fields should come first, then deprecated - let b_normal_pos = output.find("b_normal").unwrap(); - let c_normal_pos = output.find("c_normal").unwrap(); - let a_deprecated_pos = output.find("a_deprecated").unwrap(); - - assert!(b_normal_pos < c_normal_pos); - assert!(c_normal_pos < a_deprecated_pos); - } - - // V. 
Markdown Escaping Tests - - #[test] - fn test_escape_markdown_various_chars() { - assert_eq!(escape_markdown("test|pipe"), "test\\|pipe"); - assert_eq!(escape_markdown("test[bracket]"), "test\\[bracket\\]"); - assert_eq!(escape_markdown("normal text"), "normal text"); - } + generate_field_row(&mut output, &field, "TestStruct", &global_context).unwrap(); - #[test] - fn test_escape_markdown_table_various_chars() { - assert_eq!(escape_markdown_table("test|pipe"), "test\\|pipe"); - assert_eq!(escape_markdown_table("line1\nline2"), "line1
line2"); - assert_eq!( - escape_markdown_table("line1\nwith|pipe"), - "line1
with\\|pipe" - ); + assert!(output.contains("timeout_field")); + assert!(output.contains("A timeout field")); + assert!(output.contains("**Units:** milliseconds")); + assert!(output.contains("`5000`")); } - // VI. Intra-link Processing Tests - #[test] - fn test_intralink_no_links() { + fn test_generate_field_row_with_units_and_constants() { + let mut field = create_field_doc("timeout_field", "A timeout field"); + field.units = Some("[`TEST_CONSTANT`] milliseconds".to_string()); + field.default_value = Some("`5000`".to_string()); let global_context = create_mock_global_context(); - let text = "This is normal text without any links"; - let result = process_intralinks_with_context(text, &global_context, "TestStruct"); - assert_eq!(result, text); - } + let mut output = String::new(); - #[test] - fn test_intralink_to_field_in_same_struct() { - let global_context = create_mock_global_context(); - let text = "See [`NodeConfig::test_field`] for details"; - let result = process_intralinks_with_context(text, &global_context, "NodeConfig"); - assert!(result.contains("[test_field](#node-test_field)")); - } + generate_field_row(&mut output, &field, "TestStruct", &global_context).unwrap(); - #[test] - fn test_intralink_to_field_in_different_struct() { - let global_context = create_mock_global_context(); - let text = "See [`MinerConfig::other_field`] for details"; - let result = process_intralinks_with_context(text, &global_context, "NodeConfig"); - assert!(result.contains("[[miner].other_field](#miner-other_field)")); + assert!(output.contains("timeout_field")); + assert!(output.contains("A timeout field")); + assert!(output.contains("**Units:** `42` milliseconds")); // Constant should be resolved + assert!(output.contains("`5000`")); } #[test] - fn test_intralink_to_standalone_field_in_same_struct() { + fn test_generate_field_row_all_new_features() { + let mut field = create_field_doc("complex_field", "A field with all new features"); + field.required = Some(true); + field.units = Some("seconds".to_string()); + field.notes = Some(vec!["Important note".to_string()]); + field.toml_example = Some("field = 30".to_string()); let global_context = create_mock_global_context(); - let text = "See [`test_field`] for details"; - let result = process_intralinks_with_context(text, &global_context, "NodeConfig"); - assert!(result.contains("[test_field](#node-test_field)")); - } + let mut output = String::new(); - #[test] - fn test_intralink_to_standalone_field_in_different_struct() { - let global_context = create_mock_global_context(); - let text = "See [`other_field`] for details"; - let result = process_intralinks_with_context(text, &global_context, "NodeConfig"); - assert!(result.contains("[[miner].other_field](#miner-other_field)")); - } + generate_field_row(&mut output, &field, "TestStruct", &global_context).unwrap(); - #[test] - fn test_intralink_to_constant() { - let global_context = create_mock_global_context(); - let text = "The default value is [`TEST_CONSTANT`]"; - let result = process_intralinks_with_context(text, &global_context, "TestStruct"); - assert!(result.contains("42")); - assert!(!result.contains("TEST_CONSTANT")); + assert!(output.contains("complex_field")); + assert!(output.contains("A field with all new features")); + assert!(output.contains("**Required**")); + assert!(output.contains("**Units:** seconds")); + assert!(output.contains("**Notes:**")); + assert!(output.contains("- Important note")); + assert!(output.contains("**Example:**")); + assert!(output.contains("field = 30")); } #[test] - 
fn test_intralink_unresolved_struct_field_reference() { + fn test_generate_field_row_units_with_constants_and_intralinks() { + let mut field = create_field_doc("timeout_field", "A timeout field"); + field.units = Some("[`TEST_CONSTANT`] seconds (see [`NodeConfig::test_field`])".to_string()); + field.default_value = Some("`30`".to_string()); let global_context = create_mock_global_context(); - let text = "See [`UnknownStruct::unknown_field`] for details"; - let result = process_intralinks_with_context(text, &global_context, "TestStruct"); - assert!(result.contains("UnknownStruct::unknown_field")); - assert!(!result.contains("[`")); - } + let mut output = String::new(); - #[test] - fn test_intralink_unresolved_standalone_reference() { - let global_context = create_mock_global_context(); - let text = "The value [`unknown_reference`] is not found"; - let result = process_intralinks_with_context(text, &global_context, "TestStruct"); - assert!(result.contains("unknown_reference")); - assert!(!result.contains("[`")); - } + generate_field_row(&mut output, &field, "TestStruct", &global_context).unwrap(); - #[test] - fn test_intralink_malformed_reference() { - let global_context = create_mock_global_context(); - let text = "See [`Struct::Field::Extra`] for details"; - let result = process_intralinks_with_context(text, &global_context, "TestStruct"); - assert!(result.contains("Struct::Field::Extra")); - assert!(!result.contains("[`")); + assert!(output.contains("timeout_field")); + assert!(output.contains("**Units:**")); + // Constants should be resolved and intralinks processed + assert!(output.contains("`42`")); // TEST_CONSTANT resolved + assert!(output.contains("[[node].test_field](#node-test_field)")); // Cross-struct reference format } #[test] - fn test_intralink_multiple_links_in_text() { + fn test_generate_field_row_required_field_combinations() { let global_context = create_mock_global_context(); - let text = "See [`TEST_CONSTANT`] and [`NodeConfig::test_field`] and [`unknown_ref`]"; - let result = process_intralinks_with_context(text, &global_context, "NodeConfig"); - assert!(result.contains("42")); // constant resolved - assert!(result.contains("[test_field](#node-test_field)")); // field resolved - assert!(result.contains("unknown_ref")); // unresolved kept as text - } - - // VII. 
Global Context Building Tests - - #[test] - fn test_build_global_context_struct_anchors() { - let structs = vec![ - create_struct_doc("NodeConfig", None, vec![]), - create_struct_doc("MinerConfig", None, vec![]), - ]; - let config_docs = create_config_docs(structs); - let context = build_global_context(&config_docs); - - assert_eq!( - context.struct_to_anchor.get("NodeConfig"), - Some(&"#node".to_string()) - ); - assert_eq!( - context.struct_to_anchor.get("MinerConfig"), - Some(&"#miner".to_string()) - ); + // Test required=true with default (should show Required, not default) + let mut field1 = create_field_doc("req_with_default", "Required with default"); + field1.required = Some(true); + field1.default_value = Some("`ignored`".to_string()); + let mut output1 = String::new(); + generate_field_row(&mut output1, &field1, "TestStruct", &global_context).unwrap(); + assert!(output1.contains("**Required**")); + assert!(!output1.contains("`ignored`")); + + // Test required=false with default (should show default) + let mut field2 = create_field_doc("opt_with_default", "Optional with default"); + field2.required = Some(false); + field2.default_value = Some("`42`".to_string()); + let mut output2 = String::new(); + generate_field_row(&mut output2, &field2, "TestStruct", &global_context).unwrap(); + assert!(output2.contains("`42`")); + assert!(!output2.contains("**Required**")); + assert!(!output2.contains("*Optional*")); + + // Test required=false without default (should show Optional) + let mut field3 = create_field_doc("opt_no_default", "Optional without default"); + field3.required = Some(false); + field3.default_value = None; + let mut output3 = String::new(); + generate_field_row(&mut output3, &field3, "TestStruct", &global_context).unwrap(); + assert!(output3.contains("*Optional*")); + assert!(!output3.contains("**Required**")); + + // Test no required field specified (backward compatibility) + let mut field4 = create_field_doc("legacy_field", "Legacy field"); + field4.required = None; + field4.default_value = Some("`legacy`".to_string()); + let mut output4 = String::new(); + generate_field_row(&mut output4, &field4, "TestStruct", &global_context).unwrap(); + assert!(output4.contains("`legacy`")); + assert!(!output4.contains("**Required**")); } #[test] - fn test_build_global_context_field_struct_mapping() { - let field1 = create_field_doc("field1", "Description"); - let field2 = create_field_doc("field2", "Description"); - let structs = vec![ - create_struct_doc("NodeConfig", None, vec![field1]), - create_struct_doc("MinerConfig", None, vec![field2]), - ]; - let config_docs = create_config_docs(structs); - let context = build_global_context(&config_docs); - - assert_eq!( - context.field_to_struct.get("field1"), - Some(&("NodeConfig".to_string(), "#node".to_string())) - ); - assert_eq!( - context.field_to_struct.get("field2"), - Some(&("MinerConfig".to_string(), "#miner".to_string())) + fn test_generate_field_row_comprehensive_integration() { + // Test a field with all possible attributes + let mut field = create_field_doc( + "comprehensive_field", + "A comprehensive field demonstrating all features.\n\nThis includes multiple paragraphs." 
); - } - - #[test] - fn test_build_global_context_constants() { - let config_docs = create_config_docs(vec![]); - let context = build_global_context(&config_docs); - - // Should have no constants if none are referenced - assert_eq!(context.constants.len(), 0); - } + field.default_value = Some("`[\"default\", \"values\"]`".to_string()); + field.required = Some(false); + field.units = Some("milliseconds (range: 100-5000)".to_string()); + field.notes = Some(vec![ + "This is the first note with [`TEST_CONSTANT`]".to_string(), + "This is the second note referencing [`NodeConfig::test_field`]".to_string(), + ]); + field.deprecated = Some("Use new_comprehensive_field instead. Will be removed in v3.0.".to_string()); + field.toml_example = Some("comprehensive_field = [\n \"value1\",\n \"value2\"\n]".to_string()); - // Helper function tests - - #[test] - fn test_is_deprecated() { - let normal_field = create_field_doc("normal", "Normal field"); - let mut deprecated_field = create_field_doc("deprecated", "Deprecated field"); - deprecated_field.deprecated = Some("Use other field".to_string()); - - assert!(!is_deprecated(&normal_field)); - assert!(is_deprecated(&deprecated_field)); - } - - #[test] - fn test_build_global_context_with_referenced_constants() { - let mut config_docs = create_config_docs(vec![]); - config_docs - .referenced_constants - .insert("TEST_CONSTANT".to_string(), Some("42".to_string())); - config_docs - .referenced_constants - .insert("STRING_CONST".to_string(), Some("\"hello\"".to_string())); - config_docs - .referenced_constants - .insert("UNRESOLVED_CONST".to_string(), None); - - let context = build_global_context(&config_docs); - - // Only resolved constants should be in the context - assert_eq!(context.constants.len(), 2); - assert_eq!( - context.constants.get("TEST_CONSTANT"), - Some(&"42".to_string()) - ); - assert_eq!( - context.constants.get("STRING_CONST"), - Some(&"\"hello\"".to_string()) - ); - assert!(!context.constants.contains_key("UNRESOLVED_CONST")); - } - - #[test] - fn test_build_global_context_empty_referenced_constants() { - let config_docs = create_config_docs(vec![]); - let context = build_global_context(&config_docs); - - // Should have no constants if none are referenced - assert_eq!(context.constants.len(), 0); - } - - #[test] - fn test_generate_field_row_toml_example_no_literal_br_tags() { - let mut field = - create_field_doc("field_with_multiline_example", "Field with multiline TOML"); - field.toml_example = Some( - "txs_to_consider = \"TokenTransfer,ContractCall\"\nother_setting = \"value\"" - .to_string(), - ); let global_context = create_mock_global_context(); let mut output = String::new(); - generate_field_row(&mut output, &field, "TestStruct", &global_context).unwrap(); - - // Verify that the TOML example is properly formatted with HTML pre/code blocks - assert!(output.contains("
"));
-        assert!(output.contains("txs_to_consider = \"TokenTransfer,ContractCall\""));
-        assert!(output.contains("other_setting = \"value\""));
-        assert!(output.contains("
")); - - // This is the critical test: ensure we don't have malformed markdown like - // ```toml
content
``` which renders literal
tags - assert!(!output.contains("```toml
")); - assert!(!output.contains("
```")); + generate_field_row(&mut output, &field, "NodeConfig", &global_context).unwrap(); - // Verify proper line separation with
within the code block - assert!(output.contains("ContractCall\"
other_setting")); - } + // Verify field name with deprecation strikethrough + assert!(output.contains("~~")); + assert!(output.contains("comprehensive_field")); - #[test] - fn test_generate_field_row_hierarchical_lists() { - let field = create_field_doc( - "complex_list_field", - r"Field with hierarchical lists: -- Main item 1 - - Sub item 1a - - Sub-sub item 1a1 - - Sub item 1b -- Main item 2 - - Sub item 2a", - ); - let global_context = create_mock_global_context(); - let mut output = String::new(); + // Verify description processing + assert!(output.contains("A comprehensive field")); + assert!(output.contains("This includes multiple paragraphs")); - generate_field_row(&mut output, &field, "TestStruct", &global_context).unwrap(); + // Verify default value (since required=false and has default) + assert!(output.contains("`[\"default\", \"values\"]`")); + assert!(!output.contains("**Required**")); + assert!(!output.contains("*Optional*")); - // Verify that indentation is preserved with   entities - assert!(output.contains("- Main item 1")); - assert!(output.contains("  - Sub item 1a")); - assert!(output.contains("    - Sub-sub item 1a1")); - assert!(output.contains("  - Sub item 1b")); - assert!(output.contains("- Main item 2")); - assert!(output.contains("  - Sub item 2a")); - } + // Verify units + assert!(output.contains("**Units:** milliseconds (range: 100-5000)")); - #[test] - fn test_generate_field_row_hierarchical_lists_with_intralinks() { - let field = create_field_doc( - "list_with_links", - r"Field with links in hierarchical lists: -- Main item with [`TEST_CONSTANT`] - - Sub item with [`NodeConfig::test_field`] - - Sub-sub item with [`other_field`]", - ); - let global_context = create_mock_global_context(); - let mut output = String::new(); + // Verify notes with intralink processing + assert!(output.contains("**Notes:**")); + assert!(output.contains("- This is the first note with `42`")); // Constant resolved + assert!(output.contains("- This is the second note referencing [test_field](#node-test_field)")); // Intralink - generate_field_row(&mut output, &field, "TestStruct", &global_context).unwrap(); + // Verify deprecation warning + assert!(output.contains("**⚠️ DEPRECATED:**")); + assert!(output.contains("Use new_comprehensive_field instead")); - // Verify that indentation is preserved AND intra-links are processed - assert!(output.contains("- Main item with `42`")); // constant resolved - assert!( - output.contains("  - Sub item with [[node].test_field](#node-test_field)") - ); // field link with indentation - assert!(output.contains( - "    - Sub-sub item with [[miner].other_field](#miner-other_field)" - )); // cross-struct field link with indentation + // Verify TOML example with proper formatting + assert!(output.contains("**Example:**")); + assert!(output.contains("
"));
+        assert!(output.contains("comprehensive_field = ["));
+        assert!(output.contains("\"value1\","));
+        assert!(output.contains("\"value2\""));
+        assert!(output.contains("
")); } } From d5084f968f2f50ec8c62cc3eac09e6c2198e834d Mon Sep 17 00:00:00 2001 From: Francesco Leacche Date: Wed, 4 Jun 2025 14:26:15 +0100 Subject: [PATCH 07/20] update configuration-reference.md --- docs/generated/configuration-reference.md | 164 +++++++++++----------- 1 file changed, 82 insertions(+), 82 deletions(-) diff --git a/docs/generated/configuration-reference.md b/docs/generated/configuration-reference.md index c89de47c04..99a22f46fe 100644 --- a/docs/generated/configuration-reference.md +++ b/docs/generated/configuration-reference.md @@ -18,8 +18,8 @@ The configuration is automatically generated from the Rust source code documenta | Parameter | Description | Default | |-----------|-------------|----------| -| [address](#initial_balances-address) | The Stacks address to receive the initial STX balance.
Must be a valid "non-mainnet" Stacks address (e.g., "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2"). | No default. This field is required. | -| [amount](#initial_balances-amount) | The amount of microSTX to allocate to the address at node startup.
1 STX = 1,000,000 microSTX.

**Notes:**
- Units: microSTX. | No default. This field is required. | +| [address](#initial_balances-address) | The Stacks address to receive the initial STX balance.
Must be a valid "non-mainnet" Stacks address (e.g., "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2"). | **Required** | +| [amount](#initial_balances-amount) | The amount of microSTX to allocate to the address at node startup.
1 STX = 1,000,000 microSTX.

**Units:** microSTX | **Required** | ## [event_observer] @@ -27,9 +27,9 @@ The configuration is automatically generated from the Rust source code documenta | Parameter | Description | Default | |-----------|-------------|----------| | [disable_retries](#event_observer-disable_retries) | Controls whether the node should retry sending event notifications if delivery fails or times out.

- If `false` (default): The node will attempt to deliver event notifications persistently.
If an attempt fails (due to network error, timeout, or a non-200 HTTP response), the event
payload is saved and retried indefinitely. This ensures that all events will eventually be
delivered. However, this can cause the node's block processing to stall if an observer is
down, or indefinitely fails to process the event.

- If `true`: The node will make only a single attempt to deliver each event notification.
If this single attempt fails for any reason, the event is discarded, and no further retries
will be made for that specific event.

**Notes:**
- **Warning:** Setting this to `true` can lead to missed events if the observer endpoint is temporarily unavailable or experiences issues. | `false` (retries are enabled) | -| [endpoint](#event_observer-endpoint) | URL endpoint (hostname and port) where event notifications will be sent via HTTP POST requests.
-| [endpoint](#event_observer-endpoint) | URL endpoint (hostname and port) where event notifications will be sent via HTTP POST requests.<br><br>The node will automatically prepend `http://` to this endpoint and append the<br>specific event path (e.g., `/new_block`, `/new_mempool_tx`).<br>Therefore, this value should be specified as `hostname:port` (e.g., "localhost:3700").<br><br>This should point to a service capable of receiving and processing Stacks event data.<br><br>**Notes:**<br>- **Do NOT include the `http://` scheme in this configuration value.**<br><br>**Example:**<br><pre><code>  endpoint = "localhost:3700"<br></code></pre> | No default. This field is required. |
-| [events_keys](#event_observer-events_keys) | List of event types that this observer is configured to receive.<br><br>Each string in the list specifies an event category or a specific event to subscribe to.<br>For an observer to receive any notifications, this list must contain at least one valid key.<br>Providing an invalid string that doesn't match any of the valid formats below will cause<br>the node to panic on startup when parsing the configuration.<br><br>All observers, regardless of their `events_keys` configuration, implicitly receive<br>payloads on the `/attachments/new` endpoint.<br><br>Valid Event Keys:<br>- `"*"`: Subscribes to a broad set of common events.<br>&nbsp;&nbsp;- Events delivered to:<br>&nbsp;&nbsp;&nbsp;&nbsp;- `/new_block`: For blocks containing transactions that generate STX, FT, NFT, or smart contract events.<br>&nbsp;&nbsp;&nbsp;&nbsp;- `/new_microblocks`: For all new microblock streams. Note: Only until epoch 2.5.<br>&nbsp;&nbsp;&nbsp;&nbsp;- `/new_mempool_tx`: For new mempool transactions.<br>&nbsp;&nbsp;&nbsp;&nbsp;- `/drop_mempool_tx`: For dropped mempool transactions.<br>&nbsp;&nbsp;&nbsp;&nbsp;- `/new_burn_block`: For new burnchain blocks.<br>&nbsp;&nbsp;- Note: This key does NOT by itself subscribe to `/stackerdb_chunks` or `/proposal_response`.<br><br>- `"stx"`: Subscribes to STX token operation events (transfer, mint, burn, lock).<br>&nbsp;&nbsp;- Events delivered to: `/new_block`, `/new_microblocks`.<br>&nbsp;&nbsp;- Payload details: The "events" array in the delivered payloads will be filtered to include only STX-related events.<br><br>- `"memtx"`: Subscribes to new and dropped mempool transaction events.<br>&nbsp;&nbsp;- Events delivered to: `/new_mempool_tx`, `/drop_mempool_tx`.<br><br>- `"burn_blocks"`: Subscribes to new burnchain block events.<br>&nbsp;&nbsp;- Events delivered to: `/new_burn_block`.<br><br>- `"microblocks"`: Subscribes to new microblock stream events.<br>&nbsp;&nbsp;- Events delivered to: `/new_microblocks`.<br>&nbsp;&nbsp;- Payload details:<br>&nbsp;&nbsp;&nbsp;&nbsp;- The "transactions" field will contain all transactions from the microblocks.<br>&nbsp;&nbsp;&nbsp;&nbsp;- The "events" field will contain STX, FT, NFT, or specific smart contract events<br>*only if* this observer is also subscribed to those more specific event types<br>(e.g., via `"stx"`, `"*"`, a specific contract event key, or a specific asset identifier key).<br>&nbsp;&nbsp;- Note: Only until epoch 2.5.<br><br>- `"stackerdb"`: Subscribes to StackerDB chunk update events.<br>&nbsp;&nbsp;- Events delivered to: `/stackerdb_chunks`.<br><br>- `"block_proposal"`: Subscribes to block proposal response events (for Nakamoto consensus).<br>&nbsp;&nbsp;- Events delivered to: `/proposal_response`.<br><br>- Smart Contract Event: Subscribes to a specific smart contract event.<br>&nbsp;&nbsp;- Format: `"{contract_address}.{contract_name}::{event_name}"`<br>(e.g., `ST0000000000000000000000000000000000000000.my-contract::my-custom-event`)<br>&nbsp;&nbsp;- Events delivered to: `/new_block`, `/new_microblocks`.<br>&nbsp;&nbsp;- Payload details: The "events" array in the delivered payloads will be filtered for this specific event.<br><br>- Asset Identifier for FT/NFT Events: Subscribes to events (mint, burn, transfer) for a specific Fungible Token (FT) or Non-Fungible Token (NFT).<br>&nbsp;&nbsp;- Format: `"{contract_address}.{contract_name}.{asset_name}"`<br>(e.g., for an FT: `ST0000000000000000000000000000000000000000.my-ft-contract.my-fungible-token`)<br>&nbsp;&nbsp;- Events delivered to: `/new_block`, `/new_microblocks`.<br>&nbsp;&nbsp;- Payload details: The "events" array in the delivered payloads will be filtered for events related to the specified asset.<br><br>**Notes:**<br>- For a more detailed documentation check the event-dispatcher docs in the `/docs` folder.<br><br>**Example:**<br><pre><code>  events_keys = [<br>"burn_blocks",<br>"memtx",<br>"ST0000000000000000000000000000000000000000.my-contract::my-custom-event",<br>"ST0000000000000000000000000000000000000000.token-contract.my-ft"<br>]<br></code></pre> | No default. This field is required. |
-| [timeout_ms](#event_observer-timeout_ms) | Maximum duration (in milliseconds) to wait for the observer endpoint to respond.<br><br>When the node sends an event notification to this observer, it will wait at most this long<br>for a successful HTTP response (status code 200) before considering the request timed out.<br>If a timeout occurs and retries are enabled (see `EventObserverConfig::disable_retries`),<br>the request will be attempted again according to the retry strategy.<br><br>**Notes:**<br>- Units: milliseconds. | `1_000` (ms, 1 second) |
+| [endpoint](#event_observer-endpoint) | URL endpoint (hostname and port) where event notifications will be sent via HTTP POST requests.<br><br>The node will automatically prepend `http://` to this endpoint and append the<br>specific event path (e.g., `/new_block`, `/new_mempool_tx`).<br>Therefore, this value should be specified as `hostname:port` (e.g., "localhost:3700").<br><br>This should point to a service capable of receiving and processing Stacks event data.<br><br>**Notes:**<br>- **Do NOT include the `http://` scheme in this configuration value.**<br><br>**Example:**<br><pre><code>endpoint = "localhost:3700"&#10;</code></pre> | **Required** |
+| [events_keys](#event_observer-events_keys) | List of event types that this observer is configured to receive.<br><br>Each string in the list specifies an event category or a specific event to subscribe to.<br>For an observer to receive any notifications, this list must contain at least one valid key.<br>Providing an invalid string that doesn't match any of the valid formats below will cause<br>the node to panic on startup when parsing the configuration.<br><br>All observers, regardless of their `events_keys` configuration, implicitly receive<br>payloads on the `/attachments/new` endpoint.<br><br>Valid Event Keys:<br>- `"*"`: Subscribes to a broad set of common events.<br>&nbsp;&nbsp;- Events delivered to:<br>&nbsp;&nbsp;&nbsp;&nbsp;- `/new_block`: For blocks containing transactions that generate STX, FT, NFT, or smart contract events.<br>&nbsp;&nbsp;&nbsp;&nbsp;- `/new_microblocks`: For all new microblock streams. Note: Only until epoch 2.5.<br>&nbsp;&nbsp;&nbsp;&nbsp;- `/new_mempool_tx`: For new mempool transactions.<br>&nbsp;&nbsp;&nbsp;&nbsp;- `/drop_mempool_tx`: For dropped mempool transactions.<br>&nbsp;&nbsp;&nbsp;&nbsp;- `/new_burn_block`: For new burnchain blocks.<br>&nbsp;&nbsp;- Note: This key does NOT by itself subscribe to `/stackerdb_chunks` or `/proposal_response`.<br><br>- `"stx"`: Subscribes to STX token operation events (transfer, mint, burn, lock).<br>&nbsp;&nbsp;- Events delivered to: `/new_block`, `/new_microblocks`.<br>&nbsp;&nbsp;- Payload details: The "events" array in the delivered payloads will be filtered to include only STX-related events.<br><br>- `"memtx"`: Subscribes to new and dropped mempool transaction events.<br>&nbsp;&nbsp;- Events delivered to: `/new_mempool_tx`, `/drop_mempool_tx`.<br><br>- `"burn_blocks"`: Subscribes to new burnchain block events.<br>&nbsp;&nbsp;- Events delivered to: `/new_burn_block`.<br><br>- `"microblocks"`: Subscribes to new microblock stream events.<br>&nbsp;&nbsp;- Events delivered to: `/new_microblocks`.<br>&nbsp;&nbsp;- Payload details:<br>&nbsp;&nbsp;&nbsp;&nbsp;- The "transactions" field will contain all transactions from the microblocks.<br>&nbsp;&nbsp;&nbsp;&nbsp;- The "events" field will contain STX, FT, NFT, or specific smart contract events<br>*only if* this observer is also subscribed to those more specific event types<br>(e.g., via `"stx"`, `"*"`, a specific contract event key, or a specific asset identifier key).<br>&nbsp;&nbsp;- Note: Only until epoch 2.5.<br><br>- `"stackerdb"`: Subscribes to StackerDB chunk update events.<br>&nbsp;&nbsp;- Events delivered to: `/stackerdb_chunks`.<br><br>- `"block_proposal"`: Subscribes to block proposal response events (for Nakamoto consensus).<br>&nbsp;&nbsp;- Events delivered to: `/proposal_response`.<br><br>- Smart Contract Event: Subscribes to a specific smart contract event.<br>&nbsp;&nbsp;- Format: `"{contract_address}.{contract_name}::{event_name}"`<br>(e.g., `ST0000000000000000000000000000000000000000.my-contract::my-custom-event`)<br>&nbsp;&nbsp;- Events delivered to: `/new_block`, `/new_microblocks`.<br>&nbsp;&nbsp;- Payload details: The "events" array in the delivered payloads will be filtered for this specific event.<br><br>- Asset Identifier for FT/NFT Events: Subscribes to events (mint, burn, transfer) for a specific Fungible Token (FT) or Non-Fungible Token (NFT).<br>&nbsp;&nbsp;- Format: `"{contract_address}.{contract_name}.{asset_name}"`<br>(e.g., for an FT: `ST0000000000000000000000000000000000000000.my-ft-contract.my-fungible-token`)<br>&nbsp;&nbsp;- Events delivered to: `/new_block`, `/new_microblocks`.<br>&nbsp;&nbsp;- Payload details: The "events" array in the delivered payloads will be filtered for events related to the specified asset.<br><br>**Notes:**<br>- For a more detailed documentation check the event-dispatcher docs in the `/docs` folder.<br><br>**Example:**<br><pre><code>events_keys = [&#10;  "burn_blocks",&#10;  "memtx",&#10;  "ST0000000000000000000000000000000000000000.my-contract::my-custom-event",&#10;  "ST0000000000000000000000000000000000000000.token-contract.my-ft"&#10;]&#10;</code></pre> | **Required** |
+| [timeout_ms](#event_observer-timeout_ms) | Maximum duration (in milliseconds) to wait for the observer endpoint to respond.<br><br>When the node sends an event notification to this observer, it will wait at most this long<br>for a successful HTTP response (status code 200) before considering the request timed out.<br>If a timeout occurs and retries are enabled (see `EventObserverConfig::disable_retries`),<br>the request will be attempted again according to the retry strategy.<br><br>**Units:** milliseconds | `1_000` |
 
 ## [connection_options]
 
@@ -37,48 +37,48 @@ The configuration is automatically generated from the Rust source code documenta
 | Parameter | Description | Default |
 |-----------|-------------|----------|
 | [auth_token](#connection_options-auth_token) | HTTP auth password to use when communicating with stacks-signer binary.<br><br>This token is used in the `Authorization` header for certain requests.<br>Primarily, it secures the communication channel between this node and a connected<br>`stacks-signer` instance.<br><br>It is also used to authenticate requests to `/v2/blocks?broadcast=1`.<br><br>**Notes:**<br>- **Requirement:** This field **must** be configured if the node needs to receive block proposals from a configured `stacks-signer` event_observer via the `/v3/block_proposal` endpoint. The value must match the token configured on the signer. | `None` (authentication disabled for relevant endpoints) |
-| [block_proposal_max_age_secs](#connection_options-block_proposal_max_age_secs) | Maximum age (in seconds) allowed for a block proposal received via the `/v3/block_proposal` RPC endpoint.<br><br>If a block proposal is received whose timestamp is older than<br>the current time minus this configured value, the node will reject the proposal<br>with an HTTP 422 (Unprocessable Entity) error, considering it too stale.<br>This prevents the node from spending resources validating outdated proposals.<br><br>**Notes:**<br>- Units: seconds. | `600` (seconds) |
-| [connect_timeout](#connection_options-connect_timeout) | Maximum duration (in seconds) a connection attempt is allowed to remain in the connecting state.<br><br>This applies to both incoming P2P and HTTP connections. If a remote peer initiates a connection<br>but does not complete the connection process (e.g., handshake for P2P) within this time, the node<br>will consider it unresponsive and drop the connection attempt.<br><br>**Notes:**<br>- Units: seconds. | `10` (seconds) |
+| [block_proposal_max_age_secs](#connection_options-block_proposal_max_age_secs) | Maximum age (in seconds) allowed for a block proposal received via the `/v3/block_proposal` RPC endpoint.<br><br>If a block proposal is received whose timestamp is older than<br>the current time minus this configured value, the node will reject the proposal<br>with an HTTP 422 (Unprocessable Entity) error, considering it too stale.<br>This prevents the node from spending resources validating outdated proposals.<br><br>**Units:** seconds | `600` |
+| [connect_timeout](#connection_options-connect_timeout) | Maximum duration (in seconds) a connection attempt is allowed to remain in the connecting state.<br><br>This applies to both incoming P2P and HTTP connections. If a remote peer initiates a connection<br>but does not complete the connection process (e.g., handshake for P2P) within this time, the node<br>will consider it unresponsive and drop the connection attempt.<br><br>**Units:** seconds | `10` |
 | [disable_block_download](#connection_options-disable_block_download) | If true, completely disables the block download state machine.<br><br>The node will not attempt to download Stacks blocks (neither Nakamoto tenures nor<br>legacy blocks) from peers.<br><br>**Notes:**<br>- Intended for testing or specialized node configurations. | `false` |
 | [disable_inbound_handshakes](#connection_options-disable_inbound_handshakes) | If true, prevents the node from processing initial handshake messages from new inbound P2P connections.<br><br>This effectively stops the node from establishing new authenticated inbound P2P sessions.<br>Outbound connections initiated by this node are unaffected.<br><br>**Notes:**<br>- Primarily intended for testing purposes. | `false` |
 | [disable_inbound_walks](#connection_options-disable_inbound_walks) | If true, disables the neighbor discovery mechanism from starting walks from inbound peers.<br>Walks will only initiate from seed/bootstrap peers, outbound connections, or pingbacks.<br><br>**Notes:**<br>- Primarily intended for testing or specific network debugging scenarios. | `false` |
-| [dns_timeout](#connection_options-dns_timeout) | Maximum time (in milliseconds) to wait for a DNS query to resolve.<br><br>When the node needs to resolve a hostname (e.g., from a peer's advertised `data_url`<br>or an Atlas attachment URL) into an IP address, it initiates a DNS lookup.<br>This setting defines the maximum duration the node will wait for the DNS server<br>to respond before considering the lookup timed out.<br><br>**Notes:**<br>- Units: milliseconds. | `15_000` (ms, 15 seconds). |
-| [force_disconnect_interval](#connection_options-force_disconnect_interval) | Fault injection setting for testing purposes. Interval (in seconds) for forced disconnection of all peers.<br><br>If set to a positive value, the node will periodically disconnect all of its P2P peers at<br>roughly this interval. This simulates network churn or partitioning for testing node resilience.<br><br>**Notes:**<br>- The code enforcing this behavior is conditionally compiled using `cfg!(test)` and is only active during test runs.<br>- This setting has no effect in standard production builds.<br>- Units: seconds. | `None` (feature disabled) |
-| [handshake_timeout](#connection_options-handshake_timeout) | Maximum duration (in seconds) a P2P peer is allowed after connecting before completing the handshake.<br><br>If a P2P peer connects successfully but fails to send the necessary handshake messages<br>within this time, the node will consider it unresponsive and drop the connection.<br><br>**Notes:**<br>- Units: seconds. | `5` (seconds) |
-| [heartbeat](#connection_options-heartbeat) | Interval (in seconds) at which this node expects to send or receive P2P keep-alive messages.<br><br>During the P2P handshake, this node advertises this configured `heartbeat` value to its peers.<br>Each peer uses the other's advertised heartbeat interval (plus a timeout margin) to monitor<br>responsiveness and detect potential disconnections. This node also uses its own configured<br>value to proactively send Ping messages if the connection would otherwise be idle, helping to<br>keep it active.<br><br>**Notes:**<br>- Units: seconds. | `3_600` (seconds, 1 hour) |
-| [idle_timeout](#connection_options-idle_timeout) | Maximum idle time (in seconds) for HTTP connections.<br><br>This applies only to HTTP connections. It defines the maximum allowed time since the<br>last response was sent by the node to the client. An HTTP connection is dropped if<br>both this `idle_timeout` and the general [timeout](#connection_options-timeout) (time since last<br>request received) are exceeded.<br><br>**Notes:**<br>- Units: seconds. | `15` (seconds) |
+| [dns_timeout](#connection_options-dns_timeout) | Maximum time (in milliseconds) to wait for a DNS query to resolve.<br><br>When the node needs to resolve a hostname (e.g., from a peer's advertised `data_url`<br>or an Atlas attachment URL) into an IP address, it initiates a DNS lookup.<br>This setting defines the maximum duration the node will wait for the DNS server<br>to respond before considering the lookup timed out.<br><br>**Units:** milliseconds | `15_000` (15 seconds) |
+| [force_disconnect_interval](#connection_options-force_disconnect_interval) | Fault injection setting for testing purposes. Interval (in seconds) for forced disconnection of all peers.<br><br>If set to a positive value, the node will periodically disconnect all of its P2P peers at<br>roughly this interval. This simulates network churn or partitioning for testing node resilience.<br><br>**Notes:**<br>- The code enforcing this behavior is conditionally compiled using `cfg!(test)` and is only active during test runs.<br>- This setting has no effect in standard production builds.<br><br>**Units:** seconds | `None` (feature disabled) |
+| [handshake_timeout](#connection_options-handshake_timeout) | Maximum duration (in seconds) a P2P peer is allowed after connecting before completing the handshake.<br><br>If a P2P peer connects successfully but fails to send the necessary handshake messages<br>within this time, the node will consider it unresponsive and drop the connection.<br><br>**Units:** seconds | `5` |
+| [heartbeat](#connection_options-heartbeat) | Interval (in seconds) at which this node expects to send or receive P2P keep-alive messages.<br><br>During the P2P handshake, this node advertises this configured `heartbeat` value to its peers.<br>Each peer uses the other's advertised heartbeat interval (plus a timeout margin) to monitor<br>responsiveness and detect potential disconnections. This node also uses its own configured<br>value to proactively send Ping messages if the connection would otherwise be idle, helping to<br>keep it active.<br><br>**Units:** seconds | `3_600` (1 hour) |
+| [idle_timeout](#connection_options-idle_timeout) | Maximum idle time (in seconds) for HTTP connections.<br><br>This applies only to HTTP connections. It defines the maximum allowed time since the<br>last response was sent by the node to the client. An HTTP connection is dropped if<br>both this `idle_timeout` and the general [timeout](#connection_options-timeout) (time since last<br>request received) are exceeded.<br><br>**Units:** seconds | `15` |
 | [inbox_maxlen](#connection_options-inbox_maxlen) | Maximum number of messages allowed in the per-connection incoming buffer.<br>The limits apply individually to each established connection (both P2P and HTTP). | `100` |
-| [inv_reward_cycles](#connection_options-inv_reward_cycles) | Lookback depth (in PoX reward cycles) for Nakamoto inventory synchronization requests.<br><br>When initiating an inventory sync cycle with a peer, the node requests data starting<br>from `inv_reward_cycles` cycles before the current target reward cycle. This determines<br>how much historical inventory information is requested in each sync attempt.<br><br>**Notes:**<br>- Units: PoX reward cycles. | - `3` if [[burnchain].mode](#burnchain-mode) is `"mainnet"`<br>- `6` otherwise |
-| [inv_sync_interval](#connection_options-inv_sync_interval) | Minimum interval (in seconds) between initiating inventory synchronization attempts with the same peer.<br><br>Acts as a per-peer cooldown to throttle sync requests. A new sync cycle with a peer generally<br>starts only after this interval has passed since the previous attempt began *and* the previous<br>cycle is considered complete.<br><br>**Notes:**<br>- Units: seconds. | `45` (seconds) |
-| [log_neighbors_freq](#connection_options-log_neighbors_freq) | Frequency (in milliseconds) for logging the current P2P neighbor list at the DEBUG level.<br><br>If set to a non-zero value, the node will periodically log details about its currently<br>established P2P connections (neighbors). Setting this to 0 disables this periodic logging.<br><br>**Notes:**<br>- Units: milliseconds. | `60_000` (ms, 1 minute). |
+| [inv_reward_cycles](#connection_options-inv_reward_cycles) | Lookback depth (in PoX reward cycles) for Nakamoto inventory synchronization requests.<br><br>When initiating an inventory sync cycle with a peer, the node requests data starting<br>from `inv_reward_cycles` cycles before the current target reward cycle. This determines<br>how much historical inventory information is requested in each sync attempt.<br><br>**Units:** PoX reward cycles | - `3` if [[burnchain].mode](#burnchain-mode) is `"mainnet"`<br>- `6` otherwise |
+| [inv_sync_interval](#connection_options-inv_sync_interval) | Minimum interval (in seconds) between initiating inventory synchronization attempts with the same peer.<br><br>Acts as a per-peer cooldown to throttle sync requests. A new sync cycle with a peer generally<br>starts only after this interval has passed since the previous attempt began *and* the previous<br>cycle is considered complete.<br><br>**Units:** seconds | `45` |
+| [log_neighbors_freq](#connection_options-log_neighbors_freq) | Frequency (in milliseconds) for logging the current P2P neighbor list at the DEBUG level.<br><br>If set to a non-zero value, the node will periodically log details about its currently<br>established P2P connections (neighbors). Setting this to 0 disables this periodic logging.<br><br>**Units:** milliseconds | `60_000` (1 minute) |
 | [max_http_clients](#connection_options-max_http_clients) | Maximum total number of allowed concurrent HTTP connections.<br><br>This limits the total number of simultaneous connections the node's RPC/HTTP server<br>will accept. If this limit is reached, new incoming HTTP connection attempts<br>will be rejected. | `1000` |
 | [max_inflight_attachments](#connection_options-max_inflight_attachments) | Maximum number of concurrent Atlas data attachment download requests allowed.<br><br>This limits how many separate download requests for Atlas data attachments<br>can be active simultaneously. Helps manage network resources when fetching<br>potentially large attachment data. | `6` |
 | [max_inflight_blocks](#connection_options-max_inflight_blocks) | Maximum number of concurrent Nakamoto block download requests allowed.<br><br>This limits how many separate block download processes for Nakamoto tenures<br>(both confirmed and unconfirmed) can be active simultaneously. Helps manage<br>network bandwidth and processing load during chain synchronization. | `6` |
 | [max_sockets](#connection_options-max_sockets) | Maximum total number of concurrent network sockets the node is allowed to manage.<br><br>This limit applies globally to all types of sockets handled by the node's networking layer,<br>including listening sockets (P2P and RPC/HTTP), established P2P connections (inbound/outbound),<br>and established HTTP connections.<br>It serves as a hard limit to prevent the node from exhausting operating system<br>resources related to socket descriptors. | `800` |
-| [maximum_call_argument_size](#connection_options-maximum_call_argument_size) | Maximum size (in bytes) of the HTTP request body for read-only contract calls.<br><br>This limit is enforced on the `Content-Length` of incoming requests to the<br>`/v2/contracts/call-read-only/...` RPC endpoint. It prevents excessively large<br>request bodies, which might contain numerous or very large hex-encoded function arguments,<br>from overwhelming the node.<br><br>**Notes:**<br>- Calculated as 20 * `clarity::vm::types::BOUND_VALUE_SERIALIZATION_HEX`.<br>- Units: bytes. | `83_886_080` (bytes, 80 MiB) |
+| [maximum_call_argument_size](#connection_options-maximum_call_argument_size) | Maximum size (in bytes) of the HTTP request body for read-only contract calls.<br><br>This limit is enforced on the `Content-Length` of incoming requests to the<br>`/v2/contracts/call-read-only/...` RPC endpoint. It prevents excessively large<br>request bodies, which might contain numerous or very large hex-encoded function arguments,<br>from overwhelming the node.<br><br>**Notes:**<br>- Calculated as 20 * `clarity::vm::types::BOUND_VALUE_SERIALIZATION_HEX`.<br><br>**Units:** bytes | `83_886_080` (80 MiB) |
 | [num_clients](#connection_options-num_clients) | Maximum number of allowed concurrent inbound P2P connections.<br><br>This acts as a hard limit. If the node already has this many active inbound P2P<br>connections, any new incoming P2P connection attempts will be rejected.<br>Outbound P2P connections initiated by this node are not counted against this limit. | `750` |
 | [num_neighbors](#connection_options-num_neighbors) | Target number of peers for StackerDB replication.<br><br>Sets the maximum number of potential replication target peers requested from the<br>StackerDB control contract (`get-replication-targets`) when configuring a replica.<br><br>Note: Formerly (pre-Epoch 3.0), this also controlled the target peer count for<br>inventory synchronization. | `32` |
 | [outbox_maxlen](#connection_options-outbox_maxlen) | Maximum number of messages allowed in the per-connection outgoing buffer.<br>The limit applies individually to each established connection (both P2P and HTTP). | `100` |
-| [private_key_lifetime](#connection_options-private_key_lifetime) | Validity duration (in number of bitcoin blocks) for the node's P2P session private key.<br><br>The node uses a temporary private key for signing P2P messages. This key has an associated<br>expiry bitcoin block height stored in the peer database. When the current bitcoin height<br>reaches or exceeds the key's expiry height, the node automatically generates a new random<br>private key.<br>The expiry block height for this new key is calculated by adding the configured<br>[private_key_lifetime](#connection_options-private_key_lifetime) (in blocks) to the previous key's expiry block height.<br>The node then re-handshakes with peers to transition to the new key.<br>This provides periodic key rotation for P2P communication.<br><br>**Notes:**<br>- Units: bitcoin blocks. | `9223372036854775807` (i64::MAX, effectively infinite, disabling automatic re-keying). |
+| [private_key_lifetime](#connection_options-private_key_lifetime) | Validity duration (in number of bitcoin blocks) for the node's P2P session private key.<br><br>The node uses a temporary private key for signing P2P messages. This key has an associated<br>expiry bitcoin block height stored in the peer database. When the current bitcoin height<br>reaches or exceeds the key's expiry height, the node automatically generates a new random<br>private key.<br>The expiry block height for this new key is calculated by adding the configured<br>[private_key_lifetime](#connection_options-private_key_lifetime) (in blocks) to the previous key's expiry block height.<br>The node then re-handshakes with peers to transition to the new key.<br>This provides periodic key rotation for P2P communication.<br><br>**Units:** bitcoin blocks | `9223372036854775807` (i64::MAX, effectively infinite, disabling automatic re-keying). |
 | [private_neighbors](#connection_options-private_neighbors) | Whether to allow connections and interactions with peers having private IP addresses.<br><br>If `false` (default), the node will generally:<br>- Reject incoming connection attempts from peers with private IPs.<br>- Avoid initiating connections to peers known to have private IPs.<br>- Ignore peers with private IPs during neighbor discovery (walks).<br>- Skip querying peers with private IPs for mempool or StackerDB data.<br>- Filter out peers with private IPs from API responses listing potential peers.<br><br>Setting this to `true` disables these restrictions, which can be useful for local testing<br>environments or fully private network deployments. | `false` |
 | [public_ip_address](#connection_options-public_ip_address) | The Public IPv4 address and port (e.g. "203.0.113.42:20444") to advertise to other nodes.<br><br>If this option is not set (`None`), the node will attempt to automatically discover its<br>public IP address. | `None` (triggers automatic discovery attempt) |
 | [read_only_call_limit_read_count](#connection_options-read_only_call_limit_read_count) | Maximum number of distinct read operations from Clarity data space allowed during a read-only call. | `30` |
-| [read_only_call_limit_read_length](#connection_options-read_only_call_limit_read_length) | Maximum total size (in bytes) of data allowed to be read from Clarity data space (variables, maps)<br>during a read-only call.<br><br>**Notes:**<br>- Units: bytes. | `100_000` (bytes, 100 KB). |
-| [read_only_call_limit_runtime](#connection_options-read_only_call_limit_runtime) | Runtime cost limit for an individual read-only function call. This represents<br>computation effort within the Clarity VM.<br>(See SIP-006: https://github.com/stacksgov/sips/blob/main/sips/sip-006/sip-006-runtime-cost-assessment.md)<br><br>**Notes:**<br>- Units: Clarity VM cost units. | `1_000_000_000` (units) |
+| [read_only_call_limit_read_length](#connection_options-read_only_call_limit_read_length) | Maximum total size (in bytes) of data allowed to be read from Clarity data space (variables, maps)<br>during a read-only call.<br><br>**Units:** bytes | `100_000` (100 KB). |
+| [read_only_call_limit_runtime](#connection_options-read_only_call_limit_runtime) | Runtime cost limit for an individual read-only function call. This represents<br>computation effort within the Clarity VM.<br>(See SIP-006: https://github.com/stacksgov/sips/blob/main/sips/sip-006/sip-006-runtime-cost-assessment.md)<br><br>**Units:** Clarity VM cost units | `1_000_000_000` |
 | [read_only_call_limit_write_count](#connection_options-read_only_call_limit_write_count) | Maximum number of distinct write operations allowed during a read-only call.<br><br>**Notes:**<br>- This limit is effectively forced to 0 by the API handler, ensuring read-only behavior.<br>- Configuring a non-zero value has no effect on read-only call execution. | `0` |
-| [read_only_call_limit_write_length](#connection_options-read_only_call_limit_write_length) | Maximum total size (in bytes) of data allowed to be written during a read-only call.<br><br>**Notes:**<br>- This limit is effectively forced to 0 by the API handler, ensuring read-only behavior.<br>- Configuring a non-zero value has no effect on read-only call execution.<br>- Units: bytes. | `0` |
+| [read_only_call_limit_write_length](#connection_options-read_only_call_limit_write_length) | Maximum total size (in bytes) of data allowed to be written during a read-only call.<br><br>**Notes:**<br>- This limit is effectively forced to 0 by the API handler, ensuring read-only behavior.<br>- Configuring a non-zero value has no effect on read-only call execution.<br><br>**Units:** bytes | `0` |
 | [reject_blocks_pushed](#connection_options-reject_blocks_pushed) | Controls whether the node accepts Nakamoto blocks pushed proactively by peers.<br><br>- If `true`: Pushed blocks are ignored (logged at DEBUG and discarded). The node will<br>still process blocks that it actively downloads.<br>- If `false`: Both pushed blocks and actively downloaded blocks are processed. | `false` |
 | [soft_max_clients_per_host](#connection_options-soft_max_clients_per_host) | Soft limit on the number of inbound P2P connections allowed per host IP address.<br><br>During inbound connection pruning (when total inbound connections > [soft_num_clients](#connection_options-soft_num_clients) ),<br>the node checks if any single IP address has more connections than this limit.<br>If so, it preferentially prunes the newest connections originating from that<br>specific IP address until its count is reduced to this limit.<br>This prevents a single host from dominating the node's inbound connection capacity. | `4` |
 | [soft_max_neighbors_per_org](#connection_options-soft_max_neighbors_per_org) | Soft limit on the number of outbound P2P connections per network organization (ASN).<br><br>During connection pruning (when total outbound connections > [soft_num_neighbors](#connection_options-soft_num_neighbors) ),<br>the node checks if any single network organization (identified by ASN) has more<br>outbound connections than this limit. If so, it preferentially prunes the least<br>healthy/newest connections from that overrepresented organization until its count<br>is reduced to this limit or the total outbound count reaches<br>[soft_num_neighbors](#connection_options-soft_num_neighbors) . This encourages connection diversity across<br>different network providers. | `32` |
 | [soft_num_clients](#connection_options-soft_num_clients) | Soft limit threshold for triggering inbound P2P connection pruning.<br><br>If the total number of currently active inbound P2P connections exceeds this value,<br>the node will activate pruning logic to reduce the count, typically by applying<br>per-host limits (see [soft_max_clients_per_host](#connection_options-soft_max_clients_per_host) ).<br>This helps manage the overall load from inbound peers. | `750` |
 | [soft_num_neighbors](#connection_options-soft_num_neighbors) | Target number of outbound P2P connections the node aims to maintain.<br><br>The connection pruning logic only activates if the current number of established<br>outbound P2P connections exceeds this value. Pruning aims to reduce the connection<br>count back down to this target, ensuring the node maintains a baseline number<br>of outbound peers for network connectivity. | `16` |
 | [stackerdb_hint_replicas](#connection_options-stackerdb_hint_replicas) | Static list of preferred replica peers for specific StackerDB contracts, provided as a JSON string.<br><br>This allows manually specifying known peers to use for replicating particular StackerDBs,<br>potentially overriding or supplementing the peers discovered via the StackerDB's control contract.<br><br>Format: The configuration value must be a TOML string containing valid JSON.
The JSON structure must be an array of tuples, where each tuple pairs a contract identifier
with a list of preferred neighbor addresses:
`[[ContractIdentifier, [NeighborAddress, ...]], ...]`

1. `ContractIdentifier`: A JSON object representing the `QualifiedContractIdentifier`.
It must have the specific structure:
`{"issuer": [version_byte, [byte_array_20]], "name": "contract-name"}`

2. `NeighborAddress`: A JSON object specifying the peer details:
`{"ip": "...", "port": ..., "public_key_hash": "..."}`

**Notes:**
- Use this option with caution, primarily for advanced testing or bootstrapping.

**Example:**
  stackerdb_hint_replicas = '''
[
[
{
"issuer": [1, [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]],
"name": "my-contract"
},
[
{
"ip": "192.0.2.1",
"port": 20444,
"public_key_hash": "0102030405060708090a0b0c0d0e0f1011121314"
}
]
]
]
'''
| `None` (no hints provided) | -| [timeout](#connection_options-timeout) | General communication timeout (in seconds).

- For HTTP connections: Governs two timeout aspects:
  - Server-side: Defines the maximum allowed time since the last request was received from a client.
An idle connection is dropped if both this timeout and [idle_timeout](#connection_options-idle_timeout) are exceeded.
  - Client-side: Sets the timeout duration (TTL) for outgoing HTTP requests initiated by the node itself.
- For P2P connections: Used as the specific timeout for NAT punch-through requests.

**Notes:**
- Units: seconds. | `15` (seconds) | -| [walk_interval](#connection_options-walk_interval) | Minimum interval (in seconds) between the start of consecutive neighbor discovery walks.

The node periodically performs "neighbor walks" to discover new peers and maintain
an up-to-date view of the P2P network topology. This setting controls how frequently
these walks can be initiated, preventing excessive network traffic and processing.

**Notes:**
- Units: seconds. | `60` (seconds) | +| [stackerdb_hint_replicas](#connection_options-stackerdb_hint_replicas) | Static list of preferred replica peers for specific StackerDB contracts, provided as a JSON string.

This allows manually specifying known peers to use for replicating particular StackerDBs,
potentially overriding or supplementing the peers discovered via the StackerDB's control contract.

Format: The configuration value must be a TOML string containing valid JSON.
The JSON structure must be an array of tuples, where each tuple pairs a contract identifier
with a list of preferred neighbor addresses:
`[[ContractIdentifier, [NeighborAddress, ...]], ...]`

1. `ContractIdentifier`: A JSON object representing the `QualifiedContractIdentifier`.
It must have the specific structure:
`{"issuer": [version_byte, [byte_array_20]], "name": "contract-name"}`

2. `NeighborAddress`: A JSON object specifying the peer details:
`{"ip": "...", "port": ..., "public_key_hash": "..."}`

**Notes:**
- Use this option with caution, primarily for advanced testing or bootstrapping.

**Example:**
stackerdb_hint_replicas = '''
[
  [
    {
      "issuer": [1, [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]],
      "name": "my-contract"
    },
    [
      {
        "ip": "192.0.2.1",
        "port": 20444,
        "public_key_hash": "0102030405060708090a0b0c0d0e0f1011121314"
      }
    ]
  ]
]
'''
| `None` (no hints provided) | +| [timeout](#connection_options-timeout) | General communication timeout (in seconds).

- For HTTP connections: Governs two timeout aspects:
  - Server-side: Defines the maximum allowed time since the last request was received from a client.
An idle connection is dropped if both this timeout and [idle_timeout](#connection_options-idle_timeout) are exceeded.
  - Client-side: Sets the timeout duration (TTL) for outgoing HTTP requests initiated by the node itself.
- For P2P connections: Used as the specific timeout for NAT punch-through requests.

**Units:** seconds | `15` | +| [walk_interval](#connection_options-walk_interval) | Minimum interval (in seconds) between the start of consecutive neighbor discovery walks.

The node periodically performs "neighbor walks" to discover new peers and maintain
an up-to-date view of the P2P network topology. This setting controls how frequently
these walks can be initiated, preventing excessive network traffic and processing.

**Units:** seconds | `60` | | [walk_seed_probability](#connection_options-walk_seed_probability) | Probability (0.0 to 1.0) of forcing a neighbor walk to start from a seed/bootstrap peer.

This probability applies only when the node is not in Initial Block Download (IBD)
and is already connected to at least one seed/bootstrap peer.
Normally, in this situation, the walk would start from a random inbound or outbound peer.
However, with this probability, the walk is forced to start from a seed peer instead.
This helps ensure the node periodically re-establishes its network view from trusted entry points. | `0.1` (10%) | | ~~[antientropy_public](#connection_options-antientropy_public)~~ | Controls whether a node with public inbound connections should still push blocks, even if not NAT'ed.

In the Stacks 2.x anti-entropy logic, if a node detected it had inbound connections
from public IPs (suggesting it wasn't behind NAT) and this flag was set to `false`,
it would refrain from proactively pushing blocks and microblocks to peers.
The assumption was that publicly reachable nodes should primarily serve downloads.
If set to `true` (default), the node would push data regardless of its perceived reachability.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. | `true` | -| ~~[antientropy_retry](#connection_options-antientropy_retry)~~ | Minimum interval (in seconds) between attempts to run the Epoch 2.x anti-entropy data push mechanism.

The Stacks 2.x anti-entropy protocol involves the node proactively pushing its known
Stacks blocks and microblocks to peers. This value specifies the cooldown period for this operation.
This prevents the node from excessively attempting to push data to its peers.

**Notes:**
- Units: seconds.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. | `3_600` (seconds, 1 hour) | -| ~~[download_interval](#connection_options-download_interval)~~ | Minimum interval (in seconds) between consecutive block download scans in epoch 2.x.

In the pre-Nakamoto block download logic, if a full scan for blocks completed without
finding any new blocks to download, and if the known peer inventories had not changed,
the node would wait at least this duration before initiating the next download scan.
This throttled the downloader when the node was likely already synchronized.

**Notes:**
- Units: seconds.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. | `10` (seconds) | +| ~~[antientropy_retry](#connection_options-antientropy_retry)~~ | Minimum interval (in seconds) between attempts to run the Epoch 2.x anti-entropy data push mechanism.

The Stacks 2.x anti-entropy protocol involves the node proactively pushing its known
Stacks blocks and microblocks to peers. This value specifies the cooldown period for this operation.
This prevents the node from excessively attempting to push data to its peers.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+.

**Units:** seconds | `3_600` (1 hour) | +| ~~[download_interval](#connection_options-download_interval)~~ | Minimum interval (in seconds) between consecutive block download scans in epoch 2.x.

In the pre-Nakamoto block download logic, if a full scan for blocks completed without
finding any new blocks to download, and if the known peer inventories had not changed,
the node would wait at least this duration before initiating the next download scan.
This throttled the downloader when the node was likely already synchronized.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+.

**Units:** seconds | `10` | | ~~[full_inv_sync_interval](#connection_options-full_inv_sync_interval)~~ | Deprecated: it does not have any effect on the node's behavior.

**⚠️ DEPRECATED:** It does not have any effect on the node's behavior. | `None` | | ~~[max_clients_per_host](#connection_options-max_clients_per_host)~~ | Maximum number of inbound p2p connections per host we permit.

**⚠️ DEPRECATED:** It does not have any effect on the node's behavior. | `4` | | ~~[max_neighbors_per_host](#connection_options-max_neighbors_per_host)~~ | Maximum number of neighbors per host we permit.

**⚠️ DEPRECATED:** It does not have any effect on the node's behavior. | `1` | @@ -102,42 +102,42 @@ The configuration is automatically generated from the Rust source code documenta | Parameter | Description | Default | |-----------|-------------|----------| -| [block_commit_tx_estimated_size](#burnchain-block_commit_tx_estimated_size) | Estimated size (in virtual bytes) of a block commit transaction on bitcoin.
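
Taken together, a minimal `[connection_options]` block might look like the sketch below. All keys appear in the table above, and every value is the documented default, shown only for orientation rather than as a tuning recommendation.

```toml
[connection_options]
# Target number of outbound P2P connections; pruning activates above this.
soft_num_neighbors = 16
# Inbound pruning threshold and the per-host inbound cap applied during pruning.
soft_num_clients = 750
soft_max_clients_per_host = 4
# General communication timeout, in seconds.
timeout = 15
# Minimum seconds between consecutive neighbor discovery walks.
walk_interval = 60
# Keep rejecting peers with private IPs (set to true only for local testing).
private_neighbors = false
```
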
Used for fee calculation in mining logic by multiplying with the fee rate
[satoshis_per_byte](#burnchain-satoshis_per_byte) .

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`.
- Units: virtual bytes. | `380` (virtual bytes) | -| [burn_fee_cap](#burnchain-burn_fee_cap) | The maximum amount (in sats) of "burn commitment" to broadcast for the next block's leader election.
Acts as a safety cap to limit the maximum amount spent on mining.
It serves as both the target fee and a fallback if dynamic fee calculations fail or cannot be performed.

This setting can be hot-reloaded from the config file, allowing adjustment without restarting.

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`.
- Units: satoshis. | `20_000` (satoshis) | +| [block_commit_tx_estimated_size](#burnchain-block_commit_tx_estimated_size) | Estimated size (in virtual bytes) of a block commit transaction on bitcoin.
Used for fee calculation in mining logic by multiplying with the fee rate
[satoshis_per_byte](#burnchain-satoshis_per_byte) .

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`.

**Units:** virtual bytes | `380` | +| [burn_fee_cap](#burnchain-burn_fee_cap) | The maximum amount (in sats) of "burn commitment" to broadcast for the next block's leader election.
Acts as a safety cap to limit the maximum amount spent on mining.
It serves as both the target fee and a fallback if dynamic fee calculations fail or cannot be performed.

This setting can be hot-reloaded from the config file, allowing adjustment without restarting.

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`.

**Units:** satoshis | `20_000` | | [chain](#burnchain-chain) | The underlying blockchain used for Proof-of-Transfer.

**Notes:**
- Currently, only `"bitcoin"` is supported. | `"bitcoin"` | -| [chain_id](#burnchain-chain_id) | The network-specific identifier used in P2P communication and database initialization.

**Notes:**
- **Warning:** Do not modify this unless you really know what you're doing.
- This is intended strictly for testing purposes. | - `0x00000001` if [mode](#burnchain-mode) is `"mainnet"`
- `0x80000000` otherwise | -| [commit_anchor_block_within](#burnchain-commit_anchor_block_within) | Specifies a mandatory wait period (in milliseconds) after receiving a burnchain tip
before the node attempts to build the anchored block for the new tenure.
This duration effectively schedules the start of the block-building
process relative to the tip's arrival time.

**Notes:**
- This is intended strictly for testing purposes.
- Units: milliseconds. | `5_000` (milliseconds) | -| [epochs](#burnchain-epochs) | Custom override for the definitions of Stacks epochs (start/end burnchain heights, consensus rules).
This setting allows testing specific epoch transitions or custom consensus rules by defining exactly
when each epoch starts on bitcoin.

Epochs define distinct protocol rule sets (consensus rules, execution costs, capabilities).
When configured, the list must include all epochs sequentially from "1.0" up to the
highest desired epoch, without skipping any intermediate ones.
Valid `epoch_name` values currently include:
`"1.0"`, `"2.0"`, `"2.05"`, `"2.1"`, `"2.2"`, `"2.3"`, `"2.4"`, `"2.5"`, `"3.0"`, `"3.1"`.

**Validation Rules:**
- Epochs must be provided in strict chronological order (`1.0`, `2.0`, `2.05`...).
- `start_height` values must be non-decreasing across the list.
- Epoch `"1.0"` must have `start_height = 0`.
- The number of defined epochs cannot exceed the maximum supported by the node software.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes.
- Configured as a list `[[burnchain.epochs]]` in TOML, each with `epoch_name` (string) and `start_height` (integer Bitcoin block height).

**Example:**
  [[burnchain.epochs]]
epoch_name = "2.1"
start_height = 150

[[burnchain.epochs]]
epoch_name = "2.2"
start_height = 200
| `None` (uses the standard epoch definitions for the selected [mode](#burnchain-mode) ) | -| [fault_injection_burnchain_block_delay](#burnchain-fault_injection_burnchain_block_delay) | Fault injection setting for testing. Introduces an artificial delay (in milliseconds)
before processing each burnchain block download. Simulates a slow burnchain connection.

**Notes:**
- This is intended strictly for testing purposes.
- Units: milliseconds. | `0` (no delay) | +| [chain_id](#burnchain-chain_id) | The network-specific identifier used in P2P communication and database initialization.

**Notes:**
- **Warning:** Do not modify this unless you really know what you're doing.
- This is intended strictly for testing purposes. | - `0x00000001` if [mode](#burnchain-mode) is `"mainnet"`
- `0x80000000` otherwise | +| [commit_anchor_block_within](#burnchain-commit_anchor_block_within) | Specifies a mandatory wait period (in milliseconds) after receiving a burnchain tip
before the node attempts to build the anchored block for the new tenure.
This duration effectively schedules the start of the block-building
process relative to the tip's arrival time.

**Notes:**
- This is intended strictly for testing purposes.

**Units:** milliseconds | `5_000` | +| [epochs](#burnchain-epochs) | Custom override for the definitions of Stacks epochs (start/end burnchain heights, consensus rules).
This setting allows testing specific epoch transitions or custom consensus rules by defining exactly
when each epoch starts on bitcoin.

Epochs define distinct protocol rule sets (consensus rules, execution costs, capabilities).
When configured, the list must include all epochs sequentially from "1.0" up to the
highest desired epoch, without skipping any intermediate ones.
Valid `epoch_name` values currently include:
`"1.0"`, `"2.0"`, `"2.05"`, `"2.1"`, `"2.2"`, `"2.3"`, `"2.4"`, `"2.5"`, `"3.0"`, `"3.1"`.

**Validation Rules:**
- Epochs must be provided in strict chronological order (`1.0`, `2.0`, `2.05`...).
- `start_height` values must be non-decreasing across the list.
- Epoch `"1.0"` must have `start_height = 0`.
- The number of defined epochs cannot exceed the maximum supported by the node software.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes.
- Configured as a list `[[burnchain.epochs]]` in TOML, each with `epoch_name` (string) and `start_height` (integer Bitcoin block height).

**Example:**
[[burnchain.epochs]]
epoch_name = "2.1"
start_height = 150

[[burnchain.epochs]]
epoch_name = "2.2"
start_height = 200
| `None` (uses the standard epoch definitions for the selected [mode](#burnchain-mode) ) | +| [fault_injection_burnchain_block_delay](#burnchain-fault_injection_burnchain_block_delay) | Fault injection setting for testing. Introduces an artificial delay (in milliseconds)
before processing each burnchain block download. Simulates a slow burnchain connection.

**Notes:**
- This is intended strictly for testing purposes.

**Units:** milliseconds | `0` (no delay) | | [first_burn_block_hash](#burnchain-first_burn_block_hash) | Overrides the default starting block hash of the burnchain.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes.
- Should be used together with [first_burn_block_height](#burnchain-first_burn_block_height) and [first_burn_block_timestamp](#burnchain-first_burn_block_timestamp) for proper operation. | `None` (uses the burnchain's default starting block hash) | | [first_burn_block_height](#burnchain-first_burn_block_height) | Overrides the default starting bitcoin block height for the node.
Allows starting synchronization from a specific historical point in test environments.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes.
- Should be used together with [first_burn_block_timestamp](#burnchain-first_burn_block_timestamp) and [first_burn_block_hash](#burnchain-first_burn_block_hash) for proper operation. | `None` (uses the burnchain's default starting height for the mode) | | [first_burn_block_timestamp](#burnchain-first_burn_block_timestamp) | Overrides the default starting block timestamp of the burnchain.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes.
- Should be used together with [first_burn_block_height](#burnchain-first_burn_block_height) and [first_burn_block_hash](#burnchain-first_burn_block_hash) for proper operation. | `None` (uses the burnchain's default starting timestamp) | -| [leader_key_tx_estimated_size](#burnchain-leader_key_tx_estimated_size) | Estimated size (in virtual bytes) of a leader key registration transaction on bitcoin.
Used for fee calculation in mining logic by multiplying with the fee rate
[satoshis_per_byte](#burnchain-satoshis_per_byte) .

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`.
- Units: virtual bytes. | `290` (virtual bytes) | +| [leader_key_tx_estimated_size](#burnchain-leader_key_tx_estimated_size) | Estimated size (in virtual bytes) of a leader key registration transaction on bitcoin.
Used for fee calculation in mining logic by multiplying with the fee rate
[satoshis_per_byte](#burnchain-satoshis_per_byte) .

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`.

**Units:** virtual bytes | `290` | | [local_mining_public_key](#burnchain-local_mining_public_key) | The public key associated with the local mining address for the underlying Bitcoin regtest node.
Provided as a hex string representing an uncompressed public key.

It is primarily used in modes that rely on a controlled Bitcoin regtest backend
(e.g., "helium", "mocknet", "neon") where the Stacks node itself needs to
instruct the Bitcoin node to generate blocks.

The key is used to derive the Bitcoin address that receives the coinbase rewards
when generating blocks on the regtest network.

**Notes:**
- Mandatory if [mode](#burnchain-mode) is "helium".
- This is intended strictly for testing purposes. | `None` | -| [magic_bytes](#burnchain-magic_bytes) | The network "magic bytes" used to identify packets for the specific bitcoin network
instance (e.g., mainnet, testnet, regtest). Must match the magic bytes of the connected
bitcoin node.

These two-byte identifiers help ensure that nodes only connect to peers on the same
network type. Common values include:
- "X2" for mainnet
- "T2" for testnet (xenon)
- Other values for specific test networks

Configured as a 2-character ASCII string (e.g., "X2" for mainnet). | - `"T2"` if [mode](#burnchain-mode) is `"xenon"`
- `"X2"` otherwise | -| [max_rbf](#burnchain-max_rbf) | Maximum fee rate multiplier allowed when using Replace-By-Fee (RBF) for bitcoin transactions.
Expressed as a percentage of the original [satoshis_per_byte](#burnchain-satoshis_per_byte) rate (e.g.,
150 means the fee rate can be increased up to 1.5x). Used in mining logic for RBF decisions
to cap the replacement fee rate.

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`. | `150` (%) | +| [magic_bytes](#burnchain-magic_bytes) | The network "magic bytes" used to identify packets for the specific bitcoin network
instance (e.g., mainnet, testnet, regtest). Must match the magic bytes of the connected
bitcoin node.

These two-byte identifiers help ensure that nodes only connect to peers on the same
network type. Common values include:
- "X2" for mainnet
- "T2" for testnet (xenon)
- Other values for specific test networks

Configured as a 2-character ASCII string (e.g., "X2" for mainnet). | - `"T2"` if [mode](#burnchain-mode) is `"xenon"`
- `"X2"` otherwise | +| [max_rbf](#burnchain-max_rbf) | Maximum fee rate multiplier allowed when using Replace-By-Fee (RBF) for bitcoin transactions.
Expressed as a percentage of the original [satoshis_per_byte](#burnchain-satoshis_per_byte) rate (e.g.,
150 means the fee rate can be increased up to 1.5x). Used in mining logic for RBF decisions
to cap the replacement fee rate.

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`.

**Units:** percent | `150` | | [max_unspent_utxos](#burnchain-max_unspent_utxos) | The maximum number of unspent transaction outputs (UTXOs) to request from the bitcoin node.

This value is passed as the `maximumCount` parameter to the bitcoin node. It helps manage
response size and processing load, particularly relevant for miners querying for available
UTXOs to fund operations like block commits or leader key registrations.

Setting this limit too high might lead to performance issues or timeouts when querying
nodes with a very large number of UTXOs. Conversely, setting it too low might prevent
the miner from finding enough UTXOs in a single query to meet the required funding amount
for a transaction, even if sufficient funds exist across more UTXOs not returned by the limited query.

**Notes:**
- This value must be `<= 1024`.
- Only relevant if [[node].miner](#node-miner) is `true`. | `1024` | -| [mode](#burnchain-mode) | The operational mode or network profile for the Stacks node.
This setting determines network parameters (like chain ID, peer version),
default configurations, genesis block definitions, and overall node behavior.

Supported values:
- `"mainnet"` → mainnet
- `"xenon"` → testnet
- `"mocknet"` → regtest
- `"helium"` → regtest
- `"neon"` → regtest
- `"argon"` → regtest
- `"krypton"` → regtest
- `"nakamoto-neon"` → regtest | `"mocknet"` | +| [mode](#burnchain-mode) | The operational mode or network profile for the Stacks node.
This setting determines network parameters (like chain ID, peer version),
default configurations, genesis block definitions, and overall node behavior.

Supported values:
- `"mainnet"`: mainnet
- `"xenon"`: testnet
- `"mocknet"`: regtest
- `"helium"`: regtest
- `"neon"`: regtest
- `"argon"`: regtest
- `"krypton"`: regtest
- `"nakamoto-neon"`: regtest | `"mocknet"` | | [password](#burnchain-password) | The password for authenticating with the bitcoin node's RPC interface.
Required if the bitcoin node requires RPC authentication.

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`. | `None` | | [peer_host](#burnchain-peer_host) | The hostname or IP address of the bitcoin node peer.

This field is required for all node configurations as it specifies where to find the underlying
bitcoin node to interact with for PoX operations, block validation, and mining. | `"0.0.0.0"` | | [peer_port](#burnchain-peer_port) | The P2P network port of the bitcoin node specified by [peer_host](#burnchain-peer_host) . | `8333` | -| [peer_version](#burnchain-peer_version) | The peer protocol version number used in P2P communication.
This parameter cannot be set via the configuration file.

**Notes:**
- **Warning:** Do not modify this unless you really know what you're doing. | - `402_653_196` if [mode](#burnchain-mode) is `"mainnet"`
- `4_207_599_116` otherwise | -| [poll_time_secs](#burnchain-poll_time_secs) | The interval, in seconds, at which the node polls the bitcoin node for new blocks and state updates.

The default value of 10 seconds is mainly intended for testing purposes.
It's suggested to set this to a higher value for mainnet, e.g., 300 seconds (5 minutes).

**Notes:**
- Units: seconds | `10` (seconds) | +| [peer_version](#burnchain-peer_version) | The peer protocol version number used in P2P communication.
This parameter cannot be set via the configuration file.

**Notes:**
- **Warning:** Do not modify this unless you really know what you're doing. | - `402_653_196` if [mode](#burnchain-mode) is `"mainnet"`
- `4_207_599_116` otherwise | +| [poll_time_secs](#burnchain-poll_time_secs) | The interval, in seconds, at which the node polls the bitcoin node for new blocks and state updates.

The default of 10 seconds is mainly intended for testing; for mainnet, a higher
value such as 300 seconds (5 minutes) is suggested.

**Units:** seconds | `10` | | [pox_2_activation](#burnchain-pox_2_activation) | Sets a custom burnchain height for PoX-2 activation (for testing).

This affects two key transitions:
1. The block height at which PoX v1 lockups are automatically unlocked.
2. The block height from which PoX reward set calculations switch to PoX v2 rules.

**Behavior:**
- This value directly sets the auto unlock height for PoX v1 lockups before transition to PoX v2.
This also defines the burn height at which PoX reward sets are calculated using PoX v2 rather than v1.
- If custom [epochs](#burnchain-epochs) are provided:
  - This value is used to validate that Epoch 2.1's start height is ≤ this value.
  - However, the height specified in `epochs` for Epoch 2.1 takes precedence.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes. | `None` | -| [pox_prepare_length](#burnchain-pox_prepare_length) | Overrides the length (in bitcoin blocks) of the PoX prepare phase.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes.
- Units: bitcoin blocks. | `None` (uses the standard prepare phase length for the mode) | -| [pox_reward_length](#burnchain-pox_reward_length) | Overrides the length (in bitcoin blocks) of the PoX reward cycle.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes.
- Units: bitcoin blocks. | `None` (uses the standard reward cycle length for the mode) | +| [pox_prepare_length](#burnchain-pox_prepare_length) | Overrides the length (in bitcoin blocks) of the PoX prepare phase.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes.

**Units:** bitcoin blocks | `None` (uses the standard prepare phase length for the mode) | +| [pox_reward_length](#burnchain-pox_reward_length) | Overrides the length (in bitcoin blocks) of the PoX reward cycle.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes.

**Units:** bitcoin blocks | `None` (uses the standard reward cycle length for the mode) | | [process_exit_at_block_height](#burnchain-process_exit_at_block_height) | Optional bitcoin block height at which the Stacks node process should gracefully exit.
When bitcoin reaches this height, the node logs a message and initiates a graceful shutdown.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes. | `None` | -| [rbf_fee_increment](#burnchain-rbf_fee_increment) | The incremental amount (in Sats/vByte) to add to the previous transaction's
fee rate for RBF bitcoin transactions.

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`.
- Units: satoshis per virtual byte. | `5` (satoshis per virtual byte) | +| [rbf_fee_increment](#burnchain-rbf_fee_increment) | The incremental amount (in sats/vByte) to add to the previous transaction's
fee rate for RBF bitcoin transactions.

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`.

**Units:** sats/vByte | `5` | | [rpc_port](#burnchain-rpc_port) | The RPC port of the bitcoin node specified by [peer_host](#burnchain-peer_host) . | `8332` | | [rpc_ssl](#burnchain-rpc_ssl) | Flag indicating whether to use SSL/TLS when connecting to the bitcoin node's RPC interface. | `false` | -| [satoshis_per_byte](#burnchain-satoshis_per_byte) | The default fee rate in satoshis per virtual byte (sats/vB) to use when estimating fees for miners
to submit bitcoin transactions (like block commits or leader key registrations).

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`.
- Units: satoshis per virtual byte. | `50` (satoshis per virtual byte) | -| [timeout](#burnchain-timeout) | Timeout duration, in seconds, for RPC calls made to the bitcoin node.
Configures the timeout on the underlying HTTP client.

**Notes:**
- Units: seconds | `60` (seconds) | +| [satoshis_per_byte](#burnchain-satoshis_per_byte) | The default fee rate in sats/vByte to use when estimating fees for miners
to submit bitcoin transactions (like block commits or leader key registrations).

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`.

**Units:** sats/vByte | `50` | +| [timeout](#burnchain-timeout) | Timeout duration, in seconds, for RPC calls made to the bitcoin node.
Configures the timeout on the underlying HTTP client.

**Units:** seconds | `60` | | [username](#burnchain-username) | The username for authenticating with the bitcoin node's RPC interface.
Required if the bitcoin node requires RPC authentication.

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`. | `None` | | [wallet_name](#burnchain-wallet_name) | Specifies the name of the Bitcoin wallet to use within the connected bitcoin node.
Used to interact with a specific named wallet if the bitcoin node manages multiple wallets.

If the specified wallet doesn't exist, the node will attempt to create it via the
`createwallet` RPC call. This is particularly useful for miners who need to manage
separate wallets.

**Notes:**
- Primarily relevant for miners interacting with multi-wallet Bitcoin nodes. | `""` (empty string, implying the default wallet or no specific wallet needed) | -| ~~[affirmation_overrides](#burnchain-affirmation_overrides)~~ | Overrides for the burnchain block affirmation map for specific reward cycles.
Allows manually setting the miner affirmation ('p'resent/'n'ot-present/'a'bsent) map for a
given cycle, bypassing the map normally derived from sortition results.

Special defaults are added when [mode](#burnchain-mode) is "xenon", but config entries take precedence.
At startup, these overrides are written to the `BurnchainDB` (`overrides` table).

**Notes:**
- Primarily used for testing or recovering from network issues.
- Configured as a list `[[burnchain.affirmation_overrides]]` in TOML, each with `reward_cycle` (integer) and `affirmation` (string of 'p'/'n'/'a', length `reward_cycle - 1`).

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode.

**Example:**
  [[burnchain.affirmation_overrides]]
reward_cycle = 413
affirmation = "pna..." # Must be 412 chars long
| Empty map | +| ~~[affirmation_overrides](#burnchain-affirmation_overrides)~~ | Overrides for the burnchain block affirmation map for specific reward cycles.
Allows manually setting the miner affirmation ('p'resent/'n'ot-present/'a'bsent) map for a
given cycle, bypassing the map normally derived from sortition results.

Special defaults are added when [mode](#burnchain-mode) is "xenon", but config entries take precedence.
At startup, these overrides are written to the `BurnchainDB` (`overrides` table).

**Notes:**
- Primarily used for testing or recovering from network issues.
- Configured as a list `[[burnchain.affirmation_overrides]]` in TOML, each with `reward_cycle` (integer) and `affirmation` (string of 'p'/'n'/'a', length `reward_cycle - 1`).

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode.

**Example:**
[[burnchain.affirmation_overrides]]
reward_cycle = 413
affirmation = "pna..." # Must be 412 chars long
| Empty map | | ~~[ast_precheck_size_height](#burnchain-ast_precheck_size_height)~~ | Override for the burnchain height activating stricter AST size checks pre-epoch 3.0 for testing purposes.

Used pre-epoch 3.0 to control activation before it became standard (at burn height `752000`).
Ignored in standard production builds as the underlying mechanism is disabled unless the `testing`
feature is active.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. | `None` | -| ~~[sunset_end](#burnchain-sunset_end)~~ | Overrides the bitcoin height, non-inclusive, at which the PoX sunset period ends in epochs before 2.1.
After this height, Stacking rewards are disabled completely. This parameter works together
with `sunset_start` to define the full sunset transition period for PoX.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes.

**⚠️ DEPRECATED:** The sunset phase was removed in Epoch 2.1. This parameter can still be used for
testing purposes for epochs before 2.1. | `None` (uses the standard sunset end height for the mode) | -| ~~[sunset_start](#burnchain-sunset_start)~~ | Overrides the bitcoin height at which the PoX sunset period begins in epochs before 2.1.
The sunset period represents a planned phase-out of the PoX mechanism. During this period,
stacking rewards gradually decrease, eventually ceasing entirely. This parameter allows
testing the PoX sunset transition by explicitly setting its start height.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes.

**⚠️ DEPRECATED:** The sunset phase was removed in Epoch 2.1. This parameter can still be used for
testing purposes for epochs before 2.1. | `None` (uses the standard sunset start height for the mode) | +| ~~[sunset_end](#burnchain-sunset_end)~~ | Overrides the bitcoin height, non-inclusive, at which the PoX sunset period ends in epochs before 2.1.
After this height, Stacking rewards are disabled completely. This parameter works together
with `sunset_start` to define the full sunset transition period for PoX.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes.

**⚠️ DEPRECATED:** The sunset phase was removed in Epoch 2.1. This parameter can still be used for
testing purposes for epochs before 2.1. | `None` (uses the standard sunset end height for the mode) | +| ~~[sunset_start](#burnchain-sunset_start)~~ | Overrides the bitcoin height at which the PoX sunset period begins in epochs before 2.1.
The sunset period represents a planned phase-out of the PoX mechanism. During this period,
stacking rewards gradually decrease, eventually ceasing entirely. This parameter allows
testing the PoX sunset transition by explicitly setting its start height.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes.

**⚠️ DEPRECATED:** The sunset phase was removed in Epoch 2.1. This parameter can still be used for
testing purposes for epochs before 2.1. | `None` (uses the standard sunset start height for the mode) | ## [node] @@ -146,10 +146,10 @@ The configuration is automatically generated from the Rust source code documenta |-----------|-------------|----------| | [always_use_affirmation_maps](#node-always_use_affirmation_maps) | Controls if Stacks Epoch 2.1+ affirmation map logic should be applied even before Epoch 2.1.
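
As a sketch of how the fee-related options above interact, a miner-oriented `[burnchain]` block could look like the following. The hostname is a placeholder; the fee values are the documented defaults, and `poll_time_secs` uses the suggested mainnet value rather than the default.

```toml
[burnchain]
mode = "mainnet"
peer_host = "bitcoin.example.com"   # placeholder; point at your bitcoin node
# Miner fee knobs: commits start at satoshis_per_byte, each RBF attempt adds
# rbf_fee_increment, capped at max_rbf percent of the original rate.
satoshis_per_byte = 50
rbf_fee_increment = 5
max_rbf = 150
# Safety cap on the total burn commitment per block commit, in satoshis.
burn_fee_cap = 20_000
# Poll bitcoin every 5 minutes, as suggested for mainnet (default is 10s).
poll_time_secs = 300
```
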
- If `true` (default), the node consistently uses the newer (Epoch 2.1) rules for PoX anchor block
validation and affirmation-based reorg handling, even in earlier epochs.
- If `false`, the node strictly follows the rules defined for the specific epoch it is currently
processing, only applying 2.1+ logic from Epoch 2.1 onwards.
Differences in this setting between nodes prior to Epoch 2.1 could lead to consensus forks. | `true` | | [assume_present_anchor_blocks](#node-assume_present_anchor_blocks) | Controls if the node must strictly wait for any PoX anchor block selected by the core consensus mechanism.
- If `true`: Halts burnchain processing immediately whenever a selected anchor block is missing locally
(`SelectedAndUnknown` status), regardless of affirmation status. This is always true in Nakamoto (Epoch 3.0+)
and runs *before* affirmation checks.
- If `false` (primarily for testing): Skips this immediate halt, allowing processing to proceed to
affirmation map checks.
Normal operation requires this to be `true`; setting to `false` will likely break consensus adherence.

**Notes:**
- This parameter cannot be set via the configuration file; it must be modified programmatically. | `true` | -| [bootstrap_node](#node-bootstrap_node) | A list of initial peer nodes used to bootstrap connections into the Stacks P2P network.
Peers are specified in a configuration file as comma-separated strings in the
format `"PUBKEY@IP:PORT"` or `"PUBKEY@HOSTNAME:PORT"`. DNS hostnames are resolved
during configuration loading.

**Example:**
  bootstrap_node = "029266faff4c8e0ca4f934f34996a96af481df94a89b0c9bd515f3536a95682ddc@seed.testnet.hiro.so:30444,02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a605923893@seed.mainnet.hiro.so:20444"
| `[]` (empty vector) | -| [chain_liveness_poll_time_secs](#node-chain_liveness_poll_time_secs) | The polling interval, in seconds, for the background thread that monitors chain liveness.
This thread periodically wakes up the main coordinator to check for chain progress or
other conditions requiring action.

**Notes:**
- Units: seconds. | `300` (seconds, 5 minutes) | +| [bootstrap_node](#node-bootstrap_node) | A list of initial peer nodes used to bootstrap connections into the Stacks P2P network.
Peers are specified in a configuration file as comma-separated strings in the
format `"PUBKEY@IP:PORT"` or `"PUBKEY@HOSTNAME:PORT"`. DNS hostnames are resolved
during configuration loading.

**Example:**
bootstrap_node = "pubkey1@example.com:30444,pubkey2@192.168.1.100:20444"
| `[]` (empty vector) | +| [chain_liveness_poll_time_secs](#node-chain_liveness_poll_time_secs) | The polling interval, in seconds, for the background thread that monitors chain liveness.
This thread periodically wakes up the main coordinator to check for chain progress or
other conditions requiring action.

**Units:** seconds | `300` (5 minutes) | | [data_url](#node-data_url) | The publicly accessible URL that this node advertises to peers during the P2P handshake
as its HTTP RPC endpoint. Other nodes or services might use this URL to query the node's API. | `http://{rpc_bind}` (e.g., "http://0.0.0.0:20443" if [rpc_bind](#node-rpc_bind) is default). | -| [deny_nodes](#node-deny_nodes) | A list of peer addresses that this node should explicitly deny connections from.
Peers are specified as comma-separated strings in the format "IP:PORT" or "HOSTNAME:PORT"
in the configuration file. DNS hostnames are resolved during configuration loading.

**Example:**
  deny_nodes = "192.168.1.100:20444,badhost.example.com:20444"
| `[]` (empty vector) | +| [deny_nodes](#node-deny_nodes) | A list of peer addresses that this node should explicitly deny connections from.
Peers are specified as comma-separated strings in the format "IP:PORT" or "HOSTNAME:PORT"
in the configuration file. DNS hostnames are resolved during configuration loading.

**Example:**
deny_nodes = "192.168.1.100:20444,badhost.example.com:20444"
| `[]` (empty vector) | | [fault_injection_block_push_fail_probability](#node-fault_injection_block_push_fail_probability) | Fault injection setting for testing purposes. If set to `Some(p)`, where `p` is between 0 and 100,
the node will have a `p` percent chance of intentionally *not* pushing a newly processed block
to its peers.

**Notes:**
- Values: 0-100 (percentage). | `None` (no fault injection) | | [fault_injection_hide_blocks](#node-fault_injection_hide_blocks) | Fault injection setting for testing purposes. If `true`, the node's chainstate database
access layer may intentionally fail to retrieve block data, even if it exists,
simulating block hiding or data unavailability.

**Notes:**
- This parameter cannot be set via the configuration file; it must be modified programmatically. | `false` | | [local_peer_seed](#node-local_peer_seed) | The private key seed, provided as a hex string in the config file, used specifically for the
node's identity and message signing within the P2P networking layer.
This is separate from the main [seed](#node-seed) . | Randomly generated 32 bytes | @@ -158,25 +158,25 @@ The configuration is automatically generated from the Rust source code documenta | [miner](#node-miner) | Flag indicating whether this node should activate its mining logic and attempt to produce Stacks blocks.
Setting this to `true` typically requires providing necessary private keys (either [seed](#node-seed) or
[[miner].mining_key](#miner-mining_key) ). It also influences default behavior for settings like
[require_affirmed_anchor_blocks](#node-require_affirmed_anchor_blocks) . | `false` | | [mock_mining](#node-mock_mining) | Enables a simulated mining mode, primarily for local testing and development.
When `true`, the node may generate blocks locally without participating in the
real bitcoin consensus or P2P block production process.

**Notes:**
- Only relevant if [miner](#node-miner) is `true`. | `false` | | [name](#node-name) | Human-readable name for the node. Primarily used for identification in testing environments
(e.g., deriving log file names, temporary directory names). | `"helium-node"` | -| [next_initiative_delay](#node-next_initiative_delay) | Controls how frequently, in milliseconds, the Nakamoto miner's relay thread polls for work
or takes periodic actions when idle (e.g., checking for new burnchain blocks).
Default value of 10 seconds is reasonable in mainnet (where bitcoin blocks are ~10 minutes)
A lower value might be useful in other environments with faster burn blocks.

**Notes:**
- Units: milliseconds. | `10_000` (milliseconds, 10 seconds) | +| [next_initiative_delay](#node-next_initiative_delay) | Controls how frequently, in milliseconds, the Nakamoto miner's relay thread polls for work
or takes periodic actions when idle (e.g., checking for new burnchain blocks).
The default of 10 seconds is reasonable on mainnet, where bitcoin blocks arrive roughly
every 10 minutes; a lower value may be useful in environments with faster burn blocks.

**Units:** milliseconds | `10_000` (10 seconds) | | [p2p_address](#node-p2p_address) | The publicly accessible IPv4 address and port that this node advertises to peers for P2P connections.
This might differ from [p2p_bind](#node-p2p_bind) if the node is behind NAT or a proxy.

**Notes:**
- The default value derivation might be unexpected, potentially using the [rpc_bind](#node-rpc_bind) address; explicit configuration is recommended if needed. | Derived from [rpc_bind](#node-rpc_bind) (e.g., "0.0.0.0:20443" if [rpc_bind](#node-rpc_bind) is default). | | [p2p_bind](#node-p2p_bind) | The IPv4 address and port (e.g., "0.0.0.0:20444") on which the node's P2P networking
service should bind and listen for incoming connections from other peers. | `"0.0.0.0:20444"` | | [prometheus_bind](#node-prometheus_bind) | Optional network address and port (e.g., "127.0.0.1:9153") for binding the Prometheus metrics server.
If set, the node will start an HTTP server on this address to expose internal metrics
for scraping by a Prometheus instance. | `None` (Prometheus server disabled) | -| [require_affirmed_anchor_blocks](#node-require_affirmed_anchor_blocks) | Controls if the node must wait for locally missing but burnchain-affirmed PoX anchor blocks.
If an anchor block is confirmed by the affirmation map but not yet processed by this node:
- If `true`: Burnchain processing halts until the affirmed block is acquired. Ensures strict
adherence to the affirmed canonical chain, typical for followers.
- If `false`: Burnchain processing continues without waiting. Allows miners to operate optimistically
but may necessitate unwinding later if the affirmed block alters the chain state. | - `true` if [miner](#node-miner) is `false`
- `false` if [miner](#node-miner) is `true` | +| [require_affirmed_anchor_blocks](#node-require_affirmed_anchor_blocks) | Controls if the node must wait for locally missing but burnchain-affirmed PoX anchor blocks.
If an anchor block is confirmed by the affirmation map but not yet processed by this node:
- If `true`: Burnchain processing halts until the affirmed block is acquired. Ensures strict
adherence to the affirmed canonical chain, typical for followers.
- If `false`: Burnchain processing continues without waiting. Allows miners to operate optimistically
but may necessitate unwinding later if the affirmed block alters the chain state. | - `true` if [miner](#node-miner) is `false`
- `false` if [miner](#node-miner) is `true` | | [rpc_bind](#node-rpc_bind) | The IPv4 address and port (e.g., "0.0.0.0:20443") on which the node's HTTP RPC server
should bind and listen for incoming API requests. | `"0.0.0.0:20443"` | | [seed](#node-seed) | The node's Bitcoin wallet private key, provided as a hex string in the config file.
Used to initialize the node's keychain for signing operations.
If [[miner].mining_key](#miner-mining_key) is not set, this seed may also be used for mining-related signing.

**Notes:**
- Required if [miner](#node-miner) is `true` and [[miner].mining_key](#miner-mining_key) is absent. | Randomly generated 32 bytes | | [stacker](#node-stacker) | Setting this to `true` enables the node to replicate the miner and signer Stacker DBs
required for signing, and is required if the node is connected to a signer. | `false` | -| [stacker_dbs](#node-stacker_dbs) | A list of specific StackerDB contracts (identified by their qualified contract identifiers,
e.g., "SP000000000000000000002Q6VF78.pox-3") that this node should actively replicate.

**Notes:**
- Values are strings representing qualified contract identifiers.

**Example:**
  stacker_dbs = ["SP000000000000000000002Q6VF78.pox-3", "SP2C2YFP12AJZB4M4KUPSTMZQR0SNHNPH204SCQJM.stx-oracle-v1"]
| - If [miner](#node-miner) is `true` or [stacker](#node-stacker) is `true`, relevant system contracts
(like `.miners`, `.signers-*`) are automatically added in addition to any contracts
specified in the configuration file.
- Otherwise, defaults to an empty list `[]` if not specified in the TOML. | +| [stacker_dbs](#node-stacker_dbs) | A list of specific StackerDB contracts (identified by their qualified contract identifiers,
e.g., "SP000000000000000000002Q6VF78.pox-3") that this node should actively replicate.

**Notes:**
- Values are strings representing qualified contract identifiers.

**Example:**
stacker_dbs = ["SP000000000000000000002Q6VF78.pox-3", "SP2C2YFP12AJZB4M4KUPSTMZQR0SNHNPH204SCQJM.stx-oracle-v1"]
| - If [miner](#node-miner) is `true` or [stacker](#node-stacker) is `true`, relevant system contracts
(like `.miners`, `.signers-*`) are automatically added in addition to any contracts
specified in the configuration file.
- Otherwise, defaults to an empty list `[]` if not specified in the TOML. | | [txindex](#node-txindex) | Enables the transaction index, which maps transaction IDs to the blocks containing them.
Setting this to `true` allows the use of RPC endpoints that look up transactions by ID
(e.g., `/extended/v1/tx/{txid}`), but requires substantial additional disk space for the index database. | `false` | | [use_test_genesis_chainstate](#node-use_test_genesis_chainstate) | If set to `true`, the node initializes its state using an alternative test genesis block definition,
loading different initial balances, names, and lockups than the standard network genesis.
This is intended strictly for testing purposes and is disallowed on mainnet.

**Notes:**
- This is intended strictly for testing purposes and is disallowed on mainnet. | `None` (uses standard network genesis) | -| [wait_time_for_blocks](#node-wait_time_for_blocks) | When operating as a miner, this specifies the maximum time (in milliseconds)
the node waits after detecting a new burnchain block to synchronize corresponding
Stacks block data from the network before resuming mining attempts.
If synchronization doesn't complete within this duration, mining resumes anyway
to prevent stalling. This setting is loaded by all nodes but primarily affects
miner behavior within the relayer thread.

**Notes:**
- Units: milliseconds. | `30_000` (milliseconds, 30 seconds) | +| [wait_time_for_blocks](#node-wait_time_for_blocks) | When operating as a miner, this specifies the maximum time (in milliseconds)
the node waits after detecting a new burnchain block to synchronize corresponding
Stacks block data from the network before resuming mining attempts.
If synchronization doesn't complete within this duration, mining resumes anyway
to prevent stalling. This setting is loaded by all nodes but primarily affects
miner behavior within the relayer thread.

**Units:** milliseconds | `30_000` (30 seconds) | | [working_dir](#node-working_dir) | The file system absolute path to the node's working directory.
All persistent data, including chainstate, burnchain databases, and potentially other stores,
will be located within this directory.
This path can be overridden by setting the `STACKS_WORKING_DIR` environment variable.

**Notes:**
- For persistent mainnet or testnet nodes, this path must be explicitly configured to a non-temporary location. | `/tmp/stacks-node-{current_timestamp}` | | ~~[max_microblocks](#node-max_microblocks)~~ | The maximum number of microblocks allowed per Stacks block.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 2.5+. | `65535` (u16::MAX) | -| ~~[microblock_frequency](#node-microblock_frequency)~~ | How often to attempt producing microblocks, in milliseconds.

**Notes:**
- Only applies when [mine_microblocks](#node-mine_microblocks) is true and before Epoch 2.5.
- Units: milliseconds.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 2.5+. | `30_000` (milliseconds, 30 seconds) | +| ~~[microblock_frequency](#node-microblock_frequency)~~ | How often to attempt producing microblocks, in milliseconds.

**Notes:**
- Only applies when [mine_microblocks](#node-mine_microblocks) is true and before Epoch 2.5.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 2.5+.

**Units:** milliseconds | `30_000` (30 seconds) | | ~~[mine_microblocks](#node-mine_microblocks)~~ | Enable microblock mining.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 2.5+. | `true` | | ~~[mock_mining_output_dir](#node-mock_mining_output_dir)~~ | If [mock_mining](#node-mock_mining) is enabled, this specifies an optional directory path where the
generated mock Stacks blocks will be saved. (pre-Nakamoto)
The path is canonicalized on load.

**⚠️ DEPRECATED:** This setting was only used in the neon node and is ignored in Epoch 3.0+. | `None` | -| ~~[pox_sync_sample_secs](#node-pox_sync_sample_secs)~~ | Sampling interval in seconds for the PoX synchronization watchdog thread (pre-Nakamoto).
Determines how often the watchdog checked PoX state consistency in the Neon run loop.

**Notes:**
- Units: seconds.

**⚠️ DEPRECATED:** Unused after the Nakamoto upgrade. This setting is ignored in Epoch 3.0+. | `30` (seconds) | -| ~~[wait_time_for_microblocks](#node-wait_time_for_microblocks)~~ | Cooldown period after a microblock is produced, in milliseconds.

**Notes:**
- Only applies when [mine_microblocks](#node-mine_microblocks) is true and before Epoch 2.5.
- Units: milliseconds.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 2.5+. | `30_000` (milliseconds, 30 seconds) | +| ~~[pox_sync_sample_secs](#node-pox_sync_sample_secs)~~ | Sampling interval in seconds for the PoX synchronization watchdog thread (pre-Nakamoto).
Determines how often the watchdog checked PoX state consistency in the Neon run loop.

**⚠️ DEPRECATED:** Unused after the Nakamoto upgrade. This setting is ignored in Epoch 3.0+.

**Units:** seconds | `30` | +| ~~[wait_time_for_microblocks](#node-wait_time_for_microblocks)~~ | Cooldown period after a microblock is produced, in milliseconds.

**Notes:**
- Only applies when [mine_microblocks](#node-mine_microblocks) is true and before Epoch 2.5.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 2.5+.

**Units:** milliseconds | `30_000` (30 seconds) | ## [miner] @@ -184,42 +184,42 @@ The configuration is automatically generated from the Rust source code documenta | Parameter | Description | Default | |-----------|-------------|----------| | [activated_vrf_key_path](#miner-activated_vrf_key_path) | Path to a file for storing and loading the currently active, registered VRF leader key.
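
For orientation, a follower-style `[node]` block drawing on the options above might look like this sketch; the working directory, hostname, and public key are placeholders.

```toml
[node]
# Persistent nodes must use a non-temporary working directory.
working_dir = "/var/stacks"
rpc_bind = "0.0.0.0:20443"
p2p_bind = "0.0.0.0:20444"
# Comma-separated PUBKEY@HOST:PORT entries; <PUBKEY> is a placeholder.
bootstrap_node = "<PUBKEY>@seed.example.com:20444"
# Replicate the miner/signer StackerDBs (required when serving a signer).
stacker = true
# Enable the txid-to-block index at the cost of extra disk space.
txindex = false
```
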

Loading: On startup or when needing to register a key, if this path is set, the relayer first
attempts to load a serialized `RegisteredKey` from this file. If successful, it uses the
loaded key and skips the on-chain VRF key registration transaction, saving time and fees.
Saving: After a new VRF key registration transaction is confirmed and activated on the burnchain,
if this path is set, the node saves the details of the newly activated `RegisteredKey` to this file.
This allows the miner to persist its active VRF key across restarts.
If the file doesn't exist during load, or the path is `None`, the node proceeds with a new registration. | `None` | -| [block_commit_delay](#miner-block_commit_delay) | Time in milliseconds to wait for a Nakamoto block after seeing a burnchain block before submitting a block commit.

After observing a new burnchain block, the miner's relayer waits for this duration
before submitting its next block commit transaction to Bitcoin. This delay provides an opportunity
for a new Nakamoto block (produced by the winner of the latest sortition) to arrive.
Waiting helps avoid situations where the relayer immediately submits a commit that needs
to be replaced via RBF if a new Stacks block appears shortly after.
This delay is skipped if the new burnchain blocks leading to the tip contain no sortitions.

**Notes:**
- Units: milliseconds. | `40_000` (ms) | -| [block_rejection_timeout_steps](#miner-block_rejection_timeout_steps) | Defines adaptive timeouts for waiting for signer responses, based on the accumulated weight of rejections.

Configured as a map where keys represent rejection count thresholds in percentage,
and values are the timeout durations (in seconds) to apply when the rejection count
reaches or exceeds that key but is less than the next key.

When a miner proposes a block, it waits for signer responses (approvals or rejections).
The SignerCoordinator tracks the total weight of received rejections. It uses this map to determine
the current timeout duration. It selects the timeout value associated with the largest key
in the map that is less than or equal to the current accumulated rejection weight.
If this timeout duration expires before a decision is reached, the coordinator signals a timeout.
This prompts the miner to potentially retry proposing the block.
As more rejections come in, the applicable timeout step might change (likely decrease),
allowing the miner to abandon unviable proposals faster.

A key for 0 (zero rejections) must be defined, representing the initial timeout when no rejections have been received.

**Notes:**
- Keys are rejection weight percentages (0-100). Values are timeout durations.

**Example:**
  # Keys are rejection counts (as strings), values are timeouts in seconds.
[miner.block_rejection_timeout_steps]
"0" = 180
"10" = 90
"20" = 45
"30" = 0
| `{ 0: 180, 10: 90, 20: 45, 30: 0 }` (times in seconds) | +| [block_commit_delay](#miner-block_commit_delay) | Time in milliseconds to wait for a Nakamoto block after seeing a burnchain block before submitting a block commit.

After observing a new burnchain block, the miner's relayer waits for this duration
before submitting its next block commit transaction to Bitcoin. This delay provides an opportunity
for a new Nakamoto block (produced by the winner of the latest sortition) to arrive.
Waiting helps avoid situations where the relayer immediately submits a commit that needs
to be replaced via RBF if a new Stacks block appears shortly after.
This delay is skipped if the new burnchain blocks leading to the tip contain no sortitions.

**Units:** milliseconds | `40_000` | +| [block_rejection_timeout_steps](#miner-block_rejection_timeout_steps) | Defines adaptive timeouts for waiting for signer responses, based on the accumulated weight of rejections.

Configured as a map where keys represent rejection weight thresholds (as percentages),
and values are the timeout durations (in seconds) to apply once the accumulated rejection
weight reaches or exceeds that key but is still less than the next key.

When a miner proposes a block, it waits for signer responses (approvals or rejections).
The SignerCoordinator tracks the total weight of received rejections. It uses this map to determine
the current timeout duration. It selects the timeout value associated with the largest key
in the map that is less than or equal to the current accumulated rejection weight.
If this timeout duration expires before a decision is reached, the coordinator signals a timeout.
This prompts the miner to potentially retry proposing the block.
As more rejections come in, the applicable timeout step might change (likely decrease),
allowing the miner to abandon unviable proposals faster.

A key for 0 (zero rejections) must be defined, representing the initial timeout when no rejections have been received.

**Notes:**
- Keys are rejection weight percentages (0-100). Values are timeout durations.

**Example:**
# Keys are rejection weight percentages (as strings), values are timeouts in seconds.
[miner.block_rejection_timeout_steps]
"0" = 180
"10" = 90
"20" = 45
"30" = 0
| `{ 0: 180, 10: 90, 20: 45, 30: 0 }` (times in seconds) | | [block_reward_recipient](#miner-block_reward_recipient) | Optional recipient for the coinbase block reward, overriding the default miner address.

By default (`None`), the reward is sent to the miner's primary address ([[node].seed](#node-seed)).
If set to some principal address *and* the current Stacks epoch is > 2.1,
the reward will be directed to the specified address instead. | `None` | -| [candidate_retry_cache_size](#miner-candidate_retry_cache_size) | Max size (in *number* of items) of transaction candidates to hold in the in-memory
retry cache.

This cache stores transactions encountered during a `GlobalFeeRate` mempool walk
whose nonces are currently too high for immediate processing. These candidates
are prioritized for reconsideration later within the *same* walk, potentially
becoming valid if other processed transactions update the expected nonces.

A larger cache retains more potentially valid future candidates but uses more memory.
This setting is primarily relevant for the `GlobalFeeRate` strategy.

**Notes:**
- Units: number of items (Each element `crate::core::mempool::MemPoolTxInfoPartial` is currently 112 bytes). | `1048576` (items) | -| [empty_mempool_sleep_time](#miner-empty_mempool_sleep_time) | The amount of time in milliseconds that the miner should sleep in between attempts to
mine a block when the mempool is empty.

This prevents the miner from busy-looping when there are no pending transactions,
conserving CPU resources. During this sleep, the miner still checks burnchain tip changes.

**Notes:**
- Units: milliseconds. | `2_500` (ms) | -| [filter_origins](#miner-filter_origins) | A comma separated list of Stacks addresses to whitelist so that only transactions from
these addresses should be considered during the mempool walk for block building. If this
list is non-empty, any transaction whose origin address is *not* in this set will be skipped.

This allows miners to prioritize transactions originating from specific accounts that are
important to them.
Configured as a comma-separated string of standard Stacks addresses (e.g., "ST123...,ST456...")
in the configuration file.

**Example:**
  filter_origins = "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2,ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF"
| Empty set (all origins are considered). | -| [first_rejection_pause_ms](#miner-first_rejection_pause_ms) | Time in milliseconds to pause after receiving the first threshold rejection, before proposing a new block.

When a miner's block proposal fails to gather enough signatures from the signers for the first time
at a given height, the miner will pause for this duration before attempting to mine and propose again.

**Notes:**
- Units: milliseconds. | `5_000` (ms) | -| [max_execution_time_secs](#miner-max_execution_time_secs) | Defines the maximum execution time (in seconds) allowed for a single contract call transaction.

When processing a transaction (contract call or smart contract deployment), if this option is set,
and the execution time exceeds this limit, the transaction processing fails with an `ExecutionTimeout` error,
and the transaction is skipped. This prevents potentially long-running or infinite-loop transactions
from blocking block production.

**Notes:**
- Units: seconds. | `None` (no execution time limit) | +| [candidate_retry_cache_size](#miner-candidate_retry_cache_size) | Max size (in *number* of items) of transaction candidates to hold in the in-memory
retry cache.

This cache stores transactions encountered during a `GlobalFeeRate` mempool walk
whose nonces are currently too high for immediate processing. These candidates
are prioritized for reconsideration later within the *same* walk, potentially
becoming valid if other processed transactions update the expected nonces.

A larger cache retains more potentially valid future candidates but uses more memory.
This setting is primarily relevant for the `GlobalFeeRate` strategy.

**Notes:**
- Each element `crate::core::mempool::MemPoolTxInfoPartial` is currently 112 bytes.

**Units:** items | `1048576` | +| [empty_mempool_sleep_time](#miner-empty_mempool_sleep_time) | The amount of time in milliseconds that the miner should sleep in between attempts to
mine a block when the mempool is empty.

This prevents the miner from busy-looping when there are no pending transactions,
conserving CPU resources. During this sleep, the miner still checks burnchain tip changes.

**Units:** milliseconds | `2_500` | +| [filter_origins](#miner-filter_origins) | A comma-separated list of Stacks addresses to whitelist so that only transactions from
these addresses are considered during the mempool walk for block building. If this
list is non-empty, any transaction whose origin address is *not* in this set will be skipped.

This allows miners to prioritize transactions originating from specific accounts that are
important to them.
Configured as a comma-separated string of standard Stacks addresses (e.g., "ST123...,ST456...")
in the configuration file.

**Example:**
filter_origins = "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2,ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF"
| Empty set (all origins are considered). | +| [first_rejection_pause_ms](#miner-first_rejection_pause_ms) | Time in milliseconds to pause after receiving the first threshold rejection, before proposing a new block.

When a miner's block proposal fails to gather enough signatures from the signers for the first time
at a given height, the miner will pause for this duration before attempting to mine and propose again.

**Units:** milliseconds | `5_000` | +| [max_execution_time_secs](#miner-max_execution_time_secs) | Defines the maximum execution time (in seconds) allowed for a single contract call transaction.

When processing a transaction (contract call or smart contract deployment), if this option is set,
and the execution time exceeds this limit, the transaction processing fails with an `ExecutionTimeout` error,
and the transaction is skipped. This prevents potentially long-running or infinite-loop transactions
from blocking block production.

**Units:** seconds | `None` (no execution time limit) | | [mempool_walk_strategy](#miner-mempool_walk_strategy) | Strategy for selecting the next transaction candidate from the mempool.
Controls prioritization between maximizing immediate fee capture vs. ensuring
transaction nonce order for account progression and processing efficiency.

See `MemPoolWalkStrategy` for variant details.

Possible values (use variant names for configuration):
- `"GlobalFeeRate"`: Selects the transaction with the highest fee rate globally.
- `"NextNonceWithHighestFeeRate"`: Selects the highest-fee transaction among those
matching the next expected nonce for sender/sponsor accounts. | `"GlobalFeeRate"` | -| [min_time_between_blocks_ms](#miner-min_time_between_blocks_ms) | The minimum time to wait between mining blocks in milliseconds. The value must be greater
than or equal to 1000 ms because if a block is mined within the same second as its parent,
it will be rejected by the signers.

This check ensures compliance with signer rules that prevent blocks with identical timestamps
(at second resolution) to their parents. If a lower value is configured, 1000 ms is used instead.

**Notes:**
- Units: milliseconds. | `1_000` (ms) | -| [mining_key](#miner-mining_key) | The private key (Secp256k1) used for signing blocks, provided as a hex string.

This key must be present at runtime for mining operations to succeed. | - [[node].seed](#node-seed) if the `[miner]` section *is present* in the config file
- `None` if the `[miner]` section *is not present* | -| [nakamoto_attempt_time_ms](#miner-nakamoto_attempt_time_ms) | Maximum time (in milliseconds) the miner spends selecting transactions from the mempool
when assembling a Nakamoto block. Once this duration is exceeded, the miner stops
adding transactions and finalizes the block with those already selected.

**Notes:**
- Units: milliseconds. | `5_000` (ms, 5 seconds) | -| [nonce_cache_size](#miner-nonce_cache_size) | Max size (in bytes) of the in-memory cache for storing expected account nonces.

This cache accelerates mempool processing (e.g., during block building) by storing
the anticipated next nonce for accounts, reducing expensive lookups into the node's
state (MARF trie). A larger cache can improve performance for workloads involving
many unique accounts but increases memory consumption.

**Notes:**
- Must be configured to a value greater than 0.
- Units: bytes. | `1048576` (bytes, 1 MiB) | -| [probability_pick_no_estimate_tx](#miner-probability_pick_no_estimate_tx) | Probability (percentage, 0-100) of prioritizing a transaction without a known fee rate
during candidate selection.

Only effective when `mempool_walk_strategy` is `GlobalFeeRate`. Helps ensure
transactions lacking fee estimates are periodically considered alongside high-fee ones,
preventing potential starvation. A value of 0 means never prioritize them first,
100 means always prioritize them first (if available).

**Notes:**
- Values: 0-100. | `25` (25% chance) | -| [replay_transactions](#miner-replay_transactions) | TODO: remove this option when its no longer a testing feature and it becomes default behaviour
The miner will attempt to replay transactions that a threshold number of signers are expecting in the next block | *Required* | +| [min_time_between_blocks_ms](#miner-min_time_between_blocks_ms) | The minimum time to wait between mining blocks in milliseconds. The value must be greater
than or equal to 1000 ms because if a block is mined within the same second as its parent,
it will be rejected by the signers.

This check ensures compliance with signer rules that prevent blocks with identical timestamps
(at second resolution) to their parents. If a lower value is configured, 1000 ms is used instead.

**Units:** milliseconds | `1_000` | +| [mining_key](#miner-mining_key) | The private key (Secp256k1) used for signing blocks, provided as a hex string.

This key must be present at runtime for mining operations to succeed. | - [[node].seed](#node-seed) if the `[miner]` section *is present* in the config file
- `None` if the `[miner]` section *is not present* | +| [nakamoto_attempt_time_ms](#miner-nakamoto_attempt_time_ms) | Maximum time (in milliseconds) the miner spends selecting transactions from the mempool
when assembling a Nakamoto block. Once this duration is exceeded, the miner stops
adding transactions and finalizes the block with those already selected.

**Units:** milliseconds | `5_000` (5 seconds) | +| [nonce_cache_size](#miner-nonce_cache_size) | Max size (in bytes) of the in-memory cache for storing expected account nonces.

This cache accelerates mempool processing (e.g., during block building) by storing
the anticipated next nonce for accounts, reducing expensive lookups into the node's
state (MARF trie). A larger cache can improve performance for workloads involving
many unique accounts but increases memory consumption.

**Notes:**
- Must be configured to a value greater than 0.

**Units:** bytes | `1048576` (1 MiB) | +| [probability_pick_no_estimate_tx](#miner-probability_pick_no_estimate_tx) | Probability (percentage, 0-100) of prioritizing a transaction without a known fee rate
during candidate selection.

Only effective when `mempool_walk_strategy` is `GlobalFeeRate`. Helps ensure
transactions lacking fee estimates are periodically considered alongside high-fee ones,
preventing potential starvation. A value of 0 means never prioritize them first,
100 means always prioritize them first (if available).

**Notes:**
- Values: 0-100.

**Units:** percent | `25` (25% chance) | +| [replay_transactions](#miner-replay_transactions) | TODO: remove this option when it is no longer a testing feature and becomes the default behaviour
The miner will attempt to replay transactions that a threshold number of signers are expecting in the next block. | **Required** |
| [segwit](#miner-segwit) | If possible, mine with a p2wpkh address. | `false` |
-| [subsequent_rejection_pause_ms](#miner-subsequent_rejection_pause_ms) | Time in milliseconds to pause after receiving subsequent threshold rejections, before proposing a new block.

If a miner's block proposal is rejected multiple times at the same height (after the first rejection),
this potentially longer pause duration is used before retrying. This gives more significant time
for network state changes or signer coordination.

**Notes:**
- Units: milliseconds. | `10_000` (ms) | -| [tenure_cost_limit_per_block_percentage](#miner-tenure_cost_limit_per_block_percentage) | The percentage of the remaining tenure cost limit to consume each block.

This setting limits the execution cost (Clarity cost) a single Nakamoto block can incur,
expressed as a percentage of the *remaining* cost budget for the current mining tenure.
For example, if set to 25, a block can use at most 25% of the tenure's currently available cost limit.
This allows miners to spread the tenure's total execution budget across multiple blocks rather than
potentially consuming it all in the first block.

**Notes:**
- The value must be between 1 and 100, inclusive, if specified.
- Setting to 100 effectively disables this per-block limit, allowing a block to use the entire remaining tenure budget. | `25` (%) | -| [tenure_extend_cost_threshold](#miner-tenure_extend_cost_threshold) | Percentage of block budget that must be used before attempting a time-based tenure extend.

This sets a minimum threshold for the accumulated execution cost within a tenure before a
time-based tenure extension ([tenure_timeout](#miner-tenure_timeout) ) can be initiated.
The miner checks if the proportion of the total tenure budget consumed so far exceeds this percentage.
If the cost usage is below this threshold, a time-based extension will not be attempted, even if
the [tenure_timeout](#miner-tenure_timeout) duration has elapsed.
This prevents miners from extending tenures very early if they have produced only low-cost blocks.

**Notes:**
- Values: 0-100. | `50` (%) | -| [tenure_extend_poll_timeout](#miner-tenure_extend_poll_timeout) | Duration to wait in-between polling the sortition DB to see if we need to
extend the ongoing tenure (e.g. because the current sortition is empty or invalid).

After the relayer determines that a tenure extension might be needed but cannot proceed immediately
(e.g., because a miner thread is already active for the current burn view), it will wait for this
duration before re-checking the conditions for tenure extension.

**Notes:**
- Units: seconds. | `1` (seconds) | -| [tenure_extend_wait_timeout](#miner-tenure_extend_wait_timeout) | Duration to wait before trying to continue a tenure because the next miner did not produce blocks.

If the node was the winner of the previous sortition but not the most recent one,
the relayer waits for this duration before attempting to extend its own tenure.
This gives the new winner of the most recent sortition a grace period to produce their first block.
Also used in scenarios with empty sortitions to give the winner of the *last valid* sortition time
to produce a block before the current miner attempts an extension.

**Notes:**
- Units: milliseconds. | `120_000` (ms) | -| [tenure_timeout](#miner-tenure_timeout) | Duration to wait before attempting to issue a time-based tenure extend.

A miner can proactively attempt to extend its tenure if a significant amount of time has passed
since the last tenure change, even without an explicit trigger like an empty sortition.
If the time elapsed since the last tenure change exceeds this value, and the signer coordinator
indicates an extension is timely, and the cost usage threshold ([tenure_extend_cost_threshold](#miner-tenure_extend_cost_threshold) )
is met, the miner will include a tenure extension transaction in its next block.

**Notes:**
- Units: seconds. | `180` (seconds) | -| [txs_to_consider](#miner-txs_to_consider) | Specifies which types of transactions the miner should consider including in a block
during the mempool walk process. Transactions of types not included in this set will be skipped.

This allows miners to exclude specific transaction categories.
Configured as a comma-separated string of transaction type names in the configuration file.

Accepted values correspond to variants of `MemPoolWalkTxTypes`:
- `"TokenTransfer"`
- `"SmartContract"`
- `"ContractCall"`

**Example:**
  txs_to_consider = "TokenTransfer,ContractCall"
| All transaction types are considered (equivalent to [`MemPoolWalkTxTypes::all()`]). | +| [subsequent_rejection_pause_ms](#miner-subsequent_rejection_pause_ms) | Time in milliseconds to pause after receiving subsequent threshold rejections, before proposing a new block.

If a miner's block proposal is rejected multiple times at the same height (after the first rejection),
this potentially longer pause duration is used before retrying. This gives more significant time
for network state changes or signer coordination.

**Units:** milliseconds | `10_000` | +| [tenure_cost_limit_per_block_percentage](#miner-tenure_cost_limit_per_block_percentage) | The percentage of the remaining tenure cost limit to consume each block.

This setting limits the execution cost (Clarity cost) a single Nakamoto block can incur,
expressed as a percentage of the *remaining* cost budget for the current mining tenure.
For example, if set to 25, a block can use at most 25% of the tenure's currently available cost limit.
This allows miners to spread the tenure's total execution budget across multiple blocks rather than
potentially consuming it all in the first block.

**Notes:**
- Values: 1-100.
- Setting to 100 effectively disables this per-block limit, allowing a block to use the entire remaining tenure budget.

**Units:** percent | `25` | +| [tenure_extend_cost_threshold](#miner-tenure_extend_cost_threshold) | Percentage of block budget that must be used before attempting a time-based tenure extend.

This sets a minimum threshold for the accumulated execution cost within a tenure before a
time-based tenure extension ([tenure_timeout](#miner-tenure_timeout)) can be initiated.
The miner checks if the proportion of the total tenure budget consumed so far exceeds this percentage.
If the cost usage is below this threshold, a time-based extension will not be attempted, even if
the [tenure_timeout](#miner-tenure_timeout) duration has elapsed.
This prevents miners from extending tenures very early if they have produced only low-cost blocks.

**Notes:**
- Values: 0-100.

**Units:** percent | `50` | +| [tenure_extend_poll_timeout](#miner-tenure_extend_poll_timeout) | Duration to wait in-between polling the sortition DB to see if we need to
extend the ongoing tenure (e.g. because the current sortition is empty or invalid).

After the relayer determines that a tenure extension might be needed but cannot proceed immediately
(e.g., because a miner thread is already active for the current burn view), it will wait for this
duration before re-checking the conditions for tenure extension.

**Units:** seconds | `1` | +| [tenure_extend_wait_timeout](#miner-tenure_extend_wait_timeout) | Duration to wait before trying to continue a tenure because the next miner did not produce blocks.

If the node was the winner of the previous sortition but not the most recent one,
the relayer waits for this duration before attempting to extend its own tenure.
This gives the new winner of the most recent sortition a grace period to produce their first block.
Also used in scenarios with empty sortitions to give the winner of the *last valid* sortition time
to produce a block before the current miner attempts an extension.

**Units:** milliseconds | `120_000` | +| [tenure_timeout](#miner-tenure_timeout) | Duration to wait before attempting to issue a time-based tenure extend.

A miner can proactively attempt to extend its tenure if a significant amount of time has passed
since the last tenure change, even without an explicit trigger like an empty sortition.
If the time elapsed since the last tenure change exceeds this value, and the signer coordinator
indicates an extension is timely, and the cost usage threshold ([tenure_extend_cost_threshold](#miner-tenure_extend_cost_threshold))
is met, the miner will include a tenure extension transaction in its next block.

**Units:** seconds | `180` | +| [txs_to_consider](#miner-txs_to_consider) | Specifies which types of transactions the miner should consider including in a block
during the mempool walk process. Transactions of types not included in this set will be skipped.

This allows miners to exclude specific transaction categories.
Configured as a comma-separated string of transaction type names in the configuration file.

Accepted values correspond to variants of `MemPoolWalkTxTypes`:
- `"TokenTransfer"`
- `"SmartContract"`
- `"ContractCall"`

**Example:**
txs_to_consider = "TokenTransfer,ContractCall"
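# SmartContract deployments are excluded here and will be skipped during the mempool walk.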
| All transaction types are considered (equivalent to [`MemPoolWalkTxTypes::all()`]). | | [wait_for_block_download](#miner-wait_for_block_download) | Wait for a downloader pass before mining.
This can only be disabled in testing; it can't be changed in the config file. | `true` | -| ~~[fast_rampup](#miner-fast_rampup)~~ | Controls how the miner estimates its win probability when checking for underperformance.

This estimation is used in conjunction with [target_win_probability](#miner-target_win_probability) and
[underperform_stop_threshold](#miner-underperform_stop_threshold) to decide whether to pause mining due to
low predicted success rate.

- If `true`: The win probability estimation looks at projected spend distributions
~6 blocks into the future. This might help the miner adjust its spending more quickly
based on anticipated competition changes.
- If `false`: The win probability estimation uses the currently observed spend distribution
for the next block.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode and by the
`get-spend-amount` cli subcommand. | `false` | -| ~~[first_attempt_time_ms](#miner-first_attempt_time_ms)~~ | Time to wait (in milliseconds) before the first attempt to mine a block.

**Notes:**
- Units: milliseconds.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode. | `10` (ms) | -| ~~[max_reorg_depth](#miner-max_reorg_depth)~~ | Defines the maximum depth (in Stacks blocks) the miner considers when evaluating
potential chain tips when selecting the best tip to mine the next block on.

The miner analyzes candidate tips within this depth from the highest known tip.
It selects the "nicest" tip, often defined as the one that minimizes chain reorganizations
or orphans within this lookback window. A lower value restricts the analysis to shallower forks,
while a higher value considers deeper potential reorganizations.

This setting influences which fork the miner chooses to build upon if multiple valid tips exist.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode and the
`pick-best-tip` cli subcommand. | `3` | -| ~~[microblock_attempt_time_ms](#miner-microblock_attempt_time_ms)~~ | Time to wait (in milliseconds) to mine a microblock.

**Notes:**
- Units: milliseconds.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode. | `30_000` (ms, 30 seconds) | +| ~~[fast_rampup](#miner-fast_rampup)~~ | Controls how the miner estimates its win probability when checking for underperformance.

This estimation is used in conjunction with [target_win_probability](#miner-target_win_probability) and
[underperform_stop_threshold](#miner-underperform_stop_threshold) to decide whether to pause mining due to
low predicted success rate.

- If `true`: The win probability estimation looks at projected spend distributions
~6 blocks into the future. This might help the miner adjust its spending more quickly
based on anticipated competition changes.
- If `false`: The win probability estimation uses the currently observed spend distribution
for the next block.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode and by the
`get-spend-amount` cli subcommand. | `false` | +| ~~[first_attempt_time_ms](#miner-first_attempt_time_ms)~~ | Time to wait (in milliseconds) before the first attempt to mine a block.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode.

**Units:** milliseconds | `10` | +| ~~[max_reorg_depth](#miner-max_reorg_depth)~~ | Defines the maximum depth (in Stacks blocks) the miner considers when evaluating
potential chain tips when selecting the best tip to mine the next block on.

The miner analyzes candidate tips within this depth from the highest known tip.
It selects the "nicest" tip, often defined as the one that minimizes chain reorganizations
or orphans within this lookback window. A lower value restricts the analysis to shallower forks,
while a higher value considers deeper potential reorganizations.

This setting influences which fork the miner chooses to build upon if multiple valid tips exist.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode and the
`pick-best-tip` cli subcommand. | `3` | +| ~~[microblock_attempt_time_ms](#miner-microblock_attempt_time_ms)~~ | Time to wait (in milliseconds) to mine a microblock.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode.

**Units:** milliseconds | `30_000` (30 seconds) | | ~~[min_tx_count](#miner-min_tx_count)~~ | Minimum number of transactions that must be in a block if we're going to replace a pending
block-commit with a new block-commit.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode. | `0` | | ~~[only_increase_tx_count](#miner-only_increase_tx_count)~~ | If true, requires subsequent mining attempts for the same block height
to have a transaction count >= the previous best attempt.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode. | `false` | | ~~[pre_nakamoto_mock_signing](#miner-pre_nakamoto_mock_signing)~~ | Enables a mock signing process for testing purposes, specifically designed for use during Epoch 2.5
before the activation of Nakamoto consensus.

When set to `true` and [mining_key](#miner-mining_key) is provided, the miner will interact
with the `.miners` and `.signers` contracts via the stackerdb to send and receive mock
proposals and signatures, simulating aspects of the Nakamoto leader election and block signing flow.

**Notes:**
- This is intended strictly for testing purposes for Epoch 2.5 conditions.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. | `false` (Should only default true if [mining_key](#miner-mining_key) is set). | -| ~~[subsequent_attempt_time_ms](#miner-subsequent_attempt_time_ms)~~ | Time to wait (in milliseconds) for subsequent attempts to mine a block,
after the first attempt fails.

**Notes:**
- Units: milliseconds.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode. | `120_000` (ms, 2 minutes) | +| ~~[subsequent_attempt_time_ms](#miner-subsequent_attempt_time_ms)~~ | Time to wait (in milliseconds) for subsequent attempts to mine a block,
after the first attempt fails.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode.

**Units:** milliseconds | `120_000` (2 minutes) | | ~~[target_win_probability](#miner-target_win_probability)~~ | The minimum win probability this miner aims to achieve in block sortitions.

This target is used to detect prolonged periods of underperformance. If the miner's
calculated win probability consistently falls below this value for a duration specified
by [underperform_stop_threshold](#miner-underperform_stop_threshold) (after an initial startup phase), the miner may
cease spending in subsequent sortitions (returning a burn fee cap of 0) to conserve resources.

Setting this value close to 0.0 effectively disables the underperformance check.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode. | `0.0` | -| ~~[unconfirmed_commits_helper](#miner-unconfirmed_commits_helper)~~ | Optional path to an external helper script for fetching unconfirmed block-commits.
Used to inform the miner's dynamic burn fee bidding strategy with off-chain data.

If a path is provided, the target script must:
- Be executable by the user running the Stacks node process.
- Accept a list of active miner burnchain addresses as command-line arguments.
- On successful execution, print a JSON array representing `Vec`
(see `stacks::config::chain_data::UnconfirmedBlockCommit` struct) to stdout.
- Exit with code 0 on success.

Look at `test_get_unconfirmed_commits` in `stackslib/src/config/chain_data.rs` for an example script.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode and by the
`get-spend-amount` cli subcommand. | `None` (feature disabled). | +| ~~[unconfirmed_commits_helper](#miner-unconfirmed_commits_helper)~~ | Optional path to an external helper script for fetching unconfirmed block-commits.
Used to inform the miner's dynamic burn fee bidding strategy with off-chain data.

If a path is provided, the target script must:
- Be executable by the user running the Stacks node process.
- Accept a list of active miner burnchain addresses as command-line arguments.
- On successful execution, print a JSON array representing `Vec<UnconfirmedBlockCommit>`
(see `stacks::config::chain_data::UnconfirmedBlockCommit` struct) to stdout.
- Exit with code 0 on success.

Look at `test_get_unconfirmed_commits` in `stackslib/src/config/chain_data.rs` for an example script.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode and by the
`get-spend-amount` cli subcommand. | `None` (feature disabled). | | ~~[underperform_stop_threshold](#miner-underperform_stop_threshold)~~ | The maximum number of consecutive Bitcoin blocks the miner will tolerate underperforming
(i.e., having a calculated win probability below [target_win_probability](#miner-target_win_probability))
before temporarily pausing mining efforts.

This check is only active after an initial startup phase (6 blocks past the mining start height).
If the miner underperforms for this number of consecutive blocks, the
`BlockMinerThread::get_mining_spend_amount` function will return 0, effectively preventing the
miner from submitting a block commit for the current sortition to conserve funds.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode. | `None` (underperformance check is disabled). | -| ~~[unprocessed_block_deadline_secs](#miner-unprocessed_block_deadline_secs)~~ | Amount of time (in seconds) to wait for unprocessed blocks before mining a new block.

**Notes:**
- Units: seconds.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode. | `30` (seconds) | +| ~~[unprocessed_block_deadline_secs](#miner-unprocessed_block_deadline_secs)~~ | Amount of time (in seconds) to wait for unprocessed blocks before mining a new block.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode.

**Units:** seconds | `30` | | ~~[wait_on_interim_blocks](#miner-wait_on_interim_blocks)~~ | Amount of time to wait in between mining interim blocks when mining in Nakamoto.

**⚠️ DEPRECATED:** Use `min_time_between_blocks_ms` instead. | `None` | From fe86039e981354bcced3f4e01a40af3e4b79f18b Mon Sep 17 00:00:00 2001 From: Francesco Leacche Date: Wed, 4 Jun 2025 14:31:16 +0100 Subject: [PATCH 08/20] update reference --- docs/generated/configuration-reference.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/generated/configuration-reference.md b/docs/generated/configuration-reference.md index 99a22f46fe..7883ff6f42 100644 --- a/docs/generated/configuration-reference.md +++ b/docs/generated/configuration-reference.md @@ -166,7 +166,7 @@ The configuration is automatically generated from the Rust source code documenta | [rpc_bind](#node-rpc_bind) | The IPv4 address and port (e.g., "0.0.0.0:20443") on which the node's HTTP RPC server
should bind and listen for incoming API requests. | `"0.0.0.0:20443"` | | [seed](#node-seed) | The node's Bitcoin wallet private key, provided as a hex string in the config file.
Used to initialize the node's keychain for signing operations.
If [[miner].mining_key](#miner-mining_key) is not set, this seed may also be used for mining-related signing.

**Notes:**
- Required if [miner](#node-miner) is `true` and [[miner].mining_key](#miner-mining_key) is absent. | Randomly generated 32 bytes | | [stacker](#node-stacker) | Setting this to `true` enables the node to replicate the miner and signer Stacker DBs
required for signing, and is required if the node is connected to a signer. | `false` | -| [stacker_dbs](#node-stacker_dbs) | A list of specific StackerDB contracts (identified by their qualified contract identifiers,
e.g., "SP000000000000000000002Q6VF78.pox-3") that this node should actively replicate.

**Notes:**
- Values are strings representing qualified contract identifiers.

**Example:**
stacker_dbs = ["SP000000000000000000002Q6VF78.pox-3", "SP2C2YFP12AJZB4M4KUPSTMZQR0SNHNPH204SCQJM.stx-oracle-v1"]
| - If [miner](#node-miner) is `true` or [stacker](#node-stacker) is `true`, relevant system contracts
(like `.miners`, `.signers-*`) are automatically added in addition to any contracts
specified in the configuration file.
- Otherwise, defaults to an empty list `[]` if not specified in the TOML. | +| [stacker_dbs](#node-stacker_dbs) | A list of specific StackerDB contracts (identified by their qualified contract identifiers,
e.g., "SP000000000000000000002Q6VF78.pox-3") that this node should actively replicate.

**Notes:**
- Values are strings representing qualified contract identifiers.

**Example:**
stacker_dbs = [
  "SP000000000000000000002Q6VF78.pox-3",
  "SP2C2YFP12AJZB4M4KUPSTMZQR0SNHNPH204SCQJM.stx-oracle-v1"
]
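# Relevant system contracts (.miners, .signers-*) are added automatically when the node is a miner or stacker.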
| - If [miner](#node-miner) is `true` or [stacker](#node-stacker) is `true`, relevant system contracts
(like `.miners`, `.signers-*`) are automatically added in addition to any contracts
specified in the configuration file.
- Otherwise, defaults to an empty list `[]` if not specified in the TOML. | | [txindex](#node-txindex) | Enables the transaction index, which maps transaction IDs to the blocks containing them.
Setting this to `true` allows the use of RPC endpoints that look up transactions by ID
(e.g., `/extended/v1/tx/{txid}`), but requires substantial additional disk space for the index database. | `false` | | [use_test_genesis_chainstate](#node-use_test_genesis_chainstate) | If set to `true`, the node initializes its state using an alternative test genesis block definition,
loading different initial balances, names, and lockups than the standard network genesis.

**Notes:**
- This is intended strictly for testing purposes and is disallowed on mainnet. | `None` (uses standard network genesis) | | [wait_time_for_blocks](#node-wait_time_for_blocks) | When operating as a miner, this specifies the maximum time (in milliseconds)
the node waits after detecting a new burnchain block to synchronize corresponding
Stacks block data from the network before resuming mining attempts.
If synchronization doesn't complete within this duration, mining resumes anyway
to prevent stalling. This setting is loaded by all nodes but primarily affects
miner behavior within the relayer thread.

**Units:** milliseconds | `30_000` (30 seconds) | From a2b15089d223f1534d7caafb77b032ce82956a52 Mon Sep 17 00:00:00 2001 From: Francesco Leacche Date: Fri, 6 Jun 2025 18:44:29 +0100 Subject: [PATCH 09/20] add custom section mappings and template support --- .../tools/config-docs-generator/Dockerfile | 8 +- contrib/tools/config-docs-generator/README.md | 41 ++-- .../generate-config-docs.sh | 39 ++-- .../section_name_mappings.json | 9 + .../config-docs-generator/src/extract_docs.rs | 59 +++-- .../src/generate_markdown.rs | 208 +++++++++++++----- .../templates/reference_template.md | 11 + docs/generated/configuration-reference.md | 22 +- 8 files changed, 276 insertions(+), 121 deletions(-) create mode 100644 contrib/tools/config-docs-generator/section_name_mappings.json create mode 100644 contrib/tools/config-docs-generator/templates/reference_template.md diff --git a/contrib/tools/config-docs-generator/Dockerfile b/contrib/tools/config-docs-generator/Dockerfile index de96b6beaf..49ad4a68b4 100644 --- a/contrib/tools/config-docs-generator/Dockerfile +++ b/contrib/tools/config-docs-generator/Dockerfile @@ -16,12 +16,14 @@ WORKDIR /project_root # Set environment variables for generate-config-docs.sh ENV PROJECT_ROOT=/project_root -ENV BUILD_ROOT=/build ENV CARGO_HOME=/project_root/.cargo +ENV CARGO_TARGET_DIR=/tmp/stacks-config-docs/target +ENV TEMP_DIR=/tmp/stacks-config-docs/doc-generation ENV EXTRACT_DOCS_BIN=/build/target/release/extract-docs ENV GENERATE_MARKDOWN_BIN=/build/target/release/generate-markdown ENV SKIP_BUILD=true -# Set the entrypoint to run the config docs generation script -# The script ends up at /build/generate-config-docs.sh due to the copy operation +# Create the Docker-specific temp directory +RUN mkdir -p /tmp/stacks-config-docs + ENTRYPOINT ["/build/generate-config-docs.sh"] diff --git a/contrib/tools/config-docs-generator/README.md b/contrib/tools/config-docs-generator/README.md index ee999c22c9..e749ef70b4 100644 --- a/contrib/tools/config-docs-generator/README.md +++ b/contrib/tools/config-docs-generator/README.md @@ -9,6 +9,9 @@ This tool automatically generates markdown documentation from Rust configuration The easiest way to generate configuration documentation: ```bash +# Navigate to the config-docs-generator directory +cd contrib/tools/config-docs-generator + # Build the Docker image (one-time setup) docker build -t config-docs-generator . @@ -28,8 +31,11 @@ If you prefer to run without Docker: # Install nightly toolchain if needed rustup toolchain install nightly +# Navigate to the config-docs-generator directory +cd contrib/tools/config-docs-generator + # Generate documentation -./contrib/tools/config-docs-generator/generate-config-docs.sh +./generate-config-docs.sh ``` ## What It Does @@ -40,8 +46,8 @@ The tool processes these configuration structs from the Stacks codebase: - `MinerConfig` → `[miner]` section - `ConnectionOptionsFile` → `[connection_options]` section - `FeeEstimationConfigFile` → `[fee_estimation]` section -- `EventObserverConfigFile` → `[event_observer]` section -- `InitialBalanceFile` → `[initial_balances]` section +- `EventObserverConfigFile` → `[[events_observer]]` section +- `InitialBalanceFile` → `[[ustx_balance]]` section For each configuration field, it extracts: - Field documentation from `///` comments @@ -335,31 +341,20 @@ pub struct YourNewConfig { - **@deprecated**: Deprecation message - **@toml_example**: Example TOML configuration -### 3. Add Section Mapping (Optional) +### 3. 
Generate -If you want a custom TOML section name, edit `src/generate_markdown.rs`: - -```rust -fn struct_to_section_name(struct_name: &str) -> String { - match struct_name { - "YourNewConfig" => "[your_custom_section]".to_string(), - // ... existing mappings - _ => format!("[{}]", struct_name.to_lowercase()), - } -} -``` - -### 4. Generate and Verify +Override TOML section names using JSON configuration: ```bash -# Using Docker (recommended) -docker run --rm -v "$(pwd)/../../../:/project_root" config-docs-generator +# Using Docker with custom mappings and template +cd contrib/tools/config-docs-generator +docker run --rm -v "$(pwd)/../../../:/project_root" \ + -e SECTION_MAPPINGS_PATH="/build/contrib/tools/config-docs-generator/custom_mappings.json" \ + -e TEMPLATE_PATH="/build/contrib/tools/config-docs-generator/templates/custom_template.md" \ + config-docs-generator # OR using local setup -./contrib/tools/config-docs-generator/generate-config-docs.sh - -# Check that your struct appears -grep -A 5 "your_custom_section" docs/generated/configuration-reference.md +./generate-config-docs.sh --section-name-mappings custom_mappings.json --template custom_template.md ``` ## How It Works diff --git a/contrib/tools/config-docs-generator/generate-config-docs.sh b/contrib/tools/config-docs-generator/generate-config-docs.sh index de4c951c61..4e6b5f9d05 100755 --- a/contrib/tools/config-docs-generator/generate-config-docs.sh +++ b/contrib/tools/config-docs-generator/generate-config-docs.sh @@ -11,20 +11,22 @@ NC='\033[0m' # No Color # Configuration - Allow environment variable overrides SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" PROJECT_ROOT="${PROJECT_ROOT:-$(cd "$SCRIPT_DIR/../../../" && pwd)}" -BUILD_ROOT="${BUILD_ROOT:-$PROJECT_ROOT}" OUTPUT_DIR="$PROJECT_ROOT/docs/generated" -TEMP_DIR="$PROJECT_ROOT/target/doc-generation" -CONFIG_SOURCE_FILE="$PROJECT_ROOT/stackslib/src/config/mod.rs" +CARGO_TARGET_DIR="${CARGO_TARGET_DIR:-$PROJECT_ROOT/target}" +TEMP_DIR="${TEMP_DIR:-$CARGO_TARGET_DIR/doc-generation}" -# Paths to binaries - allow override via environment -EXTRACT_DOCS_BIN="${EXTRACT_DOCS_BIN:-$BUILD_ROOT/target/release/extract-docs}" -GENERATE_MARKDOWN_BIN="${GENERATE_MARKDOWN_BIN:-$BUILD_ROOT/target/release/generate-markdown}" +# Binary paths - allow override via environment +EXTRACT_DOCS_BIN="${EXTRACT_DOCS_BIN:-$CARGO_TARGET_DIR/release/extract-docs}" +GENERATE_MARKDOWN_BIN="${GENERATE_MARKDOWN_BIN:-$CARGO_TARGET_DIR/release/generate-markdown}" + +# Template and mappings paths - allow override via environment +TEMPLATE_PATH="${TEMPLATE_PATH:-$SCRIPT_DIR/templates/reference_template.md}" +SECTION_MAPPINGS_PATH="${SECTION_MAPPINGS_PATH:-$SCRIPT_DIR/section_name_mappings.json}" # Check if binaries are pre-built (skip build step) SKIP_BUILD="${SKIP_BUILD:-false}" -if [[ -f "$EXTRACT_DOCS_BIN" && -f "$GENERATE_MARKDOWN_BIN" ]]; then - SKIP_BUILD=true -fi + +export CARGO_TARGET_DIR log_info() { echo -e "${GREEN}[INFO]${NC} $1" @@ -55,16 +57,8 @@ main() { cd "$PROJECT_ROOT" - # Verify source file exists - if [[ ! -f "$CONFIG_SOURCE_FILE" ]]; then - log_error "Config source file not found: $CONFIG_SOURCE_FILE" - exit 1 - fi - - # Step 1: Build the documentation generation tools (skip if pre-built) - if [[ "$SKIP_BUILD" == "true" ]]; then - log_info "Using pre-built documentation generation tools..." - else + # Step 1: Build the documentation generation tools + if [[ "$SKIP_BUILD" != "true" ]]; then log_info "Building documentation generation tools..." 
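        # Skipped when SKIP_BUILD=true (e.g., in the Docker image, which ships pre-built binaries)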
cargo build --package config-docs-generator --release fi @@ -84,9 +78,9 @@ main() { # Step 3: Generate Markdown log_info "Generating Markdown documentation..." MARKDOWN_OUTPUT="$OUTPUT_DIR/configuration-reference.md" - "$GENERATE_MARKDOWN_BIN" \ - --input "$EXTRACTED_JSON" \ - --output "$MARKDOWN_OUTPUT" + + # Call the command + "$GENERATE_MARKDOWN_BIN" --input "$EXTRACTED_JSON" --output "$MARKDOWN_OUTPUT" --template "$TEMPLATE_PATH" --section-name-mappings "$SECTION_MAPPINGS_PATH" log_info "Documentation generation complete!" log_info "Generated files:" @@ -145,6 +139,7 @@ while [[ $# -gt 0 ]]; do exit 1 ;; esac + shift done main "$@" diff --git a/contrib/tools/config-docs-generator/section_name_mappings.json b/contrib/tools/config-docs-generator/section_name_mappings.json new file mode 100644 index 0000000000..95a071a785 --- /dev/null +++ b/contrib/tools/config-docs-generator/section_name_mappings.json @@ -0,0 +1,9 @@ +{ + "BurnchainConfig": "[burnchain]", + "NodeConfig": "[node]", + "MinerConfig": "[miner]", + "ConnectionOptionsFile": "[connection_options]", + "FeeEstimationConfigFile": "[fee_estimation]", + "EventObserverConfigFile": "[[events_observer]]", + "InitialBalanceFile": "[[ustx_balance]]" +} diff --git a/contrib/tools/config-docs-generator/src/extract_docs.rs b/contrib/tools/config-docs-generator/src/extract_docs.rs index a3101f65e4..a7ddce71e8 100644 --- a/contrib/tools/config-docs-generator/src/extract_docs.rs +++ b/contrib/tools/config-docs-generator/src/extract_docs.rs @@ -75,7 +75,7 @@ fn main() -> Result<()> { .long("structs") .value_name("NAMES") .help("Comma-separated list of struct names to extract") - .required(false), + .required(true), ) .get_matches(); @@ -109,6 +109,11 @@ fn generate_rustdoc_json(package: &str) -> Result { // constants referenced in doc comments are added to the project let additional_crates = ["stacks-common"]; + // Respect CARGO_TARGET_DIR environment variable for rustdoc output + let rustdoc_target_dir = std::env::var("CARGO_TARGET_DIR") + .unwrap_or_else(|_| "target".to_string()) + + "/rustdoc-json"; + // WARNING: This tool relies on nightly rustdoc JSON output (-Z unstable-options --output-format json) // The JSON format is subject to change with new Rust nightly versions and could break this tool. 
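    // Pinning the nightly toolchain version is advisable when running outside the Docker image.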
// Use cargo rustdoc with nightly to generate JSON for the main package @@ -120,7 +125,7 @@ fn generate_rustdoc_json(package: &str) -> Result { "-p", package, "--target-dir", - "target/rustdoc-json", + &rustdoc_target_dir, "--", "-Z", "unstable-options", @@ -150,7 +155,7 @@ fn generate_rustdoc_json(package: &str) -> Result { "-p", additional_crate, "--target-dir", - "target/rustdoc-json", + &rustdoc_target_dir, "--", "-Z", "unstable-options", @@ -180,7 +185,7 @@ fn generate_rustdoc_json(package: &str) -> Result { }; // Read the generated JSON file - rustdoc generates it based on library name - let json_file_path = format!("target/rustdoc-json/doc/{}.json", lib_name); + let json_file_path = format!("{}/doc/{}.json", rustdoc_target_dir, lib_name); let json_content = std::fs::read_to_string(json_file_path) .context("Failed to read generated rustdoc JSON file")?; @@ -443,7 +448,10 @@ fn parse_field_documentation( "false" | "no" | "0" => false, _ => { // Default to false for invalid values, but could log a warning in the future - eprintln!("Warning: Invalid @required value '{}' for field '{}', defaulting to false", required_text, field_name); + eprintln!( + "Warning: Invalid @required value '{}' for field '{}', defaulting to false", + required_text, field_name + ); false } }; @@ -480,7 +488,8 @@ fn parse_literal_block_scalar(lines: &[&str], _base_indent: usize) -> String { } // Find the first non-empty content line to determine block indentation - let content_lines: Vec<&str> = lines.iter() + let content_lines: Vec<&str> = lines + .iter() .skip_while(|line| line.trim().is_empty()) .copied() .collect(); @@ -531,7 +540,8 @@ fn parse_folded_block_scalar(lines: &[&str], _base_indent: usize) -> String { } // Find the first non-empty content line to determine block indentation - let content_lines: Vec<&str> = lines.iter() + let content_lines: Vec<&str> = lines + .iter() .skip_while(|line| line.trim().is_empty()) .copied() .collect(); @@ -642,7 +652,11 @@ fn extract_annotation(metadata_section: &str, annotation_name: &str) -> Option = block_lines.iter().map(|s| s.to_string()).collect(); @@ -659,7 +673,11 @@ fn extract_annotation(metadata_section: &str, annotation_name: &str) -> Option') { // Folded block scalar mode (>) // Content starts from the next line, ignoring any text after > on the same line - let block_lines = collect_annotation_block_lines(&all_lines, annotation_line_idx + 1, annotation_line); + let block_lines = collect_annotation_block_lines( + &all_lines, + annotation_line_idx + 1, + annotation_line, + ); // Convert to owned strings for the parser let owned_lines: Vec = block_lines.iter().map(|s| s.to_string()).collect(); @@ -684,7 +702,11 @@ fn extract_annotation(metadata_section: &str, annotation_name: &str) -> Option Option( all_lines: &[&'a str], start_idx: usize, - annotation_line: &str + annotation_line: &str, ) -> Vec<&'a str> { let mut block_lines = Vec::new(); let annotation_indent = annotation_line.len() - annotation_line.trim_start().len(); @@ -2108,7 +2130,10 @@ and includes various formatting. let result = parse_field_documentation(doc_text, "test_field").unwrap(); assert_eq!(result.0.name, "test_field"); - assert_eq!(result.0.description, "Field with required and units annotations."); + assert_eq!( + result.0.description, + "Field with required and units annotations." 
+ ); assert_eq!(result.0.default_value, Some("`5000`".to_string())); assert_eq!(result.0.required, Some(true)); assert_eq!(result.0.units, Some("milliseconds".to_string())); @@ -2232,7 +2257,10 @@ and includes various formatting. let result = parse_field_documentation(doc_text, "test_field").unwrap(); let (field_doc, referenced_constants) = result; - assert_eq!(field_doc.units, Some("[`DEFAULT_TIMEOUT_MS`] milliseconds".to_string())); + assert_eq!( + field_doc.units, + Some("[`DEFAULT_TIMEOUT_MS`] milliseconds".to_string()) + ); // Check that constants were collected from units assert!(referenced_constants.contains("DEFAULT_TIMEOUT_MS")); } @@ -2402,7 +2430,10 @@ and includes various formatting. // Test empty @required annotation (should return None, not Some(false)) let doc_text_empty = "Test field.\n---\n@required:"; let result_empty = parse_field_documentation(doc_text_empty, "test_field").unwrap(); - assert_eq!(result_empty.0.required, None, "Empty @required should not be parsed"); + assert_eq!( + result_empty.0.required, None, + "Empty @required should not be parsed" + ); } #[test] diff --git a/contrib/tools/config-docs-generator/src/generate_markdown.rs b/contrib/tools/config-docs-generator/src/generate_markdown.rs index 3c7e5d1cec..4a0d6c2085 100644 --- a/contrib/tools/config-docs-generator/src/generate_markdown.rs +++ b/contrib/tools/config-docs-generator/src/generate_markdown.rs @@ -54,6 +54,8 @@ struct GlobalContext { field_to_struct: HashMap, // Map from constant name to value (if we can extract them) constants: HashMap, + // Custom section name mappings + custom_mappings: HashMap, } // Static regex for finding intra-documentation links - compiled once at startup @@ -77,10 +79,28 @@ fn main() -> Result<()> { .help("Output Markdown file") .required(true), ) + .arg( + Arg::new("template") + .long("template") + .value_name("FILE") + .help( + "Optional markdown template file (defaults to templates/reference_template.md)", + ) + .required(true), + ) + .arg( + Arg::new("mappings") + .long("section-name-mappings") + .value_name("FILE") + .help("Optional JSON file for struct name to TOML section name mappings") + .required(true), + ) .get_matches(); let input_path = matches.get_one::("input").unwrap(); let output_path = matches.get_one::("output").unwrap(); + let template_path = matches.get_one::("template").unwrap(); + let mappings_path = matches.get_one::("mappings").unwrap(); let input_content = fs::read_to_string(input_path) .with_context(|| format!("Failed to read input JSON file: {}", input_path))?; @@ -88,7 +108,9 @@ fn main() -> Result<()> { let config_docs: ConfigDocs = serde_json::from_str(&input_content).with_context(|| "Failed to parse input JSON")?; - let markdown = generate_markdown(&config_docs)?; + let custom_mappings = load_section_name_mappings(mappings_path)?; + + let markdown = generate_markdown(&config_docs, template_path, &custom_mappings)?; fs::write(output_path, markdown) .with_context(|| format!("Failed to write output file: {}", output_path))?; @@ -100,48 +122,96 @@ fn main() -> Result<()> { Ok(()) } -fn generate_markdown(config_docs: &ConfigDocs) -> Result { - let mut output = String::new(); +fn load_section_name_mappings(mappings_file: &str) -> Result> { + let content = fs::read_to_string(mappings_file).with_context(|| { + format!( + "Failed to read section name mappings file: {}", + mappings_file + ) + })?; - // Build global context for cross-references - let global_context = build_global_context(config_docs); + let mappings: HashMap = 
serde_json::from_str(&content).with_context(|| { + format!( + "Failed to parse section name mappings JSON: {}", + mappings_file + ) + })?; - // Header - output.push_str("# Stacks Node Configuration Reference\n\n"); - output.push_str("This document provides a comprehensive reference for all configuration options available in the Stacks node TOML configuration file.\n\n"); - output.push_str( - "The configuration is automatically generated from the Rust source code documentation.\n\n", - ); + Ok(mappings) +} + +fn load_template(template_path: &str) -> Result { + fs::read_to_string(template_path) + .with_context(|| format!("Failed to read template file: {}", template_path)) +} + +fn render_template(template: &str, variables: HashMap) -> String { + let mut result = template.to_string(); + + for (key, value) in variables { + let placeholder = format!("{{{{{}}}}}", key); + result = result.replace(&placeholder, &value); + } + + result +} + +fn generate_markdown( + config_docs: &ConfigDocs, + template_path: &str, + custom_mappings: &HashMap, +) -> Result { + // Load template + let template = load_template(template_path)?; - // Table of contents - output.push_str("## Table of Contents\n\n"); + // Build global context for cross-references + let global_context = build_global_context(config_docs, custom_mappings); + + // Build table of contents + let mut toc_content = String::new(); for struct_doc in &config_docs.structs { - let section_name = struct_to_section_name(&struct_doc.name); - output.push_str(&format!( + let section_name = struct_to_section_name(&struct_doc.name, custom_mappings); + toc_content.push_str(&format!( "- [{}]({})\n", section_name, section_anchor(§ion_name) )); } - output.push('\n'); // Generate sections for each struct + let mut struct_sections = String::new(); for struct_doc in &config_docs.structs { - generate_struct_section(&mut output, struct_doc, &global_context)?; - output.push('\n'); + generate_struct_section( + &mut struct_sections, + struct_doc, + &global_context, + custom_mappings, + )?; + struct_sections.push('\n'); } + // Prepare template variables + let mut template_vars = HashMap::new(); + template_vars.insert("toc_content".to_string(), toc_content); + template_vars.insert("struct_sections".to_string(), struct_sections); + + // Render template with variables + let output = render_template(&template, template_vars); + Ok(output) } -fn build_global_context(config_docs: &ConfigDocs) -> GlobalContext { +fn build_global_context( + config_docs: &ConfigDocs, + custom_mappings: &HashMap, +) -> GlobalContext { let mut struct_to_anchor = HashMap::new(); let mut field_to_struct = HashMap::new(); let mut resolved_constants_map = HashMap::new(); // Build mappings for struct_doc in &config_docs.structs { - let section_name = struct_to_section_name(&struct_doc.name); + let section_name = struct_to_section_name(&struct_doc.name, custom_mappings); let anchor = section_anchor(§ion_name); struct_to_anchor.insert(struct_doc.name.clone(), anchor.clone()); @@ -164,6 +234,7 @@ fn build_global_context(config_docs: &ConfigDocs) -> GlobalContext { struct_to_anchor, field_to_struct, constants: resolved_constants_map, + custom_mappings: custom_mappings.clone(), } } @@ -171,8 +242,9 @@ fn generate_struct_section( output: &mut String, struct_doc: &StructDoc, global_context: &GlobalContext, + custom_mappings: &HashMap, ) -> Result<()> { - let section_name = struct_to_section_name(&struct_doc.name); + let section_name = struct_to_section_name(&struct_doc.name, custom_mappings); 
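    // Write the section heading using the resolved TOML section name (custom mappings take precedence).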
     output.push_str(&format!("## {}\n\n", section_name));
 
     // Add struct description if available
@@ -222,7 +294,7 @@ fn generate_field_row(
     global_context: &GlobalContext,
 ) -> Result<()> {
     // Create proper anchor ID
-    let section_name = struct_to_section_name(struct_name);
+    let section_name = struct_to_section_name_with_context(struct_name, global_context);
     let anchor_id = format!(
         "{}-{}",
         section_name.trim_start_matches('[').trim_end_matches(']'),
@@ -366,7 +438,12 @@ fn is_deprecated(field: &FieldDoc) -> bool {
     field.deprecated.is_some()
 }
 
-fn struct_to_section_name(struct_name: &str) -> String {
+fn struct_to_section_name(struct_name: &str, custom_mappings: &HashMap<String, String>) -> String {
+    // Check custom mappings first
+    if let Some(section_name) = custom_mappings.get(struct_name) {
+        return section_name.clone();
+    }
+
     // Convert struct name to section name (e.g., "NodeConfig" -> "[node]")
     // NOTE: This function contains hardcoded mappings from Rust struct names to their
     // desired TOML section names in the Markdown output. It must be updated if new
     // top-level configuration structs are added or existing ones are renamed.
     match struct_name {
         "BurnchainConfig" => "[burnchain]".to_string(),
         "NodeConfig" => "[node]".to_string(),
         "MinerConfig" => "[miner]".to_string(),
         "ConnectionOptionsFile" => "[connection_options]".to_string(),
         "FeeEstimationConfigFile" => "[fee_estimation]".to_string(),
-        "EventObserverConfigFile" => "[event_observer]".to_string(),
-        "InitialBalanceFile" => "[initial_balances]".to_string(),
+        "EventObserverConfigFile" => "[[events_observer]]".to_string(),
+        "InitialBalanceFile" => "[[ustx_balance]]".to_string(),
         _ => format!("[{}]", struct_name.to_lowercase()),
     }
 }
 
+fn struct_to_section_name_with_context(
+    struct_name: &str,
+    global_context: &GlobalContext,
+) -> String {
+    struct_to_section_name(struct_name, &global_context.custom_mappings)
+}
+
 fn escape_markdown(text: &str) -> String {
     text.replace('|', "\\|")
         .replace('[', "\\[")
@@ -432,7 +516,8 @@ fn process_reference(
         .contains_key(ref_struct_name)
     {
         // Create proper anchor ID
-        let section_name = struct_to_section_name(ref_struct_name);
+        let section_name =
+            struct_to_section_name_with_context(ref_struct_name, global_context);
         let anchor_id = format!(
             "{}-{}",
             section_name.trim_start_matches('[').trim_end_matches(']'),
@@ -458,7 +543,8 @@ fn process_reference(
     // Check if it's a standalone field name (without struct prefix)
     if let Some((field_struct_name, _anchor)) = global_context.field_to_struct.get(reference) {
-        let section_name = struct_to_section_name(field_struct_name);
+        let section_name =
+            struct_to_section_name_with_context(field_struct_name, global_context);
         let anchor_id = format!(
             "{}-{}",
             section_name.trim_start_matches('[').trim_end_matches(']'),
@@ -581,6 +667,7 @@ mod tests {
             struct_to_anchor,
             field_to_struct,
             constants,
+            custom_mappings: HashMap::new(),
         }
     }
 
@@ -589,7 +676,7 @@ mod tests {
     #[test]
     fn test_generate_markdown_empty_config() {
         let config_docs = create_config_docs(vec![]);
-        let result = generate_markdown(&config_docs).unwrap();
+        let result = generate_markdown(&config_docs, None, &HashMap::new()).unwrap();
 
         assert!(result.contains("# Stacks Node Configuration Reference"));
         assert!(result.contains("## Table of Contents"));
@@ -601,7 +688,7 @@ mod tests {
     fn test_generate_markdown_with_one_struct_no_fields() {
         let struct_doc = create_struct_doc("TestStruct", Some("A test struct"), vec![]);
         let config_docs = create_config_docs(vec![struct_doc]);
-        let result = generate_markdown(&config_docs).unwrap();
+        let result = generate_markdown(&config_docs, None, &HashMap::new()).unwrap();
         assert!(result.contains("# Stacks Node Configuration Reference"));
         assert!(result.contains("- [[teststruct]](#teststruct)"));
@@ -615,7 +702,7 @@ mod tests {
         let field = create_field_doc("test_field", "A test field");
         let struct_doc = create_struct_doc("TestStruct", Some("A test struct"), vec![field]);
         let config_docs = create_config_docs(vec![struct_doc]);
-        let result = generate_markdown(&config_docs).unwrap();
+        let result = generate_markdown(&config_docs, None, &HashMap::new()).unwrap();
 
         assert!(result.contains("# Stacks Node Configuration Reference"));
         assert!(result.contains("- [[teststruct]](#teststruct)"));
@@ -630,31 +717,42 @@ mod tests {
 
     #[test]
     fn test_struct_to_section_name_known_structs() {
-        assert_eq!(struct_to_section_name("BurnchainConfig"), "[burnchain]");
-        assert_eq!(struct_to_section_name("NodeConfig"), "[node]");
-        assert_eq!(struct_to_section_name("MinerConfig"), "[miner]");
+        let mappings = HashMap::new();
+        assert_eq!(
+            struct_to_section_name("BurnchainConfig", &mappings),
+            "[burnchain]"
+        );
+        assert_eq!(struct_to_section_name("NodeConfig", &mappings), "[node]");
+        assert_eq!(struct_to_section_name("MinerConfig", &mappings), "[miner]");
         assert_eq!(
-            struct_to_section_name("ConnectionOptionsFile"),
+            struct_to_section_name("ConnectionOptionsFile", &mappings),
             "[connection_options]"
         );
         assert_eq!(
-            struct_to_section_name("FeeEstimationConfigFile"),
+            struct_to_section_name("FeeEstimationConfigFile", &mappings),
             "[fee_estimation]"
         );
         assert_eq!(
-            struct_to_section_name("EventObserverConfigFile"),
-            "[event_observer]"
+            struct_to_section_name("EventObserverConfigFile", &mappings),
+            "[[events_observer]]"
         );
         assert_eq!(
-            struct_to_section_name("InitialBalanceFile"),
-            "[initial_balances]"
+            struct_to_section_name("InitialBalanceFile", &mappings),
+            "[[ustx_balance]]"
        );
     }
 
     #[test]
     fn test_struct_to_section_name_unknown_struct() {
-        assert_eq!(struct_to_section_name("MyCustomConfig"), "[mycustomconfig]");
-        assert_eq!(struct_to_section_name("UnknownStruct"), "[unknownstruct]");
+        let mappings = HashMap::new();
+        assert_eq!(
+            struct_to_section_name("MyCustomConfig", &mappings),
+            "[mycustomconfig]"
+        );
+        assert_eq!(
+            struct_to_section_name("UnknownStruct", &mappings),
+            "[unknownstruct]"
+        );
     }
 
     #[test]
@@ -758,7 +856,8 @@ mod tests {
     #[test]
     fn test_generate_field_row_toml_example_preserves_newlines() {
         let mut field = create_field_doc("multiline_example", "Field with multiline TOML example");
-        field.toml_example = Some("key = \"value\"\nnested = {\n  sub_key = \"sub_value\"\n}".to_string());
+        field.toml_example =
+            Some("key = \"value\"\nnested = {\n  sub_key = \"sub_value\"\n}".to_string());
         let global_context = create_mock_global_context();
         let mut output = String::new();
@@ -774,10 +873,16 @@ mod tests {
         let code_content = &output[pre_start..pre_end + "</pre>".len()];
 
         // Should NOT contain <br> tags inside the code block
-        assert!(!code_content.contains("<br>"), "Code block should not contain <br> tags");
+        assert!(
+            !code_content.contains("<br>"),
+            "Code block should not contain <br> tags"
+        );
 
         // Should contain HTML entities for newlines instead
-        assert!(code_content.contains("&#10;"), "Code block should contain HTML entities for newlines");
+        assert!(
+            code_content.contains("&#10;"),
+            "Code block should contain HTML entities for newlines"
+        );
 
         // Should contain the key-value pairs
         assert!(code_content.contains("key = \"value\""));
@@ -942,7 +1047,8 @@ mod tests {
     #[test]
     fn test_generate_field_row_units_with_constants_and_intralinks() {
         let mut field = create_field_doc("timeout_field", "A timeout field");
-        field.units = Some("[`TEST_CONSTANT`] seconds (see [`NodeConfig::test_field`])".to_string());
+        field.units =
+            Some("[`TEST_CONSTANT`] seconds (see [`NodeConfig::test_field`])".to_string());
         field.default_value = Some("`30`".to_string());
         let global_context = create_mock_global_context();
         let mut output = String::new();
@@ -1003,7 +1109,7 @@ mod tests {
         // Test a field with all possible attributes
         let mut field = create_field_doc(
             "comprehensive_field",
-            "A comprehensive field demonstrating all features.\n\nThis includes multiple paragraphs."
+            "A comprehensive field demonstrating all features.\n\nThis includes multiple paragraphs.",
         );
         field.default_value = Some("`[\"default\", \"values\"]`".to_string());
         field.required = Some(false);
         field.units = Some("custom units".to_string());
@@ -1012,8 +1118,10 @@ mod tests {
         field.notes = Some(vec![
             "This is the first note with [`TEST_CONSTANT`]".to_string(),
             "This is the second note referencing [`NodeConfig::test_field`]".to_string(),
         ]);
-        field.deprecated = Some("Use new_comprehensive_field instead. Will be removed in v3.0.".to_string());
-        field.toml_example = Some("comprehensive_field = [\n  \"value1\",\n  \"value2\"\n]".to_string());
+        field.deprecated =
+            Some("Use new_comprehensive_field instead. Will be removed in v3.0.".to_string());
+        field.toml_example =
+            Some("comprehensive_field = [\n  \"value1\",\n  \"value2\"\n]".to_string());
         let global_context = create_mock_global_context();
         let mut output = String::new();
@@ -1039,7 +1147,9 @@ mod tests {
         // Verify notes with intralink processing
         assert!(output.contains("**Notes:**"));
         assert!(output.contains("- This is the first note with `42`")); // Constant resolved
-        assert!(output.contains("- This is the second note referencing [test_field](#node-test_field)")); // Intralink
+        assert!(
+            output.contains("- This is the second note referencing [test_field](#node-test_field)")
+        ); // Intralink
 
         // Verify deprecation warning
         assert!(output.contains("**⚠️ DEPRECATED:**"));
diff --git a/contrib/tools/config-docs-generator/templates/reference_template.md b/contrib/tools/config-docs-generator/templates/reference_template.md
new file mode 100644
index 0000000000..cee6c3dbf3
--- /dev/null
+++ b/contrib/tools/config-docs-generator/templates/reference_template.md
@@ -0,0 +1,11 @@
+# Stacks Node Configuration Reference
+
+This document provides a comprehensive reference for all configuration options available in the Stacks node TOML configuration file.
+
+The configuration is automatically generated from the Rust source code documentation.
+ +## Table of Contents + +{{toc_content}} + +{{struct_sections}} diff --git a/docs/generated/configuration-reference.md b/docs/generated/configuration-reference.md index 7883ff6f42..d6d550940c 100644 --- a/docs/generated/configuration-reference.md +++ b/docs/generated/configuration-reference.md @@ -6,30 +6,31 @@ The configuration is automatically generated from the Rust source code documenta ## Table of Contents -- [[initial_balances]](#initial_balances) -- [[event_observer]](#event_observer) +- [[[ustx_balance]]](#ustx_balance) +- [[[events_observer]]](#events_observer) - [[connection_options]](#connection_options) - [[fee_estimation]](#fee_estimation) - [[burnchain]](#burnchain) - [[node]](#node) - [[miner]](#miner) -## [initial_balances] + +## [[ustx_balance]] | Parameter | Description | Default | |-----------|-------------|----------| -| [address](#initial_balances-address) | The Stacks address to receive the initial STX balance.
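The `{{toc_content}}` and `{{struct_sections}}` placeholders in the template above are filled in by the `render_template` function added earlier in this patch. Its `format!("{{{{{}}}}}", key)` call is easy to misread: each doubled brace is an escaped literal brace, and the single `{}` in the middle interpolates the key. A minimal, self-contained sketch of that substitution (illustrative only, not part of the tool's test suite):

```rust
use std::collections::HashMap;

fn main() {
    let template = "## Table of Contents\n\n{{toc_content}}\n";

    let mut vars = HashMap::new();
    vars.insert("toc_content".to_string(), "- [[node]](#node)".to_string());

    // Same loop as render_template: "{{" and "}}" are escaped literal braces,
    // so format!("{{{{{}}}}}", "toc_content") produces "{{toc_content}}".
    let mut result = template.to_string();
    for (key, value) in vars {
        let placeholder = format!("{{{{{}}}}}", key);
        result = result.replace(&placeholder, &value);
    }

    assert_eq!(result, "## Table of Contents\n\n- [[node]](#node)\n");
}
```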
Must be a valid "non-mainnet" Stacks address (e.g., "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2"). | **Required** | -| [amount](#initial_balances-amount) | The amount of microSTX to allocate to the address at node startup.
1 STX = 1,000,000 microSTX.

**Units:** microSTX | **Required** | +| [address](#ustx_balance-address) | The Stacks address to receive the initial STX balance.
Must be a valid "non-mainnet" Stacks address (e.g., "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2"). | **Required** | +| [amount](#ustx_balance-amount) | The amount of microSTX to allocate to the address at node startup.
1 STX = 1,000,000 microSTX.

**Units:** microSTX | **Required** | -## [event_observer] +## [[events_observer]] | Parameter | Description | Default | |-----------|-------------|----------| -| [disable_retries](#event_observer-disable_retries) | Controls whether the node should retry sending event notifications if delivery fails or times out.
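The `address` and `amount` rows above together describe one `[[ustx_balance]]` entry. A minimal sketch of how such an entry looks in the node's TOML file, reusing the example address from the documentation (the amount is illustrative):

```toml
# Each [[ustx_balance]] table allocates an initial balance to one address;
# repeat the table for additional addresses.
[[ustx_balance]]
address = "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2"
amount = 10000000  # 10 STX expressed in microSTX (1 STX = 1,000,000 microSTX)
```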

- If `false` (default): The node will attempt to deliver event notifications persistently.
If an attempt fails (due to network error, timeout, or a non-200 HTTP response), the event
payload is saved and retried indefinitely. This ensures that all events will eventually be
delivered. However, this can cause the node's block processing to stall if an observer is
down, or indefinitely fails to process the event.

- If `true`: The node will make only a single attempt to deliver each event notification.
If this single attempt fails for any reason, the event is discarded, and no further retries
will be made for that specific event.

**Notes:**
- **Warning:** Setting this to `true` can lead to missed events if the observer endpoint is temporarily unavailable or experiences issues. | `false` (retries are enabled) | -| [endpoint](#event_observer-endpoint) | URL endpoint (hostname and port) where event notifications will be sent via HTTP POST requests.

The node will automatically prepend `http://` to this endpoint and append the
specific event path (e.g., `/new_block`, `/new_mempool_tx`).
Therefore, this value should be specified as `hostname:port` (e.g., "localhost:3700").

This should point to a service capable of receiving and processing Stacks event data.

**Notes:**
- **Do NOT include the `http://` scheme in this configuration value.**

**Example:**
endpoint = "localhost:3700"
| **Required** | -| [events_keys](#event_observer-events_keys) | List of event types that this observer is configured to receive.

Each string in the list specifies an event category or a specific event to subscribe to.
For an observer to receive any notifications, this list must contain at least one valid key.
Providing an invalid string that doesn't match any of the valid formats below will cause
the node to panic on startup when parsing the configuration.

All observers, regardless of their `events_keys` configuration, implicitly receive
payloads on the `/attachments/new` endpoint.

Valid Event Keys:
- `"*"`: Subscribes to a broad set of common events.
  - Events delivered to:
    - `/new_block`: For blocks containing transactions that generate STX, FT, NFT, or smart contract events.
    - `/new_microblocks`: For all new microblock streams. Note: Only until epoch 2.5.
    - `/new_mempool_tx`: For new mempool transactions.
    - `/drop_mempool_tx`: For dropped mempool transactions.
    - `/new_burn_block`: For new burnchain blocks.
  - Note: This key does NOT by itself subscribe to `/stackerdb_chunks` or `/proposal_response`.

- `"stx"`: Subscribes to STX token operation events (transfer, mint, burn, lock).
  - Events delivered to: `/new_block`, `/new_microblocks`.
  - Payload details: The "events" array in the delivered payloads will be filtered to include only STX-related events.

- `"memtx"`: Subscribes to new and dropped mempool transaction events.
  - Events delivered to: `/new_mempool_tx`, `/drop_mempool_tx`.

- `"burn_blocks"`: Subscribes to new burnchain block events.
  - Events delivered to: `/new_burn_block`.

- `"microblocks"`: Subscribes to new microblock stream events.
  - Events delivered to: `/new_microblocks`.
  - Payload details:
    - The "transactions" field will contain all transactions from the microblocks.
    - The "events" field will contain STX, FT, NFT, or specific smart contract events
*only if* this observer is also subscribed to those more specific event types
(e.g., via `"stx"`, `"*"`, a specific contract event key, or a specific asset identifier key).
  - Note: Only until epoch 2.5.

- `"stackerdb"`: Subscribes to StackerDB chunk update events.
  - Events delivered to: `/stackerdb_chunks`.

- `"block_proposal"`: Subscribes to block proposal response events (for Nakamoto consensus).
  - Events delivered to: `/proposal_response`.

- Smart Contract Event: Subscribes to a specific smart contract event.
  - Format: `"{contract_address}.{contract_name}::{event_name}"`
(e.g., `ST0000000000000000000000000000000000000000.my-contract::my-custom-event`)
  - Events delivered to: `/new_block`, `/new_microblocks`.
  - Payload details: The "events" array in the delivered payloads will be filtered for this specific event.

- Asset Identifier for FT/NFT Events: Subscribes to events (mint, burn, transfer) for a specific Fungible Token (FT) or Non-Fungible Token (NFT).
  - Format: `"{contract_address}.{contract_name}.{asset_name}"`
(e.g., for an FT: `ST0000000000000000000000000000000000000000.my-ft-contract.my-fungible-token`)
  - Events delivered to: `/new_block`, `/new_microblocks`.
  - Payload details: The "events" array in the delivered payloads will be filtered for events related to the specified asset.

**Notes:**
- For more detailed documentation, check the event-dispatcher docs in the `/docs` folder.

**Example:**
events_keys = [
  "burn_blocks",
  "memtx",
  "ST0000000000000000000000000000000000000000.my-contract::my-custom-event",
  "ST0000000000000000000000000000000000000000.token-contract.my-ft"
]
| **Required** | -| [timeout_ms](#event_observer-timeout_ms) | Maximum duration (in milliseconds) to wait for the observer endpoint to respond.

When the node sends an event notification to this observer, it will wait at most this long
for a successful HTTP response (status code 200) before considering the request timed out.
If a timeout occurs and retries are enabled (see `EventObserverConfig::disable_retries`),
the request will be attempted again according to the retry strategy.

**Units:** milliseconds | `1_000` | +| [disable_retries](#events_observer-disable_retries) | Controls whether the node should retry sending event notifications if delivery fails or times out.

- If `false` (default): The node will attempt to deliver event notifications persistently.
If an attempt fails (due to network error, timeout, or a non-200 HTTP response), the event
payload is saved and retried indefinitely. This ensures that all events will eventually be
delivered. However, this can cause the node's block processing to stall if an observer is
down, or indefinitely fails to process the event.

- If `true`: The node will make only a single attempt to deliver each event notification.
If this single attempt fails for any reason, the event is discarded, and no further retries
will be made for that specific event.

**Notes:**
- **Warning:** Setting this to `true` can lead to missed events if the observer endpoint is temporarily unavailable or experiences issues. | `false` (retries are enabled) | +| [endpoint](#events_observer-endpoint) | URL endpoint (hostname and port) where event notifications will be sent via HTTP POST requests.

The node will automatically prepend `http://` to this endpoint and append the
specific event path (e.g., `/new_block`, `/new_mempool_tx`).
Therefore, this value should be specified as `hostname:port` (e.g., "localhost:3700").

This should point to a service capable of receiving and processing Stacks event data.

**Notes:**
- **Do NOT include the `http://` scheme in this configuration value.**

**Example:**
endpoint = "localhost:3700"
| **Required** | +| [events_keys](#events_observer-events_keys) | List of event types that this observer is configured to receive.

Each string in the list specifies an event category or a specific event to subscribe to.
For an observer to receive any notifications, this list must contain at least one valid key.
Providing an invalid string that doesn't match any of the valid formats below will cause
the node to panic on startup when parsing the configuration.

All observers, regardless of their `events_keys` configuration, implicitly receive
payloads on the `/attachments/new` endpoint.

Valid Event Keys:
- `"*"`: Subscribes to a broad set of common events.
  - Events delivered to:
    - `/new_block`: For blocks containing transactions that generate STX, FT, NFT, or smart contract events.
    - `/new_microblocks`: For all new microblock streams. Note: Only until epoch 2.5.
    - `/new_mempool_tx`: For new mempool transactions.
    - `/drop_mempool_tx`: For dropped mempool transactions.
    - `/new_burn_block`: For new burnchain blocks.
  - Note: This key does NOT by itself subscribe to `/stackerdb_chunks` or `/proposal_response`.

- `"stx"`: Subscribes to STX token operation events (transfer, mint, burn, lock).
  - Events delivered to: `/new_block`, `/new_microblocks`.
  - Payload details: The "events" array in the delivered payloads will be filtered to include only STX-related events.

- `"memtx"`: Subscribes to new and dropped mempool transaction events.
  - Events delivered to: `/new_mempool_tx`, `/drop_mempool_tx`.

- `"burn_blocks"`: Subscribes to new burnchain block events.
  - Events delivered to: `/new_burn_block`.

- `"microblocks"`: Subscribes to new microblock stream events.
  - Events delivered to: `/new_microblocks`.
  - Payload details:
    - The "transactions" field will contain all transactions from the microblocks.
    - The "events" field will contain STX, FT, NFT, or specific smart contract events
*only if* this observer is also subscribed to those more specific event types
(e.g., via `"stx"`, `"*"`, a specific contract event key, or a specific asset identifier key).
  - Note: Only until epoch 2.5.

- `"stackerdb"`: Subscribes to StackerDB chunk update events.
  - Events delivered to: `/stackerdb_chunks`.

- `"block_proposal"`: Subscribes to block proposal response events (for Nakamoto consensus).
  - Events delivered to: `/proposal_response`.

- Smart Contract Event: Subscribes to a specific smart contract event.
  - Format: `"{contract_address}.{contract_name}::{event_name}"`
(e.g., `ST0000000000000000000000000000000000000000.my-contract::my-custom-event`)
  - Events delivered to: `/new_block`, `/new_microblocks`.
  - Payload details: The "events" array in the delivered payloads will be filtered for this specific event.

- Asset Identifier for FT/NFT Events: Subscribes to events (mint, burn, transfer) for a specific Fungible Token (FT) or Non-Fungible Token (NFT).
  - Format: `"{contract_address}.{contract_name}.{asset_name}"`
(e.g., for an FT: `ST0000000000000000000000000000000000000000.my-ft-contract.my-fungible-token`)
  - Events delivered to: `/new_block`, `/new_microblocks`.
  - Payload details: The "events" array in the delivered payloads will be filtered for events related to the specified asset.

**Notes:**
- For more detailed documentation, check the event-dispatcher docs in the `/docs` folder.

**Example:**
events_keys = [
  "burn_blocks",
  "memtx",
  "ST0000000000000000000000000000000000000000.my-contract::my-custom-event",
  "ST0000000000000000000000000000000000000000.token-contract.my-ft"
]
| **Required** | +| [timeout_ms](#events_observer-timeout_ms) | Maximum duration (in milliseconds) to wait for the observer endpoint to respond.

When the node sends an event notification to this observer, it will wait at most this long
for a successful HTTP response (status code 200) before considering the request timed out.
If a timeout occurs and retries are enabled (see `EventObserverConfig::disable_retries`),
the request will be attempted again according to the retry strategy.

**Units:** milliseconds | `1_000` | ## [connection_options] @@ -223,3 +224,4 @@ The configuration is automatically generated from the Rust source code documenta | ~~[wait_on_interim_blocks](#miner-wait_on_interim_blocks)~~ | Amount of time while mining in nakamoto to wait in between mining interim blocks.
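Taken together, the `[[events_observer]]` options documented above form one observer entry. A sketch assembled from the documented example values and defaults:

```toml
# One observer; repeat the [[events_observer]] table for additional observers.
[[events_observer]]
endpoint = "localhost:3700"             # hostname:port, without the http:// scheme
events_keys = ["burn_blocks", "memtx"]  # at least one valid key is required
timeout_ms = 1000                       # documented default
disable_retries = false                 # documented default: deliver persistently
```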

**⚠️ DEPRECATED:** Use `min_time_between_blocks_ms` instead. | `None` |
+

From 51c0e103d7379aa8e6d3070bd9a4b6c9a2f951c9 Mon Sep 17 00:00:00 2001
From: Francesco Leacche
Date: Fri, 6 Jun 2025 18:56:08 +0100
Subject: [PATCH 10/20] fix tests

---
 .../src/generate_markdown.rs | 55 ++++++++++++-------
 1 file changed, 35 insertions(+), 20 deletions(-)

diff --git a/contrib/tools/config-docs-generator/src/generate_markdown.rs b/contrib/tools/config-docs-generator/src/generate_markdown.rs
index 4a0d6c2085..d7d26924e7 100644
--- a/contrib/tools/config-docs-generator/src/generate_markdown.rs
+++ b/contrib/tools/config-docs-generator/src/generate_markdown.rs
@@ -443,21 +443,7 @@ fn struct_to_section_name(struct_name: &str, custom_mappings: &HashMap<String, String>) -> String {
         return section_name.clone();
     }
 
-    // Convert struct name to section name (e.g., "NodeConfig" -> "[node]")
-    // NOTE: This function contains hardcoded mappings from Rust struct names to their
-    // desired TOML section names in the Markdown output. It must be updated if new
-    // top-level configuration structs are added or existing ones are renamed.
-    match struct_name {
-        "BurnchainConfig" => "[burnchain]".to_string(),
-        "NodeConfig" => "[node]".to_string(),
-        "MinerConfig" => "[miner]".to_string(),
-        "ConnectionOptionsFile" => "[connection_options]".to_string(),
-        "FeeEstimationConfigFile" => "[fee_estimation]".to_string(),
-        "EventObserverConfigFile" => "[[events_observer]]".to_string(),
-        "InitialBalanceFile" => "[[ustx_balance]]".to_string(),
-        _ => format!("[{}]", struct_name.to_lowercase()),
-    }
+    format!("[{}]", struct_name.to_lowercase())
 }
 
 fn struct_to_section_name_with_context(
@@ -646,6 +632,11 @@ mod tests {
         let mut struct_to_anchor = HashMap::new();
         let mut field_to_struct = HashMap::new();
         let mut constants = HashMap::new();
+        let mut custom_mappings = HashMap::new();
+
+        // Add custom mappings like the real ones
+        custom_mappings.insert("NodeConfig".to_string(), "[node]".to_string());
+        custom_mappings.insert("MinerConfig".to_string(), "[miner]".to_string());
 
         // Add some test structs and fields
         struct_to_anchor.insert("NodeConfig".to_string(), "#node".to_string());
@@ -667,7 +658,7 @@ mod tests {
             struct_to_anchor,
             field_to_struct,
             constants,
-            custom_mappings: HashMap::new(),
+            custom_mappings,
         }
     }
 
@@ -676,7 +667,8 @@ mod tests {
     #[test]
     fn test_generate_markdown_empty_config() {
         let config_docs = create_config_docs(vec![]);
-        let result = generate_markdown(&config_docs, None, &HashMap::new()).unwrap();
+        let template_path = "templates/reference_template.md";
+        let result = generate_markdown(&config_docs, template_path, &HashMap::new()).unwrap();
 
         assert!(result.contains("# Stacks Node Configuration Reference"));
         assert!(result.contains("## Table of Contents"));
@@ -688,7 +680,8 @@ mod tests {
     fn test_generate_markdown_with_one_struct_no_fields() {
         let struct_doc = create_struct_doc("TestStruct", Some("A test struct"), vec![]);
         let config_docs = create_config_docs(vec![struct_doc]);
-        let result = generate_markdown(&config_docs, None, &HashMap::new()).unwrap();
+        let template_path = "templates/reference_template.md";
+        let result = generate_markdown(&config_docs, template_path, &HashMap::new()).unwrap();
 
         assert!(result.contains("# Stacks Node Configuration Reference"));
         assert!(result.contains("- [[teststruct]](#teststruct)"));
@@ -702,7 +695,8 @@ mod tests {
         let field = create_field_doc("test_field", "A test field");
         let struct_doc = create_struct_doc("TestStruct", Some("A test struct"), vec![field]);
         let config_docs = create_config_docs(vec![struct_doc]);
-        let result = generate_markdown(&config_docs, None, &HashMap::new()).unwrap();
+
let template_path = "templates/reference_template.md"; + let result = generate_markdown(&config_docs, template_path, &HashMap::new()).unwrap(); assert!(result.contains("# Stacks Node Configuration Reference")); assert!(result.contains("- [[teststruct]](#teststruct)")); @@ -717,7 +711,28 @@ mod tests { #[test] fn test_struct_to_section_name_known_structs() { - let mappings = HashMap::new(); + let mut mappings = HashMap::new(); + // Load the expected mappings based on section_name_mappings.json + mappings.insert("BurnchainConfig".to_string(), "[burnchain]".to_string()); + mappings.insert("NodeConfig".to_string(), "[node]".to_string()); + mappings.insert("MinerConfig".to_string(), "[miner]".to_string()); + mappings.insert( + "ConnectionOptionsFile".to_string(), + "[connection_options]".to_string(), + ); + mappings.insert( + "FeeEstimationConfigFile".to_string(), + "[fee_estimation]".to_string(), + ); + mappings.insert( + "EventObserverConfigFile".to_string(), + "[[events_observer]]".to_string(), + ); + mappings.insert( + "InitialBalanceFile".to_string(), + "[[ustx_balance]]".to_string(), + ); + assert_eq!( struct_to_section_name("BurnchainConfig", &mappings), "[burnchain]" From 90de815d049666fca7ab5531b4a1604e6360372e Mon Sep 17 00:00:00 2001 From: Francesco Leacche Date: Tue, 10 Jun 2025 15:59:17 +0200 Subject: [PATCH 11/20] update generated docs --- docs/generated/configuration-reference.md | 238 +++++++++++----------- 1 file changed, 119 insertions(+), 119 deletions(-) diff --git a/docs/generated/configuration-reference.md b/docs/generated/configuration-reference.md index d6d550940c..a424d8a56d 100644 --- a/docs/generated/configuration-reference.md +++ b/docs/generated/configuration-reference.md @@ -27,59 +27,59 @@ The configuration is automatically generated from the Rust source code documenta | Parameter | Description | Default | |-----------|-------------|----------| -| [disable_retries](#events_observer-disable_retries) | Controls whether the node should retry sending event notifications if delivery fails or times out.
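The mappings inserted in the test above mirror the JSON file passed via `--section-name-mappings` (the test comment refers to it as `section_name_mappings.json`; the file itself is added elsewhere in this series). A plausible sketch of its contents, inferred from the values exercised by the tests:

```json
{
  "BurnchainConfig": "[burnchain]",
  "NodeConfig": "[node]",
  "MinerConfig": "[miner]",
  "ConnectionOptionsFile": "[connection_options]",
  "FeeEstimationConfigFile": "[fee_estimation]",
  "EventObserverConfigFile": "[[events_observer]]",
  "InitialBalanceFile": "[[ustx_balance]]"
}
```

Anchor IDs stay clean for the double-bracketed names because `trim_start_matches('[')` and `trim_end_matches(']')` strip *all* leading and trailing brackets, so `[[events_observer]]` still yields anchors such as `#events_observer-timeout_ms`.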

- If `false` (default): The node will attempt to deliver event notifications persistently.
If an attempt fails (due to network error, timeout, or a non-200 HTTP response), the event
payload is saved and retried indefinitely. This ensures that all events will eventually be
delivered. However, this can cause the node's block processing to stall if an observer is
down, or indefinitely fails to process the event.

- If `true`: The node will make only a single attempt to deliver each event notification.
If this single attempt fails for any reason, the event is discarded, and no further retries
will be made for that specific event.

**Notes:**
- **Warning:** Setting this to `true` can lead to missed events if the observer endpoint is temporarily unavailable or experiences issues. | `false` (retries are enabled) | -| [endpoint](#events_observer-endpoint) | URL endpoint (hostname and port) where event notifications will be sent via HTTP POST requests.

The node will automatically prepend `http://` to this endpoint and append the
specific event path (e.g., `/new_block`, `/new_mempool_tx`).
Therefore, this value should be specified as `hostname:port` (e.g., "localhost:3700").

This should point to a service capable of receiving and processing Stacks event data.

**Notes:**
- **Do NOT include the `http://` scheme in this configuration value.**

**Example:**
endpoint = "localhost:3700"
| **Required** | -| [events_keys](#events_observer-events_keys) | List of event types that this observer is configured to receive.

Each string in the list specifies an event category or a specific event to subscribe to.
For an observer to receive any notifications, this list must contain at least one valid key.
Providing an invalid string that doesn't match any of the valid formats below will cause
the node to panic on startup when parsing the configuration.

All observers, regardless of their `events_keys` configuration, implicitly receive
payloads on the `/attachments/new` endpoint.

Valid Event Keys:
- `"*"`: Subscribes to a broad set of common events.
  - Events delivered to:
    - `/new_block`: For blocks containing transactions that generate STX, FT, NFT, or smart contract events.
    - `/new_microblocks`: For all new microblock streams. Note: Only until epoch 2.5.
    - `/new_mempool_tx`: For new mempool transactions.
    - `/drop_mempool_tx`: For dropped mempool transactions.
    - `/new_burn_block`: For new burnchain blocks.
  - Note: This key does NOT by itself subscribe to `/stackerdb_chunks` or `/proposal_response`.

- `"stx"`: Subscribes to STX token operation events (transfer, mint, burn, lock).
  - Events delivered to: `/new_block`, `/new_microblocks`.
  - Payload details: The "events" array in the delivered payloads will be filtered to include only STX-related events.

- `"memtx"`: Subscribes to new and dropped mempool transaction events.
  - Events delivered to: `/new_mempool_tx`, `/drop_mempool_tx`.

- `"burn_blocks"`: Subscribes to new burnchain block events.
  - Events delivered to: `/new_burn_block`.

- `"microblocks"`: Subscribes to new microblock stream events.
  - Events delivered to: `/new_microblocks`.
  - Payload details:
    - The "transactions" field will contain all transactions from the microblocks.
    - The "events" field will contain STX, FT, NFT, or specific smart contract events
*only if* this observer is also subscribed to those more specific event types
(e.g., via `"stx"`, `"*"`, a specific contract event key, or a specific asset identifier key).
  - Note: Only until epoch 2.5.

- `"stackerdb"`: Subscribes to StackerDB chunk update events.
  - Events delivered to: `/stackerdb_chunks`.

- `"block_proposal"`: Subscribes to block proposal response events (for Nakamoto consensus).
  - Events delivered to: `/proposal_response`.

- Smart Contract Event: Subscribes to a specific smart contract event.
  - Format: `"{contract_address}.{contract_name}::{event_name}"`
(e.g., `ST0000000000000000000000000000000000000000.my-contract::my-custom-event`)
  - Events delivered to: `/new_block`, `/new_microblocks`.
  - Payload details: The "events" array in the delivered payloads will be filtered for this specific event.

- Asset Identifier for FT/NFT Events: Subscribes to events (mint, burn, transfer) for a specific Fungible Token (FT) or Non-Fungible Token (NFT).
  - Format: `"{contract_address}.{contract_name}.{asset_name}"`
(e.g., for an FT: `ST0000000000000000000000000000000000000000.my-ft-contract.my-fungible-token`)
  - Events delivered to: `/new_block`, `/new_microblocks`.
  - Payload details: The "events" array in the delivered payloads will be filtered for events related to the specified asset.

**Notes:**
- For more detailed documentation, check the event-dispatcher docs in the `/docs` folder.

**Example:**
events_keys = [
  "burn_blocks",
  "memtx",
  "ST0000000000000000000000000000000000000000.my-contract::my-custom-event",
  "ST0000000000000000000000000000000000000000.token-contract.my-ft"
]
| **Required** | -| [timeout_ms](#events_observer-timeout_ms) | Maximum duration (in milliseconds) to wait for the observer endpoint to respond.

When the node sends an event notification to this observer, it will wait at most this long
for a successful HTTP response (status code 200) before considering the request timed out.
If a timeout occurs and retries are enabled (see `EventObserverConfig::disable_retries`),
the request will be attempted again according to the retry strategy.

**Units:** milliseconds | `1_000` | +| [disable_retries](#events_observer-disable_retries) | Controls whether the node should retry sending event notifications if delivery
fails or times out.

- If `false` (default): The node will attempt to deliver event notifications
persistently. If an attempt fails (due to network error, timeout, or a
non-200 HTTP response), the event payload is saved and retried indefinitely.
This ensures that all events will eventually be delivered. However, this can
cause the node's block processing to stall if an observer is down, or
indefinitely fails to process the event.

- If `true`: The node will make only a single attempt to deliver each event
notification. If this single attempt fails for any reason, the event is
discarded, and no further retries will be made for that specific event.

**Notes:**
- **Warning:** Setting this to `true` can lead to missed events if the observer endpoint is temporarily unavailable or experiences issues. | `false` (retries are enabled) | +| [endpoint](#events_observer-endpoint) | URL endpoint (hostname and port) where event notifications will be sent via
HTTP POST requests.

The node will automatically prepend `http://` to this endpoint and append the
specific event path (e.g., `/new_block`, `/new_mempool_tx`). Therefore, this
value should be specified as `hostname:port` (e.g., "localhost:3700").

This should point to a service capable of receiving and processing Stacks event data.

**Notes:**
- **Do NOT include the `http://` scheme in this configuration value.**

**Example:**
endpoint = "localhost:3700"
| **Required** | +| [events_keys](#events_observer-events_keys) | List of event types that this observer is configured to receive.

Each string in the list specifies an event category or a specific event to
subscribe to. For an observer to receive any notifications, this list must
contain at least one valid key. Providing an invalid string that doesn't match
any of the valid formats below will cause the node to panic on startup when
parsing the configuration.

All observers, regardless of their `events_keys` configuration, implicitly
receive payloads on the `/attachments/new` endpoint.

Valid Event Keys:
- `"*"`: Subscribes to a broad set of common events.
  - Events delivered to:
    - `/new_block`: For blocks containing transactions that generate STX, FT,
NFT, or smart contract events.
    - `/new_microblocks`: For all new microblock streams. Note: Only until epoch 2.5.
    - `/new_mempool_tx`: For new mempool transactions.
    - `/drop_mempool_tx`: For dropped mempool transactions.
    - `/new_burn_block`: For new burnchain blocks.
  - Note: This key does NOT by itself subscribe to `/stackerdb_chunks` or `/proposal_response`.

- `"stx"`: Subscribes to STX token operation events (transfer, mint, burn, lock).
  - Events delivered to: `/new_block`, `/new_microblocks`.
  - Payload details: The "events" array in the delivered payloads will be
filtered to include only STX-related events.

- `"memtx"`: Subscribes to new and dropped mempool transaction events.
  - Events delivered to: `/new_mempool_tx`, `/drop_mempool_tx`.

- `"burn_blocks"`: Subscribes to new burnchain block events.
  - Events delivered to: `/new_burn_block`.

- `"microblocks"`: Subscribes to new microblock stream events.
  - Events delivered to: `/new_microblocks`.
  - Payload details:
    - The "transactions" field will contain all transactions from the microblocks.
    - The "events" field will contain STX, FT, NFT, or specific smart contract
events *only if* this observer is also subscribed to those more specific
event types (e.g., via `"stx"`, `"*"`, a specific contract event key,
or a specific asset identifier key).
  - Note: Only until epoch 2.5.

- `"stackerdb"`: Subscribes to StackerDB chunk update events.
  - Events delivered to: `/stackerdb_chunks`.

- `"block_proposal"`: Subscribes to block proposal response events (for Nakamoto consensus).
  - Events delivered to: `/proposal_response`.

- Smart Contract Event: Subscribes to a specific smart contract event.
  - Format: `"{contract_address}.{contract_name}::{event_name}"`
(e.g., `ST0000000000000000000000000000000000000000.my-contract::my-custom-event`)
  - Events delivered to: `/new_block`, `/new_microblocks`.
  - Payload details: The "events" array in the delivered payloads will be
filtered for this specific event.

- Asset Identifier for FT/NFT Events: Subscribes to events (mint, burn,
transfer) for a specific Fungible Token (FT) or Non-Fungible Token (NFT).
  - Format: `"{contract_address}.{contract_name}.{asset_name}"`
(e.g., for an FT: `ST0000000000000000000000000000000000000000.contract.token`)
  - Events delivered to: `/new_block`, `/new_microblocks`.
  - Payload details: The "events" array in the delivered payloads will be
filtered for events related to the specified asset.

**Notes:**
- For more detailed documentation, check the event-dispatcher docs in the `/docs` folder.

**Example:**
events_keys = [
  "burn_blocks",
  "memtx",
  "ST0000000000000000000000000000000000000000.my-contract::my-custom-event",
  "ST0000000000000000000000000000000000000000.token-contract.my-ft"
]
| **Required** | +| [timeout_ms](#events_observer-timeout_ms) | Maximum duration (in milliseconds) to wait for the observer endpoint to respond.

When the node sends an event notification to this observer, it will wait at
most this long for a successful HTTP response (status code 200) before
considering the request timed out. If a timeout occurs and retries are enabled
(see [disable_retries](#events_observer-disable_retries)), the request will be attempted
again according to the retry strategy.

**Units:** milliseconds | `1_000` | ## [connection_options] | Parameter | Description | Default | |-----------|-------------|----------| -| [auth_token](#connection_options-auth_token) | HTTP auth password to use when communicating with stacks-signer binary.

This token is used in the `Authorization` header for certain requests.
Primarily, it secures the communication channel between this node and a connected
`stacks-signer` instance.

It is also used to authenticate requests to `/v2/blocks?broadcast=1`.

**Notes:**
- **Requirement:** This field **must** be configured if the node needs to receive block proposals from a configured `stacks-signer` event_observer via the `/v3/block_proposal` endpoint. The value must match the token configured on the signer. | `None` (authentication disabled for relevant endpoints) | -| [block_proposal_max_age_secs](#connection_options-block_proposal_max_age_secs) | Maximum age (in seconds) allowed for a block proposal received via the `/v3/block_proposal` RPC endpoint.
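For orientation, the options in this table combine into a single `[connection_options]` block. A sketch using the defaults documented above (the `auth_token` value is a placeholder and must match the token configured on the signer):

```toml
[connection_options]
auth_token = "my-http-auth-token"   # placeholder value
block_proposal_max_age_secs = 600   # documented default
connect_timeout = 10                # seconds; documented default
dns_timeout = 15000                 # milliseconds; documented default
```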

If a block proposal is received whose timestamp is older than
the current time minus this configured value, the node will reject the proposal
with an HTTP 422 (Unprocessable Entity) error, considering it too stale.
This prevents the node from spending resources validating outdated proposals.

**Units:** seconds | `600` | -| [connect_timeout](#connection_options-connect_timeout) | Maximum duration (in seconds) a connection attempt is allowed to remain in the connecting state.

This applies to both incoming P2P and HTTP connections. If a remote peer initiates a connection
but does not complete the connection process (e.g., handshake for P2P) within this time, the node
will consider it unresponsive and drop the connection attempt.

**Units:** seconds | `10` | -| [disable_block_download](#connection_options-disable_block_download) | If true, completely disables the block download state machine.

The node will not attempt to download Stacks blocks (neither Nakamoto tenures nor
legacy blocks) from peers.

**Notes:**
- Intended for testing or specialized node configurations. | `false` | -| [disable_inbound_handshakes](#connection_options-disable_inbound_handshakes) | If true, prevents the node from processing initial handshake messages from new inbound P2P connections.

This effectively stops the node from establishing new authenticated inbound P2P sessions.
Outbound connections initiated by this node are unaffected.

**Notes:**
- Primarily intended for testing purposes. | `false` | -| [disable_inbound_walks](#connection_options-disable_inbound_walks) | If true, disables the neighbor discovery mechanism from starting walks from inbound peers.
Walks will only initiate from seed/bootstrap peers, outbound connections, or pingbacks.

**Notes:**
- Primarily intended for testing or specific network debugging scenarios. | `false` | -| [dns_timeout](#connection_options-dns_timeout) | Maximum time (in milliseconds) to wait for a DNS query to resolve.

When the node needs to resolve a hostname (e.g., from a peer's advertised `data_url`
or an Atlas attachment URL) into an IP address, it initiates a DNS lookup.
This setting defines the maximum duration the node will wait for the DNS server
to respond before considering the lookup timed out.

**Units:** milliseconds | `15_000` (15 seconds) | -| [force_disconnect_interval](#connection_options-force_disconnect_interval) | Fault injection setting for testing purposes. Interval (in seconds) for forced disconnection of all peers.

If set to a positive value, the node will periodically disconnect all of its P2P peers at
roughly this interval. This simulates network churn or partitioning for testing node resilience.

**Notes:**
- The code enforcing this behavior is conditionally compiled using `cfg!(test)` and is only active during test runs.
- This setting has no effect in standard production builds.

**Units:** seconds | `None` (feature disabled) | -| [handshake_timeout](#connection_options-handshake_timeout) | Maximum duration (in seconds) a P2P peer is allowed after connecting before completing the handshake.

If a P2P peer connects successfully but fails to send the necessary handshake messages
within this time, the node will consider it unresponsive and drop the connection.

**Units:** seconds | `5` | -| [heartbeat](#connection_options-heartbeat) | Interval (in seconds) at which this node expects to send or receive P2P keep-alive messages.

During the P2P handshake, this node advertises this configured `heartbeat` value to its peers.
Each peer uses the other's advertised heartbeat interval (plus a timeout margin) to monitor
responsiveness and detect potential disconnections. This node also uses its own configured
value to proactively send Ping messages if the connection would otherwise be idle, helping to
keep it active.

**Units:** seconds | `3_600` (1 hour) | -| [idle_timeout](#connection_options-idle_timeout) | Maximum idle time (in seconds) for HTTP connections.

This applies only to HTTP connections. It defines the maximum allowed time since the
last response was sent by the node to the client. An HTTP connection is dropped if
both this `idle_timeout` and the general [timeout](#connection_options-timeout) (time since last
request received) are exceeded.

**Units:** seconds | `15` | +| [auth_token](#connection_options-auth_token) | HTTP auth password to use when communicating with stacks-signer binary.

This token is used in the `Authorization` header for certain requests.
Primarily, it secures the communication channel between this node and a
connected `stacks-signer` instance.

It is also used to authenticate requests to `/v2/blocks?broadcast=1`.

**Notes:**
- This field **must** be configured if the node needs to receive block proposals from a configured `stacks-signer` [[events_observer]] via the `/v3/block_proposal` endpoint.
- The value must match the token configured on the signer. | `None` (authentication disabled for relevant endpoints) | +| [block_proposal_max_age_secs](#connection_options-block_proposal_max_age_secs) | Maximum age (in seconds) allowed for a block proposal received via the
`/v3/block_proposal` RPC endpoint.

If a block proposal is received whose timestamp is older than the current
time minus this configured value, the node will reject the proposal with an
HTTP 422 (Unprocessable Entity) error, considering it too stale. This
prevents the node from spending resources validating outdated proposals.

**Units:** seconds | `600` | +| [connect_timeout](#connection_options-connect_timeout) | Maximum duration (in seconds) a connection attempt is allowed to remain in
the connecting state.

This applies to both incoming P2P and HTTP connections. If a remote peer
initiates a connection but does not complete the connection process
(e.g., handshake for P2P) within this time, the node will consider it
unresponsive and drop the connection attempt.

**Units:** seconds | `10` | +| [disable_block_download](#connection_options-disable_block_download) | If true, completely disables the block download state machine.

The node will not attempt to download Stacks blocks (neither Nakamoto
tenures nor legacy blocks) from peers.

**Notes:**
- Intended for testing or specialized node configurations. | `false` | +| [disable_inbound_handshakes](#connection_options-disable_inbound_handshakes) | If true, prevents the node from processing initial handshake messages from new
inbound P2P connections.

This effectively stops the node from establishing new authenticated inbound
P2P sessions. Outbound connections initiated by this node are unaffected.

**Notes:**
- Primarily intended for testing purposes. | `false` | +| [disable_inbound_walks](#connection_options-disable_inbound_walks) | If true, disables the neighbor discovery mechanism from starting walks from
inbound peers. Walks will only initiate from seed/bootstrap peers, outbound
connections, or pingbacks.

**Notes:**
- Primarily intended for testing or specific network debugging scenarios. | `false` | +| [dns_timeout](#connection_options-dns_timeout) | Maximum time (in milliseconds) to wait for a DNS query to resolve.

When the node needs to resolve a hostname (e.g., from a peer's advertised
[[node].data_url](#node-data_url) or an Atlas attachment URL) into an IP address, it
initiates a DNS lookup. This setting defines the maximum duration the node will
wait for the DNS server to respond before considering the lookup timed out.

**Units:** milliseconds | `15_000` (15 seconds) | +| [force_disconnect_interval](#connection_options-force_disconnect_interval) | Fault injection setting for testing purposes. Interval (in seconds) for
forced disconnection of all peers.

If set to a positive value, the node will periodically disconnect all of its
P2P peers at roughly this interval. This simulates network churn or
partitioning for testing node resilience.

**Notes:**
- If set to a positive value, the node will periodically disconnect all of its P2P peers at roughly this interval.
- This simulates network churn or partitioning for testing node resilience.
- The code enforcing this behavior is conditionally compiled using `cfg!(test)` and is only active during test runs.
- This setting has no effect in standard production builds.

**Units:** seconds | `None` (feature disabled) | +| [handshake_timeout](#connection_options-handshake_timeout) | Maximum duration (in seconds) a P2P peer is allowed after connecting before
completing the handshake.

If a P2P peer connects successfully but fails to send the necessary handshake
messages within this time, the node will consider it unresponsive and drop the
connection.

**Units:** seconds | `5` | +| [heartbeat](#connection_options-heartbeat) | Interval (in seconds) at which this node expects to send or receive P2P
keep-alive messages.

During the P2P handshake, this node advertises this configured `heartbeat`
value to its peers. Each peer uses the other's advertised heartbeat
interval (plus a timeout margin) to monitor responsiveness and detect
potential disconnections. This node also uses its own configured value to
proactively send Ping messages if the connection would otherwise be idle,
helping to keep it active.

**Units:** seconds | `3_600` (1 hour) | +| [idle_timeout](#connection_options-idle_timeout) | Maximum idle time (in seconds) for HTTP connections.

This applies only to HTTP connections. It defines the maximum allowed time
since the last response was sent by the node to the client. An HTTP
connection is dropped if both this `idle_timeout` and the general
[timeout](#connection_options-timeout) (time since last request received) are exceeded.

**Units:** seconds | `15` | | [inbox_maxlen](#connection_options-inbox_maxlen) | Maximum number of messages allowed in the per-connection incoming buffer.
The limits apply individually to each established connection (both P2P and HTTP). | `100` | -| [inv_reward_cycles](#connection_options-inv_reward_cycles) | Lookback depth (in PoX reward cycles) for Nakamoto inventory synchronization requests.

When initiating an inventory sync cycle with a peer, the node requests data starting
from `inv_reward_cycles` cycles before the current target reward cycle. This determines
how much historical inventory information is requested in each sync attempt.

**Units:** PoX reward cycles | - `3` if [[burnchain].mode](#burnchain-mode) is `"mainnet"`
- `6` otherwise | -| [inv_sync_interval](#connection_options-inv_sync_interval) | Minimum interval (in seconds) between initiating inventory synchronization attempts with the same peer.
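As a concrete example: with the mainnet default of `3`, a sync cycle targeting reward cycle 100 requests inventory data starting from cycle 97.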

Acts as a per-peer cooldown to throttle sync requests. A new sync cycle with a peer generally
starts only after this interval has passed since the previous attempt began *and* the previous
cycle is considered complete.

**Units:** seconds | `45` | -| [log_neighbors_freq](#connection_options-log_neighbors_freq) | Frequency (in milliseconds) for logging the current P2P neighbor list at the DEBUG level.

If set to a non-zero value, the node will periodically log details about its currently
established P2P connections (neighbors). Setting this to 0 disables this periodic logging.

**Units:** milliseconds | `60_000` (1 minute) | -| [max_http_clients](#connection_options-max_http_clients) | Maximum total number of allowed concurrent HTTP connections.

This limits the total number of simultaneous connections the node's RPC/HTTP server
will accept. If this limit is reached, new incoming HTTP connection attempts
will be rejected. | `1000` | +| [inv_reward_cycles](#connection_options-inv_reward_cycles) | Lookback depth (in PoX reward cycles) for Nakamoto inventory synchronization requests.

When initiating an inventory sync cycle with a peer, the node requests data
starting from `inv_reward_cycles` cycles before the current target reward
cycle. This determines how much historical inventory information is requested
in each sync attempt.

**Units:** PoX reward cycles | - if [[burnchain].mode](#burnchain-mode) is `"mainnet"`: `3`
- else: `6` | +| [inv_sync_interval](#connection_options-inv_sync_interval) | Minimum interval (in seconds) between initiating inventory synchronization
attempts with the same peer.

Acts as a per-peer cooldown to throttle sync requests. A new sync cycle with
a peer generally starts only after this interval has passed since the previous
attempt began *and* the previous cycle is considered complete.

**Units:** seconds | `45` | +| [log_neighbors_freq](#connection_options-log_neighbors_freq) | Frequency (in milliseconds) for logging the current P2P neighbor list at the
DEBUG level.

If set to a non-zero value, the node will periodically log details about its
currently established P2P connections (neighbors). Setting this to 0 disables
this periodic logging.

**Units:** milliseconds | `60_000` (1 minute) | +| [max_http_clients](#connection_options-max_http_clients) | Maximum total number of allowed concurrent HTTP connections.

This limits the total number of simultaneous connections the node's RPC/HTTP
server will accept. If this limit is reached, new incoming HTTP connection
attempts will be rejected. | `1000` | | [max_inflight_attachments](#connection_options-max_inflight_attachments) | Maximum number of concurrent Atlas data attachment download requests allowed.

This limits how many separate download requests for Atlas data attachments
can be active simultaneously. Helps manage network resources when fetching
potentially large attachment data. | `6` | | [max_inflight_blocks](#connection_options-max_inflight_blocks) | Maximum number of concurrent Nakamoto block download requests allowed.

This limits how many separate block download processes for Nakamoto tenures
(both confirmed and unconfirmed) can be active simultaneously. Helps manage
network bandwidth and processing load during chain synchronization. | `6` | -| [max_sockets](#connection_options-max_sockets) | Maximum total number of concurrent network sockets the node is allowed to manage.

This limit applies globally to all types of sockets handled by the node's networking layer,
including listening sockets (P2P and RPC/HTTP), established P2P connections (inbound/outbound),
and established HTTP connections.
It serves as a hard limit to prevent the node from exhausting operating system
resources related to socket descriptors. | `800` | -| [maximum_call_argument_size](#connection_options-maximum_call_argument_size) | Maximum size (in bytes) of the HTTP request body for read-only contract calls.

This limit is enforced on the `Content-Length` of incoming requests to the
`/v2/contracts/call-read-only/...` RPC endpoint. It prevents excessively large
request bodies, which might contain numerous or very large hex-encoded function arguments,
from overwhelming the node.

**Notes:**
- Calculated as 20 * `clarity::vm::types::BOUND_VALUE_SERIALIZATION_HEX`.

**Units:** bytes | `83_886_080` (80 MiB) | -| [num_clients](#connection_options-num_clients) | Maximum number of allowed concurrent inbound P2P connections.

This acts as a hard limit. If the node already has this many active inbound P2P
connections, any new incoming P2P connection attempts will be rejected.
Outbound P2P connections initiated by this node are not counted against this limit. | `750` | -| [num_neighbors](#connection_options-num_neighbors) | Target number of peers for StackerDB replication.

Sets the maximum number of potential replication target peers requested from the
StackerDB control contract (`get-replication-targets`) when configuring a replica.

Note: Formerly (pre-Epoch 3.0), this also controlled the target peer count for
inventory synchronization. | `32` | +| [max_sockets](#connection_options-max_sockets) | Maximum total number of concurrent network sockets the node is allowed to manage.

This limit applies globally to all types of sockets handled by the node's
networking layer, including listening sockets (P2P and RPC/HTTP),
established P2P connections (inbound/outbound), and established HTTP connections.
It serves as a hard limit to prevent the node from exhausting operating
system resources related to socket descriptors. | `800` | +| [maximum_call_argument_size](#connection_options-maximum_call_argument_size) | Maximum size (in bytes) of the HTTP request body for read-only contract calls.

This limit is enforced on the `Content-Length` of incoming requests to the
`/v2/contracts/call-read-only/...` RPC endpoint. It prevents excessively large
request bodies, which might contain numerous or very large hex-encoded
function arguments, from overwhelming the node.

**Notes:**
- Calculated as 20 * `clarity::vm::types::BOUND_VALUE_SERIALIZATION_HEX`.

**Units:** bytes | `83_886_080` (80 MiB) | +| [num_clients](#connection_options-num_clients) | Maximum number of allowed concurrent inbound P2P connections.

This acts as a hard limit. If the node already has this many active inbound
P2P connections, any new incoming P2P connection attempts will be rejected.
Outbound P2P connections initiated by this node are not counted against this limit. | `750` | +| [num_neighbors](#connection_options-num_neighbors) | Target number of peers for StackerDB replication.

Sets the maximum number of potential replication target peers requested from
the StackerDB control contract (`get-replication-targets`) when configuring a replica.

**Notes:**
- Formerly (pre-Epoch 3.0), this also controlled the target peer count for inventory synchronization. | `32` | | [outbox_maxlen](#connection_options-outbox_maxlen) | Maximum number of messages allowed in the per-connection outgoing buffer.
The limit applies individually to each established connection (both P2P and HTTP). | `100` | -| [private_key_lifetime](#connection_options-private_key_lifetime) | Validity duration (in number of bitcoin blocks) for the node's P2P session private key.

The node uses a temporary private key for signing P2P messages. This key has an associated
expiry bitcoin block height stored in the peer database. When the current bitcoin height
reaches or exceeds the key's expiry height, the node automatically generates a new random
private key.
The expiry block height for this new key is calculated by adding the configured
[private_key_lifetime](#connection_options-private_key_lifetime) (in blocks) to the previous key's expiry block height.
The node then re-handshakes with peers to transition to the new key.
This provides periodic key rotation for P2P communication.

**Units:** bitcoin blocks | `9223372036854775807` (i64::MAX, effectively infinite, disabling automatic re-keying). | -| [private_neighbors](#connection_options-private_neighbors) | Whether to allow connections and interactions with peers having private IP addresses.

If `false` (default), the node will generally:
- Reject incoming connection attempts from peers with private IPs.
- Avoid initiating connections to peers known to have private IPs.
- Ignore peers with private IPs during neighbor discovery (walks).
- Skip querying peers with private IPs for mempool or StackerDB data.
- Filter out peers with private IPs from API responses listing potential peers.

Setting this to `true` disables these restrictions, which can be useful for local testing
environments or fully private network deployments. | `false` | -| [public_ip_address](#connection_options-public_ip_address) | The Public IPv4 address and port (e.g. "203.0.113.42:20444") to advertise to other nodes.

If this option is not set (`None`), the node will attempt to automatically discover its
public IP address. | `None` (triggers automatic discovery attempt) | -| [read_only_call_limit_read_count](#connection_options-read_only_call_limit_read_count) | Maximum number of distinct read operations from Clarity data space allowed during a read-only call. | `30` | -| [read_only_call_limit_read_length](#connection_options-read_only_call_limit_read_length) | Maximum total size (in bytes) of data allowed to be read from Clarity data space (variables, maps)
during a read-only call.

**Units:** bytes | `100_000` (100 KB). | +| [private_key_lifetime](#connection_options-private_key_lifetime) | Validity duration (in number of bitcoin blocks) for the node's P2P session
private key.

The node uses a temporary private key for signing P2P messages. This key has
an associated expiry bitcoin block height stored in the peer database. When
the current bitcoin height reaches or exceeds the key's expiry height, the
node automatically generates a new random private key.
The expiry block height for this new key is calculated by adding the
configured [private_key_lifetime](#connection_options-private_key_lifetime) (in blocks) to the
previous key's expiry block height. The node then re-handshakes with peers
to transition to the new key. This provides periodic key rotation for P2P communication.

**Units:** bitcoin blocks | `9223372036854775807` (i64::MAX, effectively infinite, disabling automatic re-keying). | +| [private_neighbors](#connection_options-private_neighbors) | Whether to allow connections and interactions with peers having private IP addresses.

If `false` (default), the node will generally:
- Reject incoming connection attempts from peers with private IPs.
- Avoid initiating connections to peers known to have private IPs.
- Ignore peers with private IPs during neighbor discovery (walks).
- Skip querying peers with private IPs for mempool or StackerDB data.
- Filter out peers with private IPs from API responses listing potential peers.

Setting this to `true` disables these restrictions, which can be useful for
local testing environments or fully private network deployments. | `false` | +| [public_ip_address](#connection_options-public_ip_address) | The public IPv4 address and port (e.g. "203.0.113.42:20444") to advertise to other nodes.

If this option is not set (`None`), the node will attempt to automatically
discover its public IP address. | `None` (triggers automatic discovery attempt) | +| [read_only_call_limit_read_count](#connection_options-read_only_call_limit_read_count) | Maximum number of distinct read operations from Clarity data space allowed
during a read-only call. | `30` | +| [read_only_call_limit_read_length](#connection_options-read_only_call_limit_read_length) | Maximum total size (in bytes) of data allowed to be read from Clarity data
space (variables, maps) during a read-only call.

**Units:** bytes | `100_000` (100 KB). | | [read_only_call_limit_runtime](#connection_options-read_only_call_limit_runtime) | Runtime cost limit for an individual read-only function call. This represents
computation effort within the Clarity VM.
(See SIP-006: https://github.com/stacksgov/sips/blob/main/sips/sip-006/sip-006-runtime-cost-assessment.md)

**Units:** Clarity VM cost units | `1_000_000_000` | | [read_only_call_limit_write_count](#connection_options-read_only_call_limit_write_count) | Maximum number of distinct write operations allowed during a read-only call.

**Notes:**
- This limit is effectively forced to 0 by the API handler, ensuring read-only behavior.
- Configuring a non-zero value has no effect on read-only call execution. | `0` | | [read_only_call_limit_write_length](#connection_options-read_only_call_limit_write_length) | Maximum total size (in bytes) of data allowed to be written during a read-only call.

**Notes:**
- This limit is effectively forced to 0 by the API handler, ensuring read-only behavior.
- Configuring a non-zero value has no effect on read-only call execution.

**Units:** bytes | `0` | -| [reject_blocks_pushed](#connection_options-reject_blocks_pushed) | Controls whether the node accepts Nakamoto blocks pushed proactively by peers.

- If `true`: Pushed blocks are ignored (logged at DEBUG and discarded). The node will
still process blocks that it actively downloads.
- If `false`: Both pushed blocks and actively downloaded blocks are processed. | `false` | -| [soft_max_clients_per_host](#connection_options-soft_max_clients_per_host) | Soft limit on the number of inbound P2P connections allowed per host IP address.

During inbound connection pruning (when total inbound connections > [soft_num_clients](#connection_options-soft_num_clients) ),
the node checks if any single IP address has more connections than this limit.
If so, it preferentially prunes the newest connections originating from that
specific IP address until its count is reduced to this limit.
This prevents a single host from dominating the node's inbound connection capacity. | `4` | -| [soft_max_neighbors_per_org](#connection_options-soft_max_neighbors_per_org) | Soft limit on the number of outbound P2P connections per network organization (ASN).

During connection pruning (when total outbound connections > [soft_num_neighbors](#connection_options-soft_num_neighbors) ),
the node checks if any single network organization (identified by ASN) has more
outbound connections than this limit. If so, it preferentially prunes the least
healthy/newest connections from that overrepresented organization until its count
is reduced to this limit or the total outbound count reaches
[soft_num_neighbors](#connection_options-soft_num_neighbors) . This encourages connection diversity across
different network providers. | `32` | -| [soft_num_clients](#connection_options-soft_num_clients) | Soft limit threshold for triggering inbound P2P connection pruning.

If the total number of currently active inbound P2P connections exceeds this value,
the node will activate pruning logic to reduce the count, typically by applying
per-host limits (see [soft_max_clients_per_host](#connection_options-soft_max_clients_per_host) ).
This helps manage the overall load from inbound peers. | `750` | -| [soft_num_neighbors](#connection_options-soft_num_neighbors) | Target number of outbound P2P connections the node aims to maintain.

The connection pruning logic only activates if the current number of established
outbound P2P connections exceeds this value. Pruning aims to reduce the connection
count back down to this target, ensuring the node maintains a baseline number
of outbound peers for network connectivity. | `16` | -| [stackerdb_hint_replicas](#connection_options-stackerdb_hint_replicas) | Static list of preferred replica peers for specific StackerDB contracts, provided as a JSON string.

This allows manually specifying known peers to use for replicating particular StackerDBs,
potentially overriding or supplementing the peers discovered via the StackerDB's control contract.

Format: The configuration value must be a TOML string containing valid JSON.
The JSON structure must be an array of tuples, where each tuple pairs a contract identifier
with a list of preferred neighbor addresses:
`[[ContractIdentifier, [NeighborAddress, ...]], ...]`

1. `ContractIdentifier`: A JSON object representing the `QualifiedContractIdentifier`.
It must have the specific structure:
`{"issuer": [version_byte, [byte_array_20]], "name": "contract-name"}`

2. `NeighborAddress`: A JSON object specifying the peer details:
`{"ip": "...", "port": ..., "public_key_hash": "..."}`

**Notes:**
- Use this option with caution, primarily for advanced testing or bootstrapping.

**Example:**
stackerdb_hint_replicas = '''
[
  [
    {
      "issuer": [1, [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]],
      "name": "my-contract"
    },
    [
      {
        "ip": "192.0.2.1",
        "port": 20444,
        "public_key_hash": "0102030405060708090a0b0c0d0e0f1011121314"
      }
    ]
  ]
]
'''
| `None` (no hints provided) | -| [timeout](#connection_options-timeout) | General communication timeout (in seconds).

- For HTTP connections: Governs two timeout aspects:
  - Server-side: Defines the maximum allowed time since the last request was received from a client.
An idle connection is dropped if both this timeout and [idle_timeout](#connection_options-idle_timeout) are exceeded.
  - Client-side: Sets the timeout duration (TTL) for outgoing HTTP requests initiated by the node itself.
- For P2P connections: Used as the specific timeout for NAT punch-through requests.

**Units:** seconds | `15` | -| [walk_interval](#connection_options-walk_interval) | Minimum interval (in seconds) between the start of consecutive neighbor discovery walks.

The node periodically performs "neighbor walks" to discover new peers and maintain
an up-to-date view of the P2P network topology. This setting controls how frequently
these walks can be initiated, preventing excessive network traffic and processing.

**Units:** seconds | `60` | -| [walk_seed_probability](#connection_options-walk_seed_probability) | Probability (0.0 to 1.0) of forcing a neighbor walk to start from a seed/bootstrap peer.

This probability applies only when the node is not in Initial Block Download (IBD)
and is already connected to at least one seed/bootstrap peer.
Normally, in this situation, the walk would start from a random inbound or outbound peer.
However, with this probability, the walk is forced to start from a seed peer instead.
This helps ensure the node periodically re-establishes its network view from trusted entry points. | `0.1` (10%) | -| ~~[antientropy_public](#connection_options-antientropy_public)~~ | Controls whether a node with public inbound connections should still push blocks, even if not NAT'ed.

In the Stacks 2.x anti-entropy logic, if a node detected it had inbound connections
from public IPs (suggesting it wasn't behind NAT) and this flag was set to `false`,
it would refrain from proactively pushing blocks and microblocks to peers.
The assumption was that publicly reachable nodes should primarily serve downloads.
If set to `true` (default), the node would push data regardless of its perceived reachability.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. | `true` | -| ~~[antientropy_retry](#connection_options-antientropy_retry)~~ | Minimum interval (in seconds) between attempts to run the Epoch 2.x anti-entropy data push mechanism.

The Stacks 2.x anti-entropy protocol involves the node proactively pushing its known
Stacks blocks and microblocks to peers. This value specifies the cooldown period for this operation.
This prevents the node from excessively attempting to push data to its peers.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+.

**Units:** seconds | `3_600` (1 hour) | -| ~~[download_interval](#connection_options-download_interval)~~ | Minimum interval (in seconds) between consecutive block download scans in epoch 2.x.

In the pre-Nakamoto block download logic, if a full scan for blocks completed without
finding any new blocks to download, and if the known peer inventories had not changed,
the node would wait at least this duration before initiating the next download scan.
This throttled the downloader when the node was likely already synchronized.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+.

**Units:** seconds | `10` | +| [reject_blocks_pushed](#connection_options-reject_blocks_pushed) | Controls whether the node accepts Nakamoto blocks pushed proactively by peers.

- If `true`: Pushed blocks are ignored (logged at DEBUG and discarded). The
node will still process blocks that it actively downloads.
- If `false`: Both pushed blocks and actively downloaded blocks are processed. | `false` | +| [soft_max_clients_per_host](#connection_options-soft_max_clients_per_host) | Soft limit on the number of inbound P2P connections allowed per host IP address.

During inbound connection pruning (when total inbound connections >
[soft_num_clients](#connection_options-soft_num_clients) ), the node checks if any single
IP address has more connections than this limit. If so, it preferentially
prunes the newest connections originating from that specific IP address
until its count is reduced to this limit. This prevents a single host from
dominating the node's inbound connection capacity. | `4` | +| [soft_max_neighbors_per_org](#connection_options-soft_max_neighbors_per_org) | Soft limit on the number of outbound P2P connections per network organization (ASN).

During connection pruning (when total outbound connections >
[soft_num_neighbors](#connection_options-soft_num_neighbors) ), the node checks if any single
network organization (identified by ASN) has more outbound connections than
this limit. If so, it preferentially prunes the least healthy/newest
connections from that overrepresented organization until its count is
reduced to this limit or the total outbound count reaches
[soft_num_neighbors](#connection_options-soft_num_neighbors) . This encourages connection diversity
across different network providers. | `32` | +| [soft_num_clients](#connection_options-soft_num_clients) | Soft limit threshold for triggering inbound P2P connection pruning.

If the total number of currently active inbound P2P connections exceeds this
value, the node will activate pruning logic to reduce the count, typically by
applying per-host limits (see [soft_max_clients_per_host](#connection_options-soft_max_clients_per_host) ).
This helps manage the overall load from inbound peers. | `750` | +| [soft_num_neighbors](#connection_options-soft_num_neighbors) | Target number of outbound P2P connections the node aims to maintain.

The connection pruning logic only activates if the current number of established
outbound P2P connections exceeds this value. Pruning aims to reduce the
connection count back down to this target, ensuring the node maintains a
baseline number of outbound peers for network connectivity. | `16` | +| [stackerdb_hint_replicas](#connection_options-stackerdb_hint_replicas) | Static list of preferred replica peers for specific StackerDB contracts,
provided as a JSON string.

This allows manually specifying known peers to use for replicating particular
StackerDBs, potentially overriding or supplementing the peers discovered via
the StackerDB's control contract.

**Format:** The configuration value must be a TOML string containing valid JSON.
The JSON structure must be an array of tuples, where each tuple pairs a
contract identifier with a list of preferred neighbor addresses:
`[[ContractIdentifier, [NeighborAddress, ...]], ...]`

1. `ContractIdentifier`: A JSON object representing the `QualifiedContractIdentifier`.
It must have the specific structure:
`{"issuer": [version_byte, [byte_array_20]], "name": "contract-name"}`

2. `NeighborAddress`: A JSON object specifying the peer details:
`{"ip": "...", "port": ..., "public_key_hash": "..."}`

**Notes:**
- Use this option with caution, primarily for advanced testing or bootstrapping.

**Example:**
stackerdb_hint_replicas = '''
[
  [
    {
      "issuer": [1, [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]],
      "name": "my-contract"
    },
    [
      {
        "ip": "192.0.2.1",
        "port": 20444,
        "public_key_hash": "0102030405060708090a0b0c0d0e0f1011121314"
      }
    ]
  ]
]
'''
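
Stepping back from the StackerDB hints: the connection limits documented throughout this table all live under a single `[connection_options]` TOML block. A minimal sketch using the defaults listed above (illustrative only, not a tuning recommendation):

```toml
[connection_options]
# Hard cap on concurrent inbound P2P connections.
num_clients = 750
# Soft threshold above which inbound pruning kicks in.
soft_num_clients = 750
# At most this many inbound connections per source IP during pruning.
soft_max_clients_per_host = 4
# Global cap on sockets managed by the networking layer.
max_sockets = 800
```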
| `None` (no hints provided) | +| [timeout](#connection_options-timeout) | General communication timeout (in seconds).

- For HTTP connections: Governs two timeout aspects:
  - Server-side: Defines the maximum allowed time since the last request was
received from a client. An idle connection is dropped if both this
timeout and [idle_timeout](#connection_options-idle_timeout) are exceeded.
  - Client-side: Sets the timeout duration (TTL) for outgoing HTTP requests
initiated by the node itself.
- For P2P connections: Used as the specific timeout for NAT punch-through requests.

**Units:** seconds | `15` | +| [walk_interval](#connection_options-walk_interval) | Minimum interval (in seconds) between the start of consecutive neighbor discovery walks.

The node periodically performs "neighbor walks" to discover new peers and
maintain an up-to-date view of the P2P network topology. This setting
controls how frequently these walks can be initiated, preventing excessive
network traffic and processing.

**Units:** seconds | `60` | +| [walk_seed_probability](#connection_options-walk_seed_probability) | Probability (0.0 to 1.0) of forcing a neighbor walk to start from a seed/bootstrap peer.

This probability applies only when the node is not in Initial Block Download (IBD)
and is already connected to at least one seed/bootstrap peer.
Normally, in this situation, the walk would start from a random inbound or
outbound peer. However, with this probability, the walk is forced to start
from a seed peer instead. This helps ensure the node periodically
re-establishes its network view from trusted entry points. | `0.1` (10%) | +| ~~[antientropy_public](#connection_options-antientropy_public)~~ | Controls whether a node with public inbound connections should still push
blocks, even if not NAT'ed.

In the Stacks 2.x anti-entropy logic, if a node detected it had inbound
connections from public IPs (suggesting it wasn't behind NAT) and this flag
was set to `false`, it would refrain from proactively pushing blocks and
microblocks to peers. The assumption was that publicly reachable nodes should
primarily serve downloads. If set to `true` (default), the node would push
data regardless of its perceived reachability.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. | `true` | +| ~~[antientropy_retry](#connection_options-antientropy_retry)~~ | Minimum interval (in seconds) between attempts to run the Epoch 2.x anti-entropy
data push mechanism.

The Stacks 2.x anti-entropy protocol involves the node proactively pushing its
known Stacks blocks and microblocks to peers. This value specifies the
cooldown period for this operation. This prevents the node from excessively
attempting to push data to its peers.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+.

**Units:** seconds | `3_600` (1 hour) | +| ~~[download_interval](#connection_options-download_interval)~~ | Minimum interval (in seconds) between consecutive block download scans in epoch 2.x.

In the pre-Nakamoto block download logic, if a full scan for blocks completed
without finding any new blocks to download, and if the known peer inventories
had not changed, the node would wait at least this duration before
initiating the next download scan. This throttled the downloader when the
node was likely already synchronized.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+.

**Units:** seconds | `10` | | ~~[full_inv_sync_interval](#connection_options-full_inv_sync_interval)~~ | Deprecated: it does not have any effect on the node's behavior.

**⚠️ DEPRECATED:** It does not have any effect on the node's behavior. | `None` | | ~~[max_clients_per_host](#connection_options-max_clients_per_host)~~ | Maximum number of inbound p2p connections per host we permit.

**⚠️ DEPRECATED:** It does not have any effect on the node's behavior. | `4` | | ~~[max_neighbors_per_host](#connection_options-max_neighbors_per_host)~~ | Maximum number of neighbors per host we permit.

**⚠️ DEPRECATED:** It does not have any effect on the node's behavior. | `1` | @@ -92,10 +92,10 @@ The configuration is automatically generated from the Rust source code documenta |-----------|-------------|----------| | [cost_estimator](#fee_estimation-cost_estimator) | Specifies the name of the cost estimator to use.
This controls how the node estimates computational costs for transactions.

Accepted values:
- `"NaivePessimistic"`: The only currently supported cost estimator. This estimator
tracks the highest observed costs for each operation type and uses the average
of the top 10 values as its estimate, providing a conservative approach to
cost estimation.

**Notes:**
- If [disabled](#fee_estimation-disabled) is `true`, the node will use the default unit cost estimator. | `"NaivePessimistic"` | | [cost_metric](#fee_estimation-cost_metric) | Specifies the name of the cost metric to use.
This controls how the node measures and compares transaction costs.

Accepted values:
- `"ProportionDotProduct"`: The only currently supported cost metric. This metric
computes a weighted sum of cost dimensions (runtime, read/write counts, etc.)
proportional to how much of the block limit they consume.

**Notes:**
- If [disabled](#fee_estimation-disabled) is `true`, the node will use the default unit cost metric. | `"ProportionDotProduct"` | -| [disabled](#fee_estimation-disabled) | If `true`, all fee and cost estimation features are disabled.
The node will use unit estimators and metrics, which effectively
provide no actual estimation capabilities.

When disabled, the node will:
1. Not track historical transaction costs or fee rates
2. Return simple unit values for costs for any transaction, regardless of its actual complexity
3. Be unable to provide meaningful fee estimates for API requests (always returns an error)
4. Consider only raw transaction fees (not fees per cost unit) when assembling blocks

This setting takes precedence over individual estimator/metric configurations.

**Notes:**
- When `true`, the values for [cost_estimator](#fee_estimation-cost_estimator) , [fee_estimator](#fee_estimation-fee_estimator) , and [cost_metric](#fee_estimation-cost_metric) are ignored. | `false` | +| [disabled](#fee_estimation-disabled) | If `true`, all fee and cost estimation features are disabled.
The node will use unit estimators and metrics, which effectively provide no
actual estimation capabilities.

When disabled, the node will:
1. Not track historical transaction costs or fee rates.
2. Return simple unit values for costs for any transaction, regardless of
its actual complexity.
3. Be unable to provide meaningful fee estimates for API requests (always
returns an error).
4. Consider only raw transaction fees (not fees per cost unit) when
assembling blocks.

This setting takes precedence over individual estimator/metric configurations.

**Notes:**
- When `true`, the values for [cost_estimator](#fee_estimation-cost_estimator) , [fee_estimator](#fee_estimation-fee_estimator) , and [cost_metric](#fee_estimation-cost_metric) are ignored. | `false` | | [fee_estimator](#fee_estimation-fee_estimator) | Specifies the name of the fee estimator to use.
This controls how the node calculates appropriate transaction fees based on costs.

Accepted values:
- `"ScalarFeeRate"`: Simple multiplier-based fee estimation that uses percentiles
(5th, 50th, and 95th) of observed fee rates from recent blocks.
- `"FuzzedWeightedMedianFeeRate"`: Fee estimation that adds controlled randomness
to a weighted median rate calculator. This helps prevent fee optimization attacks
by adding unpredictability to fee estimates while still maintaining accuracy.

**Notes:**
- If [disabled](#fee_estimation-disabled) is `true`, the node will use the default unit fee estimator. | `"ScalarFeeRate"` | -| [fee_rate_fuzzer_fraction](#fee_estimation-fee_rate_fuzzer_fraction) | Specifies the fraction of random noise to add if using the `FuzzedWeightedMedianFeeRate` fee estimator.
This value should be in the range [0, 1], representing a percentage of the base fee rate.

For example, with a value of 0.1 (10%), fee rate estimates will have random noise added
within the range of ±10% of the original estimate. This randomization makes it difficult
for users to precisely optimize their fees while still providing reasonable estimates.

**Notes:**
- This setting is only relevant when [fee_estimator](#fee_estimation-fee_estimator) is set to `"FuzzedWeightedMedianFeeRate"`. | `0.1` (10%) | -| [fee_rate_window_size](#fee_estimation-fee_rate_window_size) | Specifies the window size for the `WeightedMedianFeeRateEstimator`.
This determines how many historical fee rate data points are considered
when calculating the median fee rate.

The window size controls how quickly the fee estimator responds to changing
network conditions. A smaller window size (e.g., 5) makes the estimator more
responsive to recent fee rate changes but potentially more volatile. A larger
window size (e.g., 10) produces more stable estimates but may be slower to
adapt to rapid network changes.

**Notes:**
- This setting is primarily relevant when [fee_estimator](#fee_estimation-fee_estimator) is set to `"FuzzedWeightedMedianFeeRate"`, as it's used by the underlying `WeightedMedianFeeRateEstimator`. | `5` | +| [fee_rate_fuzzer_fraction](#fee_estimation-fee_rate_fuzzer_fraction) | Specifies the fraction of random noise to add if using the
`FuzzedWeightedMedianFeeRate` fee estimator. This value should be in the
range [0, 1], representing a percentage of the base fee rate.

For example, with a value of 0.1 (10%), fee rate estimates will have random
noise added within the range of ±10% of the original estimate. This
randomization makes it difficult for users to precisely optimize their fees
while still providing reasonable estimates.

**Notes:**
- This setting is only relevant when [fee_estimator](#fee_estimation-fee_estimator) is set to `"FuzzedWeightedMedianFeeRate"`. | `0.1` (10%) | +| [fee_rate_window_size](#fee_estimation-fee_rate_window_size) | Specifies the window size for the `WeightedMedianFeeRateEstimator`.
This determines how many historical fee rate data points are considered
when calculating the median fee rate.
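
For orientation, a hedged `[fee_estimation]` sketch wiring together the estimator, metric, and fuzzer options described in this table (values are the documented defaults, shown only for illustration):

```toml
[fee_estimation]
# The only supported cost estimator and cost metric, per the rows above.
cost_estimator = "NaivePessimistic"
cost_metric = "ProportionDotProduct"
# Fuzzed estimator adds bounded noise to a weighted median fee rate.
fee_estimator = "FuzzedWeightedMedianFeeRate"
fee_rate_fuzzer_fraction = 0.1  # +/-10% noise on estimates
fee_rate_window_size = 5        # data points for the weighted median
```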

**Notes:**
- This setting is primarily relevant when [fee_estimator](#fee_estimation-fee_estimator) is set to `"FuzzedWeightedMedianFeeRate"`. | `5` | | [log_error](#fee_estimation-log_error) | If `true`, errors encountered during cost or fee estimation will be logged.
This can help diagnose issues with the fee estimation subsystem. | `false` | @@ -104,79 +104,79 @@ The configuration is automatically generated from the Rust source code documenta | Parameter | Description | Default | |-----------|-------------|----------| | [block_commit_tx_estimated_size](#burnchain-block_commit_tx_estimated_size) | Estimated size (in virtual bytes) of a block commit transaction on bitcoin.
Used for fee calculation in mining logic by multiplying with the fee rate
[satoshis_per_byte](#burnchain-satoshis_per_byte) .

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`.

**Units:** virtual bytes | `380` | -| [burn_fee_cap](#burnchain-burn_fee_cap) | The maximum amount (in sats) of "burn commitment" to broadcast for the next block's leader election.
Acts as a safety cap to limit the maximum amount spent on mining.
It serves as both the target fee and a fallback if dynamic fee calculations fail or cannot be performed.

This setting can be hot-reloaded from the config file, allowing adjustment without restarting.

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`.

**Units:** satoshis | `20_000` | +| [burn_fee_cap](#burnchain-burn_fee_cap) | The maximum amount (in sats) of "burn commitment" to broadcast for the next
block's leader election. Acts as a safety cap to limit the maximum amount
spent on mining. It serves as both the target fee and a fallback if dynamic
fee calculations fail or cannot be performed.

This setting can be hot-reloaded from the config file, allowing adjustment
without restarting.

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`.

**Units:** satoshis | `20_000` | | [chain](#burnchain-chain) | The underlying blockchain used for Proof-of-Transfer.

**Notes:**
- Currently, only `"bitcoin"` is supported. | `"bitcoin"` | -| [chain_id](#burnchain-chain_id) | The network-specific identifier used in P2P communication and database initialization.

**Notes:**
- **Warning:** Do not modify this unless you really know what you're doing.
- This is intended strictly for testing purposes. | - `0x00000001` if [mode](#burnchain-mode) is `"mainnet"`
- `0x80000000` otherwise | -| [commit_anchor_block_within](#burnchain-commit_anchor_block_within) | Specifies a mandatory wait period (in milliseconds) after receiving a burnchain tip
before the node attempts to build the anchored block for the new tenure.
This duration effectively schedules the start of the block-building
process relative to the tip's arrival time.

**Notes:**
- This is intended strictly for testing purposes.

**Units:** milliseconds | `5_000` | -| [epochs](#burnchain-epochs) | Custom override for the definitions of Stacks epochs (start/end burnchain heights, consensus rules).
This setting allows testing specific epoch transitions or custom consensus rules by defining exactly
when each epoch starts on bitcoin.

Epochs define distinct protocol rule sets (consensus rules, execution costs, capabilities).
When configured, the list must include all epochs sequentially from "1.0" up to the
highest desired epoch, without skipping any intermediate ones.
Valid `epoch_name` values currently include:
`"1.0"`, `"2.0"`, `"2.05"`, `"2.1"`, `"2.2"`, `"2.3"`, `"2.4"`, `"2.5"`, `"3.0"`, `"3.1"`.

**Validation Rules:**
- Epochs must be provided in strict chronological order (`1.0`, `2.0`, `2.05`...).
- `start_height` values must be non-decreasing across the list.
- Epoch `"1.0"` must have `start_height = 0`.
- The number of defined epochs cannot exceed the maximum supported by the node software.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes.
- Configured as a list `[[burnchain.epochs]]` in TOML, each with `epoch_name` (string) and `start_height` (integer Bitcoin block height).

**Example:**
[[burnchain.epochs]]
epoch_name = "2.1"
start_height = 150

[[burnchain.epochs]]
epoch_name = "2.2"
start_height = 200
| `None` (uses the standard epoch definitions for the selected [mode](#burnchain-mode) ) | -| [fault_injection_burnchain_block_delay](#burnchain-fault_injection_burnchain_block_delay) | Fault injection setting for testing. Introduces an artificial delay (in milliseconds)
before processing each burnchain block download. Simulates a slow burnchain connection.

**Notes:**
- This is intended strictly for testing purposes.

**Units:** milliseconds | `0` (no delay) | +| [chain_id](#burnchain-chain_id) | The network-specific identifier used in P2P communication and database initialization.

**Notes:**
- **Warning:** Do not modify this unless you really know what you're doing.
- This is intended strictly for testing purposes. | - if [mode](#burnchain-mode) is `"mainnet"`: `CHAIN_ID_MAINNET`
- else: `CHAIN_ID_TESTNET` | +| [commit_anchor_block_within](#burnchain-commit_anchor_block_within) | Specifies a mandatory wait period (in milliseconds) after receiving a burnchain tip
before the node attempts to build the anchored block for the new tenure.
This duration effectively schedules the start of the block-building process
relative to the tip's arrival time.

**Notes:**
- This is intended strictly for testing purposes.

**Units:** milliseconds | `5_000` | +| [epochs](#burnchain-epochs) | Custom override for the definitions of Stacks epochs (start/end burnchain
heights, consensus rules). This setting allows testing specific epoch
transitions or custom consensus rules by defining exactly when each epoch
starts on bitcoin.

Epochs define distinct protocol rule sets (consensus rules, execution costs,
capabilities). When configured, the list must include all epochs
sequentially from "1.0" up to the highest desired epoch, without skipping
any intermediate ones. Valid `epoch_name` values currently include:
`"1.0"`, `"2.0"`, `"2.05"`, `"2.1"`, `"2.2"`, `"2.3"`, `"2.4"`, `"2.5"`, `"3.0"`, `"3.1"`.

**Validation Rules:**
- Epochs must be provided in strict chronological order (`1.0`, `2.0`, `2.05`...).
- `start_height` values must be non-decreasing across the list.
- Epoch `"1.0"` must have `start_height = 0`.
- The number of defined epochs cannot exceed the maximum supported by the node software.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes.
- Configured as a list `[[burnchain.epochs]]` in TOML, each with `epoch_name` (string) and `start_height` (integer Bitcoin block height).

**Example:**
[[burnchain.epochs]]
epoch_name = "2.1"
start_height = 150

[[burnchain.epochs]]
epoch_name = "2.2"
start_height = 200
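
Because the validation rules above require every epoch from "1.0" up to the highest epoch configured, a complete override would look more like the following sketch (heights are illustrative):

```toml
[[burnchain.epochs]]
epoch_name = "1.0"
start_height = 0   # epoch "1.0" must start at height 0

[[burnchain.epochs]]
epoch_name = "2.0"
start_height = 0   # start heights must be non-decreasing

[[burnchain.epochs]]
epoch_name = "2.05"
start_height = 100

[[burnchain.epochs]]
epoch_name = "2.1"
start_height = 150
```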
| `None` (uses the standard epoch definitions for the selected [mode](#burnchain-mode) ) | +| [fault_injection_burnchain_block_delay](#burnchain-fault_injection_burnchain_block_delay) | Fault injection setting for testing. Introduces an artificial delay (in
milliseconds) before processing each burnchain block download. Simulates a
slow burnchain connection.

**Notes:**
- This is intended strictly for testing purposes.

**Units:** milliseconds | `0` (no delay) | | [first_burn_block_hash](#burnchain-first_burn_block_hash) | Overrides the default starting block hash of the burnchain.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes.
- Should be used together with [first_burn_block_height](#burnchain-first_burn_block_height) and [first_burn_block_timestamp](#burnchain-first_burn_block_timestamp) for proper operation. | `None` (uses the burnchain's default starting block hash) | | [first_burn_block_height](#burnchain-first_burn_block_height) | Overrides the default starting bitcoin block height for the node.
Allows starting synchronization from a specific historical point in test environments.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes.
- Should be used together with [first_burn_block_timestamp](#burnchain-first_burn_block_timestamp) and [first_burn_block_hash](#burnchain-first_burn_block_hash) for proper operation. | `None` (uses the burnchain's default starting height for the mode) | | [first_burn_block_timestamp](#burnchain-first_burn_block_timestamp) | Overrides the default starting block timestamp of the burnchain.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes.
- Should be used together with [first_burn_block_height](#burnchain-first_burn_block_height) and [first_burn_block_hash](#burnchain-first_burn_block_hash) for proper operation. | `None` (uses the burnchain's default starting timestamp) | -| [leader_key_tx_estimated_size](#burnchain-leader_key_tx_estimated_size) | Estimated size (in virtual bytes) of a leader key registration transaction on bitcoin.
Used for fee calculation in mining logic by multiplying with the fee rate
[satoshis_per_byte](#burnchain-satoshis_per_byte) .

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`.

**Units:** virtual bytes | `290` | -| [local_mining_public_key](#burnchain-local_mining_public_key) | The public key associated with the local mining address for the underlying Bitcoin regtest node.
Provided as a hex string representing an uncompressed public key.

It is primarily used in modes that rely on a controlled Bitcoin regtest backend
(e.g., "helium", "mocknet", "neon") where the Stacks node itself needs to
instruct the Bitcoin node to generate blocks.

The key is used to derive the Bitcoin address that receives the coinbase rewards
when generating blocks on the regtest network.

**Notes:**
- Mandatory if [mode](#burnchain-mode) is "helium".
- This is intended strictly for testing purposes. | `None` | -| [magic_bytes](#burnchain-magic_bytes) | The network "magic bytes" used to identify packets for the specific bitcoin network
instance (e.g., mainnet, testnet, regtest). Must match the magic bytes of the connected
bitcoin node.

These two-byte identifiers help ensure that nodes only connect to peers on the same
network type. Common values include:
- "X2" for mainnet
- "T2" for testnet (xenon)
- Other values for specific test networks

Configured as a 2-character ASCII string (e.g., "X2" for mainnet). | - `"T2"` if [mode](#burnchain-mode) is `"xenon"`
- `"X2"` otherwise | -| [max_rbf](#burnchain-max_rbf) | Maximum fee rate multiplier allowed when using Replace-By-Fee (RBF) for bitcoin transactions.
Expressed as a percentage of the original [satoshis_per_byte](#burnchain-satoshis_per_byte) rate (e.g.,
150 means the fee rate can be increased up to 1.5x). Used in mining logic for RBF decisions
to cap the replacement fee rate.

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`.

**Units:** percent | `150` | -| [max_unspent_utxos](#burnchain-max_unspent_utxos) | The maximum number of unspent transaction outputs (UTXOs) to request from the bitcoin node.

This value is passed as the `maximumCount` parameter to the bitcoin node. It helps manage
response size and processing load, particularly relevant for miners querying for available
UTXOs to fund operations like block commits or leader key registrations.

Setting this limit too high might lead to performance issues or timeouts when querying
nodes with a very large number of UTXOs. Conversely, setting it too low might prevent
the miner from finding enough UTXOs in a single query to meet the required funding amount
for a transaction, even if sufficient funds exist across more UTXOs not returned by the limited query.

**Notes:**
- This value must be `<= 1024`.
- Only relevant if [[node].miner](#node-miner) is `true`. | `1024` | +| [leader_key_tx_estimated_size](#burnchain-leader_key_tx_estimated_size) | Estimated size (in virtual bytes) of a leader key registration transaction
on bitcoin. Used for fee calculation in mining logic by multiplying with the
fee rate [satoshis_per_byte](#burnchain-satoshis_per_byte) .

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`.

**Units:** virtual bytes | `290` | +| [local_mining_public_key](#burnchain-local_mining_public_key) | The public key associated with the local mining address for the underlying
Bitcoin regtest node. Provided as a hex string representing an uncompressed
public key.

It is primarily used in modes that rely on a controlled Bitcoin regtest
backend (e.g., "helium", "mocknet", "neon") where the Stacks node itself
needs to instruct the Bitcoin node to generate blocks.

The key is used to derive the Bitcoin address that receives the coinbase
rewards when generating blocks on the regtest network.

**Notes:**
- Mandatory if [mode](#burnchain-mode) is "helium".
- This is intended strictly for testing purposes. | `None` | +| [magic_bytes](#burnchain-magic_bytes) | The network "magic bytes" used to identify packets for the specific bitcoin
network instance (e.g., mainnet, testnet, regtest). Must match the magic
bytes of the connected bitcoin node.

These two-byte identifiers help ensure that nodes only connect to peers on the
same network type. Common values include:
- "X2" for mainnet
- "T2" for testnet (xenon)
- Other values for specific test networks

Configured as a 2-character ASCII string (e.g., "X2" for mainnet). | - if [mode](#burnchain-mode) is `"xenon"`: `"T2"`
- else: `"X2"` | +| [max_rbf](#burnchain-max_rbf) | Maximum fee rate multiplier allowed when using Replace-By-Fee (RBF) for
bitcoin transactions. Expressed as a percentage of the original
[satoshis_per_byte](#burnchain-satoshis_per_byte) rate (e.g., 150 means the fee rate
can be increased up to 1.5x). Used in mining logic for RBF decisions to
cap the replacement fee rate.

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`.

**Units:** percent | `150` | +| [max_unspent_utxos](#burnchain-max_unspent_utxos) | The maximum number of unspent transaction outputs (UTXOs) to request from
the bitcoin node.

This value is passed as the `maximumCount` parameter to the bitcoin node.
It helps manage response size and processing load, particularly relevant
for miners querying for available UTXOs to fund operations like block
commits or leader key registrations.

Setting this limit too high might lead to performance issues or timeouts when
querying nodes with a very large number of UTXOs. Conversely, setting it too
low might prevent the miner from finding enough UTXOs in a single query to
meet the required funding amount for a transaction, even if sufficient funds
exist across more UTXOs not returned by the limited query.

**Notes:**
- This value must be `<= 1024`.
- Only relevant if [[node].miner](#node-miner) is `true`. | `1024` | | [mode](#burnchain-mode) | The operational mode or network profile for the Stacks node.
This setting determines network parameters (like chain ID, peer version),
default configurations, genesis block definitions, and overall node behavior.

Supported values:
- `"mainnet"`: mainnet
- `"xenon"`: testnet
- `"mocknet"`: regtest
- `"helium"`: regtest
- `"neon"`: regtest
- `"argon"`: regtest
- `"krypton"`: regtest
- `"nakamoto-neon"`: regtest | `"mocknet"` | | [password](#burnchain-password) | The password for authenticating with the bitcoin node's RPC interface.
Required if the bitcoin node requires RPC authentication.

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`. | `None` | -| [peer_host](#burnchain-peer_host) | The hostname or IP address of the bitcoin node peer.

This field is required for all node configurations as it specifies where to find the underlying
bitcoin node to interact with for PoX operations, block validation, and mining. | `"0.0.0.0"` | +| [peer_host](#burnchain-peer_host) | The hostname or IP address of the bitcoin node peer.

This field is required for all node configurations as it specifies where to
find the underlying bitcoin node to interact with for PoX operations,
block validation, and mining. | `"0.0.0.0"` | | [peer_port](#burnchain-peer_port) | The P2P network port of the bitcoin node specified by [peer_host](#burnchain-peer_host) . | `8333` | -| [peer_version](#burnchain-peer_version) | The peer protocol version number used in P2P communication.
This parameter cannot be set via the configuration file.

**Notes:**
- **Warning:** Do not modify this unless you really know what you're doing. | - `402_653_196` if [mode](#burnchain-mode) is `"mainnet"`
- `4_207_599_116` otherwise | -| [poll_time_secs](#burnchain-poll_time_secs) | The interval, in seconds, at which the node polls the bitcoin node for new blocks and state updates.

The default value of 10 seconds is mainly intended for testing purposes.
It's suggested to set this to a higher value for mainnet, e.g., 300 seconds (5 minutes).

**Units:** seconds | `10` | -| [pox_2_activation](#burnchain-pox_2_activation) | Sets a custom burnchain height for PoX-2 activation (for testing).

This affects two key transitions:
1. The block height at which PoX v1 lockups are automatically unlocked.
2. The block height from which PoX reward set calculations switch to PoX v2 rules.

**Behavior:**
- This value directly sets the auto unlock height for PoX v1 lockups before transition to PoX v2.
This also defines the burn height at which PoX reward sets are calculated using PoX v2 rather than v1.
- If custom [epochs](#burnchain-epochs) are provided:
  - This value is used to validate that Epoch 2.1's start height is ≤ this value.
  - However, the height specified in `epochs` for Epoch 2.1 takes precedence.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes. | `None` | +| [peer_version](#burnchain-peer_version) | The peer protocol version number used in P2P communication.
This parameter cannot be set via the configuration file.

**Notes:**
- **Warning:** Do not modify this unless you really know what you're doing. | - if [mode](#burnchain-mode) is `"mainnet"`: `PEER_VERSION_MAINNET`
- else: `PEER_VERSION_TESTNET` | +| [poll_time_secs](#burnchain-poll_time_secs) | The interval, in seconds, at which the node polls the bitcoin node for new
blocks and state updates.

The default value of 10 seconds is mainly intended for testing purposes.
Setting a higher value is recommended for mainnet, e.g., 300 seconds
(5 minutes), as sketched below.
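
A hedged sketch of that recommendation (illustrative only):

```toml
[burnchain]
poll_time_secs = 300  # poll bitcoind every 5 minutes on mainnet
```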

**Units:** seconds | `10` | +| [pox_2_activation](#burnchain-pox_2_activation) | Sets a custom burnchain height for PoX-2 activation (for testing).

This affects two key transitions:
1. The block height at which PoX v1 lockups are automatically unlocked.
2. The block height from which PoX reward set calculations switch to PoX v2 rules.

**Behavior:**
- This value directly sets the auto unlock height for PoX v1 lockups before the
transition to PoX v2. This also defines the burn height at which PoX reward
sets are calculated using PoX v2 rather than v1.
- If custom [epochs](#burnchain-epochs) are provided:
  - This value is used to validate that Epoch 2.1's start height is ≤ this value.
  - However, the height specified in `epochs` for Epoch 2.1 takes precedence.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes. | `None` | | [pox_prepare_length](#burnchain-pox_prepare_length) | Overrides the length (in bitcoin blocks) of the PoX prepare phase.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes.

**Units:** bitcoin blocks | `None` (uses the standard prepare phase length for the mode) | | [pox_reward_length](#burnchain-pox_reward_length) | Overrides the length (in bitcoin blocks) of the PoX reward cycle.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes.

**Units:** bitcoin blocks | `None` (uses the standard reward cycle length for the mode) | -| [process_exit_at_block_height](#burnchain-process_exit_at_block_height) | Optional bitcoin block height at which the Stacks node process should gracefully exit.
When bitcoin reaches this height, the node logs a message and initiates a graceful shutdown.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes. | `None` | +| [process_exit_at_block_height](#burnchain-process_exit_at_block_height) | Optional bitcoin block height at which the Stacks node process should
gracefully exit. When bitcoin reaches this height, the node logs a message
and initiates a graceful shutdown.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes. | `None` | | [rbf_fee_increment](#burnchain-rbf_fee_increment) | The incremental amount (in sats/vByte) to add to the previous transaction's
fee rate for RBF bitcoin transactions.

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`.

**Units:** sats/vByte | `5` | | [rpc_port](#burnchain-rpc_port) | The RPC port of the bitcoin node specified by [peer_host](#burnchain-peer_host) . | `8332` | -| [rpc_ssl](#burnchain-rpc_ssl) | Flag indicating whether to use SSL/TLS when connecting to the bitcoin node's RPC interface. | `false` | +| [rpc_ssl](#burnchain-rpc_ssl) | Flag indicating whether to use SSL/TLS when connecting to the bitcoin node's
RPC interface. | `false` | | [satoshis_per_byte](#burnchain-satoshis_per_byte) | The default fee rate in sats/vByte to use when estimating fees for miners
to submit bitcoin transactions (like block commits or leader key registrations).

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`.

**Units:** sats/vByte | `50` | | [timeout](#burnchain-timeout) | Timeout duration, in seconds, for RPC calls made to the bitcoin node.
Configures the timeout on the underlying HTTP client.

**Units:** seconds | `60` | | [username](#burnchain-username) | The username for authenticating with the bitcoin node's RPC interface.
Required if the bitcoin node requires RPC authentication.

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`. | `None` | -| [wallet_name](#burnchain-wallet_name) | Specifies the name of the Bitcoin wallet to use within the connected bitcoin node.
Used to interact with a specific named wallet if the bitcoin node manages multiple wallets.

If the specified wallet doesn't exist, the node will attempt to create it via the
`createwallet` RPC call. This is particularly useful for miners who need to manage
separate wallets.

**Notes:**
- Primarily relevant for miners interacting with multi-wallet Bitcoin nodes. | `""` (empty string, implying the default wallet or no specific wallet needed) | -| ~~[affirmation_overrides](#burnchain-affirmation_overrides)~~ | Overrides for the burnchain block affirmation map for specific reward cycles.
Allows manually setting the miner affirmation ('p'resent/'n'ot-present/'a'bsent) map for a
given cycle, bypassing the map normally derived from sortition results.

Special defaults are added when [mode](#burnchain-mode) is "xenon", but config entries take precedence.
At startup, these overrides are written to the `BurnchainDB` (`overrides` table).

**Notes:**
- Primarily used for testing or recovering from network issues.
- Configured as a list `[[burnchain.affirmation_overrides]]` in TOML, each with `reward_cycle` (integer) and `affirmation` (string of 'p'/'n'/'a', length `reward_cycle - 1`).

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode.

**Example:**
[[burnchain.affirmation_overrides]]
reward_cycle = 413
affirmation = "pna..." # Must be 412 chars long
| Empty map | -| ~~[ast_precheck_size_height](#burnchain-ast_precheck_size_height)~~ | Override for the burnchain height activating stricter AST size checks pre-epoch 3.0 for testing purposes.

Used pre-epoch 3.0 to control activation before it became standard (at burn height `752000`).
Ignored in standard production builds as the underlying mechanism is disabled unless the `testing`
feature is active.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. | `None` | -| ~~[sunset_end](#burnchain-sunset_end)~~ | Overrides the bitcoin height, non-inclusive, at which the PoX sunset period ends in epochs before 2.1.
After this height, Stacking rewards are disabled completely. This parameter works together
with `sunset_start` to define the full sunset transition period for PoX.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes.

**⚠️ DEPRECATED:** The sunset phase was removed in Epoch 2.1. This parameter can still be used for
testing purposes for epochs before 2.1. | `None` (uses the standard sunset end height for the mode) | -| ~~[sunset_start](#burnchain-sunset_start)~~ | Overrides the bitcoin height at which the PoX sunset period begins in epochs before 2.1.
The sunset period represents a planned phase-out of the PoX mechanism. During this period,
stacking rewards gradually decrease, eventually ceasing entirely. This parameter allows
testing the PoX sunset transition by explicitly setting its start height.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes.

**⚠️ DEPRECATED:** The sunset phase was removed in Epoch 2.1. This parameter can still be used for
testing purposes for epochs before 2.1. | `None` (uses the standard sunset start height for the mode) | +| [wallet_name](#burnchain-wallet_name) | Specifies the name of the Bitcoin wallet to use within the connected bitcoin
node. Used to interact with a specific named wallet if the bitcoin node
manages multiple wallets.

If the specified wallet doesn't exist, the node will attempt to create it via
the `createwallet` RPC call. This is particularly useful for miners who need
to manage separate wallets.

**Notes:**
- Primarily relevant for miners interacting with multi-wallet Bitcoin nodes. | `""` (empty string, implying the default wallet or no specific wallet needed) | +| ~~[affirmation_overrides](#burnchain-affirmation_overrides)~~ | Overrides for the burnchain block affirmation map for specific reward cycles.
Allows manually setting the miner affirmation ('p'resent/'n'ot-present/'a'bsent)
map for a given cycle, bypassing the map normally derived from sortition results.

Special defaults are added when [mode](#burnchain-mode) is "xenon", but
config entries take precedence. At startup, these overrides are written to
the `BurnchainDB` (`overrides` table).

**Notes:**
- Primarily used for testing or recovering from network issues.
- Configured as a list `[[burnchain.affirmation_overrides]]` in TOML, each with `reward_cycle` (integer) and `affirmation` (string of 'p'/'n'/'a', length `reward_cycle - 1`).

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode.

**Example:**
[[burnchain.affirmation_overrides]]
reward_cycle = 413
affirmation = "pna..." # Must be 412 chars long
| Empty map | +| ~~[ast_precheck_size_height](#burnchain-ast_precheck_size_height)~~ | Override for the burnchain height activating stricter AST size checks
pre-epoch 3.0 for testing purposes.

Used pre-epoch 3.0 to control activation before it became standard (at burn
height `752000`). Ignored in standard production builds as the underlying
mechanism is disabled unless the `testing` feature is active.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. | `None` | +| ~~[sunset_end](#burnchain-sunset_end)~~ | Overrides the bitcoin height, non-inclusive, at which the PoX sunset period
ends in epochs before 2.1. After this height, Stacking rewards are disabled
completely. This parameter works together with `sunset_start` to define the
full sunset transition period for PoX.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes for epochs before 2.1.

**⚠️ DEPRECATED:** The sunset phase was removed in Epoch 2.1. | `None` (uses the standard sunset end height for the mode) | +| ~~[sunset_start](#burnchain-sunset_start)~~ | Overrides the bitcoin height at which the PoX sunset period begins in epochs
before 2.1. The sunset period represents a planned phase-out of the PoX
mechanism. During this period, stacking rewards gradually decrease,
eventually ceasing entirely. This parameter allows testing the PoX sunset
transition by explicitly setting its start height.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes for epochs before 2.1.

**⚠️ DEPRECATED:** The sunset phase was removed in Epoch 2.1. | `None` (uses the standard sunset start height for the mode) |

## [node]

| Parameter | Description | Default |
|-----------|-------------|----------|
-| [always_use_affirmation_maps](#node-always_use_affirmation_maps) | Controls if Stacks Epoch 2.1+ affirmation map logic should be applied even before Epoch 2.1.
- If `true` (default), the node consistently uses the newer (Epoch 2.1) rules for PoX anchor block
validation and affirmation-based reorg handling, even in earlier epochs.
- If `false`, the node strictly follows the rules defined for the specific epoch it is currently
processing, only applying 2.1+ logic from Epoch 2.1 onwards.
Differences in this setting between nodes prior to Epoch 2.1 could lead to consensus forks. | `true` | -| [assume_present_anchor_blocks](#node-assume_present_anchor_blocks) | Controls if the node must strictly wait for any PoX anchor block selected by the core consensus mechanism.
- If `true`: Halts burnchain processing immediately whenever a selected anchor block is missing locally
(`SelectedAndUnknown` status), regardless of affirmation status. This is always true in Nakamoto (Epoch 3.0+)
and runs *before* affirmation checks.
- If `false` (primarily for testing): Skips this immediate halt, allowing processing to proceed to
affirmation map checks.
Normal operation requires this to be `true`; setting to `false` will likely break consensus adherence.

**Notes:**
- This parameter cannot be set via the configuration file; it must be modified programmatically. | `true` | -| [bootstrap_node](#node-bootstrap_node) | A list of initial peer nodes used to bootstrap connections into the Stacks P2P network.
Peers are specified in a configuration file as comma-separated strings in the
format `"PUBKEY@IP:PORT"` or `"PUBKEY@HOSTNAME:PORT"`. DNS hostnames are resolved
during configuration loading.

**Example:**
bootstrap_node = "pubkey1@example.com:30444,pubkey2@192.168.1.100:20444"
| `[]` (empty vector) | -| [chain_liveness_poll_time_secs](#node-chain_liveness_poll_time_secs) | The polling interval, in seconds, for the background thread that monitors chain liveness.
This thread periodically wakes up the main coordinator to check for chain progress or
other conditions requiring action.

**Units:** seconds | `300` (5 minutes) | -| [data_url](#node-data_url) | The publicly accessible URL that this node advertises to peers during the P2P handshake
as its HTTP RPC endpoint. Other nodes or services might use this URL to query the node's API. | `http://{rpc_bind}` (e.g., "http://0.0.0.0:20443" if [rpc_bind](#node-rpc_bind) is default). | -| [deny_nodes](#node-deny_nodes) | A list of peer addresses that this node should explicitly deny connections from.
Peers are specified as comma-separated strings in the format "IP:PORT" or "HOSTNAME:PORT"
in the configuration file. DNS hostnames are resolved during configuration loading.

**Example:**
deny_nodes = "192.168.1.100:20444,badhost.example.com:20444"
| `[]` (empty vector) | -| [fault_injection_block_push_fail_probability](#node-fault_injection_block_push_fail_probability) | Fault injection setting for testing purposes. If set to `Some(p)`, where `p` is between 0 and 100,
the node will have a `p` percent chance of intentionally *not* pushing a newly processed block
to its peers.

**Notes:**
- Values: 0-100 (percentage). | `None` (no fault injection) | -| [fault_injection_hide_blocks](#node-fault_injection_hide_blocks) | Fault injection setting for testing purposes. If `true`, the node's chainstate database
access layer may intentionally fail to retrieve block data, even if it exists,
simulating block hiding or data unavailability.

**Notes:**
- This parameter cannot be set via the configuration file; it must be modified programmatically. | `false` | -| [local_peer_seed](#node-local_peer_seed) | The private key seed, provided as a hex string in the config file, used specifically for the
node's identity and message signing within the P2P networking layer.
This is separate from the main [seed](#node-seed) . | Randomly generated 32 bytes | +| [always_use_affirmation_maps](#node-always_use_affirmation_maps) | Controls if Stacks Epoch 2.1+ affirmation map logic should be applied even
before Epoch 2.1.
- If `true` (default), the node consistently uses the newer (Epoch 2.1) rules
for PoX anchor block validation and affirmation-based reorg handling, even in
earlier epochs.
- If `false`, the node strictly follows the rules defined for the specific epoch
it is currently processing, only applying 2.1+ logic from Epoch 2.1 onwards.
Differences in this setting between nodes prior to Epoch 2.1 could lead to
consensus forks. | `true` | +| [assume_present_anchor_blocks](#node-assume_present_anchor_blocks) | Controls if the node must strictly wait for any PoX anchor block selected by
the core consensus mechanism.
- If `true`: Halts burnchain processing immediately whenever a selected anchor
block is missing locally (`SelectedAndUnknown` status), regardless of
affirmation status.
- If `false` (primarily for testing): Skips this immediate halt, allowing
processing to proceed to affirmation map checks.
Normal operation requires this to be `true`; setting to `false` will likely
break consensus adherence.

**Notes:**
- This parameter cannot be set via the configuration file; it must be modified programmatically.
- This is intended strictly for testing purposes.
- The halt check runs *before* affirmation checks.
- In Nakamoto (Epoch 3.0+), all prepare phases have anchor blocks. | `true` | +| [bootstrap_node](#node-bootstrap_node) | A list of initial peer nodes used to bootstrap connections into the Stacks P2P
network. Peers are specified in a configuration file as comma-separated
strings in the format `"PUBKEY@IP:PORT"` or `"PUBKEY@HOSTNAME:PORT"`. DNS
hostnames are resolved during configuration loading.

**Example:**
bootstrap_node = "pubkey1@example.com:30444,pubkey2@192.168.1.100:20444"
| `[]` (empty vector) | +| [chain_liveness_poll_time_secs](#node-chain_liveness_poll_time_secs) | The polling interval, in seconds, for the background thread that monitors
chain liveness. This thread periodically wakes up the main coordinator to
check for chain progress or other conditions requiring action.

**Units:** seconds | `300` (5 minutes) | +| [data_url](#node-data_url) | The publicly accessible URL that this node advertises to peers during the P2P
handshake as its HTTP RPC endpoint. Other nodes or services might use this URL
to query the node's API.

**Notes:**
- Example: For rpc_bind="0.0.0.0:20443", data_url becomes "http://0.0.0.0:20443". | Derived by adding "http://" prefix to [rpc_bind](#node-rpc_bind) value. | +| [deny_nodes](#node-deny_nodes) | A list of peer addresses that this node should explicitly deny connections from.
Peers are specified as comma-separated strings in the format "IP:PORT" or
"HOSTNAME:PORT" in the configuration file. DNS hostnames are resolved during
configuration loading.

**Example:**
deny_nodes = "192.168.1.100:20444,badhost.example.com:20444"
| `[]` (empty vector) | +| [fault_injection_block_push_fail_probability](#node-fault_injection_block_push_fail_probability) | Fault injection setting for testing purposes. If set to `Some(p)`, where `p` is
between 0 and 100, the node will have a `p` percent chance of intentionally
*not* pushing a newly processed block to its peers.

**Notes:**
- Values: 0-100 (percentage). | `None` (no fault injection) | +| [fault_injection_hide_blocks](#node-fault_injection_hide_blocks) | Fault injection setting for testing purposes. If `true`, the node's chainstate
database access layer may intentionally fail to retrieve block data, even if it
exists, simulating block hiding or data unavailability.

**Notes:**
- This parameter cannot be set via the configuration file; it must be modified programmatically. | `false` | +| [local_peer_seed](#node-local_peer_seed) | The private key seed, provided as a hex string in the config file, used
specifically for the node's identity and message signing within the P2P
networking layer. This is separate from the main [seed](#node-seed). | Randomly generated 32 bytes | | [marf_cache_strategy](#node-marf_cache_strategy) | The strategy to use for MARF trie node caching in memory.
Controls the trade-off between memory usage and performance for state access.

Possible values:
- `"noop"`: No caching (least memory).
- `"everything"`: Cache all nodes (most memory, potentially fastest).
- `"node256"`: Cache only larger `TrieNode256` nodes.

If the value is `None` or an unrecognized string, it defaults to `"noop"`. | `None` (effectively `"noop"`) | -| [marf_defer_hashing](#node-marf_defer_hashing) | Controls the timing of hash calculations for MARF trie nodes.
- If `true`, hashes are calculated only when the MARF is flushed to disk (deferred hashing).
- If `false`, hashes are calculated immediately as leaf nodes are inserted or updated (immediate hashing).
Deferred hashing might improve write performance. | `true` | -| [miner](#node-miner) | Flag indicating whether this node should activate its mining logic and attempt to produce Stacks blocks.
Setting this to `true` typically requires providing necessary private keys (either [seed](#node-seed) or
[[miner].mining_key](#miner-mining_key) ). It also influences default behavior for settings like
[require_affirmed_anchor_blocks](#node-require_affirmed_anchor_blocks) . | `false` | +| [marf_defer_hashing](#node-marf_defer_hashing) | Controls the timing of hash calculations for MARF trie nodes.
- If `true`, hashes are calculated only when the MARF is flushed to disk
(deferred hashing).
- If `false`, hashes are calculated immediately as leaf nodes are inserted or
updated (immediate hashing).
Deferred hashing might improve write performance. | `true` | +| [miner](#node-miner) | Flag indicating whether this node should activate its mining logic and attempt to
produce Stacks blocks. Setting this to `true` typically requires providing
necessary private keys (either [seed](#node-seed) or [[miner].mining_key](#miner-mining_key)).
It also influences default behavior for settings like
[require_affirmed_anchor_blocks](#node-require_affirmed_anchor_blocks). | `false` | | [mock_mining](#node-mock_mining) | Enables a simulated mining mode, primarily for local testing and development.
When `true`, the node may generate blocks locally without participating in the
real bitcoin consensus or P2P block production process.

**Notes:**
- Only relevant if [miner](#node-miner) is `true`. | `false` | -| [name](#node-name) | Human-readable name for the node. Primarily used for identification in testing environments
(e.g., deriving log file names, temporary directory names). | `"helium-node"` | -| [next_initiative_delay](#node-next_initiative_delay) | Controls how frequently, in milliseconds, the Nakamoto miner's relay thread polls for work
or takes periodic actions when idle (e.g., checking for new burnchain blocks).
Default value of 10 seconds is reasonable in mainnet (where bitcoin blocks are ~10 minutes)
A lower value might be useful in other environments with faster burn blocks.

**Units:** milliseconds | `10_000` (10 seconds) | -| [p2p_address](#node-p2p_address) | The publicly accessible IPv4 address and port that this node advertises to peers for P2P connections.
This might differ from [p2p_bind](#node-p2p_bind) if the node is behind NAT or a proxy.

**Notes:**
- The default value derivation might be unexpected, potentially using the [rpc_bind](#node-rpc_bind) address; explicit configuration is recommended if needed. | Derived from [rpc_bind](#node-rpc_bind) (e.g., "0.0.0.0:20443" if [rpc_bind](#node-rpc_bind) is default). | -| [p2p_bind](#node-p2p_bind) | The IPv4 address and port (e.g., "0.0.0.0:20444") on which the node's P2P networking
service should bind and listen for incoming connections from other peers. | `"0.0.0.0:20444"` | -| [prometheus_bind](#node-prometheus_bind) | Optional network address and port (e.g., "127.0.0.1:9153") for binding the Prometheus metrics server.
If set, the node will start an HTTP server on this address to expose internal metrics
for scraping by a Prometheus instance. | `None` (Prometheus server disabled) | -| [require_affirmed_anchor_blocks](#node-require_affirmed_anchor_blocks) | Controls if the node must wait for locally missing but burnchain-affirmed PoX anchor blocks.
If an anchor block is confirmed by the affirmation map but not yet processed by this node:
- If `true`: Burnchain processing halts until the affirmed block is acquired. Ensures strict
adherence to the affirmed canonical chain, typical for followers.
- If `false`: Burnchain processing continues without waiting. Allows miners to operate optimistically
but may necessitate unwinding later if the affirmed block alters the chain state. | - `true` if [miner](#node-miner) is `false`
- `false` if [miner](#node-miner) is `true` | -| [rpc_bind](#node-rpc_bind) | The IPv4 address and port (e.g., "0.0.0.0:20443") on which the node's HTTP RPC server
should bind and listen for incoming API requests. | `"0.0.0.0:20443"` | -| [seed](#node-seed) | The node's Bitcoin wallet private key, provided as a hex string in the config file.
Used to initialize the node's keychain for signing operations.
If [[miner].mining_key](#miner-mining_key) is not set, this seed may also be used for mining-related signing.

**Notes:**
- Required if [miner](#node-miner) is `true` and [[miner].mining_key](#miner-mining_key) is absent. | Randomly generated 32 bytes | -| [stacker](#node-stacker) | Setting this to `true` enables the node to replicate the miner and signer Stacker DBs
required for signing, and is required if the node is connected to a signer. | `false` | -| [stacker_dbs](#node-stacker_dbs) | A list of specific StackerDB contracts (identified by their qualified contract identifiers,
e.g., "SP000000000000000000002Q6VF78.pox-3") that this node should actively replicate.

**Notes:**
- Values are strings representing qualified contract identifiers.

**Example:**
stacker_dbs = [
  "SP000000000000000000002Q6VF78.pox-3",
  "SP2C2YFP12AJZB4M4KUPSTMZQR0SNHNPH204SCQJM.stx-oracle-v1"
]
| - If [miner](#node-miner) is `true` or [stacker](#node-stacker) is `true`, relevant system contracts
(like `.miners`, `.signers-*`) are automatically added in addition to any contracts
specified in the configuration file.
- Otherwise, defaults to an empty list `[]` if not specified in the TOML. | -| [txindex](#node-txindex) | Enables the transaction index, which maps transaction IDs to the blocks containing them.
Setting this to `true` allows the use of RPC endpoints that look up transactions by ID
(e.g., `/extended/v1/tx/{txid}`), but requires substantial additional disk space for the index database. | `false` | -| [use_test_genesis_chainstate](#node-use_test_genesis_chainstate) | If set to `true`, the node initializes its state using an alternative test genesis block definition,
loading different initial balances, names, and lockups than the standard network genesis.
This is intended strictly for testing purposes and is disallowed on mainnet.

**Notes:**
- This is intended strictly for testing purposes and is disallowed on mainnet. | `None` (uses standard network genesis) | +| [name](#node-name) | Human-readable name for the node. Primarily used for identification in testing
environments (e.g., deriving log file names, temporary directory names). | `"helium-node"` | +| [next_initiative_delay](#node-next_initiative_delay) | Controls how frequently, in milliseconds, the Nakamoto miner's relay thread
polls for work or takes periodic actions when idle (e.g., checking for new
burnchain blocks). A default value of 10 seconds is reasonable on mainnet
(where bitcoin blocks are ~10 minutes). A lower value might be useful in
other environments with faster burn blocks.

**Units:** milliseconds | `10_000` (10 seconds) | +| [p2p_address](#node-p2p_address) | The publicly accessible IPv4 address and port that this node advertises to peers
for P2P connections. This might differ from [p2p_bind](#node-p2p_bind) if the
node is behind NAT or a proxy.

**Notes:**
- Example: For rpc_bind="0.0.0.0:20443", p2p_address becomes "0.0.0.0:20443".
- The default value derivation might be unexpected, potentially using the [rpc_bind](#node-rpc_bind) address; explicit configuration is recommended if needed. | Derived directly from [rpc_bind](#node-rpc_bind) value. | +| [p2p_bind](#node-p2p_bind) | The IPv4 address and port (e.g., "0.0.0.0:20444") on which the node's P2P
networking service should bind and listen for incoming connections from other peers. | `"0.0.0.0:20444"` | +| [prometheus_bind](#node-prometheus_bind) | Optional network address and port (e.g., "127.0.0.1:9153") for binding the
Prometheus metrics server. If set, the node will start an HTTP server on this
address to expose internal metrics for scraping by a Prometheus instance. | `None` (Prometheus server disabled) | +| [require_affirmed_anchor_blocks](#node-require_affirmed_anchor_blocks) | Controls if the node must wait for locally missing but burnchain-affirmed PoX
anchor blocks. If an anchor block is confirmed by the affirmation map but not
yet processed by this node:
- If `true`: Burnchain processing halts until the affirmed block is acquired.
Ensures strict adherence to the affirmed canonical chain, typical for
followers.
- If `false`: Burnchain processing continues without waiting. Allows miners to
operate optimistically but may necessitate unwinding later if the affirmed
block alters the chain state. | Derived from the inverse of [miner](#node-miner) value. | +| [rpc_bind](#node-rpc_bind) | The IPv4 address and port (e.g., "0.0.0.0:20443") on which the node's HTTP RPC
server should bind and listen for incoming API requests. | `"0.0.0.0:20443"` | +| [seed](#node-seed) | The node's Bitcoin wallet private key, provided as a hex string in the config file.
Used to initialize the node's keychain for signing operations.
If [[miner].mining_key](#miner-mining_key) is not set, this seed may also be used for
mining-related signing.

**Notes:**
- Required if [miner](#node-miner) is `true` and [[miner].mining_key](#miner-mining_key) is absent. | Randomly generated 32 bytes | +| [stacker](#node-stacker) | Setting this to `true` enables the node to replicate the miner and signer
Stacker DBs required for signing, and is required if the node is connected to a
signer. | `false` | +| [stacker_dbs](#node-stacker_dbs) | A list of specific StackerDB contracts (identified by their qualified contract
identifiers, e.g., "SP000000000000000000002Q6VF78.pox-3") that this node
should actively replicate.

**Notes:**
- Values are strings representing qualified contract identifiers.

**Example:**
stacker_dbs = [
  "SP000000000000000000002Q6VF78.pox-3",
  "SP2C2YFP12AJZB4M4KUPSTMZQR0SNHNPH204SCQJM.stx-oracle-v1"
]
| - if [miner](#node-miner) is `true` or [stacker](#node-stacker) is `true`:
relevant system contracts (e.g., `.miners`, `.signers-*`) are
automatically added in addition to any contracts specified in the
configuration file.
- else: defaults to an empty list `[]`. | +| [txindex](#node-txindex) | Enables the transaction index, which maps transaction IDs to the blocks
containing them. Setting this to `true` allows the use of RPC endpoints
that look up transactions by ID (e.g., `/extended/v1/tx/{txid}`), but
requires substantial additional disk space for the index database. | `false` | +| [use_test_genesis_chainstate](#node-use_test_genesis_chainstate) | If set to `true`, the node initializes its state using an alternative test
genesis block definition, loading different initial balances, names, and
lockups than the standard network genesis.

**Notes:**
- This is intended strictly for testing purposes and is disallowed on mainnet. | `None` (uses standard network genesis) | | [wait_time_for_blocks](#node-wait_time_for_blocks) | When operating as a miner, this specifies the maximum time (in milliseconds)
the node waits after detecting a new burnchain block to synchronize corresponding
Stacks block data from the network before resuming mining attempts.
If synchronization doesn't complete within this duration, mining resumes anyway
to prevent stalling. This setting is loaded by all nodes but primarily affects
miner behavior within the relayer thread.

**Units:** milliseconds | `30_000` (30 seconds) | -| [working_dir](#node-working_dir) | The file system absolute path to the node's working directory.
All persistent data, including chainstate, burnchain databases, and potentially other stores,
will be located within this directory.
This path can be overridden by setting the `STACKS_WORKING_DIR` environment variable.

**Notes:**
- For persistent mainnet or testnet nodes, this path must be explicitly configured to a non-temporary location. | `/tmp/stacks-node-{current_timestamp}` | +| [working_dir](#node-working_dir) | The file system absolute path to the node's working directory.
All persistent data, including chainstate, burnchain databases, and potentially
other stores, will be located within this directory. This path can be
overridden by setting the `STACKS_WORKING_DIR` environment variable.

**Notes:**
- For persistent mainnet or testnet nodes, this path must be explicitly configured to a non-temporary location. | `/tmp/stacks-node-{current_timestamp}` | | ~~[max_microblocks](#node-max_microblocks)~~ | The maximum number of microblocks allowed per Stacks block.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 2.5+. | `65535` (u16::MAX) | | ~~[microblock_frequency](#node-microblock_frequency)~~ | How often to attempt producing microblocks, in milliseconds.

**Notes:**
- Only applies when [mine_microblocks](#node-mine_microblocks) is true and before Epoch 2.5.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 2.5+.

**Units:** milliseconds | `30_000` (30 seconds) | | ~~[mine_microblocks](#node-mine_microblocks)~~ | Enable microblock mining.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 2.5+. | `true` | -| ~~[mock_mining_output_dir](#node-mock_mining_output_dir)~~ | If [mock_mining](#node-mock_mining) is enabled, this specifies an optional directory path where the
generated mock Stacks blocks will be saved. (pre-Nakamoto)
The path is canonicalized on load.

**⚠️ DEPRECATED:** This setting was only used in the neon node and is ignored in Epoch 3.0+. | `None` | -| ~~[pox_sync_sample_secs](#node-pox_sync_sample_secs)~~ | Sampling interval in seconds for the PoX synchronization watchdog thread (pre-Nakamoto).
Determines how often the watchdog checked PoX state consistency in the Neon run loop.

**⚠️ DEPRECATED:** Unused after the Nakamoto upgrade. This setting is ignored in Epoch 3.0+.

**Units:** seconds | `30` | +| ~~[mock_mining_output_dir](#node-mock_mining_output_dir)~~ | If [mock_mining](#node-mock_mining) is enabled, this specifies an optional directory
path where the generated mock Stacks blocks will be saved. (pre-Nakamoto)
The path is canonicalized on load.

**⚠️ DEPRECATED:** This setting was only used in the neon node and is ignored in Epoch 3.0+. | `None` | +| ~~[pox_sync_sample_secs](#node-pox_sync_sample_secs)~~ | Sampling interval in seconds for the PoX synchronization watchdog thread
(pre-Nakamoto). Determines how often the watchdog checked PoX state
consistency in the Neon run loop.

**⚠️ DEPRECATED:** Unused after the Nakamoto upgrade. This setting is ignored in Epoch 3.0+.

**Units:** seconds | `30` | | ~~[wait_time_for_microblocks](#node-wait_time_for_microblocks)~~ | Cooldown period after a microblock is produced, in milliseconds.

**Notes:**
- Only applies when [mine_microblocks](#node-mine_microblocks) is true and before Epoch 2.5.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 2.5+.

**Units:** milliseconds | `30_000` (30 seconds) |
@@ -184,42 +184,42 @@ The configuration is automatically generated from the Rust source code documenta
| Parameter | Description | Default |
|-----------|-------------|----------|
-| [activated_vrf_key_path](#miner-activated_vrf_key_path) | Path to a file for storing and loading the currently active, registered VRF leader key.

Loading: On startup or when needing to register a key, if this path is set, the relayer first
attempts to load a serialized `RegisteredKey` from this file. If successful, it uses the
loaded key and skips the on-chain VRF key registration transaction, saving time and fees.
Saving: After a new VRF key registration transaction is confirmed and activated on the burnchain,
if this path is set, the node saves the details of the newly activated `RegisteredKey` to this file.
This allows the miner to persist its active VRF key across restarts.
If the file doesn't exist during load, or the path is `None`, the node proceeds with a new registration. | `None` | -| [block_commit_delay](#miner-block_commit_delay) | Time in milliseconds to wait for a Nakamoto block after seeing a burnchain block before submitting a block commit.

After observing a new burnchain block, the miner's relayer waits for this duration
before submitting its next block commit transaction to Bitcoin. This delay provides an opportunity
for a new Nakamoto block (produced by the winner of the latest sortition) to arrive.
Waiting helps avoid situations where the relayer immediately submits a commit that needs
to be replaced via RBF if a new Stacks block appears shortly after.
This delay is skipped if the new burnchain blocks leading to the tip contain no sortitions.

**Units:** milliseconds | `40_000` | -| [block_rejection_timeout_steps](#miner-block_rejection_timeout_steps) | Defines adaptive timeouts for waiting for signer responses, based on the accumulated weight of rejections.

Configured as a map where keys represent rejection count thresholds in percentage,
and values are the timeout durations (in seconds) to apply when the rejection count
reaches or exceeds that key but is less than the next key.

When a miner proposes a block, it waits for signer responses (approvals or rejections).
The SignerCoordinator tracks the total weight of received rejections. It uses this map to determine
the current timeout duration. It selects the timeout value associated with the largest key
in the map that is less than or equal to the current accumulated rejection weight.
If this timeout duration expires before a decision is reached, the coordinator signals a timeout.
This prompts the miner to potentially retry proposing the block.
As more rejections come in, the applicable timeout step might change (likely decrease),
allowing the miner to abandon unviable proposals faster.

A key for 0 (zero rejections) must be defined, representing the initial timeout when no rejections have been received.

**Notes:**
- Keys are rejection weight percentages (0-100). Values are timeout durations.

**Example:**
# Keys are rejection counts (as strings), values are timeouts in seconds.
[miner.block_rejection_timeout_steps]
"0" = 180
"10" = 90
"20" = 45
"30" = 0
| `{ 0: 180, 10: 90, 20: 45, 30: 0 }` (times in seconds) | -| [block_reward_recipient](#miner-block_reward_recipient) | Optional recipient for the coinbase block reward, overriding the default miner address.

By default (`None`), the reward is sent to the miner's primary address ([[node].seed](#node-seed) ).
If set to some principal address *and* the current Stacks epoch is > 2.1,
the reward will be directed to the specified address instead. | `None` | -| [candidate_retry_cache_size](#miner-candidate_retry_cache_size) | Max size (in *number* of items) of transaction candidates to hold in the in-memory
retry cache.

This cache stores transactions encountered during a `GlobalFeeRate` mempool walk
whose nonces are currently too high for immediate processing. These candidates
are prioritized for reconsideration later within the *same* walk, potentially
becoming valid if other processed transactions update the expected nonces.

A larger cache retains more potentially valid future candidates but uses more memory.
This setting is primarily relevant for the `GlobalFeeRate` strategy.

**Notes:**
- Each element `crate::core::mempool::MemPoolTxInfoPartial` is currently 112 bytes.

**Units:** items | `1048576` | -| [empty_mempool_sleep_time](#miner-empty_mempool_sleep_time) | The amount of time in milliseconds that the miner should sleep in between attempts to
mine a block when the mempool is empty.

This prevents the miner from busy-looping when there are no pending transactions,
conserving CPU resources. During this sleep, the miner still checks burnchain tip changes.

**Units:** milliseconds | `2_500` | -| [filter_origins](#miner-filter_origins) | A comma separated list of Stacks addresses to whitelist so that only transactions from
these addresses should be considered during the mempool walk for block building. If this
list is non-empty, any transaction whose origin address is *not* in this set will be skipped.

This allows miners to prioritize transactions originating from specific accounts that are
important to them.
Configured as a comma-separated string of standard Stacks addresses (e.g., "ST123...,ST456...")
in the configuration file.

**Example:**
filter_origins = "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2,ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF"
| Empty set (all origins are considered). | -| [first_rejection_pause_ms](#miner-first_rejection_pause_ms) | Time in milliseconds to pause after receiving the first threshold rejection, before proposing a new block.

When a miner's block proposal fails to gather enough signatures from the signers for the first time
at a given height, the miner will pause for this duration before attempting to mine and propose again.

**Units:** milliseconds | `5_000` | -| [max_execution_time_secs](#miner-max_execution_time_secs) | Defines the maximum execution time (in seconds) allowed for a single contract call transaction.

When processing a transaction (contract call or smart contract deployment), if this option is set,
and the execution time exceeds this limit, the transaction processing fails with an `ExecutionTimeout` error,
and the transaction is skipped. This prevents potentially long-running or infinite-loop transactions
from blocking block production.

**Units:** seconds | `None` (no execution time limit) | +| [activated_vrf_key_path](#miner-activated_vrf_key_path) | Path to a file for storing and loading the currently active, registered VRF leader key.

Loading: On startup or when needing to register a key, if this path is set,
the relayer first attempts to load a serialized `RegisteredKey` from this
file. If successful, it uses the loaded key and skips the on-chain VRF key
registration transaction, saving time and fees.
Saving: After a new VRF key registration transaction is confirmed and
activated on the burnchain, if this path is set, the node saves the details
of the newly activated `RegisteredKey` to this file. This allows the
miner to persist its active VRF key across restarts.
If the file doesn't exist during load, or the path is `None`, the node
proceeds with a new registration. | `None` | +| [block_commit_delay](#miner-block_commit_delay) | Time in milliseconds to wait for a Nakamoto block after seeing a burnchain
block before submitting a block commit.

After observing a new burnchain block, the miner's relayer waits for this
duration before submitting its next block commit transaction to Bitcoin.
This delay provides an opportunity for a new Nakamoto block (produced by the
winner of the latest sortition) to arrive. Waiting helps avoid situations
where the relayer immediately submits a commit that needs to be replaced
via RBF if a new Stacks block appears shortly after. This delay is skipped
if the new burnchain blocks leading to the tip contain no sortitions.

**Units:** milliseconds | `40_000` | +| [block_rejection_timeout_steps](#miner-block_rejection_timeout_steps) | Defines adaptive timeouts for waiting for signer responses, based on the
accumulated weight of rejections.

Configured as a map where keys represent rejection weight thresholds in
percent, and values are the timeout durations (in seconds) to apply when the
accumulated rejection weight reaches or exceeds that key but is less than the next key.

When a miner proposes a block, it waits for signer responses (approvals or
rejections). The SignerCoordinator tracks the total weight of received
rejections. It uses this map to determine the current timeout duration. It
selects the timeout value associated with the largest key in the map that is
less than or equal to the current accumulated rejection weight. If this
timeout duration expires before a decision is reached, the coordinator
signals a timeout. This prompts the miner to potentially retry proposing the
block. As more rejections come in, the applicable timeout step might change
(likely decrease), allowing the miner to abandon unviable proposals faster.

A key for 0 (zero rejections) must be defined, representing the initial
timeout when no rejections have been received.

**Notes:**
- Keys are rejection weight percentages (0-100).
- Values are timeout durations.

**Example:**
# Keys are rejection weight percentages (as strings), values are timeouts in seconds.
[miner.block_rejection_timeout_steps]
"0" = 180
"10" = 90
"20" = 45
"30" = 0
| `{ 0: 180, 10: 90, 20: 45, 30: 0 }` (times in seconds) | +| [block_reward_recipient](#miner-block_reward_recipient) | Optional recipient for the coinbase block reward, overriding the default miner address.

By default (`None`), the reward is sent to the miner's primary address
([[node].seed](#node-seed)). If set to some principal address *and* the current
Stacks epoch is > 2.1, the reward will be directed to the specified
address instead. | `None` | +| [candidate_retry_cache_size](#miner-candidate_retry_cache_size) | Max size (in *number* of items) of transaction candidates to hold in the in-memory
retry cache.

This cache stores transactions encountered during a `GlobalFeeRate` mempool
walk whose nonces are currently too high for immediate processing. These
candidates are prioritized for reconsideration later within the *same* walk,
potentially becoming valid if other processed transactions update the
expected nonces.

A larger cache retains more potentially valid future candidates but uses more
memory. This setting is primarily relevant for the `GlobalFeeRate` strategy.

**Notes:**
- Each element `crate::core::mempool::MemPoolTxInfoPartial` is currently 112 bytes.

**Units:** items | `1048576` | +| [empty_mempool_sleep_time](#miner-empty_mempool_sleep_time) | The amount of time in milliseconds that the miner should sleep in between
attempts to mine a block when the mempool is empty.

This prevents the miner from busy-looping when there are no pending
transactions, conserving CPU resources. During this sleep, the miner still
checks burnchain tip changes.

**Units:** milliseconds | `2_500` | +| [filter_origins](#miner-filter_origins) | A comma-separated list of Stacks addresses to whitelist so that only
transactions from these addresses are considered during the mempool walk
for block building. If this list is non-empty, any transaction whose origin
address is *not* in this set will be skipped.

This allows miners to prioritize transactions originating from specific accounts that are
important to them.
Configured as a comma-separated string of standard Stacks addresses
(e.g., "ST123...,ST456...") in the configuration file.

**Example:**
filter_origins = "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2,ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF"
| Empty set (all origins are considered). | +| [first_rejection_pause_ms](#miner-first_rejection_pause_ms) | Time in milliseconds to pause after receiving the first threshold rejection,
before proposing a new block.

When a miner's block proposal fails to gather enough signatures from the
signers for the first time at a given height, the miner will pause for this
duration before attempting to mine and propose again.

**Units:** milliseconds | `5_000` | +| [max_execution_time_secs](#miner-max_execution_time_secs) | Defines the maximum execution time (in seconds) allowed for a single contract call transaction.

When processing a transaction (contract call or smart contract deployment),
if this option is set, and the execution time exceeds this limit, the
transaction processing fails with an `ExecutionTimeout` error, and the
transaction is skipped. This prevents potentially long-running or
infinite-loop transactions from blocking block production.

**Units:** seconds | `None` (no execution time limit) | | [mempool_walk_strategy](#miner-mempool_walk_strategy) | Strategy for selecting the next transaction candidate from the mempool.
Controls prioritization between maximizing immediate fee capture vs. ensuring
transaction nonce order for account progression and processing efficiency.

See `MemPoolWalkStrategy` for variant details.

Possible values (use variant names for configuration):
- `"GlobalFeeRate"`: Selects the transaction with the highest fee rate globally.
- `"NextNonceWithHighestFeeRate"`: Selects the highest-fee transaction among those
matching the next expected nonce for sender/sponsor accounts. | `"GlobalFeeRate"` | -| [min_time_between_blocks_ms](#miner-min_time_between_blocks_ms) | The minimum time to wait between mining blocks in milliseconds. The value must be greater
than or equal to 1000 ms because if a block is mined within the same second as its parent,
it will be rejected by the signers.

This check ensures compliance with signer rules that prevent blocks with identical timestamps
(at second resolution) to their parents. If a lower value is configured, 1000 ms is used instead.

**Units:** milliseconds | `1_000` | -| [mining_key](#miner-mining_key) | The private key (Secp256k1) used for signing blocks, provided as a hex string.

This key must be present at runtime for mining operations to succeed. | - [[node].seed](#node-seed) if the `[miner]` section *is present* in the config file
- `None` if the `[miner]` section *is not present* | -| [nakamoto_attempt_time_ms](#miner-nakamoto_attempt_time_ms) | Maximum time (in milliseconds) the miner spends selecting transactions from the mempool
when assembling a Nakamoto block. Once this duration is exceeded, the miner stops
adding transactions and finalizes the block with those already selected.

**Units:** milliseconds | `5_000` (5 seconds) | -| [nonce_cache_size](#miner-nonce_cache_size) | Max size (in bytes) of the in-memory cache for storing expected account nonces.

This cache accelerates mempool processing (e.g., during block building) by storing
the anticipated next nonce for accounts, reducing expensive lookups into the node's
state (MARF trie). A larger cache can improve performance for workloads involving
many unique accounts but increases memory consumption.

**Notes:**
- Must be configured to a value greater than 0.

**Units:** bytes | `1048576` (1 MiB) | -| [probability_pick_no_estimate_tx](#miner-probability_pick_no_estimate_tx) | Probability (percentage, 0-100) of prioritizing a transaction without a known fee rate
during candidate selection.

Only effective when `mempool_walk_strategy` is `GlobalFeeRate`. Helps ensure
transactions lacking fee estimates are periodically considered alongside high-fee ones,
preventing potential starvation. A value of 0 means never prioritize them first,
100 means always prioritize them first (if available).

**Notes:**
- Values: 0-100.

**Units:** percent | `25` (25% chance) | +| [min_time_between_blocks_ms](#miner-min_time_between_blocks_ms) | The minimum time to wait between mining blocks in milliseconds. The value
must be greater than or equal to 1000 ms because if a block is mined
within the same second as its parent, it will be rejected by the signers.

This check ensures compliance with signer rules that prevent blocks with
identical timestamps (at second resolution) to their parents. If a lower
value is configured, 1000 ms is used instead.

**Units:** milliseconds | `1_000` | +| [mining_key](#miner-mining_key) | The private key (Secp256k1) used for signing blocks, provided as a hex string.

This key must be present at runtime for mining operations to succeed. | - if the `[miner]` section *is present* in the config file: [[node].seed](#node-seed)
- else: `None` | +| [nakamoto_attempt_time_ms](#miner-nakamoto_attempt_time_ms) | Maximum time (in milliseconds) the miner spends selecting transactions from
the mempool when assembling a Nakamoto block. Once this duration is exceeded,
the miner stops adding transactions and finalizes the block with those
already selected.

**Units:** milliseconds | `5_000` (5 seconds) | +| [nonce_cache_size](#miner-nonce_cache_size) | Max size (in bytes) of the in-memory cache for storing expected account nonces.

This cache accelerates mempool processing (e.g., during block building) by
storing the anticipated next nonce for accounts, reducing expensive lookups
into the node's state (MARF trie). A larger cache can improve performance
for workloads involving many unique accounts but increases memory consumption.

**Notes:**
- Must be configured to a value greater than 0.

**Units:** bytes | `1048576` (1 MiB) | +| [probability_pick_no_estimate_tx](#miner-probability_pick_no_estimate_tx) | Probability (percentage, 0-100) of prioritizing a transaction without a
known fee rate during candidate selection.

Only effective when `mempool_walk_strategy` is `GlobalFeeRate`. Helps ensure
transactions lacking fee estimates are periodically considered alongside
high-fee ones, preventing potential starvation. A value of 0 means never
prioritize them first, 100 means always prioritize them first (if available).

**Notes:**
- Values: 0-100.

**Units:** percent | `25` (25% chance) | | [replay_transactions](#miner-replay_transactions) | TODO: remove this option when it's no longer a testing feature and it becomes the default behaviour
The miner will attempt to replay transactions that a threshold number of signers are expecting in the next block. | **Required** | | [segwit](#miner-segwit) | If possible, mine with a p2wpkh address. | `false` | -| [subsequent_rejection_pause_ms](#miner-subsequent_rejection_pause_ms) | Time in milliseconds to pause after receiving subsequent threshold rejections, before proposing a new block.

If a miner's block proposal is rejected multiple times at the same height (after the first rejection),
this potentially longer pause duration is used before retrying. This gives more significant time
for network state changes or signer coordination.

**Units:** milliseconds | `10_000` | -| [tenure_cost_limit_per_block_percentage](#miner-tenure_cost_limit_per_block_percentage) | The percentage of the remaining tenure cost limit to consume each block.

This setting limits the execution cost (Clarity cost) a single Nakamoto block can incur,
expressed as a percentage of the *remaining* cost budget for the current mining tenure.
For example, if set to 25, a block can use at most 25% of the tenure's currently available cost limit.
This allows miners to spread the tenure's total execution budget across multiple blocks rather than
potentially consuming it all in the first block.

**Notes:**
- Values: 1-100.
- Setting to 100 effectively disables this per-block limit, allowing a block to use the entire remaining tenure budget.

**Units:** percent | `25` | -| [tenure_extend_cost_threshold](#miner-tenure_extend_cost_threshold) | Percentage of block budget that must be used before attempting a time-based tenure extend.

This sets a minimum threshold for the accumulated execution cost within a tenure before a
time-based tenure extension ([tenure_timeout](#miner-tenure_timeout) ) can be initiated.
The miner checks if the proportion of the total tenure budget consumed so far exceeds this percentage.
If the cost usage is below this threshold, a time-based extension will not be attempted, even if
the [tenure_timeout](#miner-tenure_timeout) duration has elapsed.
This prevents miners from extending tenures very early if they have produced only low-cost blocks.

**Notes:**
- Values: 0-100.

**Units:** percent | `50` | -| [tenure_extend_poll_timeout](#miner-tenure_extend_poll_timeout) | Duration to wait in-between polling the sortition DB to see if we need to
extend the ongoing tenure (e.g. because the current sortition is empty or invalid).

After the relayer determines that a tenure extension might be needed but cannot proceed immediately
(e.g., because a miner thread is already active for the current burn view), it will wait for this
duration before re-checking the conditions for tenure extension.

**Units:** seconds | `1` | -| [tenure_extend_wait_timeout](#miner-tenure_extend_wait_timeout) | Duration to wait before trying to continue a tenure because the next miner did not produce blocks.

If the node was the winner of the previous sortition but not the most recent one,
the relayer waits for this duration before attempting to extend its own tenure.
This gives the new winner of the most recent sortition a grace period to produce their first block.
Also used in scenarios with empty sortitions to give the winner of the *last valid* sortition time
to produce a block before the current miner attempts an extension.

**Units:** milliseconds | `120_000` | -| [tenure_timeout](#miner-tenure_timeout) | Duration to wait before attempting to issue a time-based tenure extend.

A miner can proactively attempt to extend its tenure if a significant amount of time has passed
since the last tenure change, even without an explicit trigger like an empty sortition.
If the time elapsed since the last tenure change exceeds this value, and the signer coordinator
indicates an extension is timely, and the cost usage threshold ([tenure_extend_cost_threshold](#miner-tenure_extend_cost_threshold) )
is met, the miner will include a tenure extension transaction in its next block.

**Units:** seconds | `180` | -| [txs_to_consider](#miner-txs_to_consider) | Specifies which types of transactions the miner should consider including in a block
during the mempool walk process. Transactions of types not included in this set will be skipped.

This allows miners to exclude specific transaction categories.
Configured as a comma-separated string of transaction type names in the configuration file.

Accepted values correspond to variants of `MemPoolWalkTxTypes`:
- `"TokenTransfer"`
- `"SmartContract"`
- `"ContractCall"`

**Example:**
txs_to_consider = "TokenTransfer,ContractCall"
| All transaction types are considered (equivalent to [`MemPoolWalkTxTypes::all()`]). | +| [subsequent_rejection_pause_ms](#miner-subsequent_rejection_pause_ms) | Time in milliseconds to pause after receiving subsequent threshold rejections,
before proposing a new block.

If a miner's block proposal is rejected multiple times at the same height
(after the first rejection), this potentially longer pause duration is used
before retrying. This gives more significant time for network state changes
or signer coordination.

**Units:** milliseconds | `10_000` | +| [tenure_cost_limit_per_block_percentage](#miner-tenure_cost_limit_per_block_percentage) | The percentage of the remaining tenure cost limit to consume each block.

This setting limits the execution cost (Clarity cost) a single Nakamoto block
can incur, expressed as a percentage of the *remaining* cost budget for the
current mining tenure. For example, if set to 25, a block can use at most
25% of the tenure's currently available cost limit. This allows miners to
spread the tenure's total execution budget across multiple blocks rather than
potentially consuming it all in the first block.

**Notes:**
- Values: 1-100.
- Setting to 100 effectively disables this per-block limit, allowing a block to use the entire remaining tenure budget.

**Units:** percent | `25` | +| [tenure_extend_cost_threshold](#miner-tenure_extend_cost_threshold) | Percentage of block budget that must be used before attempting a time-based tenure extend.

This sets a minimum threshold for the accumulated execution cost within a
tenure before a time-based tenure extension ([tenure_timeout](#miner-tenure_timeout))
can be initiated. The miner checks if the proportion of the total tenure
budget consumed so far exceeds this percentage. If the cost usage is below
this threshold, a time-based extension will not be attempted, even if the
[tenure_timeout](#miner-tenure_timeout) duration has elapsed. This prevents miners
from extending tenures very early if they have produced only low-cost blocks.

**Notes:**
- Values: 0-100.

**Units:** percent | `50` | +| [tenure_extend_poll_timeout](#miner-tenure_extend_poll_timeout) | Duration to wait in-between polling the sortition DB to see if we need to
extend the ongoing tenure (e.g. because the current sortition is empty or invalid).

After the relayer determines that a tenure extension might be needed but
cannot proceed immediately (e.g., because a miner thread is already active
for the current burn view), it will wait for this duration before
re-checking the conditions for tenure extension.

**Units:** seconds | `1` | +| [tenure_extend_wait_timeout](#miner-tenure_extend_wait_timeout) | Duration to wait before trying to continue a tenure because the next miner
did not produce blocks.

If the node was the winner of the previous sortition but not the most recent
one, the relayer waits for this duration before attempting to extend its own
tenure. This gives the new winner of the most recent sortition a grace period
to produce their first block. Also used in scenarios with empty sortitions
to give the winner of the *last valid* sortition time to produce a block
before the current miner attempts an extension.

**Units:** milliseconds | `120_000` | +| [tenure_timeout](#miner-tenure_timeout) | Duration to wait before attempting to issue a time-based tenure extend.

A miner can proactively attempt to extend its tenure if a significant amount
of time has passed since the last tenure change, even without an explicit
trigger like an empty sortition. If the time elapsed since the last tenure
change exceeds this value, and the signer coordinator indicates an extension
is timely, and the cost usage threshold ([tenure_extend_cost_threshold](#miner-tenure_extend_cost_threshold))
is met, the miner will include a tenure extension transaction in its next block.

**Units:** seconds | `180` | +| [txs_to_consider](#miner-txs_to_consider) | Specifies which types of transactions the miner should consider including in a
block during the mempool walk process. Transactions of types not included in
this set will be skipped.

This allows miners to exclude specific transaction categories.
Configured as a comma-separated string of transaction type names in the configuration file.

Accepted values correspond to variants of `MemPoolWalkTxTypes`:
- `"TokenTransfer"`
- `"SmartContract"`
- `"ContractCall"`

**Example:**
txs_to_consider = "TokenTransfer,ContractCall"
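As a further sketch (hypothetical policy), a miner that only wants to include plain STX transfers would list a single variant:
txs_to_consider = "TokenTransfer" # consider token transfers only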
| All transaction types are considered (equivalent to [`MemPoolWalkTxTypes::all()`]). | | [wait_for_block_download](#miner-wait_for_block_download) | Wait for a downloader pass before mining.
This can only be disabled in testing; it can't be changed in the config file. | `true` | -| ~~[fast_rampup](#miner-fast_rampup)~~ | Controls how the miner estimates its win probability when checking for underperformance.

This estimation is used in conjunction with [target_win_probability](#miner-target_win_probability) and
[underperform_stop_threshold](#miner-underperform_stop_threshold) to decide whether to pause mining due to
low predicted success rate.

- If `true`: The win probability estimation looks at projected spend distributions
~6 blocks into the future. This might help the miner adjust its spending more quickly
based on anticipated competition changes.
- If `false`: The win probability estimation uses the currently observed spend distribution
for the next block.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode and by the
`get-spend-amount` cli subcommand. | `false` | +| ~~[fast_rampup](#miner-fast_rampup)~~ | Controls how the miner estimates its win probability when checking for underperformance.

This estimation is used in conjunction with [target_win_probability](#miner-target_win_probability) and
[underperform_stop_threshold](#miner-underperform_stop_threshold) to decide whether to pause
mining due to low predicted success rate.

- If `true`: The win probability estimation looks at projected spend
distributions ~6 blocks into the future. This might help the miner adjust
its spending more quickly based on anticipated competition changes.
- If `false`: The win probability estimation uses the currently observed
spend distribution for the next block.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode and by the
`get-spend-amount` cli subcommand. | `false` | | ~~[first_attempt_time_ms](#miner-first_attempt_time_ms)~~ | Time to wait (in milliseconds) before the first attempt to mine a block.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode.

**Units:** milliseconds | `10` | -| ~~[max_reorg_depth](#miner-max_reorg_depth)~~ | Defines the maximum depth (in Stacks blocks) the miner considers when evaluating
potential chain tips when selecting the best tip to mine the next block on.

The miner analyzes candidate tips within this depth from the highest known tip.
It selects the "nicest" tip, often defined as the one that minimizes chain reorganizations
or orphans within this lookback window. A lower value restricts the analysis to shallower forks,
while a higher value considers deeper potential reorganizations.

This setting influences which fork the miner chooses to build upon if multiple valid tips exist.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode and the
`pick-best-tip` cli subcommand. | `3` | +| ~~[max_reorg_depth](#miner-max_reorg_depth)~~ | Defines the maximum depth (in Stacks blocks) the miner considers when
evaluating potential chain tips when selecting the best tip to mine the next
block on.

The miner analyzes candidate tips within this depth from the highest known
tip. It selects the "nicest" tip, often defined as the one that minimizes
chain reorganizations or orphans within this lookback window. A lower value
restricts the analysis to shallower forks, while a higher value considers
deeper potential reorganizations.

This setting influences which fork the miner chooses to build upon if multiple valid tips exist.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode and the
`pick-best-tip` cli subcommand. | `3` | | ~~[microblock_attempt_time_ms](#miner-microblock_attempt_time_ms)~~ | Time to wait (in milliseconds) to mine a microblock.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode.

**Units:** milliseconds | `30_000` (30 seconds) | -| ~~[min_tx_count](#miner-min_tx_count)~~ | Minimum number of transactions that must be in a block if we're going to replace a pending
block-commit with a new block-commit.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode. | `0` | -| ~~[only_increase_tx_count](#miner-only_increase_tx_count)~~ | If true, requires subsequent mining attempts for the same block height
to have a transaction count >= the previous best attempt.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode. | `false` | -| ~~[pre_nakamoto_mock_signing](#miner-pre_nakamoto_mock_signing)~~ | Enables a mock signing process for testing purposes, specifically designed for use during Epoch 2.5
before the activation of Nakamoto consensus.

When set to `true` and [mining_key](#miner-mining_key) is provided, the miner will interact
with the `.miners` and `.signers` contracts via the stackerdb to send and receive mock
proposals and signatures, simulating aspects of the Nakamoto leader election and block signing flow.

**Notes:**
- This is intended strictly for testing purposes for Epoch 2.5 conditions.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. | `false` (Should only default true if [mining_key](#miner-mining_key) is set). |
+| ~~[min_tx_count](#miner-min_tx_count)~~ | Minimum number of transactions that must be in a block if we're going to
replace a pending block-commit with a new block-commit.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode. | `0` |
+| ~~[only_increase_tx_count](#miner-only_increase_tx_count)~~ | If true, requires subsequent mining attempts for the same block height to have
a transaction count >= the previous best attempt.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode. | `false` |
+| ~~[pre_nakamoto_mock_signing](#miner-pre_nakamoto_mock_signing)~~ | Enables a mock signing process for testing purposes, specifically designed
for use during Epoch 2.5 before the activation of Nakamoto consensus.

When set to `true` and [mining_key](#miner-mining_key) is provided, the miner
will interact with the `.miners` and `.signers` contracts via the stackerdb
to send and receive mock proposals and signatures, simulating aspects of the
Nakamoto leader election and block signing flow.

**Notes:**
- This is intended strictly for testing Epoch 2.5 conditions.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. | `false` (Should only default true if [mining_key](#miner-mining_key) is set). |
| ~~[subsequent_attempt_time_ms](#miner-subsequent_attempt_time_ms)~~ | Time to wait (in milliseconds) for subsequent attempts to mine a block,
after the first attempt fails.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode.

**Units:** milliseconds | `120_000` (2 minutes) |
-| ~~[target_win_probability](#miner-target_win_probability)~~ | The minimum win probability this miner aims to achieve in block sortitions.

This target is used to detect prolonged periods of underperformance. If the miner's
calculated win probability consistently falls below this value for a duration specified
by [underperform_stop_threshold](#miner-underperform_stop_threshold) (after an initial startup phase), the miner may
cease spending in subsequent sortitions (returning a burn fee cap of 0) to conserve resources.

Setting this value close to 0.0 effectively disables the underperformance check.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode. | `0.0` |
-| ~~[unconfirmed_commits_helper](#miner-unconfirmed_commits_helper)~~ | Optional path to an external helper script for fetching unconfirmed block-commits.
Used to inform the miner's dynamic burn fee bidding strategy with off-chain data.

If a path is provided, the target script must:
- Be executable by the user running the Stacks node process.
- Accept a list of active miner burnchain addresses as command-line arguments.
- On successful execution, print a JSON array representing `Vec<UnconfirmedBlockCommit>`
(see `stacks::config::chain_data::UnconfirmedBlockCommit` struct) to stdout.
- Exit with code 0 on success.

Look at `test_get_unconfirmed_commits` in `stackslib/src/config/chain_data.rs` for an example script.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode and by the
`get-spend-amount` cli subcommand. | `None` (feature disabled). |
-| ~~[underperform_stop_threshold](#miner-underperform_stop_threshold)~~ | The maximum number of consecutive Bitcoin blocks the miner will tolerate underperforming
(i.e., having a calculated win probability below [target_win_probability](#miner-target_win_probability))
before temporarily pausing mining efforts.

This check is only active after an initial startup phase (6 blocks past the mining start height).
If the miner underperforms for this number of consecutive blocks, the
`BlockMinerThread::get_mining_spend_amount` function will return 0, effectively preventing the
miner from submitting a block commit for the current sortition to conserve funds.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode. | `None` (underperformance check is disabled). |
+| ~~[target_win_probability](#miner-target_win_probability)~~ | The minimum win probability this miner aims to achieve in block sortitions.

This target is used to detect prolonged periods of underperformance. If the
miner's calculated win probability consistently falls below this value for a
duration specified by [underperform_stop_threshold](#miner-underperform_stop_threshold) (after
an initial startup phase), the miner may cease spending in subsequent
sortitions (returning a burn fee cap of 0) to conserve resources.

Setting this value close to 0.0 effectively disables the underperformance check.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode. | `0.0` |
+| ~~[unconfirmed_commits_helper](#miner-unconfirmed_commits_helper)~~ | Optional path to an external helper script for fetching unconfirmed
block-commits. Used to inform the miner's dynamic burn fee bidding strategy
with off-chain data.

If a path is provided, the target script must:
- Be executable by the user running the Stacks node process.
- Accept a list of active miner burnchain addresses as command-line arguments.
- On successful execution, print a JSON array representing `Vec<UnconfirmedBlockCommit>`
(see `stacks::config::chain_data::UnconfirmedBlockCommit` struct) to stdout.
- Exit with code 0 on success.

Look at `test_get_unconfirmed_commits` in `stackslib/src/config/chain_data.rs`
for an example script.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode
and by the `get-spend-amount` cli subcommand. | `None` (feature disabled). |
+| ~~[underperform_stop_threshold](#miner-underperform_stop_threshold)~~ | The maximum number of consecutive Bitcoin blocks the miner will tolerate
underperforming (i.e., having a calculated win probability below
[target_win_probability](#miner-target_win_probability)) before temporarily pausing mining efforts.

This check is only active after an initial startup phase (6 blocks past the
mining start height). If the miner underperforms for this number of
consecutive blocks, the `BlockMinerThread::get_mining_spend_amount` function
will return 0, effectively preventing the miner from submitting a block commit
for the current sortition to conserve funds.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode. | `None` (underperformance check is disabled). |
| ~~[unprocessed_block_deadline_secs](#miner-unprocessed_block_deadline_secs)~~ | Amount of time (in seconds) to wait for unprocessed blocks before mining a new block.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode.

**Units:** seconds | `30` |
| ~~[wait_on_interim_blocks](#miner-wait_on_interim_blocks)~~ | Amount of time while mining in nakamoto to wait in between mining interim blocks.

**⚠️ DEPRECATED:** Use `min_time_between_blocks_ms` instead. | `None` | From 31beefe650eb6f9e01a10b39330bf53079b20cf0 Mon Sep 17 00:00:00 2001 From: Francesco Leacche Date: Tue, 10 Jun 2025 16:00:13 +0200 Subject: [PATCH 12/20] remove CARGO_HOME from dockerfile --- contrib/tools/config-docs-generator/Dockerfile | 1 - 1 file changed, 1 deletion(-) diff --git a/contrib/tools/config-docs-generator/Dockerfile b/contrib/tools/config-docs-generator/Dockerfile index 49ad4a68b4..58b092b9e1 100644 --- a/contrib/tools/config-docs-generator/Dockerfile +++ b/contrib/tools/config-docs-generator/Dockerfile @@ -16,7 +16,6 @@ WORKDIR /project_root # Set environment variables for generate-config-docs.sh ENV PROJECT_ROOT=/project_root -ENV CARGO_HOME=/project_root/.cargo ENV CARGO_TARGET_DIR=/tmp/stacks-config-docs/target ENV TEMP_DIR=/tmp/stacks-config-docs/doc-generation ENV EXTRACT_DOCS_BIN=/build/target/release/extract-docs From c2212a2b3009b7f4c35a9219e235c7ee58ff7ad2 Mon Sep 17 00:00:00 2001 From: Francesco Leacche Date: Tue, 10 Jun 2025 18:11:09 +0200 Subject: [PATCH 13/20] simplify json parsing logic --- .../config-docs-generator/src/extract_docs.rs | 287 +++++++++++------- 1 file changed, 177 insertions(+), 110 deletions(-) diff --git a/contrib/tools/config-docs-generator/src/extract_docs.rs b/contrib/tools/config-docs-generator/src/extract_docs.rs index a7ddce71e8..c6b22e339d 100644 --- a/contrib/tools/config-docs-generator/src/extract_docs.rs +++ b/contrib/tools/config-docs-generator/src/extract_docs.rs @@ -51,6 +51,46 @@ struct ConfigDocs { referenced_constants: HashMap>, // Name -> Resolved Value (or None) } +// JSON navigation helper functions +/// Navigate through nested JSON structure using an array of keys +/// Returns None if any part of the path doesn't exist +/// +/// Example: get_json_path(value, &["inner", "struct", "kind"]) +/// is equivalent to value.get("inner")?.get("struct")?.get("kind") +fn get_json_path<'a>(value: &'a serde_json::Value, path: &[&str]) -> Option<&'a serde_json::Value> { + let mut current = value; + + for &key in path { + current = current.get(key)?; + } + + Some(current) +} + +/// Navigate to an array at the given JSON path +/// Returns None if the path doesn't exist or the value is not an array +fn get_json_array<'a>( + value: &'a serde_json::Value, + path: &[&str], +) -> Option<&'a Vec> { + get_json_path(value, path)?.as_array() +} + +/// Navigate to an object at the given JSON path +/// Returns None if the path doesn't exist or the value is not an object +fn get_json_object<'a>( + value: &'a serde_json::Value, + path: &[&str], +) -> Option<&'a serde_json::Map> { + get_json_path(value, path)?.as_object() +} + +/// Navigate to a string at the given JSON path +/// Returns None if the path doesn't exist or the value is not a string +fn get_json_string<'a>(value: &'a serde_json::Value, path: &[&str]) -> Option<&'a str> { + get_json_path(value, path)?.as_str() +} + fn main() -> Result<()> { let matches = ClapCommand::new("extract-docs") .about("Extract documentation from Rust source code using rustdoc JSON") @@ -200,33 +240,28 @@ fn extract_config_docs_from_rustdoc( let mut all_referenced_constants = std::collections::HashSet::new(); // Access the main index containing all items from the rustdoc JSON output - let index = rustdoc_json - .get("index") - .and_then(|v| v.as_object()) + let index = get_json_object(rustdoc_json, &["index"]) .context("Missing 'index' field in rustdoc JSON")?; for (_item_id, item) in index { // Extract the item's name from rustdoc JSON 
structure - if let Some(name) = item.get("name").and_then(|v| v.as_str()) { - // Navigate to the item's type information - if let Some(inner) = item.get("inner") { - // Check if this item is a struct by looking for the "struct" field - if let Some(_struct_data) = inner.get("struct") { - // Check if this struct is in our target list (if specified) - if let Some(targets) = target_structs { - if !targets.contains(&name.to_string()) { - continue; - } + if let Some(name) = get_json_string(item, &["name"]) { + // Check if this item is a struct by looking for the "struct" field + if get_json_object(item, &["inner", "struct"]).is_some() { + // Check if this struct is in our target list (if specified) + if let Some(targets) = target_structs { + if !targets.contains(&name.to_string()) { + continue; } + } - let (struct_doc_opt, referenced_constants) = - extract_struct_from_rustdoc_index(index, name, item)?; + let (struct_doc_opt, referenced_constants) = + extract_struct_from_rustdoc_index(index, name, item)?; - if let Some(struct_doc) = struct_doc_opt { - structs.push(struct_doc); - } - all_referenced_constants.extend(referenced_constants); + if let Some(struct_doc) = struct_doc_opt { + structs.push(struct_doc); } + all_referenced_constants.extend(referenced_constants); } } } @@ -252,10 +287,7 @@ fn extract_struct_from_rustdoc_index( let mut all_referenced_constants = std::collections::HashSet::new(); // Extract struct documentation - let description = struct_item - .get("docs") - .and_then(|v| v.as_str()) - .map(|s| s.to_string()); + let description = get_json_string(struct_item, &["docs"]).map(|s| s.to_string()); // Collect constant references from struct description if let Some(desc) = &description { @@ -289,56 +321,43 @@ fn extract_struct_fields( // Navigate through rustdoc JSON structure to access struct fields // Path: item.inner.struct.kind.plain.fields[] - if let Some(inner) = struct_item.get("inner") { - if let Some(struct_data) = inner.get("struct") { - if let Some(kind) = struct_data.get("kind") { - if let Some(plain) = kind.get("plain") { - // Access the array of field IDs that reference other items in the index - if let Some(field_ids) = plain.get("fields").and_then(|v| v.as_array()) { - for field_id in field_ids { - // Field IDs can be either integers or strings in rustdoc JSON, try both formats - let field_item = if let Some(field_id_num) = field_id.as_u64() { - // Numeric field ID - convert to string for index lookup - index.get(&field_id_num.to_string()) - } else if let Some(field_id_str) = field_id.as_str() { - // String field ID - use directly for index lookup - index.get(field_id_str) - } else { - None - }; - - if let Some(field_item) = field_item { - // Extract the field's name from the rustdoc item - let field_name = field_item - .get("name") - .and_then(|v| v.as_str()) - .unwrap_or("unknown") - .to_string(); - - // Extract the field's documentation text from rustdoc - let field_docs = field_item - .get("docs") - .and_then(|v| v.as_str()) - .unwrap_or("") - .to_string(); - - // Parse the structured documentation - let (field_doc, referenced_constants) = - parse_field_documentation(&field_docs, &field_name)?; - - // Only include fields that have documentation - if !field_doc.description.is_empty() - || field_doc.default_value.is_some() - { - fields.push(field_doc); - } + if let Some(field_ids) = + get_json_array(struct_item, &["inner", "struct", "kind", "plain", "fields"]) + { + for field_id in field_ids { + // Field IDs can be either integers or strings in rustdoc JSON, try both 
formats + let field_item = if let Some(field_id_num) = field_id.as_u64() { + // Numeric field ID - convert to string for index lookup + index.get(&field_id_num.to_string()) + } else if let Some(field_id_str) = field_id.as_str() { + // String field ID - use directly for index lookup + index.get(field_id_str) + } else { + None + }; - // Extend referenced constants - all_referenced_constants.extend(referenced_constants); - } - } - } + if let Some(field_item) = field_item { + // Extract the field's name from the rustdoc item + let field_name = get_json_string(field_item, &["name"]) + .unwrap_or("unknown") + .to_string(); + + // Extract the field's documentation text from rustdoc + let field_docs = get_json_string(field_item, &["docs"]) + .unwrap_or("") + .to_string(); + + // Parse the structured documentation + let (field_doc, referenced_constants) = + parse_field_documentation(&field_docs, &field_name)?; + + // Only include fields that have documentation + if !field_doc.description.is_empty() || field_doc.default_value.is_some() { + fields.push(field_doc); } + + // Extend referenced constants + all_referenced_constants.extend(referenced_constants); } } } @@ -808,7 +827,7 @@ fn resolve_constant_reference( let json_file_path = format!("target/rustdoc-json/doc/{}.json", lib_name); if let Ok(json_content) = std::fs::read_to_string(&json_file_path) { if let Ok(rustdoc_json) = serde_json::from_str::(&json_content) { - if let Some(index) = rustdoc_json.get("index").and_then(|v| v.as_object()) { + if let Some(index) = get_json_object(&rustdoc_json, &["index"]) { if let Some(value) = resolve_constant_in_index(name, index) { return Some(value); } @@ -827,61 +846,60 @@ fn resolve_constant_in_index( // Look for a constant with the given name in the rustdoc index for (_item_id, item) in rustdoc_index { // Check if this item's name matches the constant we're looking for - if let Some(item_name) = item.get("name").and_then(|v| v.as_str()) { + if let Some(item_name) = get_json_string(item, &["name"]) { if item_name == name { - // Navigate to the item's type information in rustdoc JSON - if let Some(inner) = item.get("inner") { - // Check if this item is a constant by looking for the "constant" field - if let Some(constant_data) = inner.get("constant") { - // Try newer rustdoc JSON structure first (with nested 'const' field) - if let Some(const_inner) = constant_data.get("const") { - // For literal constants, prefer expr which doesn't have type suffix - if let Some(is_literal) = - const_inner.get("is_literal").and_then(|v| v.as_bool()) + // Check if this item is a constant by looking for the "constant" field + if let Some(constant_data) = get_json_object(item, &["inner", "constant"]) { + // Try newer rustdoc JSON structure first (with nested 'const' field) + let constant_data_value = serde_json::Value::Object(constant_data.clone()); + if get_json_object(&constant_data_value, &["const"]).is_some() { + // For literal constants, prefer expr which doesn't have type suffix + if get_json_path(&constant_data_value, &["const", "is_literal"]) + .and_then(|v| v.as_bool()) + == Some(true) + { + // Access the expression field for literal constant values + if let Some(expr) = + get_json_string(&constant_data_value, &["const", "expr"]) { - if is_literal { - // Access the expression field for literal constant values - if let Some(expr) = - const_inner.get("expr").and_then(|v| v.as_str()) - { - if expr != "_" { - return Some(expr.to_string()); - } - } - } - } - - // For computed constants or when expr is "_", use value but 
strip type suffix - if let Some(value) = const_inner.get("value").and_then(|v| v.as_str()) { - return Some(strip_type_suffix(value)); - } - - // Fallback to expr if value is not available - if let Some(expr) = const_inner.get("expr").and_then(|v| v.as_str()) { if expr != "_" { return Some(expr.to_string()); } } } - // Fall back to older rustdoc JSON structure for compatibility - if let Some(value) = constant_data.get("value").and_then(|v| v.as_str()) { + // For computed constants or when expr is "_", use value but strip type suffix + if let Some(value) = + get_json_string(&constant_data_value, &["const", "value"]) + { return Some(strip_type_suffix(value)); } - if let Some(expr) = constant_data.get("expr").and_then(|v| v.as_str()) { + + // Fallback to expr if value is not available + if let Some(expr) = + get_json_string(&constant_data_value, &["const", "expr"]) + { if expr != "_" { return Some(expr.to_string()); } } + } - // For some constants, the value might be in the type field if it's a simple literal - if let Some(type_info) = constant_data.get("type") { - if let Some(type_str) = type_info.as_str() { - // Handle simple numeric or string literals embedded in type - return Some(type_str.to_string()); - } + // Fall back to older rustdoc JSON structure for compatibility + if let Some(value) = get_json_string(&constant_data_value, &["value"]) { + return Some(strip_type_suffix(value)); + } + if let Some(expr) = get_json_string(&constant_data_value, &["expr"]) { + if expr != "_" { + return Some(expr.to_string()); } } + + // For some constants, the value might be in the type field if it's a simple literal + if let Some(type_str) = get_json_string(&constant_data_value, &["type"]) { + // Handle simple numeric or string literals embedded in type + return Some(type_str.to_string()); + } } } } @@ -2492,4 +2510,53 @@ and includes various formatting. 
let expected_folded = "Next line content Another line"; assert_eq!(folded_result.trim(), expected_folded); } + + #[test] + fn test_json_navigation_helpers() { + let test_json = json!({ + "level1": { + "level2": { + "level3": "value", + "array": ["item1", "item2"], + "object": { + "key": "value" + } + }, + "string_field": "test_string" + } + }); + + // Test get_json_path - valid paths + assert!(get_json_path(&test_json, &["level1"]).is_some()); + assert!(get_json_path(&test_json, &["level1", "level2"]).is_some()); + assert!(get_json_path(&test_json, &["level1", "level2", "level3"]).is_some()); + + // Test get_json_path - invalid paths + assert!(get_json_path(&test_json, &["nonexistent"]).is_none()); + assert!(get_json_path(&test_json, &["level1", "nonexistent"]).is_none()); + assert!(get_json_path(&test_json, &["level1", "level2", "level3", "too_deep"]).is_none()); + + // Test get_json_string + assert_eq!( + get_json_string(&test_json, &["level1", "level2", "level3"]), + Some("value") + ); + assert_eq!( + get_json_string(&test_json, &["level1", "string_field"]), + Some("test_string") + ); + assert!(get_json_string(&test_json, &["level1", "level2", "array"]).is_none()); // not a string + + // Test get_json_array + let array_result = get_json_array(&test_json, &["level1", "level2", "array"]); + assert!(array_result.is_some()); + assert_eq!(array_result.unwrap().len(), 2); + assert!(get_json_array(&test_json, &["level1", "string_field"]).is_none()); // not an array + + // Test get_json_object + assert!(get_json_object(&test_json, &["level1"]).is_some()); + assert!(get_json_object(&test_json, &["level1", "level2"]).is_some()); + assert!(get_json_object(&test_json, &["level1", "level2", "object"]).is_some()); + assert!(get_json_object(&test_json, &["level1", "string_field"]).is_none()); // not an object + } } From 45917e4ad2e25c75e0ae82de39a9df782e084d6a Mon Sep 17 00:00:00 2001 From: Francesco Leacche Date: Tue, 10 Jun 2025 18:31:26 +0100 Subject: [PATCH 14/20] improve test coverage --- Cargo.lock | 74 ++ .../tools/config-docs-generator/Cargo.toml | 10 + .../config-docs-generator/src/extract_docs.rs | 320 +++++++- .../src/generate_markdown.rs | 157 ++++ .../tests/fixtures/minimal_config.json | 80 ++ .../tests/fixtures/test_mappings.json | 3 + .../tests/fixtures/test_template.md | 15 + .../tests/integration.rs | 708 ++++++++++++++++++ 8 files changed, 1328 insertions(+), 39 deletions(-) create mode 100644 contrib/tools/config-docs-generator/tests/fixtures/minimal_config.json create mode 100644 contrib/tools/config-docs-generator/tests/fixtures/test_mappings.json create mode 100644 contrib/tools/config-docs-generator/tests/fixtures/test_template.md create mode 100644 contrib/tools/config-docs-generator/tests/integration.rs diff --git a/Cargo.lock b/Cargo.lock index c1ffde555d..9b30155770 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -206,6 +206,22 @@ dependencies = [ "serde_json", ] +[[package]] +name = "assert_cmd" +version = "2.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bd389a4b2970a01282ee455294913c0a43724daedcd1a24c3eb0ec1c1320b66" +dependencies = [ + "anstyle", + "bstr", + "doc-comment", + "libc", + "predicates", + "predicates-core", + "predicates-tree", + "wait-timeout", +] + [[package]] name = "async-attributes" version = "1.1.2" @@ -528,6 +544,17 @@ dependencies = [ "tracing", ] +[[package]] +name = "bstr" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"234113d19d0d7d613b40e86fb654acf958910802bcceab913a4f9e7cda03b1a4" +dependencies = [ + "memchr", + "regex-automata 0.4.5", + "serde", +] + [[package]] name = "bumpalo" version = "3.14.0" @@ -690,11 +717,13 @@ name = "config-docs-generator" version = "0.1.0" dependencies = [ "anyhow", + "assert_cmd", "clap", "once_cell", "regex", "serde", "serde_json", + "tempfile", ] [[package]] @@ -870,6 +899,12 @@ dependencies = [ "powerfmt", ] +[[package]] +name = "difflib" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8" + [[package]] name = "digest" version = "0.9.0" @@ -916,6 +951,12 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" +[[package]] +name = "doc-comment" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" + [[package]] name = "ed25519" version = "2.2.3" @@ -2317,6 +2358,33 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +[[package]] +name = "predicates" +version = "3.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5d19ee57562043d37e82899fade9a22ebab7be9cef5026b07fda9cdd4293573" +dependencies = [ + "anstyle", + "difflib", + "predicates-core", +] + +[[package]] +name = "predicates-core" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "727e462b119fe9c93fd0eb1429a5f7647394014cf3c04ab2c0350eeb09095ffa" + +[[package]] +name = "predicates-tree" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72dd2d6d381dfb73a193c7fca536518d7caee39fc8503f74e7dc0be0531b425c" +dependencies = [ + "predicates-core", + "termtree", +] + [[package]] name = "proc-macro-error" version = "1.0.4" @@ -3512,6 +3580,12 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "termtree" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" + [[package]] name = "thiserror" version = "1.0.65" diff --git a/contrib/tools/config-docs-generator/Cargo.toml b/contrib/tools/config-docs-generator/Cargo.toml index 17bbd2200d..ccf1cdbb80 100644 --- a/contrib/tools/config-docs-generator/Cargo.toml +++ b/contrib/tools/config-docs-generator/Cargo.toml @@ -11,6 +11,11 @@ path = "src/extract_docs.rs" name = "generate-markdown" path = "src/generate_markdown.rs" +# Add integration test configuration +[[test]] +name = "integration" +path = "tests/integration.rs" + [dependencies] serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" @@ -18,3 +23,8 @@ clap = { version = "4.0", features = ["derive"] } regex = "1.0" anyhow = "1.0" once_cell = "1.18" + +# Add test dependencies +[dev-dependencies] +tempfile = "3.0" +assert_cmd = "2.0" diff --git a/contrib/tools/config-docs-generator/src/extract_docs.rs b/contrib/tools/config-docs-generator/src/extract_docs.rs index c6b22e339d..d6d9a8cace 100644 --- a/contrib/tools/config-docs-generator/src/extract_docs.rs +++ b/contrib/tools/config-docs-generator/src/extract_docs.rs @@ -2366,45 +2366,6 @@ and includes various formatting. 
assert!(!result.ends_with("\n\n")); // Should not have multiple trailing newlines } - #[test] - #[ignore = "Test for old behavior - same-line content mode has been removed"] - fn test_extract_annotation_literal_and_folded_same_line_content() { - // Test same-line content handling for both | and > - let metadata_literal = r#"@notes: | Same line content - Next line content - Another line"#; - - let metadata_folded = r#"@default: > Same line content - Next line content - Another line"#; - - let literal_result = extract_annotation(metadata_literal, "notes").unwrap(); - let folded_result = extract_annotation(metadata_folded, "default").unwrap(); - - // Both should include same-line content - assert!(literal_result.contains("Same line content")); - assert!(folded_result.contains("Same line content")); - - // Literal mode should preserve all content and line structure exactly - assert!(literal_result.contains("Next line content")); - assert!(literal_result.contains("Another line")); - - let literal_lines: Vec<&str> = literal_result.lines().collect(); - assert_eq!(literal_lines.len(), 3); - assert_eq!(literal_lines[0], "Same line content"); - assert_eq!(literal_lines[1], "Next line content"); - assert_eq!(literal_lines[2], "Another line"); - - // Folded mode with same-line content has current implementation limitation: - // it only captures the same-line content and ignores subsequent block lines. - // This is an acceptable edge case behavior. - assert_eq!(folded_result, "Same line content"); - - // Verify it doesn't contain the subsequent lines (current limitation) - assert!(!folded_result.contains("Next line content")); - assert!(!folded_result.contains("Another line")); - } - #[test] fn test_extract_annotation_edge_cases_empty_and_whitespace() { // Test annotations with only whitespace or empty content @@ -2559,4 +2520,285 @@ and includes various formatting. 
assert!(get_json_object(&test_json, &["level1", "level2", "object"]).is_some()); assert!(get_json_object(&test_json, &["level1", "string_field"]).is_none()); // not an object } + + #[test] + fn test_resolve_constant_in_index_edge_cases() { + // Test with empty index + let empty_index = serde_json::Map::new(); + let result = resolve_constant_in_index("ANY_CONSTANT", &empty_index); + assert_eq!(result, None); + + // Test with index containing non-constant items + let mock_index = serde_json::json!({ + "item_1": { + "name": "NotAConstant", + "inner": { + "function": {} + } + } + }); + let index = mock_index.as_object().unwrap(); + let result = resolve_constant_in_index("NotAConstant", index); + assert_eq!(result, None); + } + + #[test] + fn test_resolve_constant_in_index_malformed_constant() { + // Test constant without value or expr - falls back to type field + let mock_index = serde_json::json!({ + "const_1": { + "name": "MALFORMED_CONSTANT", + "inner": { + "constant": { + "type": "u32" + // Missing value and expr fields + } + } + } + }); + let index = mock_index.as_object().unwrap(); + let result = resolve_constant_in_index("MALFORMED_CONSTANT", index); + assert_eq!(result, Some("u32".to_string())); + } + + #[test] + fn test_resolve_constant_in_index_underscore_expr() { + // Test constant with "_" expr and no value - falls back to type field + let mock_index = serde_json::json!({ + "const_1": { + "name": "COMPUTED_CONSTANT", + "inner": { + "constant": { + "expr": "_", + "type": "u32" + // No value field + } + } + } + }); + let index = mock_index.as_object().unwrap(); + let result = resolve_constant_in_index("COMPUTED_CONSTANT", index); + assert_eq!(result, Some("u32".to_string())); + } + + #[test] + fn test_strip_type_suffix_edge_cases() { + // Test with invalid suffixes that shouldn't be stripped + assert_eq!(strip_type_suffix("123abc"), "123abc"); + assert_eq!( + strip_type_suffix("value_with_u32_in_middle"), + "value_with_u32_in_middle" + ); + + // Test with partial type names + assert_eq!(strip_type_suffix("u"), "u"); + assert_eq!(strip_type_suffix("u3"), "u3"); + + // Test with non-numeric values before type suffix + assert_eq!(strip_type_suffix("abcu32"), "abcu32"); + + // Test string literals with type suffixes inside + assert_eq!(strip_type_suffix("\"value_u32\""), "\"value_u32\""); + } + + #[test] + fn test_get_json_navigation_edge_cases() { + let test_json = serde_json::json!({ + "level1": { + "string": "value", + "number": 42, + "boolean": true, + "null_value": null + } + }); + + // Test getting wrong types + assert!(get_json_string(&test_json, &["level1", "number"]).is_none()); + assert!(get_json_array(&test_json, &["level1", "string"]).is_none()); + assert!(get_json_object(&test_json, &["level1", "boolean"]).is_none()); + + // Test deep paths that don't exist + assert!(get_json_path(&test_json, &["level1", "string", "deeper"]).is_none()); + assert!(get_json_path(&test_json, &["nonexistent", "path"]).is_none()); + + // Test null values + assert!(get_json_string(&test_json, &["level1", "null_value"]).is_none()); + } + + #[test] + fn test_parse_field_documentation_edge_cases() { + // Test with only separator, no content + let doc_text = "Description\n---\n"; + let result = parse_field_documentation(doc_text, "test_field").unwrap(); + assert_eq!(result.0.description, "Description"); + assert_eq!(result.0.default_value, None); + + // Test with multiple separators + let doc_text = "Description\n---\n@default: value\n---\nIgnored section"; + let result = 
parse_field_documentation(doc_text, "test_field").unwrap(); + assert_eq!(result.0.description, "Description"); + assert_eq!(result.0.default_value, Some("value".to_string())); + + // Test with empty description + let doc_text = "\n---\n@default: value"; + let result = parse_field_documentation(doc_text, "test_field").unwrap(); + assert_eq!(result.0.description, ""); + assert_eq!(result.0.default_value, Some("value".to_string())); + } + + #[test] + fn test_extract_annotation_malformed_input() { + // Test with annotation without colon + let metadata = "@default no_colon_here\n@notes: valid"; + assert_eq!(extract_annotation(metadata, "default"), None); + assert_eq!( + extract_annotation(metadata, "notes"), + Some("valid".to_string()) + ); + + // Test with nested annotations - this will actually find "inside" because the function + // looks for the pattern anywhere in a line, not necessarily at the start + let metadata = "text with @default: inside\n@actual: real_value"; + assert_eq!( + extract_annotation(metadata, "default"), + Some("inside".to_string()) + ); + assert_eq!( + extract_annotation(metadata, "actual"), + Some("real_value".to_string()) + ); + } + + #[test] + fn test_parse_literal_block_scalar_edge_cases() { + // Test with empty input + let result = parse_literal_block_scalar(&[], 0); + assert_eq!(result, ""); + + // Test with only empty lines + let lines = vec!["", " ", "\t", ""]; + let result = parse_literal_block_scalar(&lines, 0); + assert_eq!(result, ""); + + // Test with mixed indentation + let lines = vec![" line1", " line2", "line3", " line4"]; + let result = parse_literal_block_scalar(&lines, 0); + assert!(result.contains("line1")); + assert!(result.contains(" line2")); // Preserved relative indent + assert!(result.contains("line3")); + assert!(result.contains(" line4")); // Preserved relative indent + } + + #[test] + fn test_parse_folded_block_scalar_edge_cases() { + // Test with empty input + let result = parse_folded_block_scalar(&[], 0); + assert_eq!(result, ""); + + // Test with only empty lines + let lines = vec!["", " ", "\t"]; + let result = parse_folded_block_scalar(&lines, 0); + assert_eq!(result, ""); + + // Test paragraph separation + let lines = vec![ + " First paragraph line", + " continues here", + "", + " Second paragraph", + " also continues", + ]; + let result = parse_folded_block_scalar(&lines, 0); + assert!(result.contains("First paragraph line continues here")); + assert!(result.contains("Second paragraph also continues")); + // Should have paragraph separation + assert!(result.matches('\n').count() >= 1); + } + + #[test] + fn test_collect_annotation_block_lines_edge_cases() { + let lines = vec![ + "@first: value1", + " content line 1", + " content line 2", + "@second: value2", + " different content", + ]; + + // Test collecting until next annotation + let result = collect_annotation_block_lines(&lines, 1, "@first: value1"); + assert_eq!(result.len(), 2); + assert_eq!(result[0], " content line 1"); + assert_eq!(result[1], " content line 2"); + + // Test collecting from end + let result = collect_annotation_block_lines(&lines, 4, "@second: value2"); + assert_eq!(result.len(), 1); + assert_eq!(result[0], " different content"); + } + + #[test] + fn test_find_constant_references_edge_cases() { + // Test with malformed brackets + let text = "[INCOMPLETE or [`VALID_CONSTANT`] and `not_constant`"; + let constants = find_constant_references(text); + assert_eq!(constants.len(), 1); + assert!(constants.contains("VALID_CONSTANT")); + + // Test with nested brackets 
- this won't match because [ in the middle breaks the pattern + let text = "[`OUTER_[INNER]_CONSTANT`]"; + let constants = find_constant_references(text); + assert_eq!(constants.len(), 0); + + // Test with empty brackets + let text = "[``] and [`VALID`]"; + let constants = find_constant_references(text); + assert_eq!(constants.len(), 1); + assert!(constants.contains("VALID")); + } + + #[test] + fn test_extract_struct_fields_complex_scenarios() { + // Test struct with no fields array + let mock_index = serde_json::json!({ + "struct_1": { + "name": "EmptyStruct", + "inner": { + "struct": { + "kind": { + "plain": { + // No fields array + } + } + } + } + } + }); + + let index = mock_index.as_object().unwrap(); + let struct_item = &mock_index["struct_1"]; + let (fields, _) = extract_struct_fields(index, struct_item).unwrap(); + assert_eq!(fields.len(), 0); + + // Test struct with empty fields array + let mock_index = serde_json::json!({ + "struct_1": { + "name": "EmptyFieldsStruct", + "inner": { + "struct": { + "kind": { + "plain": { + "fields": [] + } + } + } + } + } + }); + + let index = mock_index.as_object().unwrap(); + let struct_item = &mock_index["struct_1"]; + let (fields, _) = extract_struct_fields(index, struct_item).unwrap(); + assert_eq!(fields.len(), 0); + } } diff --git a/contrib/tools/config-docs-generator/src/generate_markdown.rs b/contrib/tools/config-docs-generator/src/generate_markdown.rs index d7d26924e7..c0a06bc0fd 100644 --- a/contrib/tools/config-docs-generator/src/generate_markdown.rs +++ b/contrib/tools/config-docs-generator/src/generate_markdown.rs @@ -1178,4 +1178,161 @@ mod tests { assert!(output.contains("\"value2\"")); assert!(output.contains("
")); } + + #[test] + fn test_load_section_name_mappings_file_not_found() { + let result = load_section_name_mappings("nonexistent.json"); + assert!(result.is_err()); + assert!(result.unwrap_err().to_string().contains("Failed to read")); + } + + #[test] + fn test_load_section_name_mappings_invalid_json() { + use std::io::Write; + + use tempfile::NamedTempFile; + + let mut temp_file = NamedTempFile::new().unwrap(); + writeln!(temp_file, "invalid json content").unwrap(); + + let result = load_section_name_mappings(temp_file.path().to_str().unwrap()); + assert!(result.is_err()); + assert!( + result + .unwrap_err() + .to_string() + .contains("Failed to parse section name mappings JSON") + ); + } + + #[test] + fn test_load_template_file_not_found() { + let result = load_template("nonexistent_template.md"); + assert!(result.is_err()); + assert!( + result + .unwrap_err() + .to_string() + .contains("Failed to read template file") + ); + } + + #[test] + fn test_render_template_complex_substitutions() { + let template = "Hello {{name}}! Your score is {{score}}. {{missing}} should stay as is."; + let mut variables = HashMap::new(); + variables.insert("name".to_string(), "Alice".to_string()); + variables.insert("score".to_string(), "100".to_string()); + + let result = render_template(template, variables); + assert_eq!( + result, + "Hello Alice! Your score is 100. {{missing}} should stay as is." + ); + } + + #[test] + fn test_render_template_empty_variables() { + let template = "Template with {{variable}} that won't be replaced"; + let result = render_template(template, HashMap::new()); + assert_eq!(result, "Template with {{variable}} that won't be replaced"); + } + + #[test] + fn test_render_template_multiple_same_variable() { + let template = "{{name}} said hello to {{name}} twice"; + let mut variables = HashMap::new(); + variables.insert("name".to_string(), "Bob".to_string()); + + let result = render_template(template, variables); + assert_eq!(result, "Bob said hello to Bob twice"); + } + + #[test] + fn test_generate_markdown_error_paths() { + // Test with invalid template path + let config_docs = create_config_docs(vec![]); + let custom_mappings = HashMap::new(); + + let result = generate_markdown(&config_docs, "nonexistent_template.md", &custom_mappings); + assert!(result.is_err()); + assert!( + result + .unwrap_err() + .to_string() + .contains("Failed to read template file") + ); + } + + #[test] + fn test_escape_markdown_edge_cases() { + assert_eq!(escape_markdown(""), ""); + assert_eq!(escape_markdown("normal text"), "normal text"); + assert_eq!(escape_markdown("[text]"), "\\[text\\]"); + assert_eq!(escape_markdown("table|cell"), "table\\|cell"); + assert_eq!(escape_markdown("[table|cell]"), "\\[table\\|cell\\]"); + } + + #[test] + fn test_escape_markdown_table_edge_cases() { + assert_eq!(escape_markdown_table(""), ""); + assert_eq!(escape_markdown_table("normal text"), "normal text"); + assert_eq!(escape_markdown_table("table|cell"), "table\\|cell"); + assert_eq!(escape_markdown_table("line\nbreak"), "line
break"); + assert_eq!( + escape_markdown_table("both|pipe\nand newline"), + "both\\|pipe
and newline" + ); + } + + #[test] + fn test_section_anchor_edge_cases() { + assert_eq!(section_anchor(""), "#"); + assert_eq!(section_anchor("UPPERCASE"), "#uppercase"); + assert_eq!( + section_anchor("[complex section name]"), + "#complex-section-name" + ); + assert_eq!(section_anchor("Multiple Spaces"), "#multiple---spaces"); + assert_eq!( + section_anchor("[section_with_underscores]"), + "#section_with_underscores" + ); + } + + #[test] + fn test_process_reference_edge_cases() { + let global_context = create_mock_global_context(); + + // Test unknown reference + let result = process_reference("UNKNOWN_CONSTANT", &global_context, "TestStruct"); + assert_eq!(result, "`UNKNOWN_CONSTANT`"); + + // Test malformed struct::field reference + let result = process_reference("OnlyStruct::", &global_context, "TestStruct"); + assert_eq!(result, "`OnlyStruct::`"); + + // Test empty reference + let result = process_reference("", &global_context, "TestStruct"); + assert_eq!(result, "``"); + } + + #[test] + fn test_struct_to_section_name_edge_cases() { + let mappings = HashMap::new(); + + // Test empty struct name + assert_eq!(struct_to_section_name("", &mappings), "[]"); + + // Test struct name with special characters + assert_eq!( + struct_to_section_name("Struct_With_Underscores", &mappings), + "[struct_with_underscores]" + ); + + // Test very long struct name + let long_name = "A".repeat(100); + let expected = format!("[{}]", "a".repeat(100)); + assert_eq!(struct_to_section_name(&long_name, &mappings), expected); + } } diff --git a/contrib/tools/config-docs-generator/tests/fixtures/minimal_config.json b/contrib/tools/config-docs-generator/tests/fixtures/minimal_config.json new file mode 100644 index 0000000000..3ede4781ca --- /dev/null +++ b/contrib/tools/config-docs-generator/tests/fixtures/minimal_config.json @@ -0,0 +1,80 @@ +{ + "structs": [ + { + "name": "NodeConfig", + "description": "Configuration settings for a Stacks node", + "fields": [ + { + "name": "name", + "description": "Human-readable name for the node. Primarily used for identification in testing\nenvironments (e.g., deriving log file names, temporary directory names).", + "default_value": "`\"helium-node\"`", + "notes": null, + "deprecated": null, + "toml_example": null, + "required": null, + "units": null + }, + { + "name": "seed", + "description": "The node's Bitcoin wallet private key, provided as a hex string in the config file.\nUsed to initialize the node's keychain for signing operations.\nIf [`MinerConfig::mining_key`] is not set, this seed may also be used for\nmining-related signing.", + "default_value": "Randomly generated 32 bytes", + "notes": [ + "Required if [`NodeConfig::miner`] is `true` and [`MinerConfig::mining_key`] is absent." + ], + "deprecated": null, + "toml_example": null, + "required": null, + "units": null + }, + { + "name": "rpc_bind", + "description": "The IPv4 address and port (e.g., \"0.0.0.0:20443\") on which the node's HTTP RPC\nserver should bind and listen for incoming API requests.", + "default_value": "`\"0.0.0.0:20443\"`", + "notes": null, + "deprecated": null, + "toml_example": null, + "required": null, + "units": null + }, + { + "name": "bootstrap_node", + "description": "A list of initial peer nodes used to bootstrap connections into the Stacks P2P\nnetwork. Peers are specified in a configuration file as comma-separated\nstrings in the format `\"PUBKEY@IP:PORT\"` or `\"PUBKEY@HOSTNAME:PORT\"`. 
DNS\nhostnames are resolved during configuration loading.", + "default_value": "`[]` (empty vector)", + "notes": null, + "deprecated": null, + "toml_example": "bootstrap_node = \"pubkey1@example.com:30444,pubkey2@192.168.1.100:20444\"", + "required": null, + "units": null + }, + { + "name": "miner", + "description": "Flag indicating whether this node should activate its mining logic and attempt to\nproduce Stacks blocks. Setting this to `true` typically requires providing\nnecessary private keys (either [`NodeConfig::seed`] or [`MinerConfig::mining_key`]).\nIt also influences default behavior for settings like\n[`NodeConfig::require_affirmed_anchor_blocks`].", + "default_value": "`false`", + "notes": null, + "deprecated": null, + "toml_example": null, + "required": null, + "units": null + }, + { + "name": "microblock_frequency", + "description": "How often to attempt producing microblocks, in milliseconds.", + "default_value": "`30_000` (30 seconds)", + "notes": [ + "Only applies when [`NodeConfig::mine_microblocks`] is true and before Epoch 2.5." + ], + "deprecated": "This setting is ignored in Epoch 2.5+.", + "toml_example": null, + "required": null, + "units": "milliseconds" + } + ] + } + ], + "referenced_constants": { + "MinerConfig::mining_key": null, + "NodeConfig::miner": null, + "NodeConfig::mine_microblocks": null, + "NodeConfig::require_affirmed_anchor_blocks": null + } +} \ No newline at end of file diff --git a/contrib/tools/config-docs-generator/tests/fixtures/test_mappings.json b/contrib/tools/config-docs-generator/tests/fixtures/test_mappings.json new file mode 100644 index 0000000000..49826a9fc4 --- /dev/null +++ b/contrib/tools/config-docs-generator/tests/fixtures/test_mappings.json @@ -0,0 +1,3 @@ +{ + "NodeConfig": "[node]" +} \ No newline at end of file diff --git a/contrib/tools/config-docs-generator/tests/fixtures/test_template.md b/contrib/tools/config-docs-generator/tests/fixtures/test_template.md new file mode 100644 index 0000000000..e25d14bb94 --- /dev/null +++ b/contrib/tools/config-docs-generator/tests/fixtures/test_template.md @@ -0,0 +1,15 @@ +# Test Configuration Reference + +This is a test template for integration testing. + +## Table of Contents + +{{toc_content}} + +## Configuration Sections + +{{struct_sections}} + +## End of Document + +Generated with test template. 
\ No newline at end of file diff --git a/contrib/tools/config-docs-generator/tests/integration.rs b/contrib/tools/config-docs-generator/tests/integration.rs new file mode 100644 index 0000000000..b67b62d124 --- /dev/null +++ b/contrib/tools/config-docs-generator/tests/integration.rs @@ -0,0 +1,708 @@ +use std::fs; + +use assert_cmd::Command; +use serde_json::json; +use tempfile::TempDir; + +#[test] +fn test_extract_docs_missing_arguments() { + let mut cmd = Command::cargo_bin("extract-docs").unwrap(); + let output = cmd.output().unwrap(); + + assert!(!output.status.success()); + let stderr = String::from_utf8_lossy(&output.stderr); + assert!(stderr.contains("required arguments were not provided")); +} + +#[test] +fn test_extract_docs_help() { + let mut cmd = Command::cargo_bin("extract-docs").unwrap(); + cmd.arg("--help"); + let output = cmd.output().unwrap(); + + assert!(output.status.success()); + let stdout = String::from_utf8_lossy(&output.stdout); + assert!(stdout.contains("Extract documentation from Rust source code")); +} + +#[test] +fn test_extract_docs_invalid_package() { + let temp_dir = TempDir::new().unwrap(); + let output_file = temp_dir.path().join("output.json"); + + let mut cmd = Command::cargo_bin("extract-docs").unwrap(); + cmd.args([ + "--package", + "nonexistent-package", + "--output", + output_file.to_str().unwrap(), + "--structs", + "TestStruct", + ]); + + let output = cmd.output().unwrap(); + assert!(!output.status.success()); + let stderr = String::from_utf8_lossy(&output.stderr); + assert!(stderr.contains("cargo rustdoc failed")); +} + +#[test] +fn test_generate_markdown_missing_arguments() { + let mut cmd = Command::cargo_bin("generate-markdown").unwrap(); + let output = cmd.output().unwrap(); + + assert!(!output.status.success()); + let stderr = String::from_utf8_lossy(&output.stderr); + assert!(stderr.contains("required arguments were not provided")); +} + +#[test] +fn test_generate_markdown_help() { + let mut cmd = Command::cargo_bin("generate-markdown").unwrap(); + cmd.arg("--help"); + let output = cmd.output().unwrap(); + + assert!(output.status.success()); + let stdout = String::from_utf8_lossy(&output.stdout); + assert!(stdout.contains("Generate Markdown documentation")); +} + +#[test] +fn test_generate_markdown_missing_input_file() { + let temp_dir = TempDir::new().unwrap(); + let output_file = temp_dir.path().join("output.md"); + let template_file = temp_dir.path().join("template.md"); + let mappings_file = temp_dir.path().join("mappings.json"); + + // Create valid template and mappings files + fs::write( + &template_file, + "# Test\n{{toc_content}}\n{{struct_sections}}", + ) + .unwrap(); + fs::write(&mappings_file, "{}").unwrap(); + + let mut cmd = Command::cargo_bin("generate-markdown").unwrap(); + cmd.args([ + "--input", + "nonexistent.json", + "--output", + output_file.to_str().unwrap(), + "--template", + template_file.to_str().unwrap(), + "--section-name-mappings", + mappings_file.to_str().unwrap(), + ]); + + let output = cmd.output().unwrap(); + assert!(!output.status.success()); + let stderr = String::from_utf8_lossy(&output.stderr); + assert!(stderr.contains("Failed to read input JSON file")); +} + +#[test] +fn test_generate_markdown_invalid_input_json() { + let temp_dir = TempDir::new().unwrap(); + let input_file = temp_dir.path().join("input.json"); + let output_file = temp_dir.path().join("output.md"); + let template_file = temp_dir.path().join("template.md"); + let mappings_file = temp_dir.path().join("mappings.json"); + + // Create invalid 
JSON input + fs::write(&input_file, "invalid json").unwrap(); + fs::write( + &template_file, + "# Test\n{{toc_content}}\n{{struct_sections}}", + ) + .unwrap(); + fs::write(&mappings_file, "{}").unwrap(); + + let mut cmd = Command::cargo_bin("generate-markdown").unwrap(); + cmd.args([ + "--input", + input_file.to_str().unwrap(), + "--output", + output_file.to_str().unwrap(), + "--template", + template_file.to_str().unwrap(), + "--section-name-mappings", + mappings_file.to_str().unwrap(), + ]); + + let output = cmd.output().unwrap(); + assert!(!output.status.success()); + let stderr = String::from_utf8_lossy(&output.stderr); + assert!(stderr.contains("Failed to parse input JSON")); +} + +#[test] +fn test_generate_markdown_missing_template_file() { + let temp_dir = TempDir::new().unwrap(); + let input_file = temp_dir.path().join("input.json"); + let output_file = temp_dir.path().join("output.md"); + let mappings_file = temp_dir.path().join("mappings.json"); + + // Create valid input and mappings + let config_docs = json!({ + "structs": [], + "referenced_constants": {} + }); + fs::write( + &input_file, + serde_json::to_string_pretty(&config_docs).unwrap(), + ) + .unwrap(); + fs::write(&mappings_file, "{}").unwrap(); + + let mut cmd = Command::cargo_bin("generate-markdown").unwrap(); + cmd.args([ + "--input", + input_file.to_str().unwrap(), + "--output", + output_file.to_str().unwrap(), + "--template", + "nonexistent_template.md", + "--section-name-mappings", + mappings_file.to_str().unwrap(), + ]); + + let output = cmd.output().unwrap(); + assert!(!output.status.success()); + let stderr = String::from_utf8_lossy(&output.stderr); + assert!(stderr.contains("Failed to read template file")); +} + +#[test] +fn test_generate_markdown_invalid_mappings_json() { + let temp_dir = TempDir::new().unwrap(); + let input_file = temp_dir.path().join("input.json"); + let output_file = temp_dir.path().join("output.md"); + let template_file = temp_dir.path().join("template.md"); + let mappings_file = temp_dir.path().join("mappings.json"); + + // Create valid input and template, invalid mappings + let config_docs = json!({ + "structs": [], + "referenced_constants": {} + }); + fs::write( + &input_file, + serde_json::to_string_pretty(&config_docs).unwrap(), + ) + .unwrap(); + fs::write( + &template_file, + "# Test\n{{toc_content}}\n{{struct_sections}}", + ) + .unwrap(); + fs::write(&mappings_file, "invalid json").unwrap(); + + let mut cmd = Command::cargo_bin("generate-markdown").unwrap(); + cmd.args([ + "--input", + input_file.to_str().unwrap(), + "--output", + output_file.to_str().unwrap(), + "--template", + template_file.to_str().unwrap(), + "--section-name-mappings", + mappings_file.to_str().unwrap(), + ]); + + let output = cmd.output().unwrap(); + assert!(!output.status.success()); + let stderr = String::from_utf8_lossy(&output.stderr); + assert!(stderr.contains("Failed to parse section name mappings JSON")); +} + +#[test] +fn test_generate_markdown_successful_execution() { + let temp_dir = TempDir::new().unwrap(); + let input_file = temp_dir.path().join("input.json"); + let output_file = temp_dir.path().join("output.md"); + let template_file = temp_dir.path().join("template.md"); + let mappings_file = temp_dir.path().join("mappings.json"); + + // Create valid test data + let config_docs = json!({ + "structs": [{ + "name": "TestStruct", + "description": "A test configuration struct", + "fields": [{ + "name": "test_field", + "description": "A test field", + "default_value": "`42`", + "notes": null, + 
"deprecated": null, + "toml_example": null, + "required": null, + "units": null + }] + }], + "referenced_constants": {} + }); + + fs::write( + &input_file, + serde_json::to_string_pretty(&config_docs).unwrap(), + ) + .unwrap(); + fs::write( + &template_file, + "# Configuration Reference\n\n{{toc_content}}\n\n{{struct_sections}}", + ) + .unwrap(); + fs::write(&mappings_file, r#"{"TestStruct": "[test]"}"#).unwrap(); + + let mut cmd = Command::cargo_bin("generate-markdown").unwrap(); + cmd.args([ + "--input", + input_file.to_str().unwrap(), + "--output", + output_file.to_str().unwrap(), + "--template", + template_file.to_str().unwrap(), + "--section-name-mappings", + mappings_file.to_str().unwrap(), + ]); + + let output = cmd.output().unwrap(); + assert!(output.status.success()); + let stdout = String::from_utf8_lossy(&output.stdout); + assert!(stdout.contains("Successfully generated Markdown documentation")); + + // Verify output file was created and contains expected content + let output_content = fs::read_to_string(&output_file).unwrap(); + assert!(output_content.contains("Configuration Reference")); + assert!(output_content.contains("[test]")); + assert!(output_content.contains("test_field")); + assert!(output_content.contains("A test field")); +} + +#[test] +fn test_generate_markdown_file_write_permission_error() { + let temp_dir = TempDir::new().unwrap(); + let input_file = temp_dir.path().join("input.json"); + let template_file = temp_dir.path().join("template.md"); + let mappings_file = temp_dir.path().join("mappings.json"); + + // Create valid input files + let config_docs = json!({ + "structs": [], + "referenced_constants": {} + }); + fs::write( + &input_file, + serde_json::to_string_pretty(&config_docs).unwrap(), + ) + .unwrap(); + fs::write( + &template_file, + "# Test\n{{toc_content}}\n{{struct_sections}}", + ) + .unwrap(); + fs::write(&mappings_file, "{}").unwrap(); + + // Try to write to a directory that doesn't exist (should fail) + let invalid_output = "/nonexistent/path/output.md"; + + let mut cmd = Command::cargo_bin("generate-markdown").unwrap(); + cmd.args([ + "--input", + input_file.to_str().unwrap(), + "--output", + invalid_output, + "--template", + template_file.to_str().unwrap(), + "--section-name-mappings", + mappings_file.to_str().unwrap(), + ]); + + let output = cmd.output().unwrap(); + assert!(!output.status.success()); + let stderr = String::from_utf8_lossy(&output.stderr); + assert!(stderr.contains("Failed to write output file")); +} + +// New comprehensive integration tests using real fixture data + +#[test] +fn test_generate_markdown_with_real_fixture_data() { + let temp_dir = TempDir::new().unwrap(); + let output_file = temp_dir.path().join("output.md"); + + // Use the fixture files we created + let mut cmd = Command::cargo_bin("generate-markdown").unwrap(); + cmd.args([ + "--input", + "tests/fixtures/minimal_config.json", + "--output", + output_file.to_str().unwrap(), + "--template", + "tests/fixtures/test_template.md", + "--section-name-mappings", + "tests/fixtures/test_mappings.json", + ]); + + let output = cmd.output().unwrap(); + assert!(output.status.success()); + let stdout = String::from_utf8_lossy(&output.stdout); + assert!(stdout.contains("Successfully generated Markdown documentation")); + + // Verify output file was created and contains expected realistic content + let output_content = fs::read_to_string(&output_file).unwrap(); + assert!(output_content.contains("Test Configuration Reference")); + assert!(output_content.contains("[node]")); + 
assert!(output_content.contains("Configuration settings for a Stacks node")); + assert!(output_content.contains("seed")); + assert!(output_content.contains("rpc_bind")); + assert!(output_content.contains("MinerConfig::mining_key")); + assert!(output_content.contains("DEPRECATED")); + assert!(output_content.contains("Units")); + assert!(output_content.contains("milliseconds")); + assert!(output_content.contains("Example:")); + assert!(output_content.contains("bootstrap_node")); +} + +#[test] +fn test_generate_markdown_with_complex_field_features() { + let temp_dir = TempDir::new().unwrap(); + let input_file = temp_dir.path().join("input.json"); + let output_file = temp_dir.path().join("output.md"); + let template_file = temp_dir.path().join("template.md"); + let mappings_file = temp_dir.path().join("mappings.json"); + + // Create test data with all field features + let config_docs = json!({ + "structs": [{ + "name": "ComplexStruct", + "description": "A struct with all possible field features", + "fields": [ + { + "name": "basic_field", + "description": "A basic field with description", + "default_value": "`42`", + "notes": null, + "deprecated": null, + "toml_example": null, + "required": null, + "units": null + }, + { + "name": "field_with_notes", + "description": "A field with multiple notes", + "default_value": "`\"default\"`", + "notes": [ + "First note about this field", + "Second note with more details" + ], + "deprecated": null, + "toml_example": null, + "required": null, + "units": null + }, + { + "name": "deprecated_field", + "description": "A deprecated field", + "default_value": "`false`", + "notes": null, + "deprecated": "This field is deprecated since version 2.0", + "toml_example": null, + "required": null, + "units": null + }, + { + "name": "field_with_toml_example", + "description": "A field with TOML example", + "default_value": "`{}`", + "notes": null, + "deprecated": null, + "toml_example": "field_with_toml_example = { key = \"value\", number = 123 }", + "required": null, + "units": null + }, + { + "name": "required_field", + "description": "A required field", + "default_value": null, + "notes": null, + "deprecated": null, + "toml_example": null, + "required": true, + "units": null + }, + { + "name": "field_with_units", + "description": "A field with units", + "default_value": "`30_000`", + "notes": null, + "deprecated": null, + "toml_example": null, + "required": null, + "units": "milliseconds" + } + ] + }], + "referenced_constants": {} + }); + + fs::write( + &input_file, + serde_json::to_string_pretty(&config_docs).unwrap(), + ) + .unwrap(); + fs::write( + &template_file, + "# Complex Test\n\n{{toc_content}}\n\n{{struct_sections}}", + ) + .unwrap(); + fs::write(&mappings_file, r#"{}"#).unwrap(); + + let mut cmd = Command::cargo_bin("generate-markdown").unwrap(); + cmd.args([ + "--input", + input_file.to_str().unwrap(), + "--output", + output_file.to_str().unwrap(), + "--template", + template_file.to_str().unwrap(), + "--section-name-mappings", + mappings_file.to_str().unwrap(), + ]); + + let output = cmd.output().unwrap(); + assert!(output.status.success()); + let stdout = String::from_utf8_lossy(&output.stdout); + assert!(stdout.contains("Successfully generated Markdown documentation")); + + // Verify all field features are properly rendered + let output_content = fs::read_to_string(&output_file).unwrap(); + assert!(output_content.contains("Complex Test")); + assert!(output_content.contains("[complexstruct]")); + assert!(output_content.contains("basic_field")); + 
assert!(output_content.contains("field_with_notes")); + assert!(output_content.contains("First note about this field")); + assert!(output_content.contains("Second note with more details")); + assert!(output_content.contains("deprecated_field")); + assert!(output_content.contains("**⚠️ DEPRECATED:**")); + assert!(output_content.contains("deprecated since version 2.0")); + assert!(output_content.contains("field_with_toml_example")); + assert!(output_content.contains("required_field")); + assert!(output_content.contains("**Required**")); + assert!(output_content.contains("field_with_units")); + assert!(output_content.contains("milliseconds")); +} + +#[test] +fn test_generate_markdown_with_constant_references() { + let temp_dir = TempDir::new().unwrap(); + let input_file = temp_dir.path().join("input.json"); + let output_file = temp_dir.path().join("output.md"); + let template_file = temp_dir.path().join("template.md"); + let mappings_file = temp_dir.path().join("mappings.json"); + + // Create test data with constant references + let config_docs = json!({ + "structs": [{ + "name": "ConfigWithConstants", + "description": "A struct with constant references", + "fields": [{ + "name": "timeout", + "description": "Connection timeout using [`DEFAULT_TIMEOUT`] constant", + "default_value": "[`DEFAULT_TIMEOUT`]", + "notes": ["See [`MAX_RETRIES`] for retry logic"], + "deprecated": null, + "toml_example": null, + "required": null, + "units": "seconds" + }] + }], + "referenced_constants": { + "DEFAULT_TIMEOUT": "30", + "MAX_RETRIES": "3" + } + }); + + fs::write( + &input_file, + serde_json::to_string_pretty(&config_docs).unwrap(), + ) + .unwrap(); + fs::write( + &template_file, + "# Constants Test\n\n{{toc_content}}\n\n{{struct_sections}}", + ) + .unwrap(); + fs::write(&mappings_file, r#"{}"#).unwrap(); + + let mut cmd = Command::cargo_bin("generate-markdown").unwrap(); + cmd.args([ + "--input", + input_file.to_str().unwrap(), + "--output", + output_file.to_str().unwrap(), + "--template", + template_file.to_str().unwrap(), + "--section-name-mappings", + mappings_file.to_str().unwrap(), + ]); + + let output = cmd.output().unwrap(); + assert!(output.status.success()); + let stdout = String::from_utf8_lossy(&output.stdout); + assert!(stdout.contains("Successfully generated Markdown documentation")); + + // Verify constant references are properly processed + let output_content = fs::read_to_string(&output_file).unwrap(); + assert!(output_content.contains("Constants Test")); + assert!(output_content.contains("[configwithconstants]")); + assert!(output_content.contains("timeout")); + assert!(output_content.contains("30")); // DEFAULT_TIMEOUT resolved + assert!(output_content.contains("3")); // MAX_RETRIES resolved +} + +#[test] +fn test_generate_markdown_empty_struct_description() { + let temp_dir = TempDir::new().unwrap(); + let input_file = temp_dir.path().join("input.json"); + let output_file = temp_dir.path().join("output.md"); + let template_file = temp_dir.path().join("template.md"); + let mappings_file = temp_dir.path().join("mappings.json"); + + // Create test data with null struct description + let config_docs = json!({ + "structs": [{ + "name": "NoDescStruct", + "description": null, + "fields": [{ + "name": "field", + "description": "A field in a struct with no description", + "default_value": "`value`", + "notes": null, + "deprecated": null, + "toml_example": null, + "required": null, + "units": null + }] + }], + "referenced_constants": {} + }); + + fs::write( + &input_file, + 
serde_json::to_string_pretty(&config_docs).unwrap(), + ) + .unwrap(); + fs::write( + &template_file, + "# No Description Test\n\n{{toc_content}}\n\n{{struct_sections}}", + ) + .unwrap(); + fs::write(&mappings_file, r#"{}"#).unwrap(); + + let mut cmd = Command::cargo_bin("generate-markdown").unwrap(); + cmd.args([ + "--input", + input_file.to_str().unwrap(), + "--output", + output_file.to_str().unwrap(), + "--template", + template_file.to_str().unwrap(), + "--section-name-mappings", + mappings_file.to_str().unwrap(), + ]); + + let output = cmd.output().unwrap(); + assert!(output.status.success()); + let stdout = String::from_utf8_lossy(&output.stdout); + assert!(stdout.contains("Successfully generated Markdown documentation")); + + // Verify struct with null description is handled properly + let output_content = fs::read_to_string(&output_file).unwrap(); + assert!(output_content.contains("No Description Test")); + assert!(output_content.contains("[nodescstruct]")); + assert!(output_content.contains("field")); +} + +#[test] +fn test_generate_markdown_multiple_structs() { + let temp_dir = TempDir::new().unwrap(); + let input_file = temp_dir.path().join("input.json"); + let output_file = temp_dir.path().join("output.md"); + let template_file = temp_dir.path().join("template.md"); + let mappings_file = temp_dir.path().join("mappings.json"); + + // Create test data with multiple structs + let config_docs = json!({ + "structs": [ + { + "name": "FirstStruct", + "description": "The first configuration struct", + "fields": [{ + "name": "first_field", + "description": "Field in first struct", + "default_value": "`1`", + "notes": null, + "deprecated": null, + "toml_example": null, + "required": null, + "units": null + }] + }, + { + "name": "SecondStruct", + "description": "The second configuration struct", + "fields": [{ + "name": "second_field", + "description": "Field in second struct", + "default_value": "`2`", + "notes": null, + "deprecated": null, + "toml_example": null, + "required": null, + "units": null + }] + } + ], + "referenced_constants": {} + }); + + fs::write( + &input_file, + serde_json::to_string_pretty(&config_docs).unwrap(), + ) + .unwrap(); + fs::write( + &template_file, + "# Multiple Structs Test\n\n{{toc_content}}\n\n{{struct_sections}}", + ) + .unwrap(); + fs::write(&mappings_file, r#"{}"#).unwrap(); + + let mut cmd = Command::cargo_bin("generate-markdown").unwrap(); + cmd.args([ + "--input", + input_file.to_str().unwrap(), + "--output", + output_file.to_str().unwrap(), + "--template", + template_file.to_str().unwrap(), + "--section-name-mappings", + mappings_file.to_str().unwrap(), + ]); + + let output = cmd.output().unwrap(); + assert!(output.status.success()); + let stdout = String::from_utf8_lossy(&output.stdout); + assert!(stdout.contains("Successfully generated Markdown documentation")); + + // Verify both structs are properly rendered + let output_content = fs::read_to_string(&output_file).unwrap(); + assert!(output_content.contains("Multiple Structs Test")); + assert!(output_content.contains("[firststruct]")); + assert!(output_content.contains("[secondstruct]")); + assert!(output_content.contains("first_field")); + assert!(output_content.contains("second_field")); + assert!(output_content.contains("The first configuration struct")); + assert!(output_content.contains("The second configuration struct")); +} From d40be623e18860861e436cabfe02751990720fc9 Mon Sep 17 00:00:00 2001 From: Francesco Leacche Date: Tue, 10 Jun 2025 19:48:25 +0100 Subject: [PATCH 15/20] make required 
parsing strict: only accept true/false

---
 contrib/tools/config-docs-generator/README.md |  9 ++---
 .../config-docs-generator/src/extract_docs.rs | 38 +++++++------------
 2 files changed, 17 insertions(+), 30 deletions(-)

diff --git a/contrib/tools/config-docs-generator/README.md b/contrib/tools/config-docs-generator/README.md
index e749ef70b4..3d2875fceb 100644
--- a/contrib/tools/config-docs-generator/README.md
+++ b/contrib/tools/config-docs-generator/README.md
@@ -139,16 +139,15 @@ Provides TOML configuration examples.
 #### `@required: `
 Indicates whether the field is mandatory.
 
-- **Value Type**: Boolean (flexible parsing)
-- **Default**: `false` if annotation is omitted
+- **Value Type**: Boolean
+- **Default**: If annotation is omitted, the field is considered *not required*.
 - **Supported Values**:
-  - `true`, `True`, `TRUE`, `yes`, `Yes`, `YES`, `1` → `true`
-  - `false`, `False`, `FALSE`, `no`, `No`, `NO`, `0` → `false`
+  - `true`
+  - `false`
   - Invalid values default to `false`
 - **Examples**:
   ```rust
   /// @required: true
-  /// @required: yes
   /// @required: false
   ```
 
diff --git a/contrib/tools/config-docs-generator/src/extract_docs.rs b/contrib/tools/config-docs-generator/src/extract_docs.rs
index d6d9a8cace..689b7a7fac 100644
--- a/contrib/tools/config-docs-generator/src/extract_docs.rs
+++ b/contrib/tools/config-docs-generator/src/extract_docs.rs
@@ -460,19 +460,15 @@ fn parse_field_documentation(
 
     // Parse @required: annotations
     if let Some(required_text) = extract_annotation(metadata_section, "required") {
-        // Parse boolean value, handling common representations
-        let required_bool = match required_text.trim().to_lowercase().as_str() {
+        let required_bool = match required_text.trim() {
             "" => false, // Empty string defaults to false
-            "true" | "yes" | "1" => true,
-            "false" | "no" | "0" => false,
-            _ => {
-                // Default to false for invalid values, but could log a warning in the future
+            text => text.parse::<bool>().unwrap_or_else(|_| {
                 eprintln!(
                     "Warning: Invalid @required value '{}' for field '{}', defaulting to false",
-                    required_text, field_name
+                    text, field_name
                 );
                 false
-            }
+            }),
         };
         required = Some(required_bool);
     }
@@ -2177,17 +2173,17 @@ and includes various formatting.
         let result2 = parse_field_documentation(doc_text2, "field2").unwrap();
         assert_eq!(result2.0.required, Some(false));
 
-        // Test "yes" variant
+        // Test "TRUE" variant
         let doc_text3 = r#"Required field.
 ---
-@required: yes"#;
+@required: TRUE"#; // Not lowercase: parsing fails, defaults to false, and logs a warning
         let result3 = parse_field_documentation(doc_text3, "field3").unwrap();
-        assert_eq!(result3.0.required, Some(true));
+        assert_eq!(result3.0.required, Some(false));
 
-        // Test "no" variant
+        // Test "FALSE" variant
         let doc_text4 = r#"Optional field.
 ---
-@required: no"#;
+@required: FALSE"#; // Not lowercase: parsing fails, defaults to false, and logs a warning
         let result4 = parse_field_documentation(doc_text4, "field4").unwrap();
         assert_eq!(result4.0.required, Some(false));
 
@@ -2383,19 +2379,11 @@ and includes various formatting.
// Test all supported boolean representations for @required
         let test_cases = vec![
             ("true", Some(true)),
-            ("True", Some(true)),
-            ("TRUE", Some(true)),
-            ("yes", Some(true)),
-            ("Yes", Some(true)),
-            ("YES", Some(true)),
-            ("1", Some(true)),
+            ("True", Some(false)), // Needs to be lowercase
+            ("TRUE", Some(false)), // Needs to be lowercase
             ("false", Some(false)),
-            ("False", Some(false)),
-            ("FALSE", Some(false)),
-            ("no", Some(false)),
-            ("No", Some(false)),
-            ("NO", Some(false)),
-            ("0", Some(false)),
+            ("False", Some(false)), // Will default to false, but will log a warning
+            ("FALSE", Some(false)), // Will default to false, but will log a warning
             ("maybe", Some(false)), // Invalid defaults to false
             ("invalid", Some(false)),
         ];

From 39ddc9cbe783998f7020c47f419d57a05a762982 Mon Sep 17 00:00:00 2001
From: Francesco Leacche
Date: Thu, 12 Jun 2025 14:56:25 +0100
Subject: [PATCH 16/20] use mapping file for TARGET_STRUCTS

---
 contrib/tools/config-docs-generator/Dockerfile | 17 +++++++++++------
 .../generate-config-docs.sh | 12 ++++++++----
 docs/generated/configuration-reference.md | 4 ++--
 3 files changed, 21 insertions(+), 12 deletions(-)

diff --git a/contrib/tools/config-docs-generator/Dockerfile b/contrib/tools/config-docs-generator/Dockerfile
index 58b092b9e1..e1bbfee83d 100644
--- a/contrib/tools/config-docs-generator/Dockerfile
+++ b/contrib/tools/config-docs-generator/Dockerfile
@@ -1,6 +1,11 @@
 # Use a specific nightly toolchain for reproducible builds
 FROM rustlang/rust@sha256:04690ffa09cddd358b349272173155319f384e57816614eea0840ec7f9422862
 
+RUN apt-get update -y \
+    && apt-get install -y --no-install-recommends \
+    jq \
+    && rm -rf /var/lib/apt/lists/*
+
 # Set the working directory for building
 WORKDIR /build
 
@@ -15,12 +20,12 @@ WORKDIR /project_root
 
 # Set environment variables for generate-config-docs.sh
-ENV PROJECT_ROOT=/project_root
-ENV CARGO_TARGET_DIR=/tmp/stacks-config-docs/target
-ENV TEMP_DIR=/tmp/stacks-config-docs/doc-generation
-ENV EXTRACT_DOCS_BIN=/build/target/release/extract-docs
-ENV GENERATE_MARKDOWN_BIN=/build/target/release/generate-markdown
-ENV SKIP_BUILD=true
+ENV PROJECT_ROOT=/project_root \
+    CARGO_TARGET_DIR=/tmp/stacks-config-docs/target \
+    TEMP_DIR=/tmp/stacks-config-docs/doc-generation \
+    EXTRACT_DOCS_BIN=/build/target/release/extract-docs \
+    GENERATE_MARKDOWN_BIN=/build/target/release/generate-markdown \
+    SKIP_BUILD=true
 
 # Create the Docker-specific temp directory
 RUN mkdir -p /tmp/stacks-config-docs
diff --git a/contrib/tools/config-docs-generator/generate-config-docs.sh b/contrib/tools/config-docs-generator/generate-config-docs.sh
index 4e6b5f9d05..c9d5d0e231 100755
--- a/contrib/tools/config-docs-generator/generate-config-docs.sh
+++ b/contrib/tools/config-docs-generator/generate-config-docs.sh
@@ -66,10 +66,14 @@ main() {
 
     # Step 2: Extract documentation from source code using rustdoc
     log_info "Extracting configuration documentation using rustdoc..."
EXTRACTED_JSON="$TEMP_DIR/extracted-config-docs.json" - # List of specific Rust struct names to be documented - # NOTE: This variable must be manually updated if this list changes - # (e.g., new config structs are added or removed from the project) - TARGET_STRUCTS="BurnchainConfig,NodeConfig,MinerConfig,ConnectionOptionsFile,FeeEstimationConfigFile,EventObserverConfigFile,InitialBalanceFile" + + # Determine the list of structs to document from section_name_mappings.json + # If the caller sets $TARGET_STRUCTS explicitly we honour that override. + if [[ -z "${TARGET_STRUCTS:-}" ]]; then + TARGET_STRUCTS="$(jq -r 'keys | join(",")' "$SECTION_MAPPINGS_PATH")" + fi + log_info "Structs to be documented: $TARGET_STRUCTS" + "$EXTRACT_DOCS_BIN" \ --package stackslib \ --structs "$TARGET_STRUCTS" \ diff --git a/docs/generated/configuration-reference.md b/docs/generated/configuration-reference.md index a424d8a56d..bce5a8e9b9 100644 --- a/docs/generated/configuration-reference.md +++ b/docs/generated/configuration-reference.md @@ -106,7 +106,7 @@ The configuration is automatically generated from the Rust source code documenta | [block_commit_tx_estimated_size](#burnchain-block_commit_tx_estimated_size) | Estimated size (in virtual bytes) of a block commit transaction on bitcoin.
Used for fee calculation in mining logic by multiplying with the fee rate
[satoshis_per_byte](#burnchain-satoshis_per_byte) .

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`.

**Units:** virtual bytes | `380` | | [burn_fee_cap](#burnchain-burn_fee_cap) | The maximum amount (in sats) of "burn commitment" to broadcast for the next
block's leader election. Acts as a safety cap to limit the maximum amount
spent on mining. It serves as both the target fee and a fallback if dynamic
fee calculations fail or cannot be performed.

This setting can be hot-reloaded from the config file, allowing adjustment
without restarting.

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`.

**Units:** satoshis | `20_000` | | [chain](#burnchain-chain) | The underlying blockchain used for Proof-of-Transfer.

**Notes:**
- Currently, only `"bitcoin"` is supported. | `"bitcoin"` | -| [chain_id](#burnchain-chain_id) | The network-specific identifier used in P2P communication and database initialization.

**Notes:**
- **Warning:** Do not modify this unless you really know what you're doing.
- This is intended strictly for testing purposes. | - if [mode](#burnchain-mode) is `"mainnet"`: `CHAIN_ID_MAINNET`
- else: `CHAIN_ID_TESTNET` | +| [chain_id](#burnchain-chain_id) | The network-specific identifier used in P2P communication and database initialization.

**Notes:**
- **Warning:** Do not modify this unless you really know what you're doing.
- This is intended strictly for testing purposes. | - if [mode](#burnchain-mode) is `"mainnet"`: `0x00000001`
- else: `0x80000000` | | [commit_anchor_block_within](#burnchain-commit_anchor_block_within) | Specifies a mandatory wait period (in milliseconds) after receiving a burnchain tip
before the node attempts to build the anchored block for the new tenure.
This duration effectively schedules the start of the block-building process
relative to the tip's arrival time.

**Notes:**
- This is intended strictly for testing purposes.

**Units:** milliseconds | `5_000` | | [epochs](#burnchain-epochs) | Custom override for the definitions of Stacks epochs (start/end burnchain
heights, consensus rules). This setting allows testing specific epoch
transitions or custom consensus rules by defining exactly when each epoch
starts on bitcoin.

Epochs define distinct protocol rule sets (consensus rules, execution costs,
capabilities). When configured, the list must include all epochs
sequentially from "1.0" up to the highest desired epoch, without skipping
any intermediate ones. Valid `epoch_name` values currently include:
`"1.0"`, `"2.0"`, `"2.05"`, `"2.1"`, `"2.2"`, `"2.3"`, `"2.4"`, `"2.5"`, `"3.0"`, `"3.1"`.

**Validation Rules:**
- Epochs must be provided in strict chronological order (`1.0`, `2.0`, `2.05`...).
- `start_height` values must be non-decreasing across the list.
- Epoch `"1.0"` must have `start_height = 0`.
- The number of defined epochs cannot exceed the maximum supported by the node software.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes.
- Configured as a list `[[burnchain.epochs]]` in TOML, each with `epoch_name` (string) and `start_height` (integer Bitcoin block height).

**Example:**
[[burnchain.epochs]]
epoch_name = "2.1"
start_height = 150

[[burnchain.epochs]]
epoch_name = "2.2"
start_height = 200
| `None` (uses the standard epoch definitions for the selected [mode](#burnchain-mode) ) | | [fault_injection_burnchain_block_delay](#burnchain-fault_injection_burnchain_block_delay) | Fault injection setting for testing. Introduces an artificial delay (in
milliseconds) before processing each burnchain block download. Simulates a
slow burnchain connection.

**Notes:**
- This is intended strictly for testing purposes.

**Units:** milliseconds | `0` (no delay) | @@ -122,7 +122,7 @@ The configuration is automatically generated from the Rust source code documenta | [password](#burnchain-password) | The password for authenticating with the bitcoin node's RPC interface.
Required if the bitcoin node requires RPC authentication.

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`. | `None` | | [peer_host](#burnchain-peer_host) | The hostname or IP address of the bitcoin node peer.

This field is required for all node configurations as it specifies where to
find the underlying bitcoin node to interact with for PoX operations,
block validation, and mining. | `"0.0.0.0"` | | [peer_port](#burnchain-peer_port) | The P2P network port of the bitcoin node specified by [peer_host](#burnchain-peer_host) . | `8333` | -| [peer_version](#burnchain-peer_version) | The peer protocol version number used in P2P communication.
This parameter cannot be set via the configuration file.

**Notes:**
- **Warning:** Do not modify this unless you really know what you're doing. | - if [mode](#burnchain-mode) is `"mainnet"`: `PEER_VERSION_MAINNET`
- else: `PEER_VERSION_TESTNET` | +| [peer_version](#burnchain-peer_version) | The peer protocol version number used in P2P communication.
This parameter cannot be set via the configuration file.

**Notes:**
- **Warning:** Do not modify this unless you really know what you're doing. | - if [mode](#burnchain-mode) is `"mainnet"`: `402_653_196`
- else: `4_207_599_116` | | [poll_time_secs](#burnchain-poll_time_secs) | The interval, in seconds, at which the node polls the bitcoin node for new
blocks and state updates.

The default value of 10 seconds is mainly intended for testing purposes.
It's suggested to set this to a higher value for mainnet, e.g., 300 seconds
(5 minutes).

**Units:** seconds | `10` | | [pox_2_activation](#burnchain-pox_2_activation) | Sets a custom burnchain height for PoX-2 activation (for testing).

This affects two key transitions:
1. The block height at which PoX v1 lockups are automatically unlocked.
2. The block height from which PoX reward set calculations switch to PoX v2 rules.

**Behavior:**
- This value directly sets the auto unlock height for PoX v1 lockups before
transition to PoX v2. This also defines the burn height at which PoX reward
sets are calculated using PoX v2 rather than v1.
- If custom [epochs](#burnchain-epochs) are provided:
  - This value is used to validate that Epoch 2.1's start height is ≤ this value.
  - However, the height specified in `epochs` for Epoch 2.1 takes precedence.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes. | `None` | | [pox_prepare_length](#burnchain-pox_prepare_length) | Overrides the length (in bitcoin blocks) of the PoX prepare phase.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes.

**Units:** bitcoin blocks | `None` (uses the standard prepare phase length for the mode) | From 5e5e6a24bf353e70f0cf90a1904bb33968e6b853 Mon Sep 17 00:00:00 2001 From: Francesco Leacche Date: Mon, 16 Jun 2025 16:04:45 +0100 Subject: [PATCH 17/20] do not run docker as root --- contrib/tools/config-docs-generator/Dockerfile | 4 ++++ docs/generated/configuration-reference.md | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/contrib/tools/config-docs-generator/Dockerfile b/contrib/tools/config-docs-generator/Dockerfile index e1bbfee83d..d3e8f15f47 100644 --- a/contrib/tools/config-docs-generator/Dockerfile +++ b/contrib/tools/config-docs-generator/Dockerfile @@ -13,6 +13,10 @@ WORKDIR /build # Copy from three levels up (project root) to maintain the directory structure COPY ../../../ /build +RUN useradd -ms /bin/bash docs-builder +RUN chown docs-builder -R /build +USER docs-builder + # Pre-build the config-docs-generator binaries during image build RUN cargo build --package config-docs-generator --release diff --git a/docs/generated/configuration-reference.md b/docs/generated/configuration-reference.md index bce5a8e9b9..44929a1219 100644 --- a/docs/generated/configuration-reference.md +++ b/docs/generated/configuration-reference.md @@ -193,7 +193,7 @@ The configuration is automatically generated from the Rust source code documenta | [filter_origins](#miner-filter_origins) | A comma separated list of Stacks addresses to whitelist so that only
transactions from these addresses should be considered during the mempool walk
for block building. If this list is non-empty, any transaction whose origin
address is *not* in this set will be skipped.

This allows miners to prioritize transactions originating from specific accounts that are
important to them.
Configured as a comma-separated string of standard Stacks addresses
(e.g., "ST123...,ST456...") in the configuration file.

**Example:**
filter_origins = "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2,ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF"
| Empty set (all origins are considered). | | [first_rejection_pause_ms](#miner-first_rejection_pause_ms) | Time in milliseconds to pause after receiving the first threshold rejection,
before proposing a new block.

When a miner's block proposal fails to gather enough signatures from the
signers for the first time at a given height, the miner will pause for this
duration before attempting to mine and propose again.

**Units:** milliseconds | `5_000` | | [max_execution_time_secs](#miner-max_execution_time_secs) | Defines the maximum execution time (in seconds) allowed for a single contract call transaction.

When processing a transaction (contract call or smart contract deployment),
if this option is set, and the execution time exceeds this limit, the
transaction processing fails with an `ExecutionTimeout` error, and the
transaction is skipped. This prevents potentially long-running or
infinite-loop transactions from blocking block production.

**Units:** seconds | `None` (no execution time limit) | -| [mempool_walk_strategy](#miner-mempool_walk_strategy) | Strategy for selecting the next transaction candidate from the mempool.
Controls prioritization between maximizing immediate fee capture vs. ensuring
transaction nonce order for account progression and processing efficiency.

See `MemPoolWalkStrategy` for variant details.

Possible values (use variant names for configuration):
- `"GlobalFeeRate"`: Selects the transaction with the highest fee rate globally.
- `"NextNonceWithHighestFeeRate"`: Selects the highest-fee transaction among those
matching the next expected nonce for sender/sponsor accounts. | `"GlobalFeeRate"` | +| [mempool_walk_strategy](#miner-mempool_walk_strategy) | Strategy for selecting the next transaction candidate from the mempool.
Controls prioritization between maximizing immediate fee capture vs. ensuring
transaction nonce order for account progression and processing efficiency.

See `MemPoolWalkStrategy` for variant details.

Possible values (use variant names for configuration):
- `"GlobalFeeRate"`: Selects the transaction with the highest fee rate globally.
- `"NextNonceWithHighestFeeRate"`: Selects the highest-fee transaction among those
matching the next expected nonce for sender/sponsor accounts. | `"NextNonceWithHighestFeeRate"` | | [min_time_between_blocks_ms](#miner-min_time_between_blocks_ms) | The minimum time to wait between mining blocks in milliseconds. The value
must be greater than or equal to 1000 ms because if a block is mined
within the same second as its parent, it will be rejected by the signers.

This check ensures compliance with signer rules that prevent blocks with
identical timestamps (at second resolution) to their parents. If a lower
value is configured, 1000 ms is used instead.

**Units:** milliseconds | `1_000` | | [mining_key](#miner-mining_key) | The private key (Secp256k1) used for signing blocks, provided as a hex string.

This key must be present at runtime for mining operations to succeed. | - if the `[miner]` section *is present* in the config file: [[node].seed](#node-seed)
- else: `None` | | [nakamoto_attempt_time_ms](#miner-nakamoto_attempt_time_ms) | Maximum time (in milliseconds) the miner spends selecting transactions from
the mempool when assembling a Nakamoto block. Once this duration is exceeded,
the miner stops adding transactions and finalizes the block with those
already selected.

**Units:** milliseconds | `5_000` (5 seconds) | From 358fe358a6ac03597665689f9767894cca45e917 Mon Sep 17 00:00:00 2001 From: Francesco Leacche Date: Mon, 16 Jun 2025 18:58:12 +0100 Subject: [PATCH 18/20] remove generated configuration documentation --- docs/generated/configuration-reference.md | 227 ---------------------- 1 file changed, 227 deletions(-) delete mode 100644 docs/generated/configuration-reference.md diff --git a/docs/generated/configuration-reference.md b/docs/generated/configuration-reference.md deleted file mode 100644 index 44929a1219..0000000000 --- a/docs/generated/configuration-reference.md +++ /dev/null @@ -1,227 +0,0 @@ -# Stacks Node Configuration Reference - -This document provides a comprehensive reference for all configuration options available in the Stacks node TOML configuration file. - -The configuration is automatically generated from the Rust source code documentation. - -## Table of Contents - -- [[[ustx_balance]]](#ustx_balance) -- [[[events_observer]]](#events_observer) -- [[connection_options]](#connection_options) -- [[fee_estimation]](#fee_estimation) -- [[burnchain]](#burnchain) -- [[node]](#node) -- [[miner]](#miner) - - -## [[ustx_balance]] - -| Parameter | Description | Default | -|-----------|-------------|----------| -| [address](#ustx_balance-address) | The Stacks address to receive the initial STX balance.
Must be a valid "non-mainnet" Stacks address (e.g., "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2"). | **Required** | -| [amount](#ustx_balance-amount) | The amount of microSTX to allocate to the address at node startup.
1 STX = 1,000,000 microSTX.

**Units:** microSTX | **Required** | - - -## [[events_observer]] - -| Parameter | Description | Default | -|-----------|-------------|----------| -| [disable_retries](#events_observer-disable_retries) | Controls whether the node should retry sending event notifications if delivery
fails or times out.

If `false` (default): The node will attempt to deliver event notifications
persistently. If an attempt fails (due to network error, timeout, or a
non-200 HTTP response), the event payload is saved and retried indefinitely.
This ensures that all events will eventually be delivered. However, this can
cause the node's block processing to stall if an observer is down, or
indefinitely fails to process the event.

- If `true`: The node will make only a single attempt to deliver each event
notification. If this single attempt fails for any reason, the event is
discarded, and no further retries will be made for that specific event.

**Notes:**
- **Warning:** Setting this to `true` can lead to missed events if the observer endpoint is temporarily unavailable or experiences issues. | `false` (retries are enabled) | -| [endpoint](#events_observer-endpoint) | URL endpoint (hostname and port) where event notifications will be sent via
HTTP POST requests.

The node will automatically prepend `http://` to this endpoint and append the
specific event path (e.g., `/new_block`, `/new_mempool_tx`). Therefore, this
value should be specified as `hostname:port` (e.g., "localhost:3700").

This should point to a service capable of receiving and processing Stacks event data.

**Notes:**
- **Do NOT include the `http://` scheme in this configuration value.**

**Example:**
endpoint = "localhost:3700"
| **Required** | -| [events_keys](#events_observer-events_keys) | List of event types that this observer is configured to receive.

Each string in the list specifies an event category or a specific event to
subscribe to. For an observer to receive any notifications, this list must
contain at least one valid key. Providing an invalid string that doesn't match
any of the valid formats below will cause the node to panic on startup when
parsing the configuration.

All observers, regardless of their `events_keys` configuration, implicitly
receive payloads on the `/attachments/new` endpoint.

Valid Event Keys:
- `"*"`: Subscribes to a broad set of common events.
  - Events delivered to:
    - `/new_block`: For blocks containing transactions that generate STX, FT,
NFT, or smart contract events.
    - `/new_microblocks`: For all new microblock streams. Note: Only until epoch 2.5.
    - `/new_mempool_tx`: For new mempool transactions.
    - `/drop_mempool_tx`: For dropped mempool transactions.
    - `/new_burn_block`: For new burnchain blocks.
  - Note: This key does NOT by itself subscribe to `/stackerdb_chunks` or `/proposal_response`.

- `"stx"`: Subscribes to STX token operation events (transfer, mint, burn, lock).
  - Events delivered to: `/new_block`, `/new_microblocks`.
  - Payload details: The "events" array in the delivered payloads will be
filtered to include only STX-related events.

- `"memtx"`: Subscribes to new and dropped mempool transaction events.
  - Events delivered to: `/new_mempool_tx`, `/drop_mempool_tx`.

- `"burn_blocks"`: Subscribes to new burnchain block events.
  - Events delivered to: `/new_burn_block`.

- `"microblocks"`: Subscribes to new microblock stream events.
  - Events delivered to: `/new_microblocks`.
  - Payload details:
    - The "transactions" field will contain all transactions from the microblocks.
    - The "events" field will contain STX, FT, NFT, or specific smart contract
events *only if* this observer is also subscribed to those more specific
event types (e.g., via `"stx"`, `"*"`, a specific contract event key,
or a specific asset identifier key).
  - Note: Only until epoch 2.5.

- `"stackerdb"`: Subscribes to StackerDB chunk update events.
  - Events delivered to: `/stackerdb_chunks`.

- `"block_proposal"`: Subscribes to block proposal response events (for Nakamoto consensus).
  - Events delivered to: `/proposal_response`.

- Smart Contract Event: Subscribes to a specific smart contract event.
  - Format: `"{contract_address}.{contract_name}::{event_name}"`
(e.g., `ST0000000000000000000000000000000000000000.my-contract::my-custom-event`)
  - Events delivered to: `/new_block`, `/new_microblocks`.
  - Payload details: The "events" array in the delivered payloads will be
filtered for this specific event.

- Asset Identifier for FT/NFT Events: Subscribes to events (mint, burn,
transfer) for a specific Fungible Token (FT) or Non-Fungible Token (NFT).
  - Format: `"{contract_address}.{contract_name}.{asset_name}"`
(e.g., for an FT: `ST0000000000000000000000000000000000000000.contract.token`)
  - Events delivered to: `/new_block`, `/new_microblocks`.
  - Payload details: The "events" array in the delivered payloads will be
filtered for events related to the specified asset.

**Notes:**
- For a more detailed documentation check the event-dispatcher docs in the `/docs` folder.

**Example:**
events_keys = [
  "burn_blocks",
  "memtx",
  "ST0000000000000000000000000000000000000000.my-contract::my-custom-event",
  "ST0000000000000000000000000000000000000000.token-contract.my-ft"
]
| **Required** | -| [timeout_ms](#events_observer-timeout_ms) | Maximum duration (in milliseconds) to wait for the observer endpoint to respond.

When the node sends an event notification to this observer, it will wait at
most this long for a successful HTTP response (status code 200) before
considering the request timed out. If a timeout occurs and retries are enabled
(see [disable_retries](#events_observer-disable_retries) ), the request will be attempted
again according to the retry strategy.

**Units:** milliseconds | `1_000` | - - -## [connection_options] - -| Parameter | Description | Default | -|-----------|-------------|----------| -| [auth_token](#connection_options-auth_token) | HTTP auth password to use when communicating with stacks-signer binary.

This token is used in the `Authorization` header for certain requests.
Primarily, it secures the communication channel between this node and a
connected `stacks-signer` instance.

It is also used to authenticate requests to `/v2/blocks?broadcast=1`.

**Notes:**
- This field **must** be configured if the node needs to receive block proposals from a configured `stacks-signer` [[events_observer]] via the `/v3/block_proposal` endpoint.
- The value must match the token configured on the signer. | `None` (authentication disabled for relevant endpoints) | -| [block_proposal_max_age_secs](#connection_options-block_proposal_max_age_secs) | Maximum age (in seconds) allowed for a block proposal received via the
`/v3/block_proposal` RPC endpoint.

If a block proposal is received whose timestamp is older than the current
time minus this configured value, the node will reject the proposal with an
HTTP 422 (Unprocessable Entity) error, considering it too stale. This
prevents the node from spending resources validating outdated proposals.

**Units:** seconds | `600` | -| [connect_timeout](#connection_options-connect_timeout) | Maximum duration (in seconds) a connection attempt is allowed to remain in
the connecting state.

This applies to both incoming P2P and HTTP connections. If a remote peer
initiates a connection but does not complete the connection process
(e.g., handshake for P2P) within this time, the node will consider it
unresponsive and drop the connection attempt.

**Units:** seconds | `10` | -| [disable_block_download](#connection_options-disable_block_download) | If true, completely disables the block download state machine.

The node will not attempt to download Stacks blocks (neither Nakamoto
tenures nor legacy blocks) from peers.

**Notes:**
- Intended for testing or specialized node configurations. | `false` | -| [disable_inbound_handshakes](#connection_options-disable_inbound_handshakes) | If true, prevents the node from processing initial handshake messages from new
inbound P2P connections.

This effectively stops the node from establishing new authenticated inbound
P2P sessions. Outbound connections initiated by this node are unaffected.

**Notes:**
- Primarily intended for testing purposes. | `false` | -| [disable_inbound_walks](#connection_options-disable_inbound_walks) | If true, disables the neighbor discovery mechanism from starting walks from
inbound peers. Walks will only initiate from seed/bootstrap peers, outbound
connections, or pingbacks.

**Notes:**
- Primarily intended for testing or specific network debugging scenarios. | `false` | -| [dns_timeout](#connection_options-dns_timeout) | Maximum time (in milliseconds) to wait for a DNS query to resolve.

When the node needs to resolve a hostname (e.g., from a peer's advertised
[[node].data_url](#node-data_url) or an Atlas attachment URL) into an IP address, it
initiates a DNS lookup. This setting defines the maximum duration the node will
wait for the DNS server to respond before considering the lookup timed out.

**Units:** milliseconds | `15_000` (15 seconds) | -| [force_disconnect_interval](#connection_options-force_disconnect_interval) | Fault injection setting for testing purposes. Interval (in seconds) for
forced disconnection of all peers.

If set to a positive value, the node will periodically disconnect all of its
P2P peers at roughly this interval. This simulates network churn or
partitioning for testing node resilience.

**Notes:**
- If set to a positive value, the node will periodically disconnect all of its P2P peers at roughly this interval.
- This simulates network churn or partitioning for testing node resilience.
- The code enforcing this behavior is conditionally compiled using `cfg!(test)` and is only active during test runs.
- This setting has no effect in standard production builds.

**Units:** seconds | `None` (feature disabled) | -| [handshake_timeout](#connection_options-handshake_timeout) | Maximum duration (in seconds) a P2P peer is allowed after connecting before
completing the handshake.

If a P2P peer connects successfully but fails to send the necessary handshake
messages within this time, the node will consider it unresponsive and drop the
connection.

**Units:** seconds | `5` | -| [heartbeat](#connection_options-heartbeat) | Interval (in seconds) at which this node expects to send or receive P2P
keep-alive messages.

During the P2P handshake, this node advertises this configured `heartbeat`
value to its peers. Each peer uses the other's advertised heartbeat
interval (plus a timeout margin) to monitor responsiveness and detect
potential disconnections. This node also uses its own configured value to
proactively send Ping messages if the connection would otherwise be idle,
helping to keep it active.

**Units:** seconds | `3_600` (1 hour) | -| [idle_timeout](#connection_options-idle_timeout) | Maximum idle time (in seconds) for HTTP connections.

This applies only to HTTP connections. It defines the maximum allowed time
since the last response was sent by the node to the client. An HTTP
connection is dropped if both this `idle_timeout` and the general
[timeout](#connection_options-timeout) (time since last request received) are exceeded.

**Units:** seconds | `15` | -| [inbox_maxlen](#connection_options-inbox_maxlen) | Maximum number of messages allowed in the per-connection incoming buffer.
The limits apply individually to each established connection (both P2P and HTTP). | `100` | -| [inv_reward_cycles](#connection_options-inv_reward_cycles) | Lookback depth (in PoX reward cycles) for Nakamoto inventory synchronization requests.

When initiating an inventory sync cycle with a peer, the node requests data
starting from `inv_reward_cycles` cycles before the current target reward
cycle. This determines how much historical inventory information is requested
in each sync attempt.

**Units:** PoX reward cycles | - if [[burnchain].mode](#burnchain-mode) is `"mainnet"`: `3`
- else: `6` | -| [inv_sync_interval](#connection_options-inv_sync_interval) | Minimum interval (in seconds) between initiating inventory synchronization
attempts with the same peer.

Acts as a per-peer cooldown to throttle sync requests. A new sync cycle with
a peer generally starts only after this interval has passed since the previous
attempt began *and* the previous cycle is considered complete.

**Units:** seconds | `45` | -| [log_neighbors_freq](#connection_options-log_neighbors_freq) | Frequency (in milliseconds) for logging the current P2P neighbor list at the
DEBUG level.

If set to a non-zero value, the node will periodically log details about its
currently established P2P connections (neighbors). Setting this to 0 disables
this periodic logging.

**Units:** milliseconds | `60_000` (1 minute) | -| [max_http_clients](#connection_options-max_http_clients) | Maximum total number of allowed concurrent HTTP connections.

This limits the total number of simultaneous connections the node's RPC/HTTP
server will accept. If this limit is reached, new incoming HTTP connection
attempts will be rejected. | `1000` | -| [max_inflight_attachments](#connection_options-max_inflight_attachments) | Maximum number of concurrent Atlas data attachment download requests allowed.

This limits how many separate download requests for Atlas data attachments
can be active simultaneously. Helps manage network resources when fetching
potentially large attachment data. | `6` | -| [max_inflight_blocks](#connection_options-max_inflight_blocks) | Maximum number of concurrent Nakamoto block download requests allowed.

This limits how many separate block download processes for Nakamoto tenures
(both confirmed and unconfirmed) can be active simultaneously. Helps manage
network bandwidth and processing load during chain synchronization. | `6` | -| [max_sockets](#connection_options-max_sockets) | Maximum total number of concurrent network sockets the node is allowed to manage.

This limit applies globally to all types of sockets handled by the node's
networking layer, including listening sockets (P2P and RPC/HTTP),
established P2P connections (inbound/outbound), and established HTTP connections.
It serves as a hard limit to prevent the node from exhausting operating
system resources related to socket descriptors. | `800` | -| [maximum_call_argument_size](#connection_options-maximum_call_argument_size) | Maximum size (in bytes) of the HTTP request body for read-only contract calls.

This limit is enforced on the `Content-Length` of incoming requests to the
`/v2/contracts/call-read-only/...` RPC endpoint. It prevents excessively large
request bodies, which might contain numerous or very large hex-encoded
function arguments, from overwhelming the node.

**Notes:**
- Calculated as 20 * `clarity::vm::types::BOUND_VALUE_SERIALIZATION_HEX`.

**Units:** bytes | `83_886_080` (80 MiB) | -| [num_clients](#connection_options-num_clients) | Maximum number of allowed concurrent inbound P2P connections.

This acts as a hard limit. If the node already has this many active inbound
P2P connections, any new incoming P2P connection attempts will be rejected.
Outbound P2P connections initiated by this node are not counted against this limit. | `750` | -| [num_neighbors](#connection_options-num_neighbors) | Target number of peers for StackerDB replication.

Sets the maximum number of potential replication target peers requested from
the StackerDB control contract (`get-replication-targets`) when configuring a replica.

Note: Formerly (pre-Epoch 3.0), this also controlled the target peer count for
inventory synchronization. | `32` | -| [outbox_maxlen](#connection_options-outbox_maxlen) | Maximum number of messages allowed in the per-connection outgoing buffer.
The limit applies individually to each established connection (both P2P and HTTP). | `100` | -| [private_key_lifetime](#connection_options-private_key_lifetime) | Validity duration (in number of bitcoin blocks) for the node's P2P session
private key.

The node uses a temporary private key for signing P2P messages. This key has
an associated expiry bitcoin block height stored in the peer database. When
the current bitcoin height reaches or exceeds the key's expiry height, the
node automatically generates a new random private key.
The expiry block height for this new key is calculated by adding the
configured [private_key_lifetime](#connection_options-private_key_lifetime) (in blocks) to the
previous key's expiry block height. The node then re-handshakes with peers
to transition to the new key. This provides periodic key rotation for P2P communication.

**Units:** bitcoin blocks | `9223372036854775807` (i64::MAX, effectively infinite, disabling automatic re-keying). | -| [private_neighbors](#connection_options-private_neighbors) | Whether to allow connections and interactions with peers having private IP addresses.

If `false` (default), the node will generally:
- Reject incoming connection attempts from peers with private IPs.
- Avoid initiating connections to peers known to have private IPs.
- Ignore peers with private IPs during neighbor discovery (walks).
- Skip querying peers with private IPs for mempool or StackerDB data.
- Filter out peers with private IPs from API responses listing potential peers.

Setting this to `true` disables these restrictions, which can be useful for
local testing environments or fully private network deployments. | `false` | -| [public_ip_address](#connection_options-public_ip_address) | The Public IPv4 address and port (e.g. "203.0.113.42:20444") to advertise to other nodes.

If this option is not set (`None`), the node will attempt to automatically
discover its public IP address. | `None` (triggers automatic discovery attempt) | -| [read_only_call_limit_read_count](#connection_options-read_only_call_limit_read_count) | Maximum number of distinct read operations from Clarity data space allowed
during a read-only call. | `30` | -| [read_only_call_limit_read_length](#connection_options-read_only_call_limit_read_length) | Maximum total size (in bytes) of data allowed to be read from Clarity data
space (variables, maps) during a read-only call.

**Units:** bytes | `100_000` (100 KB). | -| [read_only_call_limit_runtime](#connection_options-read_only_call_limit_runtime) | Runtime cost limit for an individual read-only function call. This represents
computation effort within the Clarity VM.
(See SIP-006: https://github.com/stacksgov/sips/blob/main/sips/sip-006/sip-006-runtime-cost-assessment.md)

**Units:** Clarity VM cost units | `1_000_000_000` | -| [read_only_call_limit_write_count](#connection_options-read_only_call_limit_write_count) | Maximum number of distinct write operations allowed during a read-only call.

**Notes:**
- This limit is effectively forced to 0 by the API handler, ensuring read-only behavior.
- Configuring a non-zero value has no effect on read-only call execution. | `0` | -| [read_only_call_limit_write_length](#connection_options-read_only_call_limit_write_length) | Maximum total size (in bytes) of data allowed to be written during a read-only call.

**Notes:**
- This limit is effectively forced to 0 by the API handler, ensuring read-only behavior.
- Configuring a non-zero value has no effect on read-only call execution.

**Units:** bytes | `0` | -| [reject_blocks_pushed](#connection_options-reject_blocks_pushed) | Controls whether the node accepts Nakamoto blocks pushed proactively by peers.

- If `true`: Pushed blocks are ignored (logged at DEBUG and discarded). The
node will still process blocks that it actively downloads.
- If `false`: Both pushed blocks and actively downloaded blocks are processed. | `false` | -| [soft_max_clients_per_host](#connection_options-soft_max_clients_per_host) | Soft limit on the number of inbound P2P connections allowed per host IP address.

During inbound connection pruning (when total inbound connections >
[soft_num_clients](#connection_options-soft_num_clients) ), the node checks if any single
IP address has more connections than this limit. If so, it preferentially
prunes the newest connections originating from that specific IP address
until its count is reduced to this limit. This prevents a single host from
dominating the node's inbound connection capacity. | `4` | -| [soft_max_neighbors_per_org](#connection_options-soft_max_neighbors_per_org) | Soft limit on the number of outbound P2P connections per network organization (ASN).

During connection pruning (when total outbound connections >
[soft_num_neighbors](#connection_options-soft_num_neighbors) ), the node checks if any single
network organization (identified by ASN) has more outbound connections than
this limit. If so, it preferentially prunes the least healthy/newest
connections from that overrepresented organization until its count is
reduced to this limit or the total outbound count reaches
[soft_num_neighbors](#connection_options-soft_num_neighbors) . This encourages connection diversity
across different network providers. | `32` | -| [soft_num_clients](#connection_options-soft_num_clients) | Soft limit threshold for triggering inbound P2P connection pruning.

If the total number of currently active inbound P2P connections exceeds this
value, the node will activate pruning logic to reduce the count, typically by
applying per-host limits (see [soft_max_clients_per_host](#connection_options-soft_max_clients_per_host) ).
This helps manage the overall load from inbound peers. | `750` | -| [soft_num_neighbors](#connection_options-soft_num_neighbors) | Target number of outbound P2P connections the node aims to maintain.

The connection pruning logic only activates if the current number of established
outbound P2P connections exceeds this value. Pruning aims to reduce the
connection count back down to this target, ensuring the node maintains a
baseline number of outbound peers for network connectivity. | `16` | -| [stackerdb_hint_replicas](#connection_options-stackerdb_hint_replicas) | Static list of preferred replica peers for specific StackerDB contracts,
provided as a JSON string.

This allows manually specifying known peers to use for replicating particular
StackerDBs, potentially overriding or supplementing the peers discovered via
the StackerDB's control contract.

Format: The configuration value must be a TOML string containing valid JSON.
The JSON structure must be an array of tuples, where each tuple pairs a
contract identifier with a list of preferred neighbor addresses:
`[[ContractIdentifier, [NeighborAddress, ...]], ...]`

1. `ContractIdentifier`: A JSON object representing the `QualifiedContractIdentifier`.
It must have the specific structure:
`{"issuer": [version_byte, [byte_array_20]], "name": "contract-name"}`

2. `NeighborAddress`: A JSON object specifying the peer details:
`{"ip": "...", "port": ..., "public_key_hash": "..."}`

**Notes:**
- Use this option with caution, primarily for advanced testing or bootstrapping.

**Example:**
stackerdb_hint_replicas = '''
[
  [
    {
      "issuer": [1, [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]],
      "name": "my-contract"
    },
    [
      {
        "ip": "192.0.2.1",
        "port": 20444,
        "public_key_hash": "0102030405060708090a0b0c0d0e0f1011121314"
      }
    ]
  ]
]
'''
| `None` (no hints provided) | -| [timeout](#connection_options-timeout) | General communication timeout (in seconds).

- For HTTP connections: Governs two timeout aspects:
  - Server-side: Defines the maximum allowed time since the last request was
received from a client. An idle connection is dropped if both this
timeout and [idle_timeout](#connection_options-idle_timeout) are exceeded.
  - Client-side: Sets the timeout duration (TTL) for outgoing HTTP requests
initiated by the node itself.
- For P2P connections: Used as the specific timeout for NAT punch-through requests.

**Units:** seconds | `15` | -| [walk_interval](#connection_options-walk_interval) | Minimum interval (in seconds) between the start of consecutive neighbor discovery walks.

The node periodically performs "neighbor walks" to discover new peers and
maintain an up-to-date view of the P2P network topology. This setting
controls how frequently these walks can be initiated, preventing excessive
network traffic and processing.

**Units:** seconds | `60` | -| [walk_seed_probability](#connection_options-walk_seed_probability) | Probability (0.0 to 1.0) of forcing a neighbor walk to start from a seed/bootstrap peer.

This probability applies only when the node is not in Initial Block Download (IBD)
and is already connected to at least one seed/bootstrap peer.
Normally, in this situation, the walk would start from a random inbound or
outbound peer. However, with this probability, the walk is forced to start
from a seed peer instead. This helps ensure the node periodically
re-establishes its network view from trusted entry points. | `0.1` (10%) | -| ~~[antientropy_public](#connection_options-antientropy_public)~~ | Controls whether a node with public inbound connections should still push
blocks, even if not NAT'ed.

In the Stacks 2.x anti-entropy logic, if a node detected it had inbound
connections from public IPs (suggesting it wasn't behind NAT) and this flag
was set to `false`, it would refrain from proactively pushing blocks and
microblocks to peers. The assumption was that publicly reachable nodes should
primarily serve downloads. If set to `true` (default), the node would push
data regardless of its perceived reachability.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. | `true` | -| ~~[antientropy_retry](#connection_options-antientropy_retry)~~ | Minimum interval (in seconds) between attempts to run the Epoch 2.x anti-entropy
data push mechanism.

The Stacks 2.x anti-entropy protocol involves the node proactively pushing its
known Stacks blocks and microblocks to peers. This value specifies the
cooldown period for this operation. This prevents the node from excessively
attempting to push data to its peers.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+.

**Units:** seconds | `3_600` (1 hour) | -| ~~[download_interval](#connection_options-download_interval)~~ | Minimum interval (in seconds) between consecutive block download scans in epoch 2.x.

In the pre-Nakamoto block download logic, if a full scan for blocks completed
without finding any new blocks to download, and if the known peer inventories
had not changed, the node would wait at least this duration before
initiating the next download scan. This throttled the downloader when the
node was likely already synchronized.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+.

**Units:** seconds | `10` | -| ~~[full_inv_sync_interval](#connection_options-full_inv_sync_interval)~~ | Deprecated: it does not have any effect on the node's behavior.

**⚠️ DEPRECATED:** It does not have any effect on the node's behavior. | `None` | -| ~~[max_clients_per_host](#connection_options-max_clients_per_host)~~ | Maximum number of inbound p2p connections per host we permit.

**⚠️ DEPRECATED:** It does not have any effect on the node's behavior. | `4` | -| ~~[max_neighbors_per_host](#connection_options-max_neighbors_per_host)~~ | Maximum number of neighbors per host we permit.

**⚠️ DEPRECATED:** It does not have any effect on the node's behavior. | `1` | -| ~~[soft_max_neighbors_per_host](#connection_options-soft_max_neighbors_per_host)~~ | Soft limit on the number of neighbors per host we permit.

**⚠️ DEPRECATED:** It does not have any effect on the node's behavior. | `1` | - - -## [fee_estimation] - -| Parameter | Description | Default | -|-----------|-------------|----------| -| [cost_estimator](#fee_estimation-cost_estimator) | Specifies the name of the cost estimator to use.
This controls how the node estimates computational costs for transactions.

Accepted values:
- `"NaivePessimistic"`: The only currently supported cost estimator. This estimator
tracks the highest observed costs for each operation type and uses the average
of the top 10 values as its estimate, providing a conservative approach to
cost estimation.

**Notes:**
- If [disabled](#fee_estimation-disabled) is `true`, the node will use the default unit cost estimator. | `"NaivePessimistic"` | -| [cost_metric](#fee_estimation-cost_metric) | Specifies the name of the cost metric to use.
This controls how the node measures and compares transaction costs.

Accepted values:
- `"ProportionDotProduct"`: The only currently supported cost metric. This metric
computes a weighted sum of cost dimensions (runtime, read/write counts, etc.)
proportional to how much of the block limit they consume.

**Notes:**
- If [disabled](#fee_estimation-disabled) is `true`, the node will use the default unit cost metric. | `"ProportionDotProduct"` | -| [disabled](#fee_estimation-disabled) | If `true`, all fee and cost estimation features are disabled.
The node will use unit estimators and metrics, which effectively provide no
actual estimation capabilities.

When disabled, the node will:
1. Not track historical transaction costs or fee rates.
2. Return simple unit values for costs for any transaction, regardless of
its actual complexity.
3. Be unable to provide meaningful fee estimates for API requests (always
returns an error).
4. Consider only raw transaction fees (not fees per cost unit) when
assembling blocks.

This setting takes precedence over individual estimator/metric configurations.
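
For example, a minimal TOML sketch (illustrative) that turns all estimation off:
[fee_estimation]
disabled = true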

**Notes:**
- When `true`, the values for [cost_estimator](#fee_estimation-cost_estimator) , [fee_estimator](#fee_estimation-fee_estimator) , and [cost_metric](#fee_estimation-cost_metric) are ignored. | `false` | -| [fee_estimator](#fee_estimation-fee_estimator) | Specifies the name of the fee estimator to use.
This controls how the node calculates appropriate transaction fees based on costs.

Accepted values:
- `"ScalarFeeRate"`: Simple multiplier-based fee estimation that uses percentiles
(5th, 50th, and 95th) of observed fee rates from recent blocks.
- `"FuzzedWeightedMedianFeeRate"`: Fee estimation that adds controlled randomness
to a weighted median rate calculator. This helps prevent fee optimization attacks
by adding unpredictability to fee estimates while still maintaining accuracy.

**Notes:**
- If [disabled](#fee_estimation-disabled) is `true`, the node will use the default unit fee estimator. | `"ScalarFeeRate"` | -| [fee_rate_fuzzer_fraction](#fee_estimation-fee_rate_fuzzer_fraction) | Specifies the fraction of random noise to add if using the
`FuzzedWeightedMedianFeeRate` fee estimator. This value should be in the
range [0, 1], representing a percentage of the base fee rate.

For example, with a value of 0.1 (10%), fee rate estimates will have random
noise added within the range of ±10% of the original estimate. This
randomization makes it difficult for users to precisely optimize their fees
while still providing reasonable estimates.

**Notes:**
- This setting is only relevant when [fee_estimator](#fee_estimation-fee_estimator) is set to `"FuzzedWeightedMedianFeeRate"`. | `0.1` (10%) | -| [fee_rate_window_size](#fee_estimation-fee_rate_window_size) | Specifies the window size for the `WeightedMedianFeeRateEstimator`.
This determines how many historical fee rate data points are considered
when calculating the median fee rate.

**Notes:**
- This setting is primarily relevant when [fee_estimator](#fee_estimation-fee_estimator) is set to `"FuzzedWeightedMedianFeeRate"`. | `5` | -| [log_error](#fee_estimation-log_error) | If `true`, errors encountered during cost or fee estimation will be logged.
This can help diagnose issues with the fee estimation subsystem. | `false` | - - -## [burnchain] - -| Parameter | Description | Default | -|-----------|-------------|----------| -| [block_commit_tx_estimated_size](#burnchain-block_commit_tx_estimated_size) | Estimated size (in virtual bytes) of a block commit transaction on bitcoin.
Used for fee calculation in mining logic by multiplying with the fee rate
[satoshis_per_byte](#burnchain-satoshis_per_byte) .

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`.

**Units:** virtual bytes | `380` | -| [burn_fee_cap](#burnchain-burn_fee_cap) | The maximum amount (in sats) of "burn commitment" to broadcast for the next
block's leader election. Acts as a safety cap to limit the maximum amount
spent on mining. It serves as both the target fee and a fallback if dynamic
fee calculations fail or cannot be performed.

This setting can be hot-reloaded from the config file, allowing adjustment
without restarting.
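
For example, an illustrative TOML sketch using the documented default cap:
[burnchain]
burn_fee_cap = 20_000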

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`.

**Units:** satoshis | `20_000` | -| [chain](#burnchain-chain) | The underlying blockchain used for Proof-of-Transfer.

**Notes:**
- Currently, only `"bitcoin"` is supported. | `"bitcoin"` | -| [chain_id](#burnchain-chain_id) | The network-specific identifier used in P2P communication and database initialization.

**Notes:**
- **Warning:** Do not modify this unless you really know what you're doing.
- This is intended strictly for testing purposes. | - if [mode](#burnchain-mode) is `"mainnet"`: `0x00000001`
- else: `0x80000000` | -| [commit_anchor_block_within](#burnchain-commit_anchor_block_within) | Specifies a mandatory wait period (in milliseconds) after receiving a burnchain tip
before the node attempts to build the anchored block for the new tenure.
This duration effectively schedules the start of the block-building process
relative to the tip's arrival time.

**Notes:**
- This is intended strictly for testing purposes.

**Units:** milliseconds | `5_000` | -| [epochs](#burnchain-epochs) | Custom override for the definitions of Stacks epochs (start/end burnchain
heights, consensus rules). This setting allows testing specific epoch
transitions or custom consensus rules by defining exactly when each epoch
starts on bitcoin.

Epochs define distinct protocol rule sets (consensus rules, execution costs,
capabilities). When configured, the list must include all epochs
sequentially from "1.0" up to the highest desired epoch, without skipping
any intermediate ones. Valid `epoch_name` values currently include:
`"1.0"`, `"2.0"`, `"2.05"`, `"2.1"`, `"2.2"`, `"2.3"`, `"2.4"`, `"2.5"`, `"3.0"`, `"3.1"`.

**Validation Rules:**
- Epochs must be provided in strict chronological order (`1.0`, `2.0`, `2.05`...).
- `start_height` values must be non-decreasing across the list.
- Epoch `"1.0"` must have `start_height = 0`.
- The number of defined epochs cannot exceed the maximum supported by the node software.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes.
- Configured as a list `[[burnchain.epochs]]` in TOML, each with `epoch_name` (string) and `start_height` (integer Bitcoin block height).

**Example:**
[[burnchain.epochs]]
epoch_name = "2.1"
start_height = 150

[[burnchain.epochs]]
epoch_name = "2.2"
start_height = 200
| `None` (uses the standard epoch definitions for the selected [mode](#burnchain-mode) ) | -| [fault_injection_burnchain_block_delay](#burnchain-fault_injection_burnchain_block_delay) | Fault injection setting for testing. Introduces an artificial delay (in
milliseconds) before processing each burnchain block download. Simulates a
slow burnchain connection.

**Notes:**
- This is intended strictly for testing purposes.

**Units:** milliseconds | `0` (no delay) | -| [first_burn_block_hash](#burnchain-first_burn_block_hash) | Overrides the default starting block hash of the burnchain.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes.
- Should be used together with [first_burn_block_height](#burnchain-first_burn_block_height) and [first_burn_block_timestamp](#burnchain-first_burn_block_timestamp) for proper operation. | `None` (uses the burnchain's default starting block hash) | -| [first_burn_block_height](#burnchain-first_burn_block_height) | Overrides the default starting bitcoin block height for the node.
Allows starting synchronization from a specific historical point in test environments.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes.
- Should be used together with [first_burn_block_timestamp](#burnchain-first_burn_block_timestamp) and [first_burn_block_hash](#burnchain-first_burn_block_hash) for proper operation. | `None` (uses the burnchain's default starting height for the mode) | -| [first_burn_block_timestamp](#burnchain-first_burn_block_timestamp) | Overrides the default starting block timestamp of the burnchain.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes.
- Should be used together with [first_burn_block_height](#burnchain-first_burn_block_height) and [first_burn_block_hash](#burnchain-first_burn_block_hash) for proper operation. | `None` (uses the burnchain's default starting timestamp) | -| [leader_key_tx_estimated_size](#burnchain-leader_key_tx_estimated_size) | Estimated size (in virtual bytes) of a leader key registration transaction
on bitcoin. Used for fee calculation in mining logic by multiplying with the
fee rate [satoshis_per_byte](#burnchain-satoshis_per_byte) .

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`.

**Units:** virtual bytes | `290` | -| [local_mining_public_key](#burnchain-local_mining_public_key) | The public key associated with the local mining address for the underlying
Bitcoin regtest node. Provided as a hex string representing an uncompressed
public key.

It is primarily used in modes that rely on a controlled Bitcoin regtest
backend (e.g., "helium", "mocknet", "neon") where the Stacks node itself
needs to instruct the Bitcoin node to generate blocks.

The key is used to derive the Bitcoin address that receives the coinbase
rewards when generating blocks on the regtest network.

**Notes:**
- Mandatory if [mode](#burnchain-mode) is "helium".
- This is intended strictly for testing purposes. | `None` | -| [magic_bytes](#burnchain-magic_bytes) | The network "magic bytes" used to identify packets for the specific bitcoin
network instance (e.g., mainnet, testnet, regtest). Must match the magic
bytes of the connected bitcoin node.

These two-byte identifiers help ensure that nodes only connect to peers on the
same network type. Common values include:
- "X2" for mainnet
- "T2" for testnet (xenon)
- Other values for specific test networks

Configured as a 2-character ASCII string (e.g., "X2" for mainnet). | - if [mode](#burnchain-mode) is `"xenon"`: `"T2"`
- else: `"X2"` | -| [max_rbf](#burnchain-max_rbf) | Maximum fee rate multiplier allowed when using Replace-By-Fee (RBF) for
bitcoin transactions. Expressed as a percentage of the original
[satoshis_per_byte](#burnchain-satoshis_per_byte) rate (e.g., 150 means the fee rate
can be increased up to 1.5x). Used in mining logic for RBF decisions to
cap the replacement fee rate.

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`.

**Units:** percent | `150` | -| [max_unspent_utxos](#burnchain-max_unspent_utxos) | The maximum number of unspent transaction outputs (UTXOs) to request from
the bitcoin node.

This value is passed as the `maximumCount` parameter to the bitcoin node.
It helps manage response size and processing load, particularly relevant
for miners querying for available UTXOs to fund operations like block
commits or leader key registrations.

Setting this limit too high might lead to performance issues or timeouts when
querying nodes with a very large number of UTXOs. Conversely, setting it too
low might prevent the miner from finding enough UTXOs in a single query to
meet the required funding amount for a transaction, even if sufficient funds
exist across more UTXOs not returned by the limited query.

**Notes:**
- This value must be `<= 1024`.
- Only relevant if [[node].miner](#node-miner) is `true`. | `1024` | -| [mode](#burnchain-mode) | The operational mode or network profile for the Stacks node.
This setting determines network parameters (like chain ID, peer version),
default configurations, genesis block definitions, and overall node behavior.

Supported values:
- `"mainnet"`: mainnet
- `"xenon"`: testnet
- `"mocknet"`: regtest
- `"helium"`: regtest
- `"neon"`: regtest
- `"argon"`: regtest
- `"krypton"`: regtest
- `"nakamoto-neon"`: regtest | `"mocknet"` | -| [password](#burnchain-password) | The password for authenticating with the bitcoin node's RPC interface.
Required if the bitcoin node requires RPC authentication.

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`. | `None` | -| [peer_host](#burnchain-peer_host) | The hostname or IP address of the bitcoin node peer.

This field is required for all node configurations as it specifies where to
find the underlying bitcoin node to interact with for PoX operations,
block validation, and mining. | `"0.0.0.0"` | -| [peer_port](#burnchain-peer_port) | The P2P network port of the bitcoin node specified by [peer_host](#burnchain-peer_host) . | `8333` | -| [peer_version](#burnchain-peer_version) | The peer protocol version number used in P2P communication.
This parameter cannot be set via the configuration file.

**Notes:**
- **Warning:** Do not modify this unless you really know what you're doing. | - if [mode](#burnchain-mode) is `"mainnet"`: `402_653_196`
- else: `4_207_599_116` | -| [poll_time_secs](#burnchain-poll_time_secs) | The interval, in seconds, at which the node polls the bitcoin node for new
blocks and state updates.

The default value of 10 seconds is mainly intended for testing purposes.
It's suggested to set this to a higher value for mainnet, e.g., 300 seconds
(5 minutes).
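
For example, an illustrative TOML sketch for a mainnet-style polling interval:
[burnchain]
poll_time_secs = 300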

**Units:** seconds | `10` | -| [pox_2_activation](#burnchain-pox_2_activation) | Sets a custom burnchain height for PoX-2 activation (for testing).

This affects two key transitions:
1. The block height at which PoX v1 lockups are automatically unlocked.
2. The block height from which PoX reward set calculations switch to PoX v2 rules.

**Behavior:**
- This value directly sets the auto unlock height for PoX v1 lockups before
transition to PoX v2. This also defines the burn height at which PoX reward
sets are calculated using PoX v2 rather than v1.
- If custom [epochs](#burnchain-epochs) are provided:
  - It is used for validation: Epoch 2.1's `start_height` must be ≤ this value.
  - However, the height specified in `epochs` for Epoch 2.1 takes precedence.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes. | `None` | -| [pox_prepare_length](#burnchain-pox_prepare_length) | Overrides the length (in bitcoin blocks) of the PoX prepare phase.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes.

**Units:** bitcoin blocks | `None` (uses the standard prepare phase length for the mode) | -| [pox_reward_length](#burnchain-pox_reward_length) | Overrides the length (in bitcoin blocks) of the PoX reward cycle.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes.

**Units:** bitcoin blocks | `None` (uses the standard reward cycle length for the mode) | -| [process_exit_at_block_height](#burnchain-process_exit_at_block_height) | Optional bitcoin block height at which the Stacks node process should
gracefully exit. When bitcoin reaches this height, the node logs a message
and initiates a graceful shutdown.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes. | `None` | -| [rbf_fee_increment](#burnchain-rbf_fee_increment) | The incremental amount (in sats/vByte) to add to the previous transaction's
fee rate for RBF bitcoin transactions.

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`.

**Units:** sats/vByte | `5` | -| [rpc_port](#burnchain-rpc_port) | The RPC port of the bitcoin node specified by [peer_host](#burnchain-peer_host) . | `8332` | -| [rpc_ssl](#burnchain-rpc_ssl) | Flag indicating whether to use SSL/TLS when connecting to the bitcoin node's
RPC interface. | `false` | -| [satoshis_per_byte](#burnchain-satoshis_per_byte) | The default fee rate in sats/vByte to use when estimating fees for miners
to submit bitcoin transactions (like block commits or leader key registrations).
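
For example, an illustrative TOML sketch of the miner fee settings (values shown are the documented defaults):
[burnchain]
satoshis_per_byte = 50
max_rbf = 150
rbf_fee_increment = 5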

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`.

**Units:** sats/vByte | `50` | -| [timeout](#burnchain-timeout) | Timeout duration, in seconds, for RPC calls made to the bitcoin node.
Configures the timeout on the underlying HTTP client.

**Units:** seconds | `60` | -| [username](#burnchain-username) | The username for authenticating with the bitcoin node's RPC interface.
Required if the bitcoin node requires RPC authentication.

**Notes:**
- Only relevant if [[node].miner](#node-miner) is `true`. | `None` | -| [wallet_name](#burnchain-wallet_name) | Specifies the name of the Bitcoin wallet to use within the connected bitcoin
node. Used to interact with a specific named wallet if the bitcoin node
manages multiple wallets.

If the specified wallet doesn't exist, the node will attempt to create it via
the `createwallet` RPC call. This is particularly useful for miners who need
to manage separate wallets.
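
For example, an illustrative TOML sketch using a hypothetical wallet name:
[burnchain]
wallet_name = "stacks-miner"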

**Notes:**
- Primarily relevant for miners interacting with multi-wallet Bitcoin nodes. | `""` (empty string, implying the default wallet or no specific wallet needed) | -| ~~[affirmation_overrides](#burnchain-affirmation_overrides)~~ | Overrides for the burnchain block affirmation map for specific reward cycles.
Allows manually setting the miner affirmation ('p'resent/'n'ot-present/'a'bsent)
map for a given cycle, bypassing the map normally derived from sortition results.

Special defaults are added when [mode](#burnchain-mode) is "xenon", but
config entries take precedence. At startup, these overrides are written to
the `BurnchainDB` (`overrides` table).

**Notes:**
- Primarily used for testing or recovering from network issues.
- Configured as a list `[[burnchain.affirmation_overrides]]` in TOML, each with `reward_cycle` (integer) and `affirmation` (string of 'p'/'n'/'a', length `reward_cycle - 1`).

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode.

**Example:**
[[burnchain.affirmation_overrides]]
reward_cycle = 413
affirmation = "pna..." # Must be 412 chars long
| Empty map | -| ~~[ast_precheck_size_height](#burnchain-ast_precheck_size_height)~~ | Override for the burnchain height activating stricter AST size checks
pre-epoch 3.0 for testing purposes.

Used pre-epoch 3.0 to control activation before it became standard (at burn
height `752000`). Ignored in standard production builds as the underlying
mechanism is disabled unless the `testing` feature is active.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. | `None` | -| ~~[sunset_end](#burnchain-sunset_end)~~ | Overrides the bitcoin height, non-inclusive, at which the PoX sunset period
ends in epochs before 2.1. After this height, Stacking rewards are disabled
completely. This parameter works together with `sunset_start` to define the
full sunset transition period for PoX.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes for epochs before 2.1.

**⚠️ DEPRECATED:** The sunset phase was removed in Epoch 2.1. | `None` (uses the standard sunset end height for the mode) | -| ~~[sunset_start](#burnchain-sunset_start)~~ | Overrides the bitcoin height at which the PoX sunset period begins in epochs
before 2.1. The sunset period represents a planned phase-out of the PoX
mechanism. During this period, stacking rewards gradually decrease,
eventually ceasing entirely. This parameter allows testing the PoX sunset
transition by explicitly setting its start height.

**Notes:**
- Applied only if [mode](#burnchain-mode) is not "mainnet".
- This is intended strictly for testing purposes for epochs before 2.1.

**⚠️ DEPRECATED:** The sunset phase was removed in Epoch 2.1. | `None` (uses the standard sunset start height for the mode) | - - -## [node] - -| Parameter | Description | Default | -|-----------|-------------|----------| -| [always_use_affirmation_maps](#node-always_use_affirmation_maps) | Controls if Stacks Epoch 2.1+ affirmation map logic should be applied even
before Epoch 2.1.
- If `true` (default), the node consistently uses the newer (Epoch 2.1) rules
for PoX anchor block validation and affirmation-based reorg handling, even in
earlier epochs.
- If `false`, the node strictly follows the rules defined for the specific epoch
it is currently processing, only applying 2.1+ logic from Epoch 2.1 onwards.
Differences in this setting between nodes prior to Epoch 2.1 could lead to
consensus forks. | `true` | -| [assume_present_anchor_blocks](#node-assume_present_anchor_blocks) | Controls if the node must strictly wait for any PoX anchor block selected by
the core consensus mechanism.
- If `true`: Halts burnchain processing immediately whenever a selected anchor
block is missing locally (`SelectedAndUnknown` status), regardless of
affirmation status.
- If `false` (primarily for testing): Skips this immediate halt, allowing
processing to proceed to affirmation map checks.
Normal operation requires this to be `true`; setting to `false` will likely
break consensus adherence.

**Notes:**
- This parameter cannot be set via the configuration file; it must be modified programmatically.
- This is intended strictly for testing purposes.
- The halt check runs *before* affirmation checks.
- In Nakamoto (Epoch 3.0+), all prepare phases have anchor blocks. | `true` | -| [bootstrap_node](#node-bootstrap_node) | A list of initial peer nodes used to bootstrap connections into the Stacks P2P
network. Peers are specified in a configuration file as comma-separated
strings in the format `"PUBKEY@IP:PORT"` or `"PUBKEY@HOSTNAME:PORT"`. DNS
hostnames are resolved during configuration loading.

**Example:**
bootstrap_node = "pubkey1@example.com:30444,pubkey2@192.168.1.100:20444"
| `[]` (empty vector) | -| [chain_liveness_poll_time_secs](#node-chain_liveness_poll_time_secs) | The polling interval, in seconds, for the background thread that monitors
chain liveness. This thread periodically wakes up the main coordinator to
check for chain progress or other conditions requiring action.

**Units:** seconds | `300` (5 minutes) | -| [data_url](#node-data_url) | The publicly accessible URL that this node advertises to peers during the P2P
handshake as its HTTP RPC endpoint. Other nodes or services might use this URL
to query the node's API.

**Notes:**
- Example: For rpc_bind="0.0.0.0:20443", data_url becomes "http://0.0.0.0:20443". | Derived by adding "http://" prefix to [rpc_bind](#node-rpc_bind) value. | -| [deny_nodes](#node-deny_nodes) | A list of peer addresses that this node should explicitly deny connections from.
Peers are specified as comma-separated strings in the format "IP:PORT" or
"HOSTNAME:PORT" in the configuration file. DNS hostnames are resolved during
configuration loading.

**Example:**
deny_nodes = "192.168.1.100:20444,badhost.example.com:20444"
| `[]` (empty vector) | -| [fault_injection_block_push_fail_probability](#node-fault_injection_block_push_fail_probability) | Fault injection setting for testing purposes. If set to `Some(p)`, where `p` is
between 0 and 100, the node will have a `p` percent chance of intentionally
*not* pushing a newly processed block to its peers.

**Notes:**
- Values: 0-100 (percentage). | `None` (no fault injection) | -| [fault_injection_hide_blocks](#node-fault_injection_hide_blocks) | Fault injection setting for testing purposes. If `true`, the node's chainstate
database access layer may intentionally fail to retrieve block data, even if it
exists, simulating block hiding or data unavailability.

**Notes:**
- This parameter cannot be set via the configuration file; it must be modified programmatically. | `false` | -| [local_peer_seed](#node-local_peer_seed) | The private key seed, provided as a hex string in the config file, used
specifically for the node's identity and message signing within the P2P
networking layer. This is separate from the main [seed](#node-seed) . | Randomly generated 32 bytes | -| [marf_cache_strategy](#node-marf_cache_strategy) | The strategy to use for MARF trie node caching in memory.
Controls the trade-off between memory usage and performance for state access.

Possible values:
- `"noop"`: No caching (least memory).
- `"everything"`: Cache all nodes (most memory, potentially fastest).
- `"node256"`: Cache only larger `TrieNode256` nodes.

If the value is `None` or an unrecognized string, it defaults to `"noop"`. | `None` (effectively `"noop"`) | -| [marf_defer_hashing](#node-marf_defer_hashing) | Controls the timing of hash calculations for MARF trie nodes.
- If `true`, hashes are calculated only when the MARF is flushed to disk
(deferred hashing).
- If `false`, hashes are calculated immediately as leaf nodes are inserted or
updated (immediate hashing).
Deferred hashing might improve write performance. | `true` | -| [miner](#node-miner) | Flag indicating whether this node should activate its mining logic and attempt to
produce Stacks blocks. Setting this to `true` typically requires providing
necessary private keys (either [seed](#node-seed) or [[miner].mining_key](#miner-mining_key) ).
It also influences default behavior for settings like
[require_affirmed_anchor_blocks](#node-require_affirmed_anchor_blocks) . | `false` | -| [mock_mining](#node-mock_mining) | Enables a simulated mining mode, primarily for local testing and development.
When `true`, the node may generate blocks locally without participating in the
real bitcoin consensus or P2P block production process.

**Notes:**
- Only relevant if [miner](#node-miner) is `true`. | `false` | -| [name](#node-name) | Human-readable name for the node. Primarily used for identification in testing
environments (e.g., deriving log file names, temporary directory names). | `"helium-node"` | -| [next_initiative_delay](#node-next_initiative_delay) | Controls how frequently, in milliseconds, the Nakamoto miner's relay thread
polls for work or takes periodic actions when idle (e.g., checking for new
burnchain blocks). A default value of 10 seconds is reasonable on mainnet
(where bitcoin blocks are ~10 minutes). A lower value might be useful in
other environments with faster burn blocks.

**Units:** milliseconds | `10_000` (10 seconds) | -| [p2p_address](#node-p2p_address) | The publicly accessible IPv4 address and port that this node advertises to peers
for P2P connections. This might differ from [p2p_bind](#node-p2p_bind) if the
node is behind NAT or a proxy.

**Notes:**
- Example: For rpc_bind="0.0.0.0:20443", p2p_address becomes "0.0.0.0:20443".
- The default value derivation might be unexpected, potentially using the [rpc_bind](#node-rpc_bind) address; explicit configuration is recommended if needed. | Derived directly from [rpc_bind](#node-rpc_bind) value. | -| [p2p_bind](#node-p2p_bind) | The IPv4 address and port (e.g., "0.0.0.0:20444") on which the node's P2P
networking service should bind and listen for incoming connections from other peers. | `"0.0.0.0:20444"` | -| [prometheus_bind](#node-prometheus_bind) | Optional network address and port (e.g., "127.0.0.1:9153") for binding the
Prometheus metrics server. If set, the node will start an HTTP server on this
address to expose internal metrics for scraping by a Prometheus instance. | `None` (Prometheus server disabled) | -| [require_affirmed_anchor_blocks](#node-require_affirmed_anchor_blocks) | Controls if the node must wait for locally missing but burnchain-affirmed PoX
anchor blocks. If an anchor block is confirmed by the affirmation map but not
yet processed by this node:
- If `true`: Burnchain processing halts until the affirmed block is acquired.
Ensures strict adherence to the affirmed canonical chain, typical for
followers.
- If `false`: Burnchain processing continues without waiting. Allows miners to
operate optimistically but may necessitate unwinding later if the affirmed
block alters the chain state. | Derived from the inverse of [miner](#node-miner) value. | -| [rpc_bind](#node-rpc_bind) | The IPv4 address and port (e.g., "0.0.0.0:20443") on which the node's HTTP RPC
server should bind and listen for incoming API requests. | `"0.0.0.0:20443"` | -| [seed](#node-seed) | The node's Bitcoin wallet private key, provided as a hex string in the config file.
Used to initialize the node's keychain for signing operations.
If [[miner].mining_key](#miner-mining_key) is not set, this seed may also be used for
mining-related signing.

**Notes:**
- Required if [miner](#node-miner) is `true` and [[miner].mining_key](#miner-mining_key) is absent. | Randomly generated 32 bytes | -| [stacker](#node-stacker) | Setting this to `true` enables the node to replicate the miner and signer
Stacker DBs required for signing, and is required if the node is connected to a
signer. | `false` | -| [stacker_dbs](#node-stacker_dbs) | A list of specific StackerDB contracts (identified by their qualified contract
identifiers, e.g., "SP000000000000000000002Q6VF78.pox-3") that this node
should actively replicate.

**Notes:**
- Values are strings representing qualified contract identifiers.

**Example:**
stacker_dbs = [
  "SP000000000000000000002Q6VF78.pox-3",
  "SP2C2YFP12AJZB4M4KUPSTMZQR0SNHNPH204SCQJM.stx-oracle-v1"
]
| - if [miner](#node-miner) is `true` or [stacker](#node-stacker) is `true`:
relevant system contracts (e.g., `.miners`, `.signers-*`) are
automatically added in addition to any contracts specified in the
configuration file.
- else: defaults to an empty list `[]`. | -| [txindex](#node-txindex) | Enables the transaction index, which maps transaction IDs to the blocks
containing them. Setting this to `true` allows the use of RPC endpoints
that look up transactions by ID (e.g., `/extended/v1/tx/{txid}`), but
requires substantial additional disk space for the index database. | `false` | -| [use_test_genesis_chainstate](#node-use_test_genesis_chainstate) | If set to `true`, the node initializes its state using an alternative test
genesis block definition, loading different initial balances, names, and
lockups than the standard network genesis.

**Notes:**
- This is intended strictly for testing purposes and is disallowed on mainnet. | `None` (uses standard network genesis) | -| [wait_time_for_blocks](#node-wait_time_for_blocks) | When operating as a miner, this specifies the maximum time (in milliseconds)
the node waits after detecting a new burnchain block to synchronize corresponding
Stacks block data from the network before resuming mining attempts.
If synchronization doesn't complete within this duration, mining resumes anyway
to prevent stalling. This setting is loaded by all nodes but primarily affects
miner behavior within the relayer thread.

**Units:** milliseconds | `30_000` (30 seconds) | -| [working_dir](#node-working_dir) | The file system absolute path to the node's working directory.
All persistent data, including chainstate, burnchain databases, and potentially
other stores, will be located within this directory. This path can be
overridden by setting the `STACKS_WORKING_DIR` environment variable.
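
For example, an illustrative TOML sketch using a hypothetical persistent path:
[node]
working_dir = "/var/lib/stacks-node"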

**Notes:**
- For persistent mainnet or testnet nodes, this path must be explicitly configured to a non-temporary location. | `/tmp/stacks-node-{current_timestamp}` | -| ~~[max_microblocks](#node-max_microblocks)~~ | The maximum number of microblocks allowed per Stacks block.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 2.5+. | `65535` (u16::MAX) | -| ~~[microblock_frequency](#node-microblock_frequency)~~ | How often to attempt producing microblocks, in milliseconds.

**Notes:**
- Only applies when [mine_microblocks](#node-mine_microblocks) is true and before Epoch 2.5.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 2.5+.

**Units:** milliseconds | `30_000` (30 seconds) | -| ~~[mine_microblocks](#node-mine_microblocks)~~ | Enable microblock mining.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 2.5+. | `true` | -| ~~[mock_mining_output_dir](#node-mock_mining_output_dir)~~ | If [mock_mining](#node-mock_mining) is enabled, this specifies an optional directory
path where the generated mock Stacks blocks will be saved (pre-Nakamoto).
The path is canonicalized on load.

**⚠️ DEPRECATED:** This setting was only used in the neon node and is ignored in Epoch 3.0+. | `None` | -| ~~[pox_sync_sample_secs](#node-pox_sync_sample_secs)~~ | Sampling interval in seconds for the PoX synchronization watchdog thread
(pre-Nakamoto). Determines how often the watchdog checked PoX state
consistency in the Neon run loop.

**⚠️ DEPRECATED:** Unused after the Nakamoto upgrade. This setting is ignored in Epoch 3.0+.

**Units:** seconds | `30` | -| ~~[wait_time_for_microblocks](#node-wait_time_for_microblocks)~~ | Cooldown period after a microblock is produced, in milliseconds.

**Notes:**
- Only applies when [mine_microblocks](#node-mine_microblocks) is true and before Epoch 2.5.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 2.5+.

**Units:** milliseconds | `30_000` (30 seconds) | - - -## [miner] - -| Parameter | Description | Default | -|-----------|-------------|----------| -| [activated_vrf_key_path](#miner-activated_vrf_key_path) | Path to a file for storing and loading the currently active, registered VRF leader key.

Loading: On startup or when needing to register a key, if this path is set,
the relayer first attempts to load a serialized `RegisteredKey` from this
file. If successful, it uses the loaded key and skips the on-chain VRF key
registration transaction, saving time and fees.
Saving: After a new VRF key registration transaction is confirmed and
activated on the burnchain, if this path is set, the node saves the details
of the newly activated `RegisteredKey` to this file. This allows the
miner to persist its active VRF key across restarts.
If the file doesn't exist during load, or the path is `None`, the node
proceeds with a new registration. | `None` | -| [block_commit_delay](#miner-block_commit_delay) | Time in milliseconds to wait for a Nakamoto block after seeing a burnchain
block before submitting a block commit.

After observing a new burnchain block, the miner's relayer waits for this
duration before submitting its next block commit transaction to Bitcoin.
This delay provides an opportunity for a new Nakamoto block (produced by the
winner of the latest sortition) to arrive. Waiting helps avoid situations
where the relayer immediately submits a commit that needs to be replaced
via RBF if a new Stacks block appears shortly after. This delay is skipped
if the new burnchain blocks leading to the tip contain no sortitions.

**Units:** milliseconds | `40_000` | -| [block_rejection_timeout_steps](#miner-block_rejection_timeout_steps) | Defines adaptive timeouts for waiting for signer responses, based on the
accumulated weight of rejections.

Configured as a map where keys represent rejection weight thresholds (as
percentages), and values are the timeout durations (in seconds) to apply when
the accumulated rejection weight reaches or exceeds that key but is still below the next key.

When a miner proposes a block, it waits for signer responses (approvals or
rejections). The SignerCoordinator tracks the total weight of received
rejections. It uses this map to determine the current timeout duration. It
selects the timeout value associated with the largest key in the map that is
less than or equal to the current accumulated rejection weight. If this
timeout duration expires before a decision is reached, the coordinator
signals a timeout. This prompts the miner to potentially retry proposing the
block. As more rejections come in, the applicable timeout step might change
(likely decrease), allowing the miner to abandon unviable proposals faster.

A key for 0 (zero rejections) must be defined, representing the initial
timeout when no rejections have been received.

**Notes:**
- Keys are rejection weight percentages (0-100).
- Values are timeout durations.

**Example:**
# Keys are rejection weight percentages (as strings), values are timeouts in seconds.
[miner.block_rejection_timeout_steps]
"0" = 180
"10" = 90
"20" = 45
"30" = 0
| `{ 0: 180, 10: 90, 20: 45, 30: 0 }` (times in seconds) | -| [block_reward_recipient](#miner-block_reward_recipient) | Optional recipient for the coinbase block reward, overriding the default miner address.

By default (`None`), the reward is sent to the miner's primary address
([[node].seed](#node-seed) ). If set to some principal address *and* the current
Stacks epoch is > 2.1, the reward will be directed to the specified
address instead. | `None` | -| [candidate_retry_cache_size](#miner-candidate_retry_cache_size) | Max size (in *number* of items) of transaction candidates to hold in the in-memory
retry cache.

This cache stores transactions encountered during a `GlobalFeeRate` mempool
walk whose nonces are currently too high for immediate processing. These
candidates are prioritized for reconsideration later within the *same* walk,
potentially becoming valid if other processed transactions update the
expected nonces.

A larger cache retains more potentially valid future candidates but uses more
memory. This setting is primarily relevant for the `GlobalFeeRate` strategy.

**Notes:**
- Each element `crate::core::mempool::MemPoolTxInfoPartial` is currently 112 bytes.

**Units:** items | `1048576` | -| [empty_mempool_sleep_time](#miner-empty_mempool_sleep_time) | The amount of time in milliseconds that the miner should sleep in between
attempts to mine a block when the mempool is empty.

This prevents the miner from busy-looping when there are no pending
transactions, conserving CPU resources. During this sleep, the miner still
checks burnchain tip changes.

**Units:** milliseconds | `2_500` | -| [filter_origins](#miner-filter_origins) | A comma-separated list of Stacks addresses to whitelist so that only
transactions from these addresses are considered during the mempool walk
for block building. If this list is non-empty, any transaction whose origin
address is *not* in this set will be skipped.

This allows miners to prioritize transactions originating from specific accounts that are
important to them.
Configured as a comma-separated string of standard Stacks addresses
(e.g., "ST123...,ST456...") in the configuration file.

**Example:**
filter_origins = "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2,ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF"
| Empty set (all origins are considered). | -| [first_rejection_pause_ms](#miner-first_rejection_pause_ms) | Time in milliseconds to pause after receiving the first threshold rejection,
before proposing a new block.

When a miner's block proposal fails to gather enough signatures from the
signers for the first time at a given height, the miner will pause for this
duration before attempting to mine and propose again.

**Units:** milliseconds | `5_000` | -| [max_execution_time_secs](#miner-max_execution_time_secs) | Defines the maximum execution time (in seconds) allowed for a single contract call or smart contract deployment transaction.

When processing a transaction (contract call or smart contract deployment),
if this option is set, and the execution time exceeds this limit, the
transaction processing fails with an `ExecutionTimeout` error, and the
transaction is skipped. This prevents potentially long-running or
infinite-loop transactions from blocking block production.
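
For example, an illustrative TOML sketch enforcing a 30-second cap:
[miner]
max_execution_time_secs = 30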

**Units:** seconds | `None` (no execution time limit) | -| [mempool_walk_strategy](#miner-mempool_walk_strategy) | Strategy for selecting the next transaction candidate from the mempool.
Controls prioritization between maximizing immediate fee capture vs. ensuring
transaction nonce order for account progression and processing efficiency.

See `MemPoolWalkStrategy` for variant details.

Possible values (use variant names for configuration):
- `"GlobalFeeRate"`: Selects the transaction with the highest fee rate globally.
- `"NextNonceWithHighestFeeRate"`: Selects the highest-fee transaction among those
matching the next expected nonce for sender/sponsor accounts. | `"NextNonceWithHighestFeeRate"` | -| [min_time_between_blocks_ms](#miner-min_time_between_blocks_ms) | The minimum time to wait between mining blocks in milliseconds. The value
must be greater than or equal to 1000 ms because if a block is mined
within the same second as its parent, it will be rejected by the signers.

This check ensures compliance with signer rules that prevent blocks with
identical timestamps (at second resolution) to their parents. If a lower
value is configured, 1000 ms is used instead.

**Units:** milliseconds | `1_000` | -| [mining_key](#miner-mining_key) | The private key (Secp256k1) used for signing blocks, provided as a hex string.

This key must be present at runtime for mining operations to succeed. | - if the `[miner]` section *is present* in the config file: [[node].seed](#node-seed)
- else: `None` | -| [nakamoto_attempt_time_ms](#miner-nakamoto_attempt_time_ms) | Maximum time (in milliseconds) the miner spends selecting transactions from
the mempool when assembling a Nakamoto block. Once this duration is exceeded,
the miner stops adding transactions and finalizes the block with those
already selected.

**Units:** milliseconds | `5_000` (5 seconds) | -| [nonce_cache_size](#miner-nonce_cache_size) | Max size (in bytes) of the in-memory cache for storing expected account nonces.

This cache accelerates mempool processing (e.g., during block building) by
storing the anticipated next nonce for accounts, reducing expensive lookups
into the node's state (MARF trie). A larger cache can improve performance
for workloads involving many unique accounts but increases memory consumption.

**Notes:**
- Must be configured to a value greater than 0.

**Units:** bytes | `1048576` (1 MiB) | -| [probability_pick_no_estimate_tx](#miner-probability_pick_no_estimate_tx) | Probability (percentage, 0-100) of prioritizing a transaction without a
known fee rate during candidate selection.

Only effective when `mempool_walk_strategy` is `GlobalFeeRate`. Helps ensure
transactions lacking fee estimates are periodically considered alongside
high-fee ones, preventing potential starvation. A value of 0 means never
prioritize them first, 100 means always prioritize them first (if available).

**Notes:**
- Values: 0-100.

**Units:** percent | `25` (25% chance) | -| [replay_transactions](#miner-replay_transactions) | The miner will attempt to replay transactions that a threshold number of signers are expecting in the next block.
TODO: remove this option when it's no longer a testing feature and it becomes the default behaviour. | **Required** | -| [segwit](#miner-segwit) | If possible, mine with a p2wpkh address. | `false` | -| [subsequent_rejection_pause_ms](#miner-subsequent_rejection_pause_ms) | Time in milliseconds to pause after receiving subsequent threshold rejections,
before proposing a new block.

If a miner's block proposal is rejected multiple times at the same height
(after the first rejection), this potentially longer pause duration is used
before retrying. This gives more significant time for network state changes
or signer coordination.

**Units:** milliseconds | `10_000` | -| [tenure_cost_limit_per_block_percentage](#miner-tenure_cost_limit_per_block_percentage) | The percentage of the remaining tenure cost limit to consume each block.

This setting limits the execution cost (Clarity cost) a single Nakamoto block
can incur, expressed as a percentage of the *remaining* cost budget for the
current mining tenure. For example, if set to 25, a block can use at most
25% of the tenure's currently available cost limit. This allows miners to
spread the tenure's total execution budget across multiple blocks rather than
potentially consuming it all in the first block.

**Notes:**
- Values: 1-100.
- Setting to 100 effectively disables this per-block limit, allowing a block to use the entire remaining tenure budget.

**Units:** percent | `25` | -| [tenure_extend_cost_threshold](#miner-tenure_extend_cost_threshold) | Percentage of block budget that must be used before attempting a time-based tenure extend.

This sets a minimum threshold for the accumulated execution cost within a
tenure before a time-based tenure extension ([tenure_timeout](#miner-tenure_timeout) )
can be initiated. The miner checks if the proportion of the total tenure
budget consumed so far exceeds this percentage. If the cost usage is below
this threshold, a time-based extension will not be attempted, even if the
[tenure_timeout](#miner-tenure_timeout) duration has elapsed. This prevents miners
from extending tenures very early if they have produced only low-cost blocks.

**Notes:**
- Values: 0-100.

**Units:** percent | `50` | -| [tenure_extend_poll_timeout](#miner-tenure_extend_poll_timeout) | Duration to wait in-between polling the sortition DB to see if we need to
extend the ongoing tenure (e.g. because the current sortition is empty or invalid).

After the relayer determines that a tenure extension might be needed but
cannot proceed immediately (e.g., because a miner thread is already active
for the current burn view), it will wait for this duration before
re-checking the conditions for tenure extension.

**Units:** seconds | `1` | -| [tenure_extend_wait_timeout](#miner-tenure_extend_wait_timeout) | Duration to wait before trying to continue a tenure because the next miner
did not produce blocks.

If the node was the winner of the previous sortition but not the most recent
one, the relayer waits for this duration before attempting to extend its own
tenure. This gives the new winner of the most recent sortition a grace period
to produce their first block. Also used in scenarios with empty sortitions
to give the winner of the *last valid* sortition time to produce a block
before the current miner attempts an extension.

**Units:** milliseconds | `120_000` | -| [tenure_timeout](#miner-tenure_timeout) | Duration to wait before attempting to issue a time-based tenure extend.

A miner can proactively attempt to extend its tenure if a significant amount
of time has passed since the last tenure change, even without an explicit
trigger like an empty sortition. If the time elapsed since the last tenure
change exceeds this value, and the signer coordinator indicates an extension
is timely, and the cost usage threshold ([tenure_extend_cost_threshold](#miner-tenure_extend_cost_threshold) )
is met, the miner will include a tenure extension transaction in its next block.
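
For example, an illustrative TOML sketch pairing this timeout with its cost threshold (values shown are the documented defaults):
[miner]
tenure_timeout = 180
tenure_extend_cost_threshold = 50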

**Units:** seconds | `180` | -| [txs_to_consider](#miner-txs_to_consider) | Specifies which types of transactions the miner should consider including in a
block during the mempool walk process. Transactions of types not included in
this set will be skipped.

This allows miners to exclude specific transaction categories.
Configured as a comma-separated string of transaction type names in the configuration file.

Accepted values correspond to variants of `MemPoolWalkTxTypes`:
- `"TokenTransfer"`
- `"SmartContract"`
- `"ContractCall"`

**Example:**
txs_to_consider = "TokenTransfer,ContractCall"
| All transaction types are considered (equivalent to [`MemPoolWalkTxTypes::all()`]). | -| [wait_for_block_download](#miner-wait_for_block_download) | Wait for a downloader pass before mining.
This can only be disabled in testing; it can't be changed in the config file. | `true` | -| ~~[fast_rampup](#miner-fast_rampup)~~ | Controls how the miner estimates its win probability when checking for underperformance.

This estimation is used in conjunction with [target_win_probability](#miner-target_win_probability) and
[underperform_stop_threshold](#miner-underperform_stop_threshold) to decide whether to pause
mining due to low predicted success rate.

- If `true`: The win probability estimation looks at projected spend
distributions ~6 blocks into the future. This might help the miner adjust
its spending more quickly based on anticipated competition changes.
- If `false`: The win probability estimation uses the currently observed
spend distribution for the next block.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode and by the
`get-spend-amount` cli subcommand. | `false` | -| ~~[first_attempt_time_ms](#miner-first_attempt_time_ms)~~ | Time to wait (in milliseconds) before the first attempt to mine a block.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode.

**Units:** milliseconds | `10` | -| ~~[max_reorg_depth](#miner-max_reorg_depth)~~ | Defines the maximum depth (in Stacks blocks) the miner considers when
evaluating potential chain tips when selecting the best tip to mine the next
block on.

The miner analyzes candidate tips within this depth from the highest known
tip. It selects the "nicest" tip, often defined as the one that minimizes
chain reorganizations or orphans within this lookback window. A lower value
restricts the analysis to shallower forks, while a higher value considers
deeper potential reorganizations.

This setting influences which fork the miner chooses to build upon if multiple valid tips exist.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode and the
`pick-best-tip` cli subcommand. | `3` | -| ~~[microblock_attempt_time_ms](#miner-microblock_attempt_time_ms)~~ | Time to wait (in milliseconds) to mine a microblock.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode.

**Units:** milliseconds | `30_000` (30 seconds) | -| ~~[min_tx_count](#miner-min_tx_count)~~ | Minimum number of transactions that must be in a block if we're going to
replace a pending block-commit with a new block-commit.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode. | `0` | -| ~~[only_increase_tx_count](#miner-only_increase_tx_count)~~ | If true, requires subsequent mining attempts for the same block height to have
a transaction count >= the previous best attempt.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode. | `false` | -| ~~[pre_nakamoto_mock_signing](#miner-pre_nakamoto_mock_signing)~~ | Enables a mock signing process for testing purposes, specifically designed
for use during Epoch 2.5 before the activation of Nakamoto consensus.

When set to `true` and [mining_key](#miner-mining_key) is provided, the miner
will interact with the `.miners` and `.signers` contracts via the stackerdb
to send and receive mock proposals and signatures, simulating aspects of the
Nakamoto leader election and block signing flow.

**Notes:**
- This is intended strictly for testing Epoch 2.5 conditions.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. | `false` (Should only default true if [mining_key](#miner-mining_key) is set). | -| ~~[subsequent_attempt_time_ms](#miner-subsequent_attempt_time_ms)~~ | Time to wait (in milliseconds) for subsequent attempts to mine a block,
after the first attempt fails.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode.

**Units:** milliseconds | `120_000` (2 minutes) | -| ~~[target_win_probability](#miner-target_win_probability)~~ | The minimum win probability this miner aims to achieve in block sortitions.

This target is used to detect prolonged periods of underperformance. If the
miner's calculated win probability consistently falls below this value for a
duration specified by [underperform_stop_threshold](#miner-underperform_stop_threshold) (after
an initial startup phase), the miner may cease spending in subsequent
sortitions (returning a burn fee cap of 0) to conserve resources.

Setting this value close to 0.0 effectively disables the underperformance check.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode. | `0.0` | -| ~~[unconfirmed_commits_helper](#miner-unconfirmed_commits_helper)~~ | Optional path to an external helper script for fetching unconfirmed
block-commits. Used to inform the miner's dynamic burn fee bidding strategy
with off-chain data.

If a path is provided, the target script must:
- Be executable by the user running the Stacks node process.
- Accept a list of active miner burnchain addresses as command-line arguments.
- On successful execution, print a JSON array representing `Vec<UnconfirmedBlockCommit>`
(see `stacks::config::chain_data::UnconfirmedBlockCommit` struct) to stdout.
- Exit with code 0 on success.

Look at `test_get_unconfirmed_commits` in `stackslib/src/config/chain_data.rs`
for an example script.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode
and by the `get-spend-amount` cli subcommand. | `None` (feature disabled). | -| ~~[underperform_stop_threshold](#miner-underperform_stop_threshold)~~ | The maximum number of consecutive Bitcoin blocks the miner will tolerate
underperforming (i.e., having a calculated win probability below
[target_win_probability](#miner-target_win_probability) ) before temporarily pausing mining efforts.

This check is only active after an initial startup phase (6 blocks past the
mining start height). If the miner underperforms for this number of
consecutive blocks, the `BlockMinerThread::get_mining_spend_amount` function
will return 0, effectively preventing the miner from submitting a block commit
for the current sortition to conserve funds.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode. | `None` (underperformance check is disabled). | -| ~~[unprocessed_block_deadline_secs](#miner-unprocessed_block_deadline_secs)~~ | Amount of time (in seconds) to wait for unprocessed blocks before mining a new block.

**⚠️ DEPRECATED:** This setting is ignored in Epoch 3.0+. Only used in the neon chain mode.

**Units:** seconds | `30` | -| ~~[wait_on_interim_blocks](#miner-wait_on_interim_blocks)~~ | Amount of time to wait between mining interim blocks while mining in Nakamoto.

**⚠️ DEPRECATED:** Use `min_time_between_blocks_ms` instead. | `None` | - - - From 0366c261394136bd41b154168c248694ec6ff18a Mon Sep 17 00:00:00 2001 From: Francesco Leacche Date: Tue, 17 Jun 2025 12:21:49 +0100 Subject: [PATCH 19/20] add prerequisites for running locally --- contrib/tools/config-docs-generator/README.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/contrib/tools/config-docs-generator/README.md b/contrib/tools/config-docs-generator/README.md index 3d2875fceb..1be588994a 100644 --- a/contrib/tools/config-docs-generator/README.md +++ b/contrib/tools/config-docs-generator/README.md @@ -27,6 +27,13 @@ This approach: If you prefer to run without Docker: +### Prerequisites + +- Rust nightly toolchain (install with `rustup toolchain install nightly`) +- jq (install with `apt-get install jq`) + +### Steps + ```bash # Install nightly toolchain if needed rustup toolchain install nightly From 826c7cc2a8c37963176d91dc9419b016e175e1af Mon Sep 17 00:00:00 2001 From: Francesco Leacche Date: Tue, 17 Jun 2025 12:22:06 +0100 Subject: [PATCH 20/20] generate docs in target folder --- contrib/tools/config-docs-generator/Dockerfile | 1 + contrib/tools/config-docs-generator/generate-config-docs.sh | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/contrib/tools/config-docs-generator/Dockerfile b/contrib/tools/config-docs-generator/Dockerfile index d3e8f15f47..1d03d68f1f 100644 --- a/contrib/tools/config-docs-generator/Dockerfile +++ b/contrib/tools/config-docs-generator/Dockerfile @@ -29,6 +29,7 @@ ENV PROJECT_ROOT=/project_root \ TEMP_DIR=/tmp/stacks-config-docs/doc-generation \ EXTRACT_DOCS_BIN=/build/target/release/extract-docs \ GENERATE_MARKDOWN_BIN=/build/target/release/generate-markdown \ + OUTPUT_DIR=/project_root/target/generated-docs \ SKIP_BUILD=true # Create the Docker-specific temp directory diff --git a/contrib/tools/config-docs-generator/generate-config-docs.sh b/contrib/tools/config-docs-generator/generate-config-docs.sh index c9d5d0e231..b9c0924512 100755 --- a/contrib/tools/config-docs-generator/generate-config-docs.sh +++ b/contrib/tools/config-docs-generator/generate-config-docs.sh @@ -11,8 +11,8 @@ NC='\033[0m' # No Color # Configuration - Allow environment variable overrides SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" PROJECT_ROOT="${PROJECT_ROOT:-$(cd "$SCRIPT_DIR/../../../" && pwd)}" -OUTPUT_DIR="$PROJECT_ROOT/docs/generated" CARGO_TARGET_DIR="${CARGO_TARGET_DIR:-$PROJECT_ROOT/target}" +OUTPUT_DIR="${OUTPUT_DIR:-$CARGO_TARGET_DIR/generated-docs}" TEMP_DIR="${TEMP_DIR:-$CARGO_TARGET_DIR/doc-generation}" # Binary paths - allow override via environment