diff --git a/.gitignore b/.gitignore index 5069c47120..bb95069e7f 100644 --- a/.gitignore +++ b/.gitignore @@ -61,6 +61,7 @@ testnet/index.html /testnet/helium/target/ /contrib/tools/puppet-chain/target/ /contrib/core-contract-tests/.cache/ +/contrib/tools/config-docs-generator/target/ # These are backup files generated by rustfmt **/*.rs.bk diff --git a/Cargo.lock b/Cargo.lock index f1d755c53f..a762a48fff 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -206,6 +206,22 @@ dependencies = [ "serde_json", ] +[[package]] +name = "assert_cmd" +version = "2.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bd389a4b2970a01282ee455294913c0a43724daedcd1a24c3eb0ec1c1320b66" +dependencies = [ + "anstyle", + "bstr", + "doc-comment", + "libc", + "predicates", + "predicates-core", + "predicates-tree", + "wait-timeout", +] + [[package]] name = "async-attributes" version = "1.1.2" @@ -528,6 +544,17 @@ dependencies = [ "tracing", ] +[[package]] +name = "bstr" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "234113d19d0d7d613b40e86fb654acf958910802bcceab913a4f9e7cda03b1a4" +dependencies = [ + "memchr", + "regex-automata 0.4.5", + "serde", +] + [[package]] name = "bumpalo" version = "3.14.0" @@ -685,6 +712,20 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "config-docs-generator" +version = "0.1.0" +dependencies = [ + "anyhow", + "assert_cmd", + "clap", + "once_cell", + "regex", + "serde", + "serde_json", + "tempfile", +] + [[package]] name = "const-oid" version = "0.9.6" @@ -858,6 +899,12 @@ dependencies = [ "powerfmt", ] +[[package]] +name = "difflib" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8" + [[package]] name = "digest" version = "0.9.0" @@ -904,6 +951,12 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" +[[package]] +name = "doc-comment" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" + [[package]] name = "ed25519" version = "2.2.3" @@ -2305,6 +2358,33 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +[[package]] +name = "predicates" +version = "3.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5d19ee57562043d37e82899fade9a22ebab7be9cef5026b07fda9cdd4293573" +dependencies = [ + "anstyle", + "difflib", + "predicates-core", +] + +[[package]] +name = "predicates-core" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "727e462b119fe9c93fd0eb1429a5f7647394014cf3c04ab2c0350eeb09095ffa" + +[[package]] +name = "predicates-tree" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72dd2d6d381dfb73a193c7fca536518d7caee39fc8503f74e7dc0be0531b425c" +dependencies = [ + "predicates-core", + "termtree", +] + [[package]] name = "proc-macro-error" version = "1.0.4" @@ -3482,6 +3562,12 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "termtree" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" + [[package]] name = "thiserror" version = "1.0.65" diff --git a/Cargo.toml b/Cargo.toml index 3b9486b61d..2ea2ba53ee 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,7 +10,8 @@ members = [ "contrib/tools/relay-server", "libsigner", "stacks-signer", - "testnet/stacks-node"] + "testnet/stacks-node", + "contrib/tools/config-docs-generator"] # Dependencies we want to keep the same between workspace members [workspace.dependencies] diff --git a/contrib/tools/config-docs-generator/Cargo.toml b/contrib/tools/config-docs-generator/Cargo.toml new file mode 100644 index 0000000000..ccf1cdbb80 --- /dev/null +++ b/contrib/tools/config-docs-generator/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "config-docs-generator" +version = "0.1.0" +edition = "2024" + +[[bin]] +name = "extract-docs" +path = "src/extract_docs.rs" + +[[bin]] +name = "generate-markdown" +path = "src/generate_markdown.rs" + +# Add integration test configuration +[[test]] +name = "integration" +path = "tests/integration.rs" + +[dependencies] +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" +clap = { version = "4.0", features = ["derive"] } +regex = "1.0" +anyhow = "1.0" +once_cell = "1.18" + +# Add test dependencies +[dev-dependencies] +tempfile = "3.0" +assert_cmd = "2.0" diff --git a/contrib/tools/config-docs-generator/Dockerfile b/contrib/tools/config-docs-generator/Dockerfile new file mode 100644 index 0000000000..1d03d68f1f --- /dev/null +++ b/contrib/tools/config-docs-generator/Dockerfile @@ -0,0 +1,38 @@ +# Use a specific nightly toolchain for reproducible builds +FROM rustlang/rust@sha256:04690ffa09cddd358b349272173155319f384e57816614eea0840ec7f9422862 + +RUN apt-get update -y \ + && apt-get install -y --no-install-recommends \ + jq \ + && rm -rf /var/lib/apt/lists/* + +# Set the working directory for building +WORKDIR /build + +# Copy the entire project root to preserve structure +# Copy from three levels up (project root) to maintain the directory structure +COPY ../../../ /build + +RUN useradd -ms /bin/bash docs-builder +RUN chown docs-builder -R /build +USER docs-builder + +# Pre-build the config-docs-generator binaries during image build +RUN cargo build --package config-docs-generator --release + +# Set the working directory where the project will be mounted at runtime +WORKDIR /project_root + +# Set environment variables for generate-config-docs.sh +ENV PROJECT_ROOT=/project_root \ + CARGO_TARGET_DIR=/tmp/stacks-config-docs/target \ + TEMP_DIR=/tmp/stacks-config-docs/doc-generation \ + EXTRACT_DOCS_BIN=/build/target/release/extract-docs \ + GENERATE_MARKDOWN_BIN=/build/target/release/generate-markdown \ + OUTPUT_DIR=/project_root/target/generated-docs \ + SKIP_BUILD=true + +# Create the Docker-specific temp directory +RUN mkdir -p /tmp/stacks-config-docs + +ENTRYPOINT ["/build/generate-config-docs.sh"] diff --git a/contrib/tools/config-docs-generator/README.md b/contrib/tools/config-docs-generator/README.md new file mode 100644 index 0000000000..1be588994a --- /dev/null +++ b/contrib/tools/config-docs-generator/README.md @@ -0,0 +1,374 @@ +# Configuration Documentation Generator + +This tool automatically generates markdown documentation from Rust configuration structs by extracting specially formatted doc comments. + +## Quick Start + +### Using Docker (Recommended) + +The easiest way to generate configuration documentation: + +```bash +# Navigate to the config-docs-generator directory +cd contrib/tools/config-docs-generator + +# Build the Docker image (one-time setup) +docker build -t config-docs-generator . + +# Generate documentation +docker run --rm -v "$(pwd)/../../../:/project_root" config-docs-generator +``` + +This approach: +- Uses a consistent nightly Rust environment +- Generates `docs/generated/configuration-reference.md` + +### Using Local Setup (Alternative) + +If you prefer to run without Docker: + +### Prerequisites + +- Rust nightly toolchain (install with `rustup toolchain install nightly`) +- jq (install with `apt-get install jq`) + +### Steps + +```bash +# Install nightly toolchain if needed +rustup toolchain install nightly + +# Navigate to the config-docs-generator directory +cd contrib/tools/config-docs-generator + +# Generate documentation +./generate-config-docs.sh +``` + +## What It Does + +The tool processes these configuration structs from the Stacks codebase: +- `BurnchainConfig` → `[burnchain]` section +- `NodeConfig` → `[node]` section +- `MinerConfig` → `[miner]` section +- `ConnectionOptionsFile` → `[connection_options]` section +- `FeeEstimationConfigFile` → `[fee_estimation]` section +- `EventObserverConfigFile` → `[[events_observer]]` section +- `InitialBalanceFile` → `[[ustx_balance]]` section + +For each configuration field, it extracts: +- Field documentation from `///` comments +- Default values (including constant references) +- Usage notes and examples +- Deprecation warnings + +## Output Files + +- **Primary**: `docs/generated/configuration-reference.md` - Complete configuration reference +- **Intermediate**: `target/doc-generation/extracted-config-docs.json` - Raw extracted data + +## Annotation Syntax Guide + +### Overview + +The generator processes doc comments with a structured annotation format: + +```rust +/// [Description text in Markdown format] +/// --- +/// @annotation_name: value +/// @another_annotation: value +pub field_name: Type, +``` + +### General Structure + +- **Description**: Standard Markdown text before the `---` separator +- **Separator**: Three dashes (`---`) separate description from annotations +- **Annotations**: Key-value pairs starting with `@`, each on its own line + +### Supported Annotations + +#### `@default: ` +Specifies the default value for the field. +- **Value Type**: String +- **Multiline Support**: Yes (all modes) +- **Examples**: + ```rust + /// @default: `None` + /// @default: `"localhost:8080"` + /// @default: | + /// Complex multi-line + /// default value + ``` + +#### `@notes: ` +Additional notes or explanations, rendered as a bulleted list. +- **Value Type**: String (parsed into list items) +- **Multiline Support**: Yes (all modes) +- **List Processing**: Lines starting with `-`, `*`, or `•` become list items +- **Examples**: + ```rust + /// @notes: Single line note + /// @notes: + /// - First bullet point + /// - Second bullet point + /// @notes: | + /// Complex formatting with + /// preserved line breaks + ``` + +#### `@deprecated: ` +Marks a field as deprecated with an optional message. +- **Value Type**: String +- **Multiline Support**: Yes (all modes) +- **Examples**: + ```rust + /// @deprecated: Use new_field instead + /// @deprecated: | + /// This field will be removed in v3.0. + /// Migrate to the new configuration system. + ``` + +#### `@toml_example: ` +Provides TOML configuration examples. +- **Value Type**: String +- **Multiline Support**: Yes (all modes) +- **Rendering**: Displayed in `
` blocks in markdown tables
+- **Examples**:
+  ```rust
+  /// @toml_example: key = "value"
+  /// @toml_example: |
+  ///   [section]
+  ///   key = "value"
+  ///   nested = { a = 1, b = 2 }
+  ```
+
+#### `@required: `
+Indicates whether the field is mandatory.
+- **Value Type**: Boolean
+- **Default**: If annotation is omitted, the field is considered *not required*.
+- **Supported Values**:
+  - ``true`
+  - `false`
+  - Invalid values default to `false`
+- **Examples**:
+  ```rust
+  /// @required: true
+  /// @required: false
+  ```
+
+#### `@units: `
+Specifies the unit of measurement for the field.
+- **Value Type**: String
+- **Multiline Support**: Yes (all modes)
+- **Constant References**: Supports `[`CONSTANT_NAME`]` syntax
+- **Examples**:
+  ```rust
+  /// @units: milliseconds
+  /// @units: sats/vByte
+  ```
+
+### Multiline Content Support
+
+All annotations support three multiline modes:
+
+#### Default Literal-like Mode
+Content preserves newlines and relative indentation within the annotation block.
+
+```rust
+/// @notes:
+///   First line with base indentation
+///     Second line more indented
+///   Third line back to base
+///       Fourth line very indented
+```
+
+**Output preserves relative indentation**:
+```
+First line with base indentation
+  Second line more indented
+Third line back to base
+    Fourth line very indented
+```
+
+#### Literal Block Style (`|`)
+Exact preservation of newlines and relative indentation. Uses "clip" chomping (single trailing newline preserved).
+
+```rust
+/// @toml_example: |
+///   [network]
+///   bind = "0.0.0.0:20444"
+///     # Indented comment
+///   timeout = 30
+```
+
+**Output**:
+```
+[network]
+bind = "0.0.0.0:20444"
+  # Indented comment
+timeout = 30
+```
+
+#### Folded Block Style (`>`)
+Folds lines into paragraphs with intelligent spacing. More-indented lines preserved as literal blocks.
+
+```rust
+/// @notes: >
+///   This is a long paragraph that will be
+///   folded into a single line with spaces
+///   between the original line breaks.
+///
+///   This is a second paragraph after a blank line.
+///
+///     This indented block will be preserved
+///     exactly as written, like code.
+///
+///   Back to normal folded paragraph text.
+```
+
+**Output**:
+```
+This is a long paragraph that will be folded into a single line with spaces between the original line breaks.
+
+This is a second paragraph after a blank line.
+
+  This indented block will be preserved
+  exactly as written, like code.
+
+Back to normal folded paragraph text.
+```
+
+### Same-line Content
+
+Content can start immediately after the colon for default multiline mode:
+
+```rust
+/// @default: immediate content
+/// @notes: Content that starts immediately
+///   and continues on the next line
+```
+
+For literal (`|`) and folded (`>`) modes, content must start on the next line:
+
+```rust
+/// @notes: |
+///   Content starts here on the next line
+///   All content must be indented on subsequent lines
+/// @deprecated: >
+///   Folded content also starts on the next line
+///   and will be joined appropriately
+```
+
+### Complete Example
+
+```rust
+/// Timeout duration for network connections.
+///
+/// This setting controls how long the node will wait for network operations
+/// to complete before timing out. Setting this too low may cause connection
+/// failures on slow networks.
+/// ---
+/// @default: [`DEFAULT_NETWORK_TIMEOUT`]
+/// @required: true
+/// @units: milliseconds
+/// @notes:
+///   - Must be greater than 0
+///   - Recommended range: 1000-30000
+///   - Higher values needed for slow connections
+/// @toml_example: |
+///   [network]
+///   timeout = 15000  # 15 seconds
+/// @deprecated: >
+///   Use the new `connection_timeout` setting instead.
+///   This field will be removed in version 3.0.
+pub timeout_ms: u64,
+```
+
+### Best Practices
+
+1. **Choose the right multiline mode**:
+   - Default mode: General text with preserved formatting
+   - Literal (`|`): Code examples, exact formatting required
+   - Folded (`>`): Documentation prose, automatic paragraph wrapping
+
+2. **Use constant references in `@default` when appropriate**
+
+### Integration with Rust Documentation
+
+This system integrates with standard Rust documentation tools:
+- Doc comments remain valid for `rustdoc`
+- Annotations are ignored by standard documentation generators
+- Full compatibility with existing documentation workflows
+
+## Adding New Configuration Structs
+
+### 1. Update the Target List
+
+Edit `contrib/tools/config-docs-generator/generate-config-docs.sh`:
+
+```bash
+TARGET_STRUCTS="BurnchainConfig,NodeConfig,MinerConfig,YourNewConfig"
+```
+
+### 2. Document Your Struct
+
+Add proper documentation to your Rust configuration struct:
+
+```rust
+/// Configuration for your new feature.
+///
+/// This controls how the feature operates and integrates
+/// with the existing node functionality.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct YourNewConfig {
+    /// Enable or disable the new feature.
+    /// ---
+    /// @default: `false`
+    /// @notes:
+    ///   - Requires restart to take effect
+    ///   - May impact performance when enabled
+    /// @toml_example: |
+    ///   enabled = true
+    pub enabled: bool,
+
+    /// Timeout for feature operations in milliseconds.
+    /// ---
+    /// @default: [`DEFAULT_TIMEOUT`]
+    pub timeout: u64,
+}
+```
+
+### Supported Annotations
+
+- **@default**: Default value (supports constant references like `[`CONSTANT_NAME`]`)
+- **@notes**: Bullet-pointed usage notes
+- **@deprecated**: Deprecation message
+- **@toml_example**: Example TOML configuration
+
+### 3. Generate
+
+Override TOML section names using JSON configuration:
+
+```bash
+# Using Docker with custom mappings and template
+cd contrib/tools/config-docs-generator
+docker run --rm -v "$(pwd)/../../../:/project_root" \
+  -e SECTION_MAPPINGS_PATH="/build/contrib/tools/config-docs-generator/custom_mappings.json" \
+  -e TEMPLATE_PATH="/build/contrib/tools/config-docs-generator/templates/custom_template.md" \
+  config-docs-generator
+
+# OR using local setup
+./generate-config-docs.sh --section-name-mappings custom_mappings.json --template custom_template.md
+```
+
+## How It Works
+
+The tool uses a three-step process:
+
+1. **Extract**: Uses `cargo +nightly rustdoc --output-format json` to generate documentation JSON
+2. **Parse**: Extracts field information, resolves constant references across crates
+3. **Generate**: Converts to Markdown with proper cross-references and formatting
+
+The process is automated by the shell script which coordinates building the tools and running the extraction/generation pipeline.
diff --git a/contrib/tools/config-docs-generator/generate-config-docs.sh b/contrib/tools/config-docs-generator/generate-config-docs.sh
new file mode 100755
index 0000000000..b9c0924512
--- /dev/null
+++ b/contrib/tools/config-docs-generator/generate-config-docs.sh
@@ -0,0 +1,149 @@
+#!/bin/bash
+
+set -euo pipefail
+
+# Colors for output
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+NC='\033[0m' # No Color
+
+# Configuration - Allow environment variable overrides
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+PROJECT_ROOT="${PROJECT_ROOT:-$(cd "$SCRIPT_DIR/../../../" && pwd)}"
+CARGO_TARGET_DIR="${CARGO_TARGET_DIR:-$PROJECT_ROOT/target}"
+OUTPUT_DIR="${OUTPUT_DIR:-$CARGO_TARGET_DIR/generated-docs}"
+TEMP_DIR="${TEMP_DIR:-$CARGO_TARGET_DIR/doc-generation}"
+
+# Binary paths - allow override via environment
+EXTRACT_DOCS_BIN="${EXTRACT_DOCS_BIN:-$CARGO_TARGET_DIR/release/extract-docs}"
+GENERATE_MARKDOWN_BIN="${GENERATE_MARKDOWN_BIN:-$CARGO_TARGET_DIR/release/generate-markdown}"
+
+# Template and mappings paths - allow override via environment
+TEMPLATE_PATH="${TEMPLATE_PATH:-$SCRIPT_DIR/templates/reference_template.md}"
+SECTION_MAPPINGS_PATH="${SECTION_MAPPINGS_PATH:-$SCRIPT_DIR/section_name_mappings.json}"
+
+# Check if binaries are pre-built (skip build step)
+SKIP_BUILD="${SKIP_BUILD:-false}"
+
+export CARGO_TARGET_DIR
+
+log_info() {
+    echo -e "${GREEN}[INFO]${NC} $1"
+}
+
+log_warn() {
+    echo -e "${YELLOW}[WARN]${NC} $1"
+}
+
+log_error() {
+    echo -e "${RED}[ERROR]${NC} $1"
+}
+
+cleanup() {
+    if [[ -d "$TEMP_DIR" ]]; then
+        rm -rf "$TEMP_DIR"
+    fi
+}
+
+trap cleanup EXIT
+
+main() {
+    log_info "Starting config documentation generation..."
+
+    # Create necessary directories
+    mkdir -p "$OUTPUT_DIR"
+    mkdir -p "$TEMP_DIR"
+
+    cd "$PROJECT_ROOT"
+
+    # Step 1: Build the documentation generation tools
+    if [[ "$SKIP_BUILD" != "true" ]]; then
+        log_info "Building documentation generation tools..."
+        cargo build --package config-docs-generator --release
+    fi
+
+    # Step 2: Extract documentation from source code using rustdoc
+    log_info "Extracting configuration documentation using rustdoc..."
+    EXTRACTED_JSON="$TEMP_DIR/extracted-config-docs.json"
+
+    # Determine the list of structs to document from section_name_mappings.json
+    # If the caller sets $TARGET_STRUCTS explicitly we honour that override.
+    if [[ -z "${TARGET_STRUCTS:-}" ]]; then
+        TARGET_STRUCTS="$(jq -r 'keys | join(",")' "$SECTION_MAPPINGS_PATH")"
+    fi
+    log_info "Structs to be documented: $TARGET_STRUCTS"
+
+    "$EXTRACT_DOCS_BIN" \
+        --package stackslib \
+        --structs "$TARGET_STRUCTS" \
+        --output "$EXTRACTED_JSON"
+
+    # Step 3: Generate Markdown
+    log_info "Generating Markdown documentation..."
+    MARKDOWN_OUTPUT="$OUTPUT_DIR/configuration-reference.md"
+
+    # Call the command
+    "$GENERATE_MARKDOWN_BIN" --input "$EXTRACTED_JSON" --output "$MARKDOWN_OUTPUT" --template "$TEMPLATE_PATH" --section-name-mappings "$SECTION_MAPPINGS_PATH"
+
+    log_info "Documentation generation complete!"
+    log_info "Generated files:"
+    log_info "  - Configuration reference: $MARKDOWN_OUTPUT"
+    log_info "  - Intermediate JSON: $EXTRACTED_JSON"
+
+    # Verify output
+    if [[ -f "$MARKDOWN_OUTPUT" ]]; then
+        WORD_COUNT=$(wc -w < "$MARKDOWN_OUTPUT")
+        log_info "Generated Markdown contains $WORD_COUNT words"
+    else
+        log_error "Expected output file not found: $MARKDOWN_OUTPUT"
+        exit 1
+    fi
+}
+
+# Help function
+show_help() {
+    cat << EOF
+generate-config-docs.sh - Generate configuration documentation for Stacks node
+
+USAGE:
+    $0 [OPTIONS]
+
+OPTIONS:
+    -h, --help      Show this help message
+
+DESCRIPTION:
+    This script generates comprehensive Markdown documentation for all TOML
+    configuration options available in the Stacks node. The documentation is
+    automatically extracted from Rust source code comments.
+
+    The process involves:
+    1. Building the documentation generation tools
+    2. Extracting configuration struct documentation from source code
+    3. Converting to Markdown format
+
+    Source file: stackslib/src/config/mod.rs
+
+OUTPUT:
+    docs/generated/configuration-reference.md
+
+EOF
+}
+
+# Parse command line arguments
+while [[ $# -gt 0 ]]; do
+    case $1 in
+        -h|--help)
+            show_help
+            exit 0
+            ;;
+        *)
+            log_error "Unknown option: $1"
+            show_help
+            exit 1
+            ;;
+    esac
+    shift
+done
+
+main "$@"
diff --git a/contrib/tools/config-docs-generator/section_name_mappings.json b/contrib/tools/config-docs-generator/section_name_mappings.json
new file mode 100644
index 0000000000..95a071a785
--- /dev/null
+++ b/contrib/tools/config-docs-generator/section_name_mappings.json
@@ -0,0 +1,9 @@
+{
+    "BurnchainConfig": "[burnchain]",
+    "NodeConfig": "[node]",
+    "MinerConfig": "[miner]",
+    "ConnectionOptionsFile": "[connection_options]",
+    "FeeEstimationConfigFile": "[fee_estimation]",
+    "EventObserverConfigFile": "[[events_observer]]",
+    "InitialBalanceFile": "[[ustx_balance]]"
+}
diff --git a/contrib/tools/config-docs-generator/src/extract_docs.rs b/contrib/tools/config-docs-generator/src/extract_docs.rs
new file mode 100644
index 0000000000..689b7a7fac
--- /dev/null
+++ b/contrib/tools/config-docs-generator/src/extract_docs.rs
@@ -0,0 +1,2792 @@
+// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
+// Copyright (C) 2020-2025 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program.  If not, see .
+use std::collections::{HashMap, HashSet};
+use std::fs;
+use std::process::Command as StdCommand;
+
+use anyhow::{Context, Result};
+use clap::{Arg, Command as ClapCommand};
+use once_cell::sync::Lazy;
+use serde::{Deserialize, Serialize};
+
+// Static regex for finding constant references in documentation
+static CONSTANT_REFERENCE_REGEX: Lazy =
+    Lazy::new(|| regex::Regex::new(r"\[`([A-Z_][A-Z0-9_]*)`\]").unwrap());
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct FieldDoc {
+    pub name: String,
+    pub description: String,
+    pub default_value: Option,
+    pub notes: Option>,
+    pub deprecated: Option,
+    pub toml_example: Option,
+    pub required: Option,
+    pub units: Option,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct StructDoc {
+    pub name: String,
+    pub description: Option,
+    pub fields: Vec,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+struct ConfigDocs {
+    structs: Vec,
+    referenced_constants: HashMap>, // Name -> Resolved Value (or None)
+}
+
+// JSON navigation helper functions
+/// Navigate through nested JSON structure using an array of keys
+/// Returns None if any part of the path doesn't exist
+///
+/// Example: get_json_path(value, &["inner", "struct", "kind"])
+/// is equivalent to value.get("inner")?.get("struct")?.get("kind")
+fn get_json_path<'a>(value: &'a serde_json::Value, path: &[&str]) -> Option<&'a serde_json::Value> {
+    let mut current = value;
+
+    for &key in path {
+        current = current.get(key)?;
+    }
+
+    Some(current)
+}
+
+/// Navigate to an array at the given JSON path
+/// Returns None if the path doesn't exist or the value is not an array
+fn get_json_array<'a>(
+    value: &'a serde_json::Value,
+    path: &[&str],
+) -> Option<&'a Vec> {
+    get_json_path(value, path)?.as_array()
+}
+
+/// Navigate to an object at the given JSON path
+/// Returns None if the path doesn't exist or the value is not an object
+fn get_json_object<'a>(
+    value: &'a serde_json::Value,
+    path: &[&str],
+) -> Option<&'a serde_json::Map> {
+    get_json_path(value, path)?.as_object()
+}
+
+/// Navigate to a string at the given JSON path
+/// Returns None if the path doesn't exist or the value is not a string
+fn get_json_string<'a>(value: &'a serde_json::Value, path: &[&str]) -> Option<&'a str> {
+    get_json_path(value, path)?.as_str()
+}
+
+fn main() -> Result<()> {
+    let matches = ClapCommand::new("extract-docs")
+        .about("Extract documentation from Rust source code using rustdoc JSON")
+        .arg(
+            Arg::new("package")
+                .long("package")
+                .short('p')
+                .value_name("PACKAGE")
+                .help("Package to extract docs for")
+                .required(true),
+        )
+        .arg(
+            Arg::new("output")
+                .long("output")
+                .short('o')
+                .value_name("FILE")
+                .help("Output JSON file")
+                .required(true),
+        )
+        .arg(
+            Arg::new("structs")
+                .long("structs")
+                .value_name("NAMES")
+                .help("Comma-separated list of struct names to extract")
+                .required(true),
+        )
+        .get_matches();
+
+    let package = matches.get_one::("package").unwrap();
+    let output_file = matches.get_one::("output").unwrap();
+    let target_structs: Option> = matches
+        .get_one::("structs")
+        .map(|s| s.split(',').map(|s| s.trim().to_string()).collect());
+
+    // Generate rustdoc JSON
+    let rustdoc_json = generate_rustdoc_json(package)?;
+
+    // Extract configuration documentation from the rustdoc JSON
+    let config_docs = extract_config_docs_from_rustdoc(&rustdoc_json, &target_structs)?;
+
+    // Write the extracted docs to file
+    fs::write(output_file, serde_json::to_string_pretty(&config_docs)?)?;
+
+    println!("Successfully extracted documentation to {}", output_file);
+    println!(
+        "Found {} structs with documentation",
+        config_docs.structs.len()
+    );
+    Ok(())
+}
+
+fn generate_rustdoc_json(package: &str) -> Result {
+    // List of crates to generate rustdoc for (in addition to the main package)
+    // These crates contain constants that might be referenced in documentation
+    // NOTE: This list must be manually updated if new dependencies containing
+    // constants referenced in doc comments are added to the project
+    let additional_crates = ["stacks-common"];
+
+    // Respect CARGO_TARGET_DIR environment variable for rustdoc output
+    let rustdoc_target_dir = std::env::var("CARGO_TARGET_DIR")
+        .unwrap_or_else(|_| "target".to_string())
+        + "/rustdoc-json";
+
+    // WARNING: This tool relies on nightly rustdoc JSON output (-Z unstable-options --output-format json)
+    // The JSON format is subject to change with new Rust nightly versions and could break this tool.
+    // Use cargo rustdoc with nightly to generate JSON for the main package
+    let output = StdCommand::new("cargo")
+        .args([
+            "+nightly",
+            "rustdoc",
+            "--lib",
+            "-p",
+            package,
+            "--target-dir",
+            &rustdoc_target_dir,
+            "--",
+            "-Z",
+            "unstable-options",
+            "--output-format",
+            "json",
+            "--document-private-items",
+        ])
+        .output()
+        .context("Failed to run cargo rustdoc command")?;
+
+    if !output.status.success() {
+        let stderr = String::from_utf8_lossy(&output.stderr);
+        anyhow::bail!("cargo rustdoc failed: {}", stderr);
+    }
+
+    // Generate rustdoc for additional crates that might contain referenced constants
+    for additional_crate in &additional_crates {
+        let error_msg = format!(
+            "Failed to run cargo rustdoc command for {}",
+            additional_crate
+        );
+        let output = StdCommand::new("cargo")
+            .args([
+                "+nightly",
+                "rustdoc",
+                "--lib",
+                "-p",
+                additional_crate,
+                "--target-dir",
+                &rustdoc_target_dir,
+                "--",
+                "-Z",
+                "unstable-options",
+                "--output-format",
+                "json",
+                "--document-private-items",
+            ])
+            .output()
+            .context(error_msg)?;
+
+        if !output.status.success() {
+            let stderr = String::from_utf8_lossy(&output.stderr);
+            eprintln!(
+                "Warning: Failed to generate rustdoc for {}: {}",
+                additional_crate, stderr
+            );
+        }
+    }
+
+    // Map package names to their library names if different
+    // For most packages, the library name is the same as package name with hyphens replaced by underscores
+    // But some packages have custom library names defined in Cargo.toml
+    // NOTE: This mapping must be updated if new packages with different library names are processed
+    let lib_name = match package {
+        "stackslib" => "blockstack_lib".to_string(),
+        _ => package.replace('-', "_"),
+    };
+
+    // Read the generated JSON file - rustdoc generates it based on library name
+    let json_file_path = format!("{}/doc/{}.json", rustdoc_target_dir, lib_name);
+    let json_content = std::fs::read_to_string(json_file_path)
+        .context("Failed to read generated rustdoc JSON file")?;
+
+    serde_json::from_str(&json_content).context("Failed to parse rustdoc JSON output")
+}
+
+fn extract_config_docs_from_rustdoc(
+    rustdoc_json: &serde_json::Value,
+    target_structs: &Option>,
+) -> Result {
+    let mut structs = Vec::new();
+    let mut all_referenced_constants = std::collections::HashSet::new();
+
+    // Access the main index containing all items from the rustdoc JSON output
+    let index = get_json_object(rustdoc_json, &["index"])
+        .context("Missing 'index' field in rustdoc JSON")?;
+
+    for (_item_id, item) in index {
+        // Extract the item's name from rustdoc JSON structure
+        if let Some(name) = get_json_string(item, &["name"]) {
+            // Check if this item is a struct by looking for the "struct" field
+            if get_json_object(item, &["inner", "struct"]).is_some() {
+                // Check if this struct is in our target list (if specified)
+                if let Some(targets) = target_structs {
+                    if !targets.contains(&name.to_string()) {
+                        continue;
+                    }
+                }
+
+                let (struct_doc_opt, referenced_constants) =
+                    extract_struct_from_rustdoc_index(index, name, item)?;
+
+                if let Some(struct_doc) = struct_doc_opt {
+                    structs.push(struct_doc);
+                }
+                all_referenced_constants.extend(referenced_constants);
+            }
+        }
+    }
+
+    // Resolve all collected constant references
+    let mut referenced_constants = HashMap::new();
+    for constant_name in all_referenced_constants {
+        let resolved_value = resolve_constant_reference(&constant_name, index);
+        referenced_constants.insert(constant_name, resolved_value);
+    }
+
+    Ok(ConfigDocs {
+        structs,
+        referenced_constants,
+    })
+}
+
+fn extract_struct_from_rustdoc_index(
+    index: &serde_json::Map,
+    struct_name: &str,
+    struct_item: &serde_json::Value,
+) -> Result<(Option, HashSet)> {
+    let mut all_referenced_constants = std::collections::HashSet::new();
+
+    // Extract struct documentation
+    let description = get_json_string(struct_item, &["docs"]).map(|s| s.to_string());
+
+    // Collect constant references from struct description
+    if let Some(desc) = &description {
+        all_referenced_constants.extend(find_constant_references(desc));
+    }
+
+    // Extract fields
+    let (fields, referenced_constants) = extract_struct_fields(index, struct_item)?;
+
+    // Extend referenced constants
+    all_referenced_constants.extend(referenced_constants);
+
+    if !fields.is_empty() || description.is_some() {
+        let struct_doc = StructDoc {
+            name: struct_name.to_string(),
+            description,
+            fields,
+        };
+        Ok((Some(struct_doc), all_referenced_constants))
+    } else {
+        Ok((None, all_referenced_constants))
+    }
+}
+
+fn extract_struct_fields(
+    index: &serde_json::Map,
+    struct_item: &serde_json::Value,
+) -> Result<(Vec, std::collections::HashSet)> {
+    let mut fields = Vec::new();
+    let mut all_referenced_constants = std::collections::HashSet::new();
+
+    // Navigate through rustdoc JSON structure to access struct fields
+    // Path: item.inner.struct.kind.plain.fields[]
+    if let Some(field_ids) =
+        get_json_array(struct_item, &["inner", "struct", "kind", "plain", "fields"])
+    {
+        for field_id in field_ids {
+            // Field IDs can be either integers or strings in rustdoc JSON, try both formats
+            let field_item = if let Some(field_id_num) = field_id.as_u64() {
+                // Numeric field ID - convert to string for index lookup
+                index.get(&field_id_num.to_string())
+            } else if let Some(field_id_str) = field_id.as_str() {
+                // String field ID - use directly for index lookup
+                index.get(field_id_str)
+            } else {
+                None
+            };
+
+            if let Some(field_item) = field_item {
+                // Extract the field's name from the rustdoc item
+                let field_name = get_json_string(field_item, &["name"])
+                    .unwrap_or("unknown")
+                    .to_string();
+
+                // Extract the field's documentation text from rustdoc
+                let field_docs = get_json_string(field_item, &["docs"])
+                    .unwrap_or("")
+                    .to_string();
+
+                // Parse the structured documentation
+                let (field_doc, referenced_constants) =
+                    parse_field_documentation(&field_docs, &field_name)?;
+
+                // Only include fields that have documentation
+                if !field_doc.description.is_empty() || field_doc.default_value.is_some() {
+                    fields.push(field_doc);
+                }
+
+                // Extend referenced constants
+                all_referenced_constants.extend(referenced_constants);
+            }
+        }
+    }
+
+    Ok((fields, all_referenced_constants))
+}
+
+fn parse_field_documentation(
+    doc_text: &str,
+    field_name: &str,
+) -> Result<(FieldDoc, std::collections::HashSet)> {
+    let mut default_value = None;
+    let mut notes = None;
+    let mut deprecated = None;
+    let mut toml_example = None;
+    let mut required = None;
+    let mut units = None;
+    let mut referenced_constants = std::collections::HashSet::new();
+
+    // Split on --- separator if present
+    let parts: Vec<&str> = doc_text.split("---").collect();
+
+    let description = parts[0].trim().to_string();
+
+    // Collect constant references from description
+    referenced_constants.extend(find_constant_references(&description));
+
+    // Parse metadata section if present
+    if parts.len() >= 2 {
+        let metadata_section = parts[1];
+
+        // Parse @default: annotations
+        if let Some(default_match) = extract_annotation(metadata_section, "default") {
+            // Collect constant references from default value
+            referenced_constants.extend(find_constant_references(&default_match));
+            default_value = Some(default_match);
+        }
+
+        // Parse @notes: annotations
+        if let Some(notes_text) = extract_annotation(metadata_section, "notes") {
+            // Collect constant references from notes
+            referenced_constants.extend(find_constant_references(¬es_text));
+
+            let mut note_items: Vec = Vec::new();
+            let mut current_note = String::new();
+            let mut in_note = false;
+
+            for line in notes_text.lines() {
+                let trimmed = line.trim();
+
+                // Skip empty lines
+                if trimmed.is_empty() {
+                    continue;
+                }
+
+                // Check if this line starts a new note (bullet point)
+                if trimmed.starts_with("- ") || trimmed.starts_with("* ") {
+                    // If we were building a previous note, save it
+                    if in_note && !current_note.trim().is_empty() {
+                        note_items.push(current_note.trim().to_string());
+                    }
+
+                    // Start a new note (remove the bullet point)
+                    current_note = trimmed[2..].trim().to_string();
+                    in_note = true;
+                } else if in_note {
+                    // This is a continuation line for the current note
+                    if !current_note.is_empty() {
+                        current_note.push(' ');
+                    }
+                    current_note.push_str(trimmed);
+                }
+                // If not in_note and doesn't start with bullet, ignore the line
+            }
+
+            // Don't forget the last note
+            if in_note && !current_note.trim().is_empty() {
+                note_items.push(current_note.trim().to_string());
+            }
+
+            if !note_items.is_empty() {
+                notes = Some(note_items);
+            }
+        }
+
+        // Parse @deprecated: annotations
+        if let Some(deprecated_text) = extract_annotation(metadata_section, "deprecated") {
+            // Collect constant references from deprecated text
+            referenced_constants.extend(find_constant_references(&deprecated_text));
+            deprecated = Some(deprecated_text);
+        }
+
+        // Parse @toml_example: annotations
+        if let Some(example_text) = extract_annotation(metadata_section, "toml_example") {
+            // Note: We typically don't expect constant references in TOML examples,
+            // but we'll check anyway for completeness
+            referenced_constants.extend(find_constant_references(&example_text));
+            toml_example = Some(example_text);
+        }
+
+        // Parse @required: annotations
+        if let Some(required_text) = extract_annotation(metadata_section, "required") {
+            let required_bool = match required_text.trim() {
+                "" => false, // Empty string defaults to false
+                text => text.parse::().unwrap_or_else(|_| {
+                    eprintln!(
+                        "Warning: Invalid @required value '{}' for field '{}', defaulting to false",
+                        text, field_name
+                    );
+                    false
+                }),
+            };
+            required = Some(required_bool);
+        }
+
+        // Parse @units: annotations
+        if let Some(units_text) = extract_annotation(metadata_section, "units") {
+            // Collect constant references from units text
+            referenced_constants.extend(find_constant_references(&units_text));
+            units = Some(units_text);
+        }
+    }
+
+    let field_doc = FieldDoc {
+        name: field_name.to_string(),
+        description,
+        default_value,
+        notes,
+        deprecated,
+        toml_example,
+        required,
+        units,
+    };
+
+    Ok((field_doc, referenced_constants))
+}
+
+/// Parse a YAML-style literal block scalar (|) from comment lines
+/// Preserves newlines and internal indentation relative to the block base indentation
+fn parse_literal_block_scalar(lines: &[&str], _base_indent: usize) -> String {
+    if lines.is_empty() {
+        return String::new();
+    }
+
+    // Find the first non-empty content line to determine block indentation
+    let content_lines: Vec<&str> = lines
+        .iter()
+        .skip_while(|line| line.trim().is_empty())
+        .copied()
+        .collect();
+
+    if content_lines.is_empty() {
+        return String::new();
+    }
+
+    // Determine block indentation from the first content line
+    let block_indent = content_lines[0].len() - content_lines[0].trim_start().len();
+
+    // Process all lines, preserving relative indentation within the block
+    let mut result_lines = Vec::new();
+    for line in lines {
+        if line.trim().is_empty() {
+            // Preserve empty lines
+            result_lines.push(String::new());
+        } else {
+            let line_indent = line.len() - line.trim_start().len();
+            if line_indent >= block_indent {
+                // Remove only the common block indentation, preserving relative indentation
+                let content = &line[block_indent.min(line.len())..];
+                result_lines.push(content.to_string());
+            } else {
+                // Line is less indented than block base - should not happen in well-formed blocks
+                result_lines.push(line.trim_start().to_string());
+            }
+        }
+    }
+
+    // Remove trailing empty lines (clip chomping style)
+    while let Some(last) = result_lines.last() {
+        if last.is_empty() {
+            result_lines.pop();
+        } else {
+            break;
+        }
+    }
+
+    result_lines.join("\n")
+}
+
+/// Parse a YAML-style folded block scalar (>)
+/// Folds lines into paragraphs, preserving more-indented lines as literal blocks
+fn parse_folded_block_scalar(lines: &[&str], _base_indent: usize) -> String {
+    if lines.is_empty() {
+        return String::new();
+    }
+
+    // Find the first non-empty content line to determine block indentation
+    let content_lines: Vec<&str> = lines
+        .iter()
+        .skip_while(|line| line.trim().is_empty())
+        .copied()
+        .collect();
+
+    if content_lines.is_empty() {
+        return String::new();
+    }
+
+    // Determine block indentation from the first content line
+    let block_indent = content_lines[0].len() - content_lines[0].trim_start().len();
+
+    let mut result = String::new();
+    let mut current_paragraph = Vec::new();
+    let mut in_literal_block = false;
+
+    for line in lines {
+        if line.trim().is_empty() {
+            if in_literal_block {
+                // Empty line in literal block - preserve it
+                result.push('\n');
+            } else if !current_paragraph.is_empty() {
+                // End current paragraph
+                result.push_str(¤t_paragraph.join(" "));
+                result.push_str("\n\n");
+                current_paragraph.clear();
+            }
+            continue;
+        }
+
+        let line_indent = line.len() - line.trim_start().len();
+        let content = if line_indent >= block_indent {
+            &line[block_indent.min(line.len())..]
+        } else {
+            line.trim_start()
+        };
+
+        let relative_indent = line_indent.saturating_sub(block_indent);
+
+        if relative_indent > 0 {
+            // More indented line - start or continue literal block
+            if !in_literal_block {
+                // Finish current paragraph before starting literal block
+                if !current_paragraph.is_empty() {
+                    result.push_str(¤t_paragraph.join(" "));
+                    result.push('\n');
+                    current_paragraph.clear();
+                }
+                in_literal_block = true;
+            }
+            // Add literal line with preserved indentation
+            result.push_str(content);
+            result.push('\n');
+        } else {
+            // Normal indentation - folded content
+            if in_literal_block {
+                // Exit literal block
+                in_literal_block = false;
+                if !result.is_empty() && !result.ends_with('\n') {
+                    result.push('\n');
+                }
+            }
+            // Add to current paragraph
+            current_paragraph.push(content);
+        }
+    }
+
+    // Finish any remaining paragraph
+    if !current_paragraph.is_empty() {
+        result.push_str(¤t_paragraph.join(" "));
+    }
+
+    // Apply "clip" chomping style (consistent with literal parser)
+    // Remove trailing empty lines but preserve a single trailing newline if content exists
+    let trimmed = result.trim_end_matches('\n');
+    if !trimmed.is_empty() && result.ends_with('\n') {
+        format!("{}\n", trimmed)
+    } else {
+        trimmed.to_string()
+    }
+}
+
+fn extract_annotation(metadata_section: &str, annotation_name: &str) -> Option {
+    let annotation_pattern = format!("@{}:", annotation_name);
+
+    if let Some(_start_pos) = metadata_section.find(&annotation_pattern) {
+        // Split the metadata section into lines for processing
+        let all_lines: Vec<&str> = metadata_section.lines().collect();
+
+        // Find which line contains our annotation
+        let mut annotation_line_idx = None;
+        for (idx, line) in all_lines.iter().enumerate() {
+            if line.contains(&annotation_pattern) {
+                annotation_line_idx = Some(idx);
+                break;
+            }
+        }
+
+        let annotation_line_idx = annotation_line_idx?;
+        let annotation_line = all_lines[annotation_line_idx];
+
+        // Find the position of the annotation pattern within this line
+        let pattern_pos = annotation_line.find(&annotation_pattern)?;
+        let after_colon = &annotation_line[pattern_pos + annotation_pattern.len()..];
+
+        // Check for multiline indicators immediately after the colon
+        let trimmed_after_colon = after_colon.trim_start();
+
+        if trimmed_after_colon.starts_with('|') {
+            // Literal block scalar mode (|)
+            // Content starts from the next line, ignoring any text after | on the same line
+            let block_lines = collect_annotation_block_lines(
+                &all_lines,
+                annotation_line_idx + 1,
+                annotation_line,
+            );
+
+            // Convert to owned strings for the parser
+            let owned_lines: Vec = block_lines.iter().map(|s| s.to_string()).collect();
+
+            // Convert back to string slices for the parser
+            let string_refs: Vec<&str> = owned_lines.iter().map(|s| s.as_str()).collect();
+            let base_indent = annotation_line.len() - annotation_line.trim_start().len();
+            let result = parse_literal_block_scalar(&string_refs, base_indent);
+            if result.trim().is_empty() {
+                return None;
+            } else {
+                return Some(result);
+            }
+        } else if trimmed_after_colon.starts_with('>') {
+            // Folded block scalar mode (>)
+            // Content starts from the next line, ignoring any text after > on the same line
+            let block_lines = collect_annotation_block_lines(
+                &all_lines,
+                annotation_line_idx + 1,
+                annotation_line,
+            );
+
+            // Convert to owned strings for the parser
+            let owned_lines: Vec = block_lines.iter().map(|s| s.to_string()).collect();
+
+            // Convert back to string slices for the parser
+            let string_refs: Vec<&str> = owned_lines.iter().map(|s| s.as_str()).collect();
+            let base_indent = annotation_line.len() - annotation_line.trim_start().len();
+            let result = parse_folded_block_scalar(&string_refs, base_indent);
+            if result.trim().is_empty() {
+                return None;
+            } else {
+                return Some(result);
+            }
+        } else {
+            // Default literal-like multiline mode
+            // Content can start on the same line or the next line
+            let mut content_lines = Vec::new();
+
+            // Check if there's content on the same line after the colon
+            if !trimmed_after_colon.is_empty() {
+                content_lines.push(trimmed_after_colon);
+            }
+
+            // Collect subsequent lines that belong to this annotation
+            let block_lines = collect_annotation_block_lines(
+                &all_lines,
+                annotation_line_idx + 1,
+                annotation_line,
+            );
+
+            // For default mode, preserve relative indentation within the block
+            if !block_lines.is_empty() {
+                // Find the base indentation from the first non-empty content line
+                let mut base_indent = None;
+                for line in &block_lines {
+                    let trimmed = line.trim();
+                    if !trimmed.is_empty() {
+                        base_indent = Some(line.len() - line.trim_start().len());
+                        break;
+                    }
+                }
+
+                // Process lines preserving relative indentation
+                for line in block_lines {
+                    let trimmed = line.trim();
+                    if !trimmed.is_empty() {
+                        if let Some(base) = base_indent {
+                            let line_indent = line.len() - line.trim_start().len();
+                            if line_indent >= base {
+                                // Remove only the common base indentation, preserving relative indentation
+                                let content = &line[base.min(line.len())..];
+                                content_lines.push(content);
+                            } else {
+                                // Line is less indented than base - use trimmed content
+                                content_lines.push(trimmed);
+                            }
+                        } else {
+                            content_lines.push(trimmed);
+                        }
+                    }
+                }
+            }
+
+            if content_lines.is_empty() {
+                return None;
+            }
+
+            // Join lines preserving the structure - this maintains internal newlines and relative indentation
+            let result = content_lines.join("\n");
+
+            // Apply standard trimming and return if not empty
+            let cleaned = result.trim();
+            if !cleaned.is_empty() {
+                return Some(cleaned.to_string());
+            }
+        }
+    }
+
+    None
+}
+
+/// Collect lines that belong to an annotation block, stopping at the next annotation or end
+fn collect_annotation_block_lines<'a>(
+    all_lines: &[&'a str],
+    start_idx: usize,
+    annotation_line: &str,
+) -> Vec<&'a str> {
+    let mut block_lines = Vec::new();
+    let annotation_indent = annotation_line.len() - annotation_line.trim_start().len();
+
+    for &line in all_lines.iter().skip(start_idx) {
+        let trimmed = line.trim();
+
+        // Stop if we hit another annotation at the same or lesser indentation level
+        if trimmed.starts_with('@') && trimmed.contains(':') {
+            let line_indent = line.len() - line.trim_start().len();
+            if line_indent <= annotation_indent {
+                break;
+            }
+        }
+
+        // Stop if we hit a line that's clearly not part of the comment block
+        // (very different indentation or structure)
+        let line_indent = line.len() - line.trim_start().len();
+        if !trimmed.is_empty() && line_indent < annotation_indent {
+            break;
+        }
+
+        block_lines.push(line);
+    }
+
+    block_lines
+}
+
+fn resolve_constant_reference(
+    name: &str,
+    rustdoc_index: &serde_json::Map,
+) -> Option {
+    // First, try to find the constant in the main rustdoc index
+    if let Some(value) = resolve_constant_in_index(name, rustdoc_index) {
+        return Some(value);
+    }
+
+    // If not found in main index, try additional crates
+    let additional_crate_libs = ["stacks_common"]; // Library names for additional crates
+
+    for lib_name in &additional_crate_libs {
+        let json_file_path = format!("target/rustdoc-json/doc/{}.json", lib_name);
+        if let Ok(json_content) = std::fs::read_to_string(&json_file_path) {
+            if let Ok(rustdoc_json) = serde_json::from_str::(&json_content) {
+                if let Some(index) = get_json_object(&rustdoc_json, &["index"]) {
+                    if let Some(value) = resolve_constant_in_index(name, index) {
+                        return Some(value);
+                    }
+                }
+            }
+        }
+    }
+
+    None
+}
+
+fn resolve_constant_in_index(
+    name: &str,
+    rustdoc_index: &serde_json::Map,
+) -> Option {
+    // Look for a constant with the given name in the rustdoc index
+    for (_item_id, item) in rustdoc_index {
+        // Check if this item's name matches the constant we're looking for
+        if let Some(item_name) = get_json_string(item, &["name"]) {
+            if item_name == name {
+                // Check if this item is a constant by looking for the "constant" field
+                if let Some(constant_data) = get_json_object(item, &["inner", "constant"]) {
+                    // Try newer rustdoc JSON structure first (with nested 'const' field)
+                    let constant_data_value = serde_json::Value::Object(constant_data.clone());
+                    if get_json_object(&constant_data_value, &["const"]).is_some() {
+                        // For literal constants, prefer expr which doesn't have type suffix
+                        if get_json_path(&constant_data_value, &["const", "is_literal"])
+                            .and_then(|v| v.as_bool())
+                            == Some(true)
+                        {
+                            // Access the expression field for literal constant values
+                            if let Some(expr) =
+                                get_json_string(&constant_data_value, &["const", "expr"])
+                            {
+                                if expr != "_" {
+                                    return Some(expr.to_string());
+                                }
+                            }
+                        }
+
+                        // For computed constants or when expr is "_", use value but strip type suffix
+                        if let Some(value) =
+                            get_json_string(&constant_data_value, &["const", "value"])
+                        {
+                            return Some(strip_type_suffix(value));
+                        }
+
+                        // Fallback to expr if value is not available
+                        if let Some(expr) =
+                            get_json_string(&constant_data_value, &["const", "expr"])
+                        {
+                            if expr != "_" {
+                                return Some(expr.to_string());
+                            }
+                        }
+                    }
+
+                    // Fall back to older rustdoc JSON structure for compatibility
+                    if let Some(value) = get_json_string(&constant_data_value, &["value"]) {
+                        return Some(strip_type_suffix(value));
+                    }
+                    if let Some(expr) = get_json_string(&constant_data_value, &["expr"]) {
+                        if expr != "_" {
+                            return Some(expr.to_string());
+                        }
+                    }
+
+                    // For some constants, the value might be in the type field if it's a simple literal
+                    if let Some(type_str) = get_json_string(&constant_data_value, &["type"]) {
+                        // Handle simple numeric or string literals embedded in type
+                        return Some(type_str.to_string());
+                    }
+                }
+            }
+        }
+    }
+    None
+}
+
+/// Strip type suffixes from rustdoc constant values (e.g., "50u64" -> "50", "402_653_196u32" -> "402_653_196")
+fn strip_type_suffix(value: &str) -> String {
+    // Common Rust integer type suffixes
+    let suffixes = [
+        "u8", "u16", "u32", "u64", "u128", "usize", "i8", "i16", "i32", "i64", "i128", "isize",
+        "f32", "f64",
+    ];
+
+    for suffix in &suffixes {
+        if let Some(without_suffix) = value.strip_suffix(suffix) {
+            // Only strip if the remaining part looks like a numeric literal
+            // (contains only digits, underscores, dots, minus signs, or quotes for string literals)
+            if !without_suffix.is_empty()
+                && (without_suffix
+                    .chars()
+                    .all(|c| c.is_ascii_digit() || c == '_' || c == '.' || c == '-')
+                    || (without_suffix.starts_with('"') && without_suffix.ends_with('"')))
+            {
+                return without_suffix.to_string();
+            }
+        }
+    }
+
+    // If no valid suffix found, return as-is
+    value.to_string()
+}
+
+fn find_constant_references(text: &str) -> std::collections::HashSet {
+    let mut constants = std::collections::HashSet::new();
+
+    for captures in CONSTANT_REFERENCE_REGEX.captures_iter(text) {
+        if let Some(constant_name) = captures.get(1) {
+            constants.insert(constant_name.as_str().to_string());
+        }
+    }
+
+    constants
+}
+
+#[cfg(test)]
+mod tests {
+    use serde_json::json;
+
+    use super::*;
+
+    #[test]
+    fn test_parse_field_documentation_basic() {
+        let doc_text = "This is a basic field description.";
+        let result = parse_field_documentation(doc_text, "test_field").unwrap();
+
+        assert_eq!(result.0.name, "test_field");
+        assert_eq!(result.0.description, "This is a basic field description.");
+        assert_eq!(result.0.default_value, None);
+        assert_eq!(result.0.notes, None);
+        assert_eq!(result.0.deprecated, None);
+        assert_eq!(result.0.toml_example, None);
+    }
+
+    #[test]
+    fn test_parse_field_documentation_with_metadata() {
+        let doc_text = r#"This is a field with metadata.
+---
+@default: `"test_value"`
+@notes:
+  - This is a note.
+  - This is another note.
+@deprecated: This field is deprecated.
+@toml_example: |
+  key = "value"
+  other = 123"#;
+
+        let result = parse_field_documentation(doc_text, "test_field").unwrap();
+
+        assert_eq!(result.0.name, "test_field");
+        assert_eq!(result.0.description, "This is a field with metadata.");
+        assert_eq!(result.0.default_value, Some("`\"test_value\"`".to_string()));
+        assert_eq!(
+            result.0.notes,
+            Some(vec![
+                "This is a note.".to_string(),
+                "This is another note.".to_string()
+            ])
+        );
+        assert_eq!(
+            result.0.deprecated,
+            Some("This field is deprecated.".to_string())
+        );
+        assert_eq!(
+            result.0.toml_example,
+            Some("key = \"value\"\nother = 123".to_string())
+        );
+    }
+
+    #[test]
+    fn test_parse_field_documentation_multiline_default() {
+        let doc_text = r#"Multi-line field description.
+---
+@default: Derived from [`BurnchainConfig::mode`] ([`CHAIN_ID_MAINNET`] for `mainnet`,
+  [`CHAIN_ID_TESTNET`] otherwise).
+@notes:
+  - Warning: Do not modify this unless you really know what you're doing."#;
+
+        let result = parse_field_documentation(doc_text, "test_field").unwrap();
+
+        assert_eq!(result.0.name, "test_field");
+        assert_eq!(result.0.description, "Multi-line field description.");
+        assert!(result.0.default_value.is_some());
+        let default_val = result.0.default_value.unwrap();
+        assert!(default_val.contains("Derived from"));
+        assert!(default_val.contains("CHAIN_ID_MAINNET"));
+        assert_eq!(
+            result.0.notes,
+            Some(vec![
+                "Warning: Do not modify this unless you really know what you're doing.".to_string()
+            ])
+        );
+    }
+
+    #[test]
+    fn test_parse_field_documentation_multiline_notes() {
+        let doc_text = r#"Field with multi-line notes.
+---
+@notes:
+  - This is a single line note.
+  - This is a multi-line note that
+    spans across multiple lines
+    and should be treated as one note.
+  - Another single line note.
+  - Final multi-line note that also
+    continues on the next line."#;
+
+        let result = parse_field_documentation(doc_text, "test_field").unwrap();
+        let (field_doc, _) = result;
+
+        assert_eq!(field_doc.name, "test_field");
+        assert_eq!(field_doc.description, "Field with multi-line notes.");
+
+        let notes = field_doc.notes.expect("Should have notes");
+        assert_eq!(notes.len(), 4);
+        assert_eq!(notes[0], "This is a single line note.");
+        assert_eq!(
+            notes[1],
+            "This is a multi-line note that spans across multiple lines and should be treated as one note."
+        );
+        assert_eq!(notes[2], "Another single line note.");
+        assert_eq!(
+            notes[3],
+            "Final multi-line note that also continues on the next line."
+        );
+    }
+
+    #[test]
+    fn test_parse_field_documentation_multiline_notes_mixed_bullets() {
+        let doc_text = r#"Field with mixed bullet styles.
+---
+@notes:
+  - First note with dash.
+  * Second note with asterisk
+    that continues.
+  - Third note with dash again
+    and multiple continuation lines
+    should all be joined together."#;
+
+        let result = parse_field_documentation(doc_text, "test_field").unwrap();
+        let (field_doc, _) = result;
+
+        let notes = field_doc.notes.expect("Should have notes");
+        assert_eq!(notes.len(), 3);
+        assert_eq!(notes[0], "First note with dash.");
+        assert_eq!(notes[1], "Second note with asterisk that continues.");
+        assert_eq!(
+            notes[2],
+            "Third note with dash again and multiple continuation lines should all be joined together."
+        );
+    }
+
+    #[test]
+    fn test_parse_field_documentation_notes_with_empty_lines() {
+        let doc_text = r#"Field with notes that have empty lines.
+---
+@notes:
+  - First note.
+
+  - Second note after empty line
+    with continuation.
+
+  - Third note after another empty line."#;
+
+        let result = parse_field_documentation(doc_text, "test_field").unwrap();
+        let (field_doc, _) = result;
+
+        let notes = field_doc.notes.expect("Should have notes");
+        assert_eq!(notes.len(), 3);
+        assert_eq!(notes[0], "First note.");
+        assert_eq!(notes[1], "Second note after empty line with continuation.");
+        assert_eq!(notes[2], "Third note after another empty line.");
+    }
+
+    #[test]
+    fn test_parse_field_documentation_notes_with_intralinks() {
+        let doc_text = r#"Field with notes containing intralinks.
+---
+@notes:
+  - If [`SomeConfig::field`] is `true`, the node will
+    use the default estimator.
+  - See [`CONSTANT_VALUE`] for details."#;
+
+        let result = parse_field_documentation(doc_text, "test_field").unwrap();
+        let (field_doc, referenced_constants) = result;
+
+        let notes = field_doc.notes.expect("Should have notes");
+        assert_eq!(notes.len(), 2);
+        assert_eq!(
+            notes[0],
+            "If [`SomeConfig::field`] is `true`, the node will use the default estimator."
+        );
+        assert_eq!(notes[1], "See [`CONSTANT_VALUE`] for details.");
+
+        // Check that constants were collected
+        assert!(referenced_constants.contains("CONSTANT_VALUE"));
+    }
+
+    #[test]
+    fn test_extract_annotation_basic() {
+        let metadata = "@default: `\"test\"`\n@notes: Some notes here.";
+
+        let default = extract_annotation(metadata, "default");
+        let notes = extract_annotation(metadata, "notes");
+        let missing = extract_annotation(metadata, "missing");
+
+        assert_eq!(default, Some("`\"test\"`".to_string()));
+        assert_eq!(notes, Some("Some notes here.".to_string()));
+        assert_eq!(missing, None);
+    }
+
+    #[test]
+    fn test_extract_annotation_toml_example() {
+        let metadata = r#"@toml_example: |
+  key = "value"
+  number = 42
+  nested = { a = 1, b = 2 }"#;
+
+        let result = extract_annotation(metadata, "toml_example");
+        assert!(result.is_some());
+        let toml = result.unwrap();
+        assert!(toml.contains("key = \"value\""));
+        assert!(toml.contains("number = 42"));
+        assert!(toml.contains("nested = { a = 1, b = 2 }"));
+    }
+
+    #[test]
+    fn test_extract_annotation_multiline() {
+        let metadata = r#"@notes:
+  - First note with important details.
+  - Second note with more info.
+@default: `None`"#;
+
+        let notes = extract_annotation(metadata, "notes");
+        let default = extract_annotation(metadata, "default");
+
+        assert!(notes.is_some());
+        let notes_text = notes.unwrap();
+        assert!(notes_text.contains("First note"));
+        assert!(notes_text.contains("Second note"));
+        assert_eq!(default, Some("`None`".to_string()));
+    }
+
+    #[test]
+    fn test_extract_struct_fields_from_mock_data() {
+        let mock_index = json!({
+            "struct_1": {
+                "name": "TestStruct",
+                "inner": {
+                    "struct": {
+                        "kind": {
+                            "plain": {
+                                "fields": ["field_1", "field_2"]
+                            }
+                        }
+                    }
+                }
+            },
+            "field_1": {
+                "name": "test_field",
+                "docs": "A test field.\n---\n@default: `42`"
+            },
+            "field_2": {
+                "name": "another_field",
+                "docs": "Another field with notes.\n---\n@default: `\"hello\"`\n@notes:\n  - This is a note."
+            }
+        });
+
+        let index = mock_index.as_object().unwrap();
+        let struct_item = &mock_index["struct_1"];
+
+        let (fields, _referenced_constants) = extract_struct_fields(index, struct_item).unwrap();
+
+        assert_eq!(fields.len(), 2);
+
+        let first_field = &fields[0];
+        assert_eq!(first_field.name, "test_field");
+        assert_eq!(first_field.description, "A test field.");
+        assert_eq!(first_field.default_value, Some("`42`".to_string()));
+
+        let second_field = &fields[1];
+        assert_eq!(second_field.name, "another_field");
+        assert_eq!(second_field.description, "Another field with notes.");
+        assert_eq!(second_field.default_value, Some("`\"hello\"`".to_string()));
+        assert_eq!(
+            second_field.notes,
+            Some(vec!["This is a note.".to_string()])
+        );
+    }
+
+    #[test]
+    fn test_extract_struct_from_rustdoc_index() {
+        let mock_index = json!({
+            "struct_1": {
+                "name": "TestStruct",
+                "docs": "This is a test struct for configuration.",
+                "inner": {
+                    "struct": {
+                        "kind": {
+                            "plain": {
+                                "fields": ["field_1"]
+                            }
+                        }
+                    }
+                }
+            },
+            "field_1": {
+                "name": "config_field",
+                "docs": "Configuration field.\n---\n@default: `\"default\"`"
+            }
+        });
+
+        let index = mock_index.as_object().unwrap();
+        let struct_item = &mock_index["struct_1"];
+
+        let result = extract_struct_from_rustdoc_index(index, "TestStruct", struct_item).unwrap();
+
+        assert!(result.0.is_some());
+        let struct_doc = result.0.unwrap();
+        assert_eq!(struct_doc.name, "TestStruct");
+        assert_eq!(
+            struct_doc.description,
+            Some("This is a test struct for configuration.".to_string())
+        );
+        assert_eq!(struct_doc.fields.len(), 1);
+        assert_eq!(struct_doc.fields[0].name, "config_field");
+    }
+
+    #[test]
+    fn test_extract_config_docs_from_rustdoc() {
+        let mock_rustdoc = json!({
+            "index": {
+                "item_1": {
+                    "name": "ConfigStruct",
+                    "inner": {
+                        "struct": {
+                            "kind": {
+                                "plain": {
+                                    "fields": ["field_1"]
+                                }
+                            }
+                        }
+                    },
+                    "docs": "A configuration struct."
+                },
+                "item_2": {
+                    "name": "NonStruct",
+                    "inner": {
+                        "function": {}
+                    }
+                },
+                "field_1": {
+                    "name": "setting",
+                    "docs": "A configuration setting.\n---\n@default: `true`"
+                }
+            }
+        });
+
+        let target_structs = Some(vec!["ConfigStruct".to_string()]);
+        let result = extract_config_docs_from_rustdoc(&mock_rustdoc, &target_structs).unwrap();
+
+        assert_eq!(result.structs.len(), 1);
+        let struct_doc = &result.structs[0];
+        assert_eq!(struct_doc.name, "ConfigStruct");
+        assert_eq!(
+            struct_doc.description,
+            Some("A configuration struct.".to_string())
+        );
+        assert_eq!(struct_doc.fields.len(), 1);
+    }
+
+    #[test]
+    fn test_extract_config_docs_filter_by_target() {
+        let mock_rustdoc = json!({
+            "index": {
+                "item_1": {
+                    "name": "WantedStruct",
+                    "inner": {
+                        "struct": {
+                            "kind": {
+                                "plain": {
+                                    "fields": []
+                                }
+                            }
+                        }
+                    },
+                    "docs": "Wanted struct."
+                },
+                "item_2": {
+                    "name": "UnwantedStruct",
+                    "inner": {
+                        "struct": {
+                            "kind": {
+                                "plain": {
+                                    "fields": []
+                                }
+                            }
+                        }
+                    },
+                    "docs": "Unwanted struct."
+                }
+            }
+        });
+
+        let target_structs = Some(vec!["WantedStruct".to_string()]);
+        let result = extract_config_docs_from_rustdoc(&mock_rustdoc, &target_structs).unwrap();
+
+        assert_eq!(result.structs.len(), 1);
+        assert_eq!(result.structs[0].name, "WantedStruct");
+    }
+
+    #[test]
+    fn test_extract_config_docs_no_filter() {
+        let mock_rustdoc = json!({
+            "index": {
+                "item_1": {
+                    "name": "Struct1",
+                    "inner": {
+                        "struct": {
+                            "kind": {
+                                "plain": {
+                                    "fields": []
+                                }
+                            }
+                        }
+                    },
+                    "docs": "First struct."
+                },
+                "item_2": {
+                    "name": "Struct2",
+                    "inner": {
+                        "struct": {
+                            "kind": {
+                                "plain": {
+                                    "fields": []
+                                }
+                            }
+                        }
+                    },
+                    "docs": "Second struct."
+                }
+            }
+        });
+
+        let result = extract_config_docs_from_rustdoc(&mock_rustdoc, &None).unwrap();
+
+        assert_eq!(result.structs.len(), 2);
+        let names: Vec<&str> = result.structs.iter().map(|s| s.name.as_str()).collect();
+        assert!(names.contains(&"Struct1"));
+        assert!(names.contains(&"Struct2"));
+    }
+
+    #[test]
+    fn test_parse_field_documentation_empty_notes() {
+        let doc_text = r#"Field with empty notes.
+---
+@default: `None`
+@notes:
+
+
+@deprecated: Old field"#;
+
+        let result = parse_field_documentation(doc_text, "test_field").unwrap();
+
+        assert_eq!(result.0.name, "test_field");
+        assert_eq!(result.0.description, "Field with empty notes.");
+        assert_eq!(result.0.default_value, Some("`None`".to_string()));
+        assert_eq!(result.0.notes, None); // Empty notes should result in None
+        assert_eq!(result.0.deprecated, Some("Old field".to_string()));
+    }
+
+    #[test]
+    fn test_parse_field_documentation_bullet_points_cleanup() {
+        let doc_text = r#"Field with bullet notes.
+---
+@notes:
+  - First bullet point
+  * Second bullet point
+  - Third bullet point"#;
+
+        let result = parse_field_documentation(doc_text, "test_field").unwrap();
+
+        assert_eq!(
+            result.0.notes,
+            Some(vec![
+                "First bullet point".to_string(),
+                "Second bullet point".to_string(),
+                "Third bullet point".to_string()
+            ])
+        );
+    }
+
+    #[test]
+    fn test_extract_annotation_edge_cases() {
+        // Test with annotation at the end
+        let metadata1 = "@default: `value`";
+        assert_eq!(
+            extract_annotation(metadata1, "default"),
+            Some("`value`".to_string())
+        );
+
+        // Test with empty annotation
+        let metadata2 = "@default:\n@notes: something";
+        assert_eq!(extract_annotation(metadata2, "default"), None);
+
+        // Test with annotation containing colons
+        let metadata3 = "@notes: URL: https://example.com:8080/path";
+        let notes = extract_annotation(metadata3, "notes");
+        assert_eq!(
+            notes,
+            Some("URL: https://example.com:8080/path".to_string())
+        );
+
+        // Test with whitespace-only annotation
+        let metadata_whitespace = "@default:      \n@notes: something";
+        assert_eq!(
+            extract_annotation(metadata_whitespace, "default"),
+            None,
+            "Annotation with only whitespace should be None"
+        );
+
+        // Test with annotation containing only newline
+        let metadata_newline = "@default:\n@notes: something";
+        assert_eq!(
+            extract_annotation(metadata_newline, "default"),
+            None,
+            "Annotation with only newline should be None"
+        );
+    }
+
+    #[test]
+    fn test_extract_struct_fields_numeric_field_ids() {
+        let mock_index = json!({
+            "struct_1": {
+                "name": "TestStruct",
+                "inner": {
+                    "struct": {
+                        "kind": {
+                            "plain": {
+                                "fields": [123, 456] // Numeric field IDs
+                            }
+                        }
+                    }
+                }
+            },
+            "123": {
+                "name": "numeric_field",
+                "docs": "Field with numeric ID.\n---\n@default: `0`"
+            },
+            "456": {
+                "name": "another_numeric",
+                "docs": "Another numeric field."
+            }
+        });
+
+        let index = mock_index.as_object().unwrap();
+        let struct_item = &mock_index["struct_1"];
+
+        let (fields, _referenced_constants) = extract_struct_fields(index, struct_item).unwrap();
+
+        assert_eq!(fields.len(), 2);
+        assert_eq!(fields[0].name, "numeric_field");
+        assert_eq!(fields[1].name, "another_numeric");
+    }
+
+    #[test]
+    fn test_extract_struct_fields_missing_field_data() {
+        let mock_index = json!({
+            "struct_1": {
+                "name": "TestStruct",
+                "inner": {
+                    "struct": {
+                        "kind": {
+                            "plain": {
+                                "fields": ["missing_field", "present_field"]
+                            }
+                        }
+                    }
+                }
+            },
+            "present_field": {
+                "name": "present",
+                "docs": "This field exists."
+            }
+            // "missing_field" is intentionally not in the index
+        });
+
+        let index = mock_index.as_object().unwrap();
+        let struct_item = &mock_index["struct_1"];
+
+        let (fields, _referenced_constants) = extract_struct_fields(index, struct_item).unwrap();
+
+        // Should only include the present field
+        assert_eq!(fields.len(), 1);
+        assert_eq!(fields[0].name, "present");
+    }
+
+    #[test]
+    fn test_extract_config_docs_missing_index() {
+        let invalid_rustdoc = json!({
+            "not_index": {}
+        });
+
+        let result = extract_config_docs_from_rustdoc(&invalid_rustdoc, &None);
+        assert!(result.is_err());
+        assert!(
+            result
+                .unwrap_err()
+                .to_string()
+                .contains("Missing 'index' field")
+        );
+    }
+
+    #[test]
+    fn test_extract_struct_fields_no_documentation() {
+        let mock_index = json!({
+            "struct_1": {
+                "name": "TestStruct",
+                "inner": {
+                    "struct": {
+                        "kind": {
+                            "plain": {
+                                "fields": ["field_1"]
+                            }
+                        }
+                    }
+                }
+            },
+            "field_1": {
+                "name": "undocumented_field",
+                "docs": ""  // Empty documentation
+            }
+        });
+
+        let index = mock_index.as_object().unwrap();
+        let struct_item = &mock_index["struct_1"];
+
+        let (fields, _referenced_constants) = extract_struct_fields(index, struct_item).unwrap();
+
+        // Fields without documentation should be excluded
+        assert_eq!(fields.len(), 0);
+    }
+
+    #[test]
+    fn test_extract_struct_fields_malformed_structure() {
+        let mock_index = json!({
+            "struct_1": {
+                "name": "TestStruct",
+                "inner": {
+                    "struct": {
+                        "kind": {
+                            "tuple": {}  // Not a "plain" struct
+                        }
+                    }
+                }
+            }
+        });
+
+        let index = mock_index.as_object().unwrap();
+        let struct_item = &mock_index["struct_1"];
+
+        let (fields, _referenced_constants) = extract_struct_fields(index, struct_item).unwrap();
+
+        // Should handle malformed structures gracefully
+        assert_eq!(fields.len(), 0);
+    }
+
+    #[test]
+    fn test_parse_field_documentation_complex_annotations() {
+        let doc_text = r#"Complex field with all annotation types and edge cases.
+
+This description spans multiple lines
+and includes various formatting.
+---
+@default: Dynamically determined.
+  - If the `[miner]` section *is present* in the config file, the [`NodeConfig::seed`] is used.
+  - If the `[miner]` section *is not present*, this is `None`, and mining operations will fail.
+@notes:
+  - **Warning:** This field requires careful configuration.
+  - Only relevant if [`NodeConfig::miner`] is `true`.
+  - Units: milliseconds.
+@deprecated: Use `new_field` instead. This will be removed in version 2.0.
+@toml_example: |
+  # This is a comment
+  [section]
+  field = "value"
+
+  # Another section
+  [other_section]
+  number = 42
+  array = ["a", "b", "c"]"#;
+
+        let result = parse_field_documentation(doc_text, "complex_field").unwrap();
+
+        assert_eq!(result.0.name, "complex_field");
+        assert!(result.0.description.contains("Complex field"));
+        assert!(result.0.description.contains("multiple lines"));
+
+        let default_val = result.0.default_value.unwrap();
+        assert!(default_val.contains("Dynamically determined"));
+        assert!(default_val.contains("NodeConfig::seed"));
+
+        let notes = result.0.notes.unwrap();
+        assert_eq!(notes.len(), 3);
+        assert!(notes[0].contains("Warning"));
+        assert!(notes[1].contains("Only relevant"));
+        assert!(notes[2].contains("Units: milliseconds"));
+
+        assert!(
+            result
+                .0
+                .deprecated
+                .unwrap()
+                .contains("Use `new_field` instead")
+        );
+
+        let toml_example = result.0.toml_example.unwrap();
+        assert!(toml_example.contains("# This is a comment"));
+        assert!(toml_example.contains("[section]"));
+        assert!(toml_example.contains("array = [\"a\", \"b\", \"c\"]"));
+    }
+
+    #[test]
+    fn test_extract_annotation_overlapping_patterns() {
+        let metadata = r#"@config_value: `"not_default"`
+@default: `"actual_default"`
+@notes_info: Some other annotation
+@notes: Actual notes here
+@deprecated_old: Old deprecation
+@deprecated: Current deprecation"#;
+
+        // Should extract the correct annotations, not get confused by similar names
+        assert_eq!(
+            extract_annotation(metadata, "default"),
+            Some("`\"actual_default\"`".to_string())
+        );
+        assert_eq!(
+            extract_annotation(metadata, "notes"),
+            Some("Actual notes here".to_string())
+        );
+        assert_eq!(
+            extract_annotation(metadata, "deprecated"),
+            Some("Current deprecation".to_string())
+        );
+
+        // Should not find non-existent annotations
+        assert_eq!(extract_annotation(metadata, "nonexistent"), None);
+        assert_eq!(extract_annotation(metadata, "missing"), None);
+    }
+
+    #[test]
+    fn test_extract_struct_from_rustdoc_index_no_fields_no_description() {
+        let mock_index = json!({
+            "struct_1": {
+                "name": "EmptyStruct",
+                "inner": {
+                    "struct": {
+                        "kind": {
+                            "plain": {
+                                "fields": []
+                            }
+                        }
+                    }
+                }
+                // No "docs" field
+            }
+        });
+
+        let index = mock_index.as_object().unwrap();
+        let struct_item = &mock_index["struct_1"];
+
+        let result = extract_struct_from_rustdoc_index(index, "EmptyStruct", struct_item).unwrap();
+
+        // Should return None for structs with no fields and no description
+        assert!(result.0.is_none());
+    }
+
+    #[test]
+    fn test_parse_field_documentation_only_description() {
+        let doc_text = "Just a simple description with no metadata separator.";
+        let result = parse_field_documentation(doc_text, "simple_field").unwrap();
+
+        assert_eq!(result.0.name, "simple_field");
+        assert_eq!(
+            result.0.description,
+            "Just a simple description with no metadata separator."
+        );
+        assert_eq!(result.0.default_value, None);
+        assert_eq!(result.0.notes, None);
+        assert_eq!(result.0.deprecated, None);
+        assert_eq!(result.0.toml_example, None);
+    }
+
+    #[test]
+    fn test_package_to_library_name_mapping() {
+        // Test the logic inside generate_rustdoc_json for mapping package names to library names
+        // We can't easily test generate_rustdoc_json directly since it runs external commands,
+        // but we can test the mapping logic
+
+        // Test the special case for stackslib
+        let lib_name = match "stackslib" {
+            "stackslib" => "blockstack_lib".to_string(),
+            pkg => pkg.replace('-', "_"),
+        };
+        assert_eq!(lib_name, "blockstack_lib");
+
+        // Test normal package names with hyphens
+        let lib_name = match "config-docs-generator" {
+            "stackslib" => "blockstack_lib".to_string(),
+            pkg => pkg.replace('-', "_"),
+        };
+        assert_eq!(lib_name, "config_docs_generator");
+
+        // Test package name without hyphens
+        let lib_name = match "normalpackage" {
+            "stackslib" => "blockstack_lib".to_string(),
+            pkg => pkg.replace('-', "_"),
+        };
+        assert_eq!(lib_name, "normalpackage");
+    }
+
+    #[test]
+    fn test_find_constant_references() {
+        // Test finding constant references in text
+        let text1 = "This field uses [`DEFAULT_VALUE`] as default.";
+        let constants1 = find_constant_references(text1);
+        assert_eq!(constants1.len(), 1);
+        assert!(constants1.contains("DEFAULT_VALUE"));
+
+        // Test multiple constants
+        let text2 = "Uses [`CONST_A`] and [`CONST_B`] values.";
+        let constants2 = find_constant_references(text2);
+        assert_eq!(constants2.len(), 2);
+        assert!(constants2.contains("CONST_A"));
+        assert!(constants2.contains("CONST_B"));
+
+        // Test no constants
+        let text3 = "This text has no constant references.";
+        let constants3 = find_constant_references(text3);
+        assert_eq!(constants3.len(), 0);
+
+        // Test mixed content
+        let text4 =
+            "Field uses [`MY_CONSTANT`] and links to [`SomeStruct::field`] but not `lowercase`.";
+        let constants4 = find_constant_references(text4);
+        assert_eq!(constants4.len(), 1);
+        assert!(constants4.contains("MY_CONSTANT"));
+        assert!(!constants4.contains("SomeStruct::field")); // Should not match struct::field patterns
+        assert!(!constants4.contains("lowercase")); // Should not match lowercase
+    }
+
+    #[test]
+    fn test_resolve_constant_reference() {
+        // Create mock rustdoc index with a constant
+        let mock_index = serde_json::json!({
+            "const_1": {
+                "name": "TEST_CONSTANT",
+                "inner": {
+                    "constant": {
+                        "expr": "42",
+                        "type": "u32"
+                    }
+                }
+            },
+            "const_2": {
+                "name": "STRING_CONST",
+                "inner": {
+                    "constant": {
+                        "value": "\"hello\"",
+                        "type": "&str"
+                    }
+                }
+            },
+            "not_const": {
+                "name": "NotAConstant",
+                "inner": {
+                    "function": {}
+                }
+            }
+        });
+
+        let index = mock_index.as_object().unwrap();
+
+        // Test resolving existing constant with expr field
+        let result1 = resolve_constant_reference("TEST_CONSTANT", index);
+        assert_eq!(result1, Some("42".to_string()));
+
+        // Test resolving existing constant with value field
+        let result2 = resolve_constant_reference("STRING_CONST", index);
+        assert_eq!(result2, Some("\"hello\"".to_string()));
+
+        // Test resolving non-existent constant
+        let result3 = resolve_constant_reference("NONEXISTENT", index);
+        assert_eq!(result3, None);
+
+        // Test resolving non-constant item
+        let result4 = resolve_constant_reference("NotAConstant", index);
+        assert_eq!(result4, None);
+    }
+
+    #[test]
+    fn test_resolve_computed_constant() {
+        // Test computed constants that have "_" in expr and actual value in value field
+        let mock_index = serde_json::json!({
+            "computed_const": {
+                "name": "COMPUTED_CONSTANT",
+                "inner": {
+                    "constant": {
+                        "const": {
+                            "expr": "_",
+                            "value": "402_653_196u32",
+                            "is_literal": false
+                        },
+                        "type": {
+                            "primitive": "u32"
+                        }
+                    }
+                }
+            },
+            "literal_const": {
+                "name": "LITERAL_CONSTANT",
+                "inner": {
+                    "constant": {
+                        "const": {
+                            "expr": "100",
+                            "value": "100u32",
+                            "is_literal": true
+                        },
+                        "type": {
+                            "primitive": "u32"
+                        }
+                    }
+                }
+            }
+        });
+
+        let index = mock_index.as_object().unwrap();
+
+        // Test resolving computed constant - should get the value without type suffix
+        let result1 = resolve_constant_in_index("COMPUTED_CONSTANT", index);
+        assert_eq!(result1, Some("402_653_196".to_string()));
+
+        // Test resolving literal constant - should get expr which is clean
+        let result2 = resolve_constant_in_index("LITERAL_CONSTANT", index);
+        assert_eq!(result2, Some("100".to_string()));
+    }
+
+    #[test]
+    fn test_parse_field_documentation_with_constants() {
+        let doc_text = r#"This field uses [`DEFAULT_TIMEOUT`] milliseconds.
+---
+@default: [`DEFAULT_VALUE`]
+@notes:
+  - See [`MAX_RETRIES`] for retry limit.
+  - Warning about [`DEPRECATED_CONST`]."#;
+
+        let result = parse_field_documentation(doc_text, "test_field").unwrap();
+
+        // Check that constants were collected
+        assert_eq!(result.1.len(), 4);
+        assert!(result.1.contains("DEFAULT_TIMEOUT"));
+        assert!(result.1.contains("DEFAULT_VALUE"));
+        assert!(result.1.contains("MAX_RETRIES"));
+        assert!(result.1.contains("DEPRECATED_CONST"));
+
+        // Check that normal parsing still works
+        assert_eq!(result.0.name, "test_field");
+        assert!(result.0.description.contains("DEFAULT_TIMEOUT"));
+        assert!(result.0.default_value.is_some());
+        assert!(result.0.notes.is_some());
+    }
+
+    #[test]
+    fn test_extract_config_docs_with_constants() {
+        let mock_rustdoc = serde_json::json!({
+            "index": {
+                "struct_1": {
+                    "name": "TestStruct",
+                    "inner": {
+                        "struct": {
+                            "kind": {
+                                "plain": {
+                                    "fields": ["field_1"]
+                                }
+                            }
+                        }
+                    },
+                    "docs": "Struct that uses [`STRUCT_CONSTANT`]."
+                },
+                "field_1": {
+                    "name": "test_field",
+                    "docs": "Field using [`FIELD_CONSTANT`].\n---\n@default: [`DEFAULT_CONST`]"
+                },
+                "const_1": {
+                    "name": "STRUCT_CONSTANT",
+                    "inner": {
+                        "constant": {
+                            "expr": "100"
+                        }
+                    }
+                },
+                "const_2": {
+                    "name": "FIELD_CONSTANT",
+                    "inner": {
+                        "constant": {
+                            "value": "\"test\""
+                        }
+                    }
+                },
+                "const_3": {
+                    "name": "DEFAULT_CONST",
+                    "inner": {
+                        "constant": {
+                            "expr": "42"
+                        }
+                    }
+                }
+            }
+        });
+
+        let result = extract_config_docs_from_rustdoc(&mock_rustdoc, &None).unwrap();
+
+        // Check that constants were resolved
+        assert_eq!(result.referenced_constants.len(), 3);
+        assert_eq!(
+            result.referenced_constants.get("STRUCT_CONSTANT"),
+            Some(&Some("100".to_string()))
+        );
+        assert_eq!(
+            result.referenced_constants.get("FIELD_CONSTANT"),
+            Some(&Some("\"test\"".to_string()))
+        );
+        assert_eq!(
+            result.referenced_constants.get("DEFAULT_CONST"),
+            Some(&Some("42".to_string()))
+        );
+
+        // Check that struct was extracted normally
+        assert_eq!(result.structs.len(), 1);
+        assert_eq!(result.structs[0].name, "TestStruct");
+    }
+
+    #[test]
+    fn test_extract_config_docs_with_unresolvable_constants() {
+        let mock_rustdoc = serde_json::json!({
+            "index": {
+                "struct_1": {
+                    "name": "TestStruct",
+                    "inner": {
+                        "struct": {
+                            "kind": {
+                                "plain": {
+                                    "fields": ["field_1"]
+                                }
+                            }
+                        }
+                    },
+                    "docs": "Struct that references [`MISSING_CONSTANT`]."
+                },
+                "field_1": {
+                    "name": "test_field",
+                    "docs": "Field description."
+                }
+            }
+        });
+
+        let result = extract_config_docs_from_rustdoc(&mock_rustdoc, &None).unwrap();
+
+        // Check that unresolvable constant is recorded with None value
+        assert_eq!(result.referenced_constants.len(), 1);
+        assert_eq!(
+            result.referenced_constants.get("MISSING_CONSTANT"),
+            Some(&None)
+        );
+    }
+
+    #[test]
+    fn test_private_items_included_in_rustdoc() {
+        // This test verifies that our fix for including private items in rustdoc generation
+        // allows us to resolve private constants that were previously inaccessible
+
+        // Simulate a rustdoc JSON that includes both public and private constants
+        // (which should happen with --document-private-items flag)
+        let mock_rustdoc = serde_json::json!({
+            "index": {
+                "struct_1": {
+                    "name": "TestStruct",
+                    "inner": {
+                        "struct": {
+                            "kind": {
+                                "plain": {
+                                    "fields": ["field_1"]
+                                }
+                            }
+                        }
+                    },
+                    "docs": "Struct description."
+                },
+                "field_1": {
+                    "name": "test_field",
+                    "docs": "Field that uses [`PRIVATE_CONSTANT`] and [`PUBLIC_CONSTANT`]."
+                },
+                // Public constant (would be included without --document-private-items)
+                "const_public": {
+                    "name": "PUBLIC_CONSTANT",
+                    "inner": {
+                        "constant": {
+                            "const": {
+                                "expr": "100",
+                                "type": "u32"
+                            }
+                        }
+                    },
+                    "visibility": "public"
+                },
+                // Private constant (only included with --document-private-items)
+                "const_private": {
+                    "name": "PRIVATE_CONSTANT",
+                    "inner": {
+                        "constant": {
+                            "const": {
+                                "expr": "200",
+                                "type": "u32"
+                            }
+                        }
+                    },
+                    "visibility": "crate"
+                }
+            }
+        });
+
+        let result = extract_config_docs_from_rustdoc(&mock_rustdoc, &None).unwrap();
+
+        // Both constants should be resolved now
+        assert_eq!(result.referenced_constants.len(), 2);
+        assert_eq!(
+            result.referenced_constants.get("PUBLIC_CONSTANT"),
+            Some(&Some("100".to_string()))
+        );
+        assert_eq!(
+            result.referenced_constants.get("PRIVATE_CONSTANT"),
+            Some(&Some("200".to_string()))
+        );
+    }
+
+    #[test]
+    fn test_multi_crate_constant_resolution() {
+        // This test verifies that our multi-crate constant resolution works
+        // It simulates the case where constants are defined in different crates
+
+        // Create a mock rustdoc index for the main crate (without the target constant)
+        let main_index = serde_json::json!({
+            "const_main": {
+                "name": "MAIN_CONSTANT",
+                "inner": {
+                    "constant": {
+                        "const": {
+                            "expr": "100",
+                            "type": "u32"
+                        }
+                    }
+                }
+            }
+        });
+
+        let main_index_obj = main_index.as_object().unwrap();
+
+        // Test resolving a constant that exists in main index
+        let result1 = resolve_constant_in_index("MAIN_CONSTANT", main_index_obj);
+        assert_eq!(result1, Some("100".to_string()));
+
+        // Test resolving a constant that doesn't exist in main index
+        let result2 = resolve_constant_in_index("EXTERNAL_CONSTANT", main_index_obj);
+        assert_eq!(result2, None);
+
+        // Note: Testing the full resolve_constant_reference function that reads from files
+        // would require setting up actual rustdoc JSON files, which is complex for unit tests.
+        // The integration test via the full pipeline covers this functionality.
+    }
+
+    #[test]
+    fn test_strip_type_suffix() {
+        // Test various type suffixes
+        assert_eq!(strip_type_suffix("50u64"), "50");
+        assert_eq!(strip_type_suffix("402_653_196u32"), "402_653_196");
+        assert_eq!(strip_type_suffix("100i32"), "100");
+        assert_eq!(strip_type_suffix("255u8"), "255");
+        assert_eq!(strip_type_suffix("3.14f32"), "3.14");
+        assert_eq!(strip_type_suffix("2.718f64"), "2.718");
+        assert_eq!(strip_type_suffix("1000usize"), "1000");
+        assert_eq!(strip_type_suffix("-42i64"), "-42");
+
+        // Test values without type suffixes (should remain unchanged)
+        assert_eq!(strip_type_suffix("42"), "42");
+        assert_eq!(strip_type_suffix("3.14"), "3.14");
+        assert_eq!(strip_type_suffix("hello"), "hello");
+        assert_eq!(strip_type_suffix("\"string\""), "\"string\"");
+
+        // Test edge cases
+        assert_eq!(strip_type_suffix(""), "");
+        assert_eq!(strip_type_suffix("u32"), "u32"); // Just the type name, not a suffixed value
+        assert_eq!(strip_type_suffix("value_u32_test"), "value_u32_test"); // Contains but doesn't end with type
+    }
+
+    #[test]
+    fn test_parse_field_documentation_with_required_and_units() {
+        let doc_text = r#"Field with required and units annotations.
+---
+@default: `5000`
+@required: true
+@units: milliseconds
+@notes:
+  - This field has all new features."#;
+
+        let result = parse_field_documentation(doc_text, "test_field").unwrap();
+
+        assert_eq!(result.0.name, "test_field");
+        assert_eq!(
+            result.0.description,
+            "Field with required and units annotations."
+        );
+        assert_eq!(result.0.default_value, Some("`5000`".to_string()));
+        assert_eq!(result.0.required, Some(true));
+        assert_eq!(result.0.units, Some("milliseconds".to_string()));
+        assert_eq!(
+            result.0.notes,
+            Some(vec!["This field has all new features.".to_string()])
+        );
+    }
+
+    #[test]
+    fn test_parse_field_documentation_required_variants() {
+        // Test "true" variant
+        let doc_text1 = r#"Required field.
+---
+@required: true"#;
+        let result1 = parse_field_documentation(doc_text1, "field1").unwrap();
+        assert_eq!(result1.0.required, Some(true));
+
+        // Test "false" variant
+        let doc_text2 = r#"Optional field.
+---
+@required: false"#;
+        let result2 = parse_field_documentation(doc_text2, "field2").unwrap();
+        assert_eq!(result2.0.required, Some(false));
+
+        // Test "TRUE" variant
+        let doc_text3 = r#"Required field.
+---
+@required: TRUE"#; // Needs to be lowercase, will default to false, but will log a warning
+        let result3 = parse_field_documentation(doc_text3, "field3").unwrap();
+        assert_eq!(result3.0.required, Some(false));
+
+        // Test "FALSE" variant
+        let doc_text4 = r#"Optional field.
+---
+@required: FALSE"#; // Needs to be lowercase, will default to false, but will log a warning
+        let result4 = parse_field_documentation(doc_text4, "field4").unwrap();
+        assert_eq!(result4.0.required, Some(false));
+
+        // Test invalid variant (should default to false with warning)
+        let doc_text5 = r#"Invalid required field.
+---
+@required: maybe"#;
+        let result5 = parse_field_documentation(doc_text5, "field5").unwrap();
+        assert_eq!(result5.0.required, Some(false));
+    }
+
+    #[test]
+    fn test_extract_annotation_literal_block_mode() {
+        let metadata = r#"@notes: |
+  This is a literal block
+    with preserved indentation
+  and multiple lines."#;
+
+        let result = extract_annotation(metadata, "notes");
+        assert!(result.is_some());
+        let notes = result.unwrap();
+        assert!(notes.contains("This is a literal block"));
+        assert!(notes.contains("  with preserved indentation"));
+        assert!(notes.contains("and multiple lines"));
+        // Should preserve newlines
+        assert!(notes.contains('\n'));
+    }
+
+    #[test]
+    fn test_extract_annotation_folded_block_mode() {
+        let metadata = r#"@default: >
+  This is a folded block
+  that should join lines
+  together.
+
+  But preserve paragraph breaks."#;
+
+        let result = extract_annotation(metadata, "default");
+        assert!(result.is_some());
+        let default = result.unwrap();
+        // Folded blocks should join lines with spaces
+        assert!(default.contains("This is a folded block that should join lines together."));
+        // But preserve paragraph breaks
+        assert!(default.contains("But preserve paragraph breaks."));
+    }
+
+    #[test]
+    fn test_extract_annotation_default_multiline_mode() {
+        let metadata = r#"@notes:
+  - First bullet point
+  - Second bullet point with
+    continuation on next line
+  - Third bullet point"#;
+
+        let result = extract_annotation(metadata, "notes");
+        assert!(result.is_some());
+        let notes = result.unwrap();
+        assert!(notes.contains("First bullet point"));
+        assert!(notes.contains("Second bullet point with"));
+        assert!(notes.contains("continuation on next line"));
+        assert!(notes.contains("Third bullet point"));
+    }
+
+    #[test]
+    fn test_extract_annotation_literal_block_with_same_line_content() {
+        let metadata = r#"@toml_example: | This content is on the same line
+  And this content is on the next line
+  With proper indentation preserved"#;
+
+        let result = extract_annotation(metadata, "toml_example");
+        assert!(result.is_some());
+        let toml = result.unwrap();
+        // Should only include content from subsequent lines, ignoring same-line content
+        assert!(!toml.contains("This content is on the same line"));
+        assert!(toml.contains("And this content is on the next line"));
+        assert!(toml.contains("With proper indentation preserved"));
+    }
+
+    #[test]
+    fn test_units_with_constant_references() {
+        let doc_text = r#"Field with units containing constant references.
+---
+@units: [`DEFAULT_TIMEOUT_MS`] milliseconds"#;
+
+        let result = parse_field_documentation(doc_text, "test_field").unwrap();
+        let (field_doc, referenced_constants) = result;
+
+        assert_eq!(
+            field_doc.units,
+            Some("[`DEFAULT_TIMEOUT_MS`] milliseconds".to_string())
+        );
+        // Check that constants were collected from units
+        assert!(referenced_constants.contains("DEFAULT_TIMEOUT_MS"));
+    }
+
+    #[test]
+    fn test_extract_annotation_default_mode_preserves_relative_indent() {
+        let metadata = r#"@notes:
+  - Main item 1
+    - Sub item 1a
+      - Sub-sub item 1a1
+    - Sub item 1b
+  - Main item 2"#;
+
+        let result = extract_annotation(metadata, "notes");
+        assert!(result.is_some());
+        let notes = result.unwrap();
+
+        // Should preserve relative indentation within the block
+        assert!(notes.contains("- Main item 1"));
+        assert!(notes.contains("  - Sub item 1a")); // 2 spaces more indented
+        assert!(notes.contains("    - Sub-sub item 1a1")); // 4 spaces more indented
+        assert!(notes.contains("  - Sub item 1b")); // Back to 2 spaces
+        assert!(notes.contains("- Main item 2")); // Back to base level
+    }
+
+    #[test]
+    fn test_extract_annotation_default_mode_mixed_indentation() {
+        let metadata = r#"@default:
+  First line with base indentation
+    Second line more indented
+  Third line back to base
+      Fourth line very indented"#;
+
+        let result = extract_annotation(metadata, "default");
+        assert!(result.is_some());
+        let default_val = result.unwrap();
+
+        // Should preserve relative spacing
+        let lines: Vec<&str> = default_val.lines().collect();
+        assert_eq!(lines[0], "First line with base indentation");
+        assert_eq!(lines[1], "  Second line more indented"); // 2 extra spaces
+        assert_eq!(lines[2], "Third line back to base");
+        assert_eq!(lines[3], "    Fourth line very indented"); // 4 extra spaces
+    }
+
+    #[test]
+    fn test_extract_annotation_toml_example_consistency() {
+        // Test that @toml_example now uses standard parsing (no special handling)
+        let metadata = r#"@toml_example: |
+  key = "value"
+    indented_key = "nested"
+  other = 123"#;
+
+        let result = extract_annotation(metadata, "toml_example");
+        assert!(result.is_some());
+        let toml = result.unwrap();
+
+        // Should use standard literal block parsing
+        assert!(toml.contains("key = \"value\""));
+        assert!(toml.contains("  indented_key = \"nested\"")); // Preserved relative indent
+        assert!(toml.contains("other = 123"));
+    }
+
+    #[test]
+    fn test_parse_folded_block_scalar_clip_chomping() {
+        // Test that folded blocks use "clip" chomping (consistent with literal)
+        let lines = vec![
+            "    First paragraph line",
+            "    continues here.",
+            "",
+            "    Second paragraph",
+            "    also continues.",
+            "",
+            "", // Extra empty lines at end
+        ];
+
+        let result = parse_folded_block_scalar(&lines, 0);
+
+        // Should fold lines within paragraphs but preserve paragraph breaks
+        assert!(result.contains("First paragraph line continues here."));
+        assert!(result.contains("Second paragraph also continues."));
+
+        // Should use clip chomping - preserve single trailing newline if content ends with one
+        // But since we're folding, the exact behavior depends on implementation
+        assert!(!result.ends_with("\n\n")); // Should not have multiple trailing newlines
+    }
+
+    #[test]
+    fn test_extract_annotation_edge_cases_empty_and_whitespace() {
+        // Test annotations with only whitespace or empty content
+        let metadata1 = "@default: |";
+        let metadata2 = "@notes:\n    \n    \n"; // Only whitespace lines
+        let metadata3 = "@deprecated: >\n"; // Folded with no content
+
+        assert_eq!(extract_annotation(metadata1, "default"), None);
+        assert_eq!(extract_annotation(metadata2, "notes"), None);
+        assert_eq!(extract_annotation(metadata3, "deprecated"), None);
+    }
+
+    #[test]
+    fn test_required_field_validation_comprehensive() {
+        // Test all supported boolean representations for @required
+        let test_cases = vec![
+            ("true", Some(true)),
+            ("True", Some(false)), // Need to be lowercase
+            ("TRUE", Some(false)), // Need to be lowercase
+            ("false", Some(false)),
+            ("False", Some(false)), // Will default to false, but will log a warning
+            ("FALSE", Some(false)), // Will default to false, but will log a warning
+            ("maybe", Some(false)), // Invalid defaults to false
+            ("invalid", Some(false)),
+        ];
+
+        for (input, expected) in test_cases {
+            let doc_text = format!("Test field.\n---\n@required: {}", input);
+            let result = parse_field_documentation(&doc_text, "test_field").unwrap();
+            assert_eq!(result.0.required, expected, "Failed for input: '{}'", input);
+        }
+
+        // Test empty @required annotation (should return None, not Some(false))
+        let doc_text_empty = "Test field.\n---\n@required:";
+        let result_empty = parse_field_documentation(doc_text_empty, "test_field").unwrap();
+        assert_eq!(
+            result_empty.0.required, None,
+            "Empty @required should not be parsed"
+        );
+    }
+
+    #[test]
+    fn test_units_with_multiline_content() {
+        // Test units annotation with multiline content
+        let doc_text = r#"Field with multiline units.
+---
+@units: |
+  seconds (range: 1-3600)
+  Default: [`DEFAULT_TIMEOUT`] seconds
+@required: true"#;
+
+        let result = parse_field_documentation(doc_text, "test_field").unwrap();
+        let (field_doc, referenced_constants) = result;
+
+        assert!(field_doc.units.is_some());
+        let units = field_doc.units.unwrap();
+        assert!(units.contains("seconds (range: 1-3600)"));
+        assert!(units.contains("Default: [`DEFAULT_TIMEOUT`] seconds"));
+        assert_eq!(field_doc.required, Some(true));
+        assert!(referenced_constants.contains("DEFAULT_TIMEOUT"));
+    }
+
+    #[test]
+    fn test_extract_annotation_literal_and_folded_ignore_same_line_content() {
+        // Test that same-line content is ignored for both | and >
+        let metadata_literal = r#"@notes: | Ignored same line content
+  Next line content
+  Another line"#;
+
+        let metadata_folded = r#"@default: > Ignored same line content
+  Next line content
+  Another line"#;
+
+        let literal_result = extract_annotation(metadata_literal, "notes").unwrap();
+        let folded_result = extract_annotation(metadata_folded, "default").unwrap();
+
+        // Same-line content should be ignored
+        assert!(!literal_result.contains("Ignored same line content"));
+        assert!(!folded_result.contains("Ignored same line content"));
+
+        // Literal mode should preserve all content from subsequent lines
+        assert!(literal_result.contains("Next line content"));
+        assert!(literal_result.contains("Another line"));
+
+        let literal_lines: Vec<&str> = literal_result.lines().collect();
+        assert_eq!(literal_lines.len(), 2);
+        assert_eq!(literal_lines[0], "Next line content");
+        assert_eq!(literal_lines[1], "Another line");
+
+        // Folded mode should fold the subsequent lines
+        assert!(folded_result.contains("Next line content"));
+        assert!(folded_result.contains("Another line"));
+
+        // In folded mode, lines at same indentation get joined with spaces
+        let expected_folded = "Next line content Another line";
+        assert_eq!(folded_result.trim(), expected_folded);
+    }
+
+    #[test]
+    fn test_json_navigation_helpers() {
+        let test_json = json!({
+            "level1": {
+                "level2": {
+                    "level3": "value",
+                    "array": ["item1", "item2"],
+                    "object": {
+                        "key": "value"
+                    }
+                },
+                "string_field": "test_string"
+            }
+        });
+
+        // Test get_json_path - valid paths
+        assert!(get_json_path(&test_json, &["level1"]).is_some());
+        assert!(get_json_path(&test_json, &["level1", "level2"]).is_some());
+        assert!(get_json_path(&test_json, &["level1", "level2", "level3"]).is_some());
+
+        // Test get_json_path - invalid paths
+        assert!(get_json_path(&test_json, &["nonexistent"]).is_none());
+        assert!(get_json_path(&test_json, &["level1", "nonexistent"]).is_none());
+        assert!(get_json_path(&test_json, &["level1", "level2", "level3", "too_deep"]).is_none());
+
+        // Test get_json_string
+        assert_eq!(
+            get_json_string(&test_json, &["level1", "level2", "level3"]),
+            Some("value")
+        );
+        assert_eq!(
+            get_json_string(&test_json, &["level1", "string_field"]),
+            Some("test_string")
+        );
+        assert!(get_json_string(&test_json, &["level1", "level2", "array"]).is_none()); // not a string
+
+        // Test get_json_array
+        let array_result = get_json_array(&test_json, &["level1", "level2", "array"]);
+        assert!(array_result.is_some());
+        assert_eq!(array_result.unwrap().len(), 2);
+        assert!(get_json_array(&test_json, &["level1", "string_field"]).is_none()); // not an array
+
+        // Test get_json_object
+        assert!(get_json_object(&test_json, &["level1"]).is_some());
+        assert!(get_json_object(&test_json, &["level1", "level2"]).is_some());
+        assert!(get_json_object(&test_json, &["level1", "level2", "object"]).is_some());
+        assert!(get_json_object(&test_json, &["level1", "string_field"]).is_none()); // not an object
+    }
+
+    #[test]
+    fn test_resolve_constant_in_index_edge_cases() {
+        // Test with empty index
+        let empty_index = serde_json::Map::new();
+        let result = resolve_constant_in_index("ANY_CONSTANT", &empty_index);
+        assert_eq!(result, None);
+
+        // Test with index containing non-constant items
+        let mock_index = serde_json::json!({
+            "item_1": {
+                "name": "NotAConstant",
+                "inner": {
+                    "function": {}
+                }
+            }
+        });
+        let index = mock_index.as_object().unwrap();
+        let result = resolve_constant_in_index("NotAConstant", index);
+        assert_eq!(result, None);
+    }
+
+    #[test]
+    fn test_resolve_constant_in_index_malformed_constant() {
+        // Test constant without value or expr - falls back to type field
+        let mock_index = serde_json::json!({
+            "const_1": {
+                "name": "MALFORMED_CONSTANT",
+                "inner": {
+                    "constant": {
+                        "type": "u32"
+                        // Missing value and expr fields
+                    }
+                }
+            }
+        });
+        let index = mock_index.as_object().unwrap();
+        let result = resolve_constant_in_index("MALFORMED_CONSTANT", index);
+        assert_eq!(result, Some("u32".to_string()));
+    }
+
+    #[test]
+    fn test_resolve_constant_in_index_underscore_expr() {
+        // Test constant with "_" expr and no value - falls back to type field
+        let mock_index = serde_json::json!({
+            "const_1": {
+                "name": "COMPUTED_CONSTANT",
+                "inner": {
+                    "constant": {
+                        "expr": "_",
+                        "type": "u32"
+                        // No value field
+                    }
+                }
+            }
+        });
+        let index = mock_index.as_object().unwrap();
+        let result = resolve_constant_in_index("COMPUTED_CONSTANT", index);
+        assert_eq!(result, Some("u32".to_string()));
+    }
+
+    #[test]
+    fn test_strip_type_suffix_edge_cases() {
+        // Test with invalid suffixes that shouldn't be stripped
+        assert_eq!(strip_type_suffix("123abc"), "123abc");
+        assert_eq!(
+            strip_type_suffix("value_with_u32_in_middle"),
+            "value_with_u32_in_middle"
+        );
+
+        // Test with partial type names
+        assert_eq!(strip_type_suffix("u"), "u");
+        assert_eq!(strip_type_suffix("u3"), "u3");
+
+        // Test with non-numeric values before type suffix
+        assert_eq!(strip_type_suffix("abcu32"), "abcu32");
+
+        // Test string literals with type suffixes inside
+        assert_eq!(strip_type_suffix("\"value_u32\""), "\"value_u32\"");
+    }
+
+    #[test]
+    fn test_get_json_navigation_edge_cases() {
+        let test_json = serde_json::json!({
+            "level1": {
+                "string": "value",
+                "number": 42,
+                "boolean": true,
+                "null_value": null
+            }
+        });
+
+        // Test getting wrong types
+        assert!(get_json_string(&test_json, &["level1", "number"]).is_none());
+        assert!(get_json_array(&test_json, &["level1", "string"]).is_none());
+        assert!(get_json_object(&test_json, &["level1", "boolean"]).is_none());
+
+        // Test deep paths that don't exist
+        assert!(get_json_path(&test_json, &["level1", "string", "deeper"]).is_none());
+        assert!(get_json_path(&test_json, &["nonexistent", "path"]).is_none());
+
+        // Test null values
+        assert!(get_json_string(&test_json, &["level1", "null_value"]).is_none());
+    }
+
+    #[test]
+    fn test_parse_field_documentation_edge_cases() {
+        // Test with only separator, no content
+        let doc_text = "Description\n---\n";
+        let result = parse_field_documentation(doc_text, "test_field").unwrap();
+        assert_eq!(result.0.description, "Description");
+        assert_eq!(result.0.default_value, None);
+
+        // Test with multiple separators
+        let doc_text = "Description\n---\n@default: value\n---\nIgnored section";
+        let result = parse_field_documentation(doc_text, "test_field").unwrap();
+        assert_eq!(result.0.description, "Description");
+        assert_eq!(result.0.default_value, Some("value".to_string()));
+
+        // Test with empty description
+        let doc_text = "\n---\n@default: value";
+        let result = parse_field_documentation(doc_text, "test_field").unwrap();
+        assert_eq!(result.0.description, "");
+        assert_eq!(result.0.default_value, Some("value".to_string()));
+    }
+
+    #[test]
+    fn test_extract_annotation_malformed_input() {
+        // Test with annotation without colon
+        let metadata = "@default no_colon_here\n@notes: valid";
+        assert_eq!(extract_annotation(metadata, "default"), None);
+        assert_eq!(
+            extract_annotation(metadata, "notes"),
+            Some("valid".to_string())
+        );
+
+        // Test with nested annotations - this will actually find "inside" because the function
+        // looks for the pattern anywhere in a line, not necessarily at the start
+        let metadata = "text with @default: inside\n@actual: real_value";
+        assert_eq!(
+            extract_annotation(metadata, "default"),
+            Some("inside".to_string())
+        );
+        assert_eq!(
+            extract_annotation(metadata, "actual"),
+            Some("real_value".to_string())
+        );
+    }
+
+    #[test]
+    fn test_parse_literal_block_scalar_edge_cases() {
+        // Test with empty input
+        let result = parse_literal_block_scalar(&[], 0);
+        assert_eq!(result, "");
+
+        // Test with only empty lines
+        let lines = vec!["", "  ", "\t", ""];
+        let result = parse_literal_block_scalar(&lines, 0);
+        assert_eq!(result, "");
+
+        // Test with mixed indentation
+        let lines = vec!["  line1", "    line2", "line3", "      line4"];
+        let result = parse_literal_block_scalar(&lines, 0);
+        assert!(result.contains("line1"));
+        assert!(result.contains("  line2")); // Preserved relative indent
+        assert!(result.contains("line3"));
+        assert!(result.contains("    line4")); // Preserved relative indent
+    }
+
+    #[test]
+    fn test_parse_folded_block_scalar_edge_cases() {
+        // Test with empty input
+        let result = parse_folded_block_scalar(&[], 0);
+        assert_eq!(result, "");
+
+        // Test with only empty lines
+        let lines = vec!["", "  ", "\t"];
+        let result = parse_folded_block_scalar(&lines, 0);
+        assert_eq!(result, "");
+
+        // Test paragraph separation
+        let lines = vec![
+            "  First paragraph line",
+            "  continues here",
+            "",
+            "  Second paragraph",
+            "  also continues",
+        ];
+        let result = parse_folded_block_scalar(&lines, 0);
+        assert!(result.contains("First paragraph line continues here"));
+        assert!(result.contains("Second paragraph also continues"));
+        // Should have paragraph separation
+        assert!(result.matches('\n').count() >= 1);
+    }
+
+    #[test]
+    fn test_collect_annotation_block_lines_edge_cases() {
+        let lines = vec![
+            "@first: value1",
+            "  content line 1",
+            "  content line 2",
+            "@second: value2",
+            "  different content",
+        ];
+
+        // Test collecting until next annotation
+        let result = collect_annotation_block_lines(&lines, 1, "@first: value1");
+        assert_eq!(result.len(), 2);
+        assert_eq!(result[0], "  content line 1");
+        assert_eq!(result[1], "  content line 2");
+
+        // Test collecting from end
+        let result = collect_annotation_block_lines(&lines, 4, "@second: value2");
+        assert_eq!(result.len(), 1);
+        assert_eq!(result[0], "  different content");
+    }
+
+    #[test]
+    fn test_find_constant_references_edge_cases() {
+        // Test with malformed brackets
+        let text = "[INCOMPLETE or [`VALID_CONSTANT`] and `not_constant`";
+        let constants = find_constant_references(text);
+        assert_eq!(constants.len(), 1);
+        assert!(constants.contains("VALID_CONSTANT"));
+
+        // Test with nested brackets - this won't match because [ in the middle breaks the pattern
+        let text = "[`OUTER_[INNER]_CONSTANT`]";
+        let constants = find_constant_references(text);
+        assert_eq!(constants.len(), 0);
+
+        // Test with empty brackets
+        let text = "[``] and [`VALID`]";
+        let constants = find_constant_references(text);
+        assert_eq!(constants.len(), 1);
+        assert!(constants.contains("VALID"));
+    }
+
+    #[test]
+    fn test_extract_struct_fields_complex_scenarios() {
+        // Test struct with no fields array
+        let mock_index = serde_json::json!({
+            "struct_1": {
+                "name": "EmptyStruct",
+                "inner": {
+                    "struct": {
+                        "kind": {
+                            "plain": {
+                                // No fields array
+                            }
+                        }
+                    }
+                }
+            }
+        });
+
+        let index = mock_index.as_object().unwrap();
+        let struct_item = &mock_index["struct_1"];
+        let (fields, _) = extract_struct_fields(index, struct_item).unwrap();
+        assert_eq!(fields.len(), 0);
+
+        // Test struct with empty fields array
+        let mock_index = serde_json::json!({
+            "struct_1": {
+                "name": "EmptyFieldsStruct",
+                "inner": {
+                    "struct": {
+                        "kind": {
+                            "plain": {
+                                "fields": []
+                            }
+                        }
+                    }
+                }
+            }
+        });
+
+        let index = mock_index.as_object().unwrap();
+        let struct_item = &mock_index["struct_1"];
+        let (fields, _) = extract_struct_fields(index, struct_item).unwrap();
+        assert_eq!(fields.len(), 0);
+    }
+}
diff --git a/contrib/tools/config-docs-generator/src/generate_markdown.rs b/contrib/tools/config-docs-generator/src/generate_markdown.rs
new file mode 100644
index 0000000000..c0a06bc0fd
--- /dev/null
+++ b/contrib/tools/config-docs-generator/src/generate_markdown.rs
@@ -0,0 +1,1338 @@
+// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
+// Copyright (C) 2020-2025 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program.  If not, see .
+use std::collections::HashMap;
+use std::fs;
+
+use anyhow::{Context, Result};
+use clap::{Arg, Command};
+use once_cell::sync::Lazy;
+use serde::{Deserialize, Serialize};
+
+#[derive(Debug, Serialize, Deserialize, Clone)]
+struct FieldDoc {
+    name: String,
+    description: String,
+    default_value: Option,
+    notes: Option>,
+    deprecated: Option,
+    toml_example: Option,
+    required: Option,
+    units: Option,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+struct StructDoc {
+    name: String,
+    description: Option,
+    fields: Vec,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+struct ConfigDocs {
+    structs: Vec,
+    referenced_constants: HashMap>, // Name -> Resolved Value (or None)
+}
+
+// Global context for cross-references
+struct GlobalContext {
+    // Map from struct name to markdown section anchor
+    struct_to_anchor: HashMap,
+    // Map from field name to (struct_name, anchor) for finding cross-references
+    field_to_struct: HashMap,
+    // Map from constant name to value (if we can extract them)
+    constants: HashMap,
+    // Custom section name mappings
+    custom_mappings: HashMap,
+}
+
+// Static regex for finding intra-documentation links - compiled once at startup
+static LINK_REGEX_BACKTICKS: Lazy =
+    Lazy::new(|| regex::Regex::new(r"\[`([A-Za-z0-9_:]+)`\]").unwrap());
+
+fn main() -> Result<()> {
+    let matches = Command::new("generate-markdown")
+        .about("Generate Markdown documentation from extracted config docs JSON")
+        .arg(
+            Arg::new("input")
+                .long("input")
+                .value_name("FILE")
+                .help("Input JSON file with extracted documentation")
+                .required(true),
+        )
+        .arg(
+            Arg::new("output")
+                .long("output")
+                .value_name("FILE")
+                .help("Output Markdown file")
+                .required(true),
+        )
+        .arg(
+            Arg::new("template")
+                .long("template")
+                .value_name("FILE")
+                .help(
+                    "Optional markdown template file (defaults to templates/reference_template.md)",
+                )
+                .required(true),
+        )
+        .arg(
+            Arg::new("mappings")
+                .long("section-name-mappings")
+                .value_name("FILE")
+                .help("Optional JSON file for struct name to TOML section name mappings")
+                .required(true),
+        )
+        .get_matches();
+
+    let input_path = matches.get_one::("input").unwrap();
+    let output_path = matches.get_one::("output").unwrap();
+    let template_path = matches.get_one::("template").unwrap();
+    let mappings_path = matches.get_one::("mappings").unwrap();
+
+    let input_content = fs::read_to_string(input_path)
+        .with_context(|| format!("Failed to read input JSON file: {}", input_path))?;
+
+    let config_docs: ConfigDocs =
+        serde_json::from_str(&input_content).with_context(|| "Failed to parse input JSON")?;
+
+    let custom_mappings = load_section_name_mappings(mappings_path)?;
+
+    let markdown = generate_markdown(&config_docs, template_path, &custom_mappings)?;
+
+    fs::write(output_path, markdown)
+        .with_context(|| format!("Failed to write output file: {}", output_path))?;
+
+    println!(
+        "Successfully generated Markdown documentation at {}",
+        output_path
+    );
+    Ok(())
+}
+
+fn load_section_name_mappings(mappings_file: &str) -> Result> {
+    let content = fs::read_to_string(mappings_file).with_context(|| {
+        format!(
+            "Failed to read section name mappings file: {}",
+            mappings_file
+        )
+    })?;
+
+    let mappings: HashMap = serde_json::from_str(&content).with_context(|| {
+        format!(
+            "Failed to parse section name mappings JSON: {}",
+            mappings_file
+        )
+    })?;
+
+    Ok(mappings)
+}
+
+fn load_template(template_path: &str) -> Result {
+    fs::read_to_string(template_path)
+        .with_context(|| format!("Failed to read template file: {}", template_path))
+}
+
+fn render_template(template: &str, variables: HashMap) -> String {
+    let mut result = template.to_string();
+
+    for (key, value) in variables {
+        let placeholder = format!("{{{{{}}}}}", key);
+        result = result.replace(&placeholder, &value);
+    }
+
+    result
+}
+
+fn generate_markdown(
+    config_docs: &ConfigDocs,
+    template_path: &str,
+    custom_mappings: &HashMap,
+) -> Result {
+    // Load template
+    let template = load_template(template_path)?;
+
+    // Build global context for cross-references
+    let global_context = build_global_context(config_docs, custom_mappings);
+
+    // Build table of contents
+    let mut toc_content = String::new();
+    for struct_doc in &config_docs.structs {
+        let section_name = struct_to_section_name(&struct_doc.name, custom_mappings);
+        toc_content.push_str(&format!(
+            "- [{}]({})\n",
+            section_name,
+            section_anchor(§ion_name)
+        ));
+    }
+
+    // Generate sections for each struct
+    let mut struct_sections = String::new();
+    for struct_doc in &config_docs.structs {
+        generate_struct_section(
+            &mut struct_sections,
+            struct_doc,
+            &global_context,
+            custom_mappings,
+        )?;
+        struct_sections.push('\n');
+    }
+
+    // Prepare template variables
+    let mut template_vars = HashMap::new();
+    template_vars.insert("toc_content".to_string(), toc_content);
+    template_vars.insert("struct_sections".to_string(), struct_sections);
+
+    // Render template with variables
+    let output = render_template(&template, template_vars);
+
+    Ok(output)
+}
+
+fn build_global_context(
+    config_docs: &ConfigDocs,
+    custom_mappings: &HashMap,
+) -> GlobalContext {
+    let mut struct_to_anchor = HashMap::new();
+    let mut field_to_struct = HashMap::new();
+    let mut resolved_constants_map = HashMap::new();
+
+    // Build mappings
+    for struct_doc in &config_docs.structs {
+        let section_name = struct_to_section_name(&struct_doc.name, custom_mappings);
+        let anchor = section_anchor(§ion_name);
+        struct_to_anchor.insert(struct_doc.name.clone(), anchor.clone());
+
+        for field in &struct_doc.fields {
+            field_to_struct.insert(
+                field.name.clone(),
+                (struct_doc.name.clone(), anchor.clone()),
+            );
+        }
+    }
+
+    // Populate constants from the parsed ConfigDocs.referenced_constants
+    for (name, opt_value) in &config_docs.referenced_constants {
+        if let Some(value) = opt_value {
+            resolved_constants_map.insert(name.clone(), value.clone());
+        }
+    }
+
+    GlobalContext {
+        struct_to_anchor,
+        field_to_struct,
+        constants: resolved_constants_map,
+        custom_mappings: custom_mappings.clone(),
+    }
+}
+
+fn generate_struct_section(
+    output: &mut String,
+    struct_doc: &StructDoc,
+    global_context: &GlobalContext,
+    custom_mappings: &HashMap,
+) -> Result<()> {
+    let section_name = struct_to_section_name(&struct_doc.name, custom_mappings);
+    output.push_str(&format!("## {}\n\n", section_name));
+
+    // Add struct description if available
+    if let Some(description) = &struct_doc.description {
+        output.push_str(&format!(
+            "{}\n\n",
+            process_intralinks_with_context(description, global_context, &struct_doc.name)
+        ));
+    }
+
+    // Only create table if there are fields
+    if struct_doc.fields.is_empty() {
+        output.push_str("*No configurable parameters documented.*\n\n");
+        return Ok(());
+    }
+
+    // Sort fields: non-deprecated first, then deprecated
+    let mut sorted_fields = struct_doc.fields.clone();
+    sorted_fields.sort_by(|a, b| {
+        let a_deprecated = is_deprecated(a);
+        let b_deprecated = is_deprecated(b);
+
+        match (a_deprecated, b_deprecated) {
+            (false, true) => std::cmp::Ordering::Less, // non-deprecated first
+            (true, false) => std::cmp::Ordering::Greater, // deprecated last
+            _ => a.name.cmp(&b.name),                  // alphabetical within groups
+        }
+    });
+
+    // Parameter table header
+    output.push_str("| Parameter | Description | Default |\n");
+    output.push_str("|-----------|-------------|----------|\n");
+
+    // Generate table rows for each field
+    for field in &sorted_fields {
+        generate_field_row(output, field, &struct_doc.name, global_context)?;
+    }
+
+    output.push('\n');
+    Ok(())
+}
+
+fn generate_field_row(
+    output: &mut String,
+    field: &FieldDoc,
+    struct_name: &str,
+    global_context: &GlobalContext,
+) -> Result<()> {
+    // Create proper anchor ID
+    let section_name = struct_to_section_name_with_context(struct_name, global_context);
+    let anchor_id = format!(
+        "{}-{}",
+        section_name.trim_start_matches('[').trim_end_matches(']'),
+        field.name
+    );
+
+    // Use HTML span with id for proper anchoring
+    let field_name = if is_deprecated(field) {
+        format!(
+            "~~[{}](#{})~~",
+            anchor_id,
+            escape_markdown(&field.name),
+            anchor_id
+        )
+    } else {
+        format!(
+            "[{}](#{})",
+            anchor_id,
+            escape_markdown(&field.name),
+            anchor_id
+        )
+    };
+
+    // Build comprehensive description column with struct context
+    let mut description_parts = Vec::new();
+
+    // Main description
+    if !field.description.is_empty() {
+        let main_desc = if let Some(separator_pos) = field.description.find("---") {
+            field.description[..separator_pos].trim()
+        } else {
+            &field.description
+        };
+
+        if !main_desc.is_empty() {
+            // Check if this description contains hierarchical lists (indented bullet points)
+            let has_hierarchical_lists = main_desc.lines().any(|line| {
+                let trimmed = line.trim();
+                let leading_spaces = line.len() - line.trim_start().len();
+                trimmed.starts_with("- ") && leading_spaces > 0
+            });
+
+            let processed_desc = if has_hierarchical_lists {
+                // Use hierarchical list processing to preserve indentation
+                process_hierarchical_lists(main_desc, global_context, struct_name)
+            } else {
+                // Use regular processing with intra-links
+                process_intralinks_with_context(main_desc, global_context, struct_name)
+                    .replace('\n', "
") + }; + + description_parts.push(processed_desc); + } + } + + // Add notes if present + if let Some(notes) = &field.notes { + let mut notes_section = String::new(); + notes_section.push_str("

**Notes:**"); + for note in notes { + notes_section.push_str(&format!( + "
- {}", + process_intralinks_with_context(note, global_context, struct_name) + )); + } + description_parts.push(notes_section); + } + + // Add deprecation warning if present + if let Some(deprecated) = &field.deprecated { + description_parts.push(format!("

**⚠️ DEPRECATED:** {}", deprecated)); + } + + // Add TOML example if present + if let Some(toml_example) = &field.toml_example { + let clean_example = if toml_example.starts_with('|') { + toml_example.trim_start_matches('|').trim_start() + } else { + toml_example + }; + + // Use HTML pre/code formatting that works properly in markdown tables + // instead of markdown fenced code blocks which get mangled by br tag conversion + let escaped_example = clean_example + .replace('&', "&") + .replace('<', "<") + .replace('>', ">") + .replace('\n', " "); // Use HTML entity for newline to avoid
conversion + + let example_section = format!( + "

**Example:**
{}
", + escaped_example // HTML entities will be rendered as newlines by
+        );
+        description_parts.push(example_section);
+    }
+
+    // Add units information if present
+    if let Some(units) = &field.units {
+        let units_text = process_intralinks_with_context(units, global_context, struct_name);
+        description_parts.push(format!("

**Units:** {}", units_text)); + } + + let description = if description_parts.is_empty() { + "*No description available*".to_string() + } else { + description_parts.join("") + }; + + // Default value column - handle required fields + let default_value = match (&field.required, &field.default_value) { + // If explicitly marked as required=true, show as required regardless of default + (Some(true), _) => "**Required**".to_string(), + // If explicitly marked as required=false and has default, show the default + (Some(false), Some(default)) => { + process_intralinks_with_context(default, global_context, struct_name) + } + // If explicitly marked as required=false but no default, show as optional + (Some(false), None) => "*Optional*".to_string(), + // If required field is not specified, use default behavior (backward compatibility) + (None, Some(default)) => { + process_intralinks_with_context(default, global_context, struct_name) + } + (None, None) => "**Required**".to_string(), + }; + + output.push_str(&format!( + "| {} | {} | {} |\n", + field_name, + escape_markdown_table(&description), + escape_markdown_table(&default_value) + )); + + Ok(()) +} + +fn escape_markdown_table(text: &str) -> String { + text.replace('|', "\\|").replace('\n', "
") +} + +fn is_deprecated(field: &FieldDoc) -> bool { + field.deprecated.is_some() +} + +fn struct_to_section_name(struct_name: &str, custom_mappings: &HashMap) -> String { + // Check custom mappings first + if let Some(section_name) = custom_mappings.get(struct_name) { + return section_name.clone(); + } + format!("[{}]", struct_name.to_lowercase()) +} + +fn struct_to_section_name_with_context( + struct_name: &str, + global_context: &GlobalContext, +) -> String { + struct_to_section_name(struct_name, &global_context.custom_mappings) +} + +fn escape_markdown(text: &str) -> String { + text.replace('|', "\\|") + .replace('[', "\\[") + .replace(']', "\\]") +} + +fn section_anchor(section: &str) -> String { + format!( + "#{}", + section + .to_lowercase() + .replace(' ', "-") + .replace("[", "") + .replace("]", "") + ) +} + +fn process_intralinks_with_context( + text: &str, + global_context: &GlobalContext, + current_struct_name: &str, +) -> String { + // Process cross-references in both formats: + // 1. [`StructName::field`] or [`CONSTANT_NAME`] (with backticks) + LINK_REGEX_BACKTICKS + .replace_all(text, |caps: ®ex::Captures| { + process_reference(&caps[1], global_context, current_struct_name) + }) + .to_string() +} + +fn process_reference( + reference: &str, + global_context: &GlobalContext, + current_struct_name: &str, +) -> String { + if reference.contains("::") { + // This is a struct::field reference + let parts: Vec<&str> = reference.split("::").collect(); + if parts.len() == 2 { + let ref_struct_name = parts[0]; + let field_name = parts[1]; + + // Check if the referenced struct exists in our docs + if global_context + .struct_to_anchor + .contains_key(ref_struct_name) + { + // Create proper anchor ID + let section_name = + struct_to_section_name_with_context(ref_struct_name, global_context); + let anchor_id = format!( + "{}-{}", + section_name.trim_start_matches('[').trim_end_matches(']'), + field_name + ); + + // Check if it's the same struct or different struct + if ref_struct_name == current_struct_name { + // Same struct: just show field name + return format!("[{}](#{}) ", field_name, anchor_id); + } else { + // Different struct: show [config_section].field_name as a link + let config_section = section_name.trim_start_matches('[').trim_end_matches(']'); + return format!("[[{}].{}](#{}) ", config_section, field_name, anchor_id); + } + } + } + } else { + // This might be a constant reference + if let Some(value) = global_context.constants.get(reference) { + return format!("`{value}`"); + } + + // Check if it's a standalone field name (without struct prefix) + if let Some((field_struct_name, _anchor)) = global_context.field_to_struct.get(reference) { + let section_name = + struct_to_section_name_with_context(field_struct_name, global_context); + let anchor_id = format!( + "{}-{}", + section_name.trim_start_matches('[').trim_end_matches(']'), + reference + ); + + // Check if it's the same struct or different struct + if field_struct_name == current_struct_name { + // Same struct: just show field name + return format!("[{}](#{}) ", reference, anchor_id); + } else { + // Different struct: show [config_section].field_name as a link + let config_section = section_name.trim_start_matches('[').trim_end_matches(']'); + return format!("[[{}].{}](#{}) ", config_section, reference, anchor_id); + } + } + } + + // If we can't resolve the reference, keep the text + format!("`{reference}`") +} + +/// Process text to preserve hierarchical list indentation +/// Converts markdown-style indented lists to HTML that preserves indentation in table cells +fn process_hierarchical_lists( + text: &str, + global_context: &GlobalContext, + struct_name: &str, +) -> String { + let lines: Vec<&str> = text.lines().collect(); + let mut result = Vec::new(); + + for line in lines { + if line.trim().starts_with("- ") { + // Count leading spaces to determine indentation level + let leading_spaces = line.len() - line.trim_start().len(); + + // Convert spaces to non-breaking spaces for HTML preservation + // Every 2 spaces becomes 2   entities for visual indentation + let indent_html = " ".repeat(leading_spaces); + + // Process intra-links in the content + let content = line.trim(); + let processed_content = + process_intralinks_with_context(content, global_context, struct_name); + + result.push(format!("{}{}", indent_html, processed_content)); + } else { + // Process intra-links in non-bullet lines too + let processed_line = process_intralinks_with_context(line, global_context, struct_name); + result.push(processed_line); + } + } + + result.join("
") +} + +#[cfg(test)] +mod tests { + use super::*; + + // Helper function to create a basic FieldDoc for testing + fn create_field_doc(name: &str, description: &str) -> FieldDoc { + FieldDoc { + name: name.to_string(), + description: description.to_string(), + default_value: None, + notes: None, + deprecated: None, + toml_example: None, + required: None, + units: None, + } + } + + // Helper function to create a basic StructDoc for testing + fn create_struct_doc( + name: &str, + description: Option<&str>, + fields: Vec, + ) -> StructDoc { + StructDoc { + name: name.to_string(), + description: description.map(|s| s.to_string()), + fields, + } + } + + // Helper function to create a basic ConfigDocs for testing + fn create_config_docs(structs: Vec) -> ConfigDocs { + ConfigDocs { + structs, + referenced_constants: HashMap::new(), + } + } + + // Helper function to create a mock GlobalContext for testing + fn create_mock_global_context() -> GlobalContext { + let mut struct_to_anchor = HashMap::new(); + let mut field_to_struct = HashMap::new(); + let mut constants = HashMap::new(); + let mut custom_mappings = HashMap::new(); + + // Add custom mappings like the real ones + custom_mappings.insert("NodeConfig".to_string(), "[node]".to_string()); + custom_mappings.insert("MinerConfig".to_string(), "[miner]".to_string()); + + // Add some test structs and fields + struct_to_anchor.insert("NodeConfig".to_string(), "#node".to_string()); + struct_to_anchor.insert("MinerConfig".to_string(), "#miner".to_string()); + + field_to_struct.insert( + "test_field".to_string(), + ("NodeConfig".to_string(), "#node".to_string()), + ); + field_to_struct.insert( + "other_field".to_string(), + ("MinerConfig".to_string(), "#miner".to_string()), + ); + + constants.insert("TEST_CONSTANT".to_string(), "42".to_string()); + constants.insert("ANOTHER_CONSTANT".to_string(), "true".to_string()); + + GlobalContext { + struct_to_anchor, + field_to_struct, + constants, + custom_mappings, + } + } + + // I. Basic Markdown Generation Tests + + #[test] + fn test_generate_markdown_empty_config() { + let config_docs = create_config_docs(vec![]); + let template_path = "templates/reference_template.md"; + let result = generate_markdown(&config_docs, template_path, &HashMap::new()).unwrap(); + + assert!(result.contains("# Stacks Node Configuration Reference")); + assert!(result.contains("## Table of Contents")); + // Should not contain any specific struct sections + assert!(!result.contains("## [")); + } + + #[test] + fn test_generate_markdown_with_one_struct_no_fields() { + let struct_doc = create_struct_doc("TestStruct", Some("A test struct"), vec![]); + let config_docs = create_config_docs(vec![struct_doc]); + let template_path = "templates/reference_template.md"; + let result = generate_markdown(&config_docs, template_path, &HashMap::new()).unwrap(); + + assert!(result.contains("# Stacks Node Configuration Reference")); + assert!(result.contains("- [[teststruct]](#teststruct)")); + assert!(result.contains("## [teststruct]")); + assert!(result.contains("A test struct")); + assert!(result.contains("*No configurable parameters documented.*")); + } + + #[test] + fn test_generate_markdown_with_one_struct_with_fields() { + let field = create_field_doc("test_field", "A test field"); + let struct_doc = create_struct_doc("TestStruct", Some("A test struct"), vec![field]); + let config_docs = create_config_docs(vec![struct_doc]); + let template_path = "templates/reference_template.md"; + let result = generate_markdown(&config_docs, template_path, &HashMap::new()).unwrap(); + + assert!(result.contains("# Stacks Node Configuration Reference")); + assert!(result.contains("- [[teststruct]](#teststruct)")); + assert!(result.contains("## [teststruct]")); + assert!(result.contains("A test struct")); + assert!(result.contains("| Parameter | Description | Default |")); + assert!(result.contains("test_field")); + assert!(result.contains("A test field")); + } + + // II. Section & Anchor Generation Tests + + #[test] + fn test_struct_to_section_name_known_structs() { + let mut mappings = HashMap::new(); + // Load the expected mappings based on section_name_mappings.json + mappings.insert("BurnchainConfig".to_string(), "[burnchain]".to_string()); + mappings.insert("NodeConfig".to_string(), "[node]".to_string()); + mappings.insert("MinerConfig".to_string(), "[miner]".to_string()); + mappings.insert( + "ConnectionOptionsFile".to_string(), + "[connection_options]".to_string(), + ); + mappings.insert( + "FeeEstimationConfigFile".to_string(), + "[fee_estimation]".to_string(), + ); + mappings.insert( + "EventObserverConfigFile".to_string(), + "[[events_observer]]".to_string(), + ); + mappings.insert( + "InitialBalanceFile".to_string(), + "[[ustx_balance]]".to_string(), + ); + + assert_eq!( + struct_to_section_name("BurnchainConfig", &mappings), + "[burnchain]" + ); + assert_eq!(struct_to_section_name("NodeConfig", &mappings), "[node]"); + assert_eq!(struct_to_section_name("MinerConfig", &mappings), "[miner]"); + assert_eq!( + struct_to_section_name("ConnectionOptionsFile", &mappings), + "[connection_options]" + ); + assert_eq!( + struct_to_section_name("FeeEstimationConfigFile", &mappings), + "[fee_estimation]" + ); + assert_eq!( + struct_to_section_name("EventObserverConfigFile", &mappings), + "[[events_observer]]" + ); + assert_eq!( + struct_to_section_name("InitialBalanceFile", &mappings), + "[[ustx_balance]]" + ); + } + + #[test] + fn test_struct_to_section_name_unknown_struct() { + let mappings = HashMap::new(); + assert_eq!( + struct_to_section_name("MyCustomConfig", &mappings), + "[mycustomconfig]" + ); + assert_eq!( + struct_to_section_name("UnknownStruct", &mappings), + "[unknownstruct]" + ); + } + + #[test] + fn test_section_anchor_generation() { + assert_eq!(section_anchor("[node]"), "#node"); + assert_eq!(section_anchor("[burnchain]"), "#burnchain"); + assert_eq!(section_anchor("[my custom section]"), "#my-custom-section"); + assert_eq!( + section_anchor("[connection_options]"), + "#connection_options" + ); + } + + // III. Field Row Generation Tests + + #[test] + fn test_generate_field_row_basic_field() { + let field = create_field_doc("basic_field", "A basic field description"); + let global_context = create_mock_global_context(); + let mut output = String::new(); + + generate_field_row(&mut output, &field, "TestStruct", &global_context).unwrap(); + + assert!(output.contains("basic_field")); + assert!(output.contains("A basic field description")); + assert!(output.contains("**Required**")); + assert!(output.contains("")); + } + + #[test] + fn test_generate_field_row_with_default_value() { + let mut field = create_field_doc("field_with_default", "Field with default value"); + field.default_value = Some("`42`".to_string()); + let global_context = create_mock_global_context(); + let mut output = String::new(); + + generate_field_row(&mut output, &field, "TestStruct", &global_context).unwrap(); + + assert!(output.contains("field_with_default")); + assert!(output.contains("Field with default value")); + assert!(output.contains("`42`")); + assert!(!output.contains("**Required**")); + } + + #[test] + fn test_generate_field_row_without_default_value() { + let field = create_field_doc("required_field", "A required field"); + let global_context = create_mock_global_context(); + let mut output = String::new(); + + generate_field_row(&mut output, &field, "TestStruct", &global_context).unwrap(); + + assert!(output.contains("required_field")); + assert!(output.contains("**Required**")); + } + + #[test] + fn test_generate_field_row_with_notes() { + let mut field = create_field_doc("field_with_notes", "Field with notes"); + field.notes = Some(vec!["First note".to_string(), "Second note".to_string()]); + let global_context = create_mock_global_context(); + let mut output = String::new(); + + generate_field_row(&mut output, &field, "TestStruct", &global_context).unwrap(); + + assert!(output.contains("**Notes:**")); + assert!(output.contains("- First note")); + assert!(output.contains("- Second note")); + } + + #[test] + fn test_generate_field_row_deprecated_field() { + let mut field = create_field_doc("old_field", "An old field"); + field.deprecated = Some("Use new_field instead".to_string()); + let global_context = create_mock_global_context(); + let mut output = String::new(); + + generate_field_row(&mut output, &field, "TestStruct", &global_context).unwrap(); + + assert!(output.contains("~~")); + assert!(output.contains("**⚠️ DEPRECATED:**")); + assert!(output.contains("Use new_field instead")); + } + + #[test] + fn test_generate_field_row_with_toml_example() { + let mut field = create_field_doc("field_with_example", "Field with TOML example"); + field.toml_example = Some("key = \"value\"\nnumber = 42".to_string()); + let global_context = create_mock_global_context(); + let mut output = String::new(); + + generate_field_row(&mut output, &field, "TestStruct", &global_context).unwrap(); + + assert!(output.contains("**Example:**")); + assert!(output.contains("
"));
+        assert!(output.contains("key = \"value\""));
+        assert!(output.contains("number = 42"));
+        assert!(output.contains("
")); + } + + #[test] + fn test_generate_field_row_toml_example_preserves_newlines() { + let mut field = create_field_doc("multiline_example", "Field with multiline TOML example"); + field.toml_example = + Some("key = \"value\"\nnested = {\n sub_key = \"sub_value\"\n}".to_string()); + let global_context = create_mock_global_context(); + let mut output = String::new(); + + generate_field_row(&mut output, &field, "TestStruct", &global_context).unwrap(); + + assert!(output.contains("**Example:**")); + assert!(output.contains("
"));
+        assert!(output.contains("
")); + + // Find the code block content + let pre_start = output.find("
").unwrap();
+        let pre_end = output.find("
").unwrap(); + let code_content = &output[pre_start..pre_end + "
".len()]; + + // Should NOT contain
tags inside the code block + assert!( + !code_content.contains("
"), + "Code block should not contain
tags" + ); + + // Should contain HTML entities for newlines instead + assert!( + code_content.contains(" "), + "Code block should contain HTML entities for newlines" + ); + + // Should contain the key-value pairs + assert!(code_content.contains("key = \"value\"")); + assert!(code_content.contains("sub_key = \"sub_value\"")); + + // Should contain the actual newline characters in the original TOML + assert!(field.toml_example.as_ref().unwrap().contains('\n')); + } + + #[test] + fn test_generate_field_row_hierarchical_lists() { + let field = create_field_doc( + "complex_list_field", + r"Field with hierarchical lists: +- Main item 1 + - Sub item 1a + - Sub-sub item 1a1 + - Sub item 1b +- Main item 2 + - Sub item 2a", + ); + let global_context = create_mock_global_context(); + let mut output = String::new(); + + generate_field_row(&mut output, &field, "TestStruct", &global_context).unwrap(); + + // Verify that indentation is preserved with   entities + assert!(output.contains("- Main item 1")); + assert!(output.contains("  - Sub item 1a")); + assert!(output.contains("    - Sub-sub item 1a1")); + assert!(output.contains("  - Sub item 1b")); + assert!(output.contains("- Main item 2")); + assert!(output.contains("  - Sub item 2a")); + } + + #[test] + fn test_generate_field_row_hierarchical_lists_with_intralinks() { + let field = create_field_doc( + "list_with_links", + r"Field with links in hierarchical lists: +- Main item with [`TEST_CONSTANT`] + - Sub item with [`NodeConfig::test_field`] + - Sub-sub item with [`other_field`]", + ); + let global_context = create_mock_global_context(); + let mut output = String::new(); + + generate_field_row(&mut output, &field, "TestStruct", &global_context).unwrap(); + + // Verify that indentation is preserved AND intra-links are processed + assert!(output.contains("- Main item with `42`")); // constant resolved + assert!( + output.contains("  - Sub item with [[node].test_field](#node-test_field)") + ); // field link with indentation + assert!(output.contains( + "    - Sub-sub item with [[miner].other_field](#miner-other_field)" + )); // cross-struct field link with indentation + } + + #[test] + fn test_generate_field_row_with_required_true() { + let mut field = create_field_doc("required_field", "A required field"); + field.required = Some(true); + field.default_value = Some("`default_value`".to_string()); // Even with default, should show as required + let global_context = create_mock_global_context(); + let mut output = String::new(); + + generate_field_row(&mut output, &field, "TestStruct", &global_context).unwrap(); + + assert!(output.contains("required_field")); + assert!(output.contains("A required field")); + assert!(output.contains("**Required**")); + assert!(!output.contains("`default_value`")); // Should not show default when required=true + } + + #[test] + fn test_generate_field_row_with_required_false_and_default() { + let mut field = create_field_doc("optional_field", "An optional field"); + field.required = Some(false); + field.default_value = Some("`42`".to_string()); + let global_context = create_mock_global_context(); + let mut output = String::new(); + + generate_field_row(&mut output, &field, "TestStruct", &global_context).unwrap(); + + assert!(output.contains("optional_field")); + assert!(output.contains("An optional field")); + assert!(output.contains("`42`")); + assert!(!output.contains("**Required**")); + } + + #[test] + fn test_generate_field_row_with_required_false_no_default() { + let mut field = create_field_doc("optional_field", "An optional field"); + field.required = Some(false); + field.default_value = None; + let global_context = create_mock_global_context(); + let mut output = String::new(); + + generate_field_row(&mut output, &field, "TestStruct", &global_context).unwrap(); + + assert!(output.contains("optional_field")); + assert!(output.contains("An optional field")); + assert!(output.contains("*Optional*")); + assert!(!output.contains("**Required**")); + } + + #[test] + fn test_generate_field_row_with_units() { + let mut field = create_field_doc("timeout_field", "A timeout field"); + field.units = Some("milliseconds".to_string()); + field.default_value = Some("`5000`".to_string()); + let global_context = create_mock_global_context(); + let mut output = String::new(); + + generate_field_row(&mut output, &field, "TestStruct", &global_context).unwrap(); + + assert!(output.contains("timeout_field")); + assert!(output.contains("A timeout field")); + assert!(output.contains("**Units:** milliseconds")); + assert!(output.contains("`5000`")); + } + + #[test] + fn test_generate_field_row_with_units_and_constants() { + let mut field = create_field_doc("timeout_field", "A timeout field"); + field.units = Some("[`TEST_CONSTANT`] milliseconds".to_string()); + field.default_value = Some("`5000`".to_string()); + let global_context = create_mock_global_context(); + let mut output = String::new(); + + generate_field_row(&mut output, &field, "TestStruct", &global_context).unwrap(); + + assert!(output.contains("timeout_field")); + assert!(output.contains("A timeout field")); + assert!(output.contains("**Units:** `42` milliseconds")); // Constant should be resolved + assert!(output.contains("`5000`")); + } + + #[test] + fn test_generate_field_row_all_new_features() { + let mut field = create_field_doc("complex_field", "A field with all new features"); + field.required = Some(true); + field.units = Some("seconds".to_string()); + field.notes = Some(vec!["Important note".to_string()]); + field.toml_example = Some("field = 30".to_string()); + let global_context = create_mock_global_context(); + let mut output = String::new(); + + generate_field_row(&mut output, &field, "TestStruct", &global_context).unwrap(); + + assert!(output.contains("complex_field")); + assert!(output.contains("A field with all new features")); + assert!(output.contains("**Required**")); + assert!(output.contains("**Units:** seconds")); + assert!(output.contains("**Notes:**")); + assert!(output.contains("- Important note")); + assert!(output.contains("**Example:**")); + assert!(output.contains("field = 30")); + } + + #[test] + fn test_generate_field_row_units_with_constants_and_intralinks() { + let mut field = create_field_doc("timeout_field", "A timeout field"); + field.units = + Some("[`TEST_CONSTANT`] seconds (see [`NodeConfig::test_field`])".to_string()); + field.default_value = Some("`30`".to_string()); + let global_context = create_mock_global_context(); + let mut output = String::new(); + + generate_field_row(&mut output, &field, "TestStruct", &global_context).unwrap(); + + assert!(output.contains("timeout_field")); + assert!(output.contains("**Units:**")); + // Constants should be resolved and intralinks processed + assert!(output.contains("`42`")); // TEST_CONSTANT resolved + assert!(output.contains("[[node].test_field](#node-test_field)")); // Cross-struct reference format + } + + #[test] + fn test_generate_field_row_required_field_combinations() { + let global_context = create_mock_global_context(); + + // Test required=true with default (should show Required, not default) + let mut field1 = create_field_doc("req_with_default", "Required with default"); + field1.required = Some(true); + field1.default_value = Some("`ignored`".to_string()); + let mut output1 = String::new(); + generate_field_row(&mut output1, &field1, "TestStruct", &global_context).unwrap(); + assert!(output1.contains("**Required**")); + assert!(!output1.contains("`ignored`")); + + // Test required=false with default (should show default) + let mut field2 = create_field_doc("opt_with_default", "Optional with default"); + field2.required = Some(false); + field2.default_value = Some("`42`".to_string()); + let mut output2 = String::new(); + generate_field_row(&mut output2, &field2, "TestStruct", &global_context).unwrap(); + assert!(output2.contains("`42`")); + assert!(!output2.contains("**Required**")); + assert!(!output2.contains("*Optional*")); + + // Test required=false without default (should show Optional) + let mut field3 = create_field_doc("opt_no_default", "Optional without default"); + field3.required = Some(false); + field3.default_value = None; + let mut output3 = String::new(); + generate_field_row(&mut output3, &field3, "TestStruct", &global_context).unwrap(); + assert!(output3.contains("*Optional*")); + assert!(!output3.contains("**Required**")); + + // Test no required field specified (backward compatibility) + let mut field4 = create_field_doc("legacy_field", "Legacy field"); + field4.required = None; + field4.default_value = Some("`legacy`".to_string()); + let mut output4 = String::new(); + generate_field_row(&mut output4, &field4, "TestStruct", &global_context).unwrap(); + assert!(output4.contains("`legacy`")); + assert!(!output4.contains("**Required**")); + } + + #[test] + fn test_generate_field_row_comprehensive_integration() { + // Test a field with all possible attributes + let mut field = create_field_doc( + "comprehensive_field", + "A comprehensive field demonstrating all features.\n\nThis includes multiple paragraphs.", + ); + field.default_value = Some("`[\"default\", \"values\"]`".to_string()); + field.required = Some(false); + field.units = Some("milliseconds (range: 100-5000)".to_string()); + field.notes = Some(vec![ + "This is the first note with [`TEST_CONSTANT`]".to_string(), + "This is the second note referencing [`NodeConfig::test_field`]".to_string(), + ]); + field.deprecated = + Some("Use new_comprehensive_field instead. Will be removed in v3.0.".to_string()); + field.toml_example = + Some("comprehensive_field = [\n \"value1\",\n \"value2\"\n]".to_string()); + + let global_context = create_mock_global_context(); + let mut output = String::new(); + + generate_field_row(&mut output, &field, "NodeConfig", &global_context).unwrap(); + + // Verify field name with deprecation strikethrough + assert!(output.contains("~~")); + assert!(output.contains("comprehensive_field")); + + // Verify description processing + assert!(output.contains("A comprehensive field")); + assert!(output.contains("This includes multiple paragraphs")); + + // Verify default value (since required=false and has default) + assert!(output.contains("`[\"default\", \"values\"]`")); + assert!(!output.contains("**Required**")); + assert!(!output.contains("*Optional*")); + + // Verify units + assert!(output.contains("**Units:** milliseconds (range: 100-5000)")); + + // Verify notes with intralink processing + assert!(output.contains("**Notes:**")); + assert!(output.contains("- This is the first note with `42`")); // Constant resolved + assert!( + output.contains("- This is the second note referencing [test_field](#node-test_field)") + ); // Intralink + + // Verify deprecation warning + assert!(output.contains("**⚠️ DEPRECATED:**")); + assert!(output.contains("Use new_comprehensive_field instead")); + + // Verify TOML example with proper formatting + assert!(output.contains("**Example:**")); + assert!(output.contains("
"));
+        assert!(output.contains("comprehensive_field = ["));
+        assert!(output.contains("\"value1\","));
+        assert!(output.contains("\"value2\""));
+        assert!(output.contains("
")); + } + + #[test] + fn test_load_section_name_mappings_file_not_found() { + let result = load_section_name_mappings("nonexistent.json"); + assert!(result.is_err()); + assert!(result.unwrap_err().to_string().contains("Failed to read")); + } + + #[test] + fn test_load_section_name_mappings_invalid_json() { + use std::io::Write; + + use tempfile::NamedTempFile; + + let mut temp_file = NamedTempFile::new().unwrap(); + writeln!(temp_file, "invalid json content").unwrap(); + + let result = load_section_name_mappings(temp_file.path().to_str().unwrap()); + assert!(result.is_err()); + assert!( + result + .unwrap_err() + .to_string() + .contains("Failed to parse section name mappings JSON") + ); + } + + #[test] + fn test_load_template_file_not_found() { + let result = load_template("nonexistent_template.md"); + assert!(result.is_err()); + assert!( + result + .unwrap_err() + .to_string() + .contains("Failed to read template file") + ); + } + + #[test] + fn test_render_template_complex_substitutions() { + let template = "Hello {{name}}! Your score is {{score}}. {{missing}} should stay as is."; + let mut variables = HashMap::new(); + variables.insert("name".to_string(), "Alice".to_string()); + variables.insert("score".to_string(), "100".to_string()); + + let result = render_template(template, variables); + assert_eq!( + result, + "Hello Alice! Your score is 100. {{missing}} should stay as is." + ); + } + + #[test] + fn test_render_template_empty_variables() { + let template = "Template with {{variable}} that won't be replaced"; + let result = render_template(template, HashMap::new()); + assert_eq!(result, "Template with {{variable}} that won't be replaced"); + } + + #[test] + fn test_render_template_multiple_same_variable() { + let template = "{{name}} said hello to {{name}} twice"; + let mut variables = HashMap::new(); + variables.insert("name".to_string(), "Bob".to_string()); + + let result = render_template(template, variables); + assert_eq!(result, "Bob said hello to Bob twice"); + } + + #[test] + fn test_generate_markdown_error_paths() { + // Test with invalid template path + let config_docs = create_config_docs(vec![]); + let custom_mappings = HashMap::new(); + + let result = generate_markdown(&config_docs, "nonexistent_template.md", &custom_mappings); + assert!(result.is_err()); + assert!( + result + .unwrap_err() + .to_string() + .contains("Failed to read template file") + ); + } + + #[test] + fn test_escape_markdown_edge_cases() { + assert_eq!(escape_markdown(""), ""); + assert_eq!(escape_markdown("normal text"), "normal text"); + assert_eq!(escape_markdown("[text]"), "\\[text\\]"); + assert_eq!(escape_markdown("table|cell"), "table\\|cell"); + assert_eq!(escape_markdown("[table|cell]"), "\\[table\\|cell\\]"); + } + + #[test] + fn test_escape_markdown_table_edge_cases() { + assert_eq!(escape_markdown_table(""), ""); + assert_eq!(escape_markdown_table("normal text"), "normal text"); + assert_eq!(escape_markdown_table("table|cell"), "table\\|cell"); + assert_eq!(escape_markdown_table("line\nbreak"), "line
break"); + assert_eq!( + escape_markdown_table("both|pipe\nand newline"), + "both\\|pipe
and newline" + ); + } + + #[test] + fn test_section_anchor_edge_cases() { + assert_eq!(section_anchor(""), "#"); + assert_eq!(section_anchor("UPPERCASE"), "#uppercase"); + assert_eq!( + section_anchor("[complex section name]"), + "#complex-section-name" + ); + assert_eq!(section_anchor("Multiple Spaces"), "#multiple---spaces"); + assert_eq!( + section_anchor("[section_with_underscores]"), + "#section_with_underscores" + ); + } + + #[test] + fn test_process_reference_edge_cases() { + let global_context = create_mock_global_context(); + + // Test unknown reference + let result = process_reference("UNKNOWN_CONSTANT", &global_context, "TestStruct"); + assert_eq!(result, "`UNKNOWN_CONSTANT`"); + + // Test malformed struct::field reference + let result = process_reference("OnlyStruct::", &global_context, "TestStruct"); + assert_eq!(result, "`OnlyStruct::`"); + + // Test empty reference + let result = process_reference("", &global_context, "TestStruct"); + assert_eq!(result, "``"); + } + + #[test] + fn test_struct_to_section_name_edge_cases() { + let mappings = HashMap::new(); + + // Test empty struct name + assert_eq!(struct_to_section_name("", &mappings), "[]"); + + // Test struct name with special characters + assert_eq!( + struct_to_section_name("Struct_With_Underscores", &mappings), + "[struct_with_underscores]" + ); + + // Test very long struct name + let long_name = "A".repeat(100); + let expected = format!("[{}]", "a".repeat(100)); + assert_eq!(struct_to_section_name(&long_name, &mappings), expected); + } +} diff --git a/contrib/tools/config-docs-generator/templates/reference_template.md b/contrib/tools/config-docs-generator/templates/reference_template.md new file mode 100644 index 0000000000..cee6c3dbf3 --- /dev/null +++ b/contrib/tools/config-docs-generator/templates/reference_template.md @@ -0,0 +1,11 @@ +# Stacks Node Configuration Reference + +This document provides a comprehensive reference for all configuration options available in the Stacks node TOML configuration file. + +The configuration is automatically generated from the Rust source code documentation. + +## Table of Contents + +{{toc_content}} + +{{struct_sections}} diff --git a/contrib/tools/config-docs-generator/tests/fixtures/minimal_config.json b/contrib/tools/config-docs-generator/tests/fixtures/minimal_config.json new file mode 100644 index 0000000000..3ede4781ca --- /dev/null +++ b/contrib/tools/config-docs-generator/tests/fixtures/minimal_config.json @@ -0,0 +1,80 @@ +{ + "structs": [ + { + "name": "NodeConfig", + "description": "Configuration settings for a Stacks node", + "fields": [ + { + "name": "name", + "description": "Human-readable name for the node. Primarily used for identification in testing\nenvironments (e.g., deriving log file names, temporary directory names).", + "default_value": "`\"helium-node\"`", + "notes": null, + "deprecated": null, + "toml_example": null, + "required": null, + "units": null + }, + { + "name": "seed", + "description": "The node's Bitcoin wallet private key, provided as a hex string in the config file.\nUsed to initialize the node's keychain for signing operations.\nIf [`MinerConfig::mining_key`] is not set, this seed may also be used for\nmining-related signing.", + "default_value": "Randomly generated 32 bytes", + "notes": [ + "Required if [`NodeConfig::miner`] is `true` and [`MinerConfig::mining_key`] is absent." + ], + "deprecated": null, + "toml_example": null, + "required": null, + "units": null + }, + { + "name": "rpc_bind", + "description": "The IPv4 address and port (e.g., \"0.0.0.0:20443\") on which the node's HTTP RPC\nserver should bind and listen for incoming API requests.", + "default_value": "`\"0.0.0.0:20443\"`", + "notes": null, + "deprecated": null, + "toml_example": null, + "required": null, + "units": null + }, + { + "name": "bootstrap_node", + "description": "A list of initial peer nodes used to bootstrap connections into the Stacks P2P\nnetwork. Peers are specified in a configuration file as comma-separated\nstrings in the format `\"PUBKEY@IP:PORT\"` or `\"PUBKEY@HOSTNAME:PORT\"`. DNS\nhostnames are resolved during configuration loading.", + "default_value": "`[]` (empty vector)", + "notes": null, + "deprecated": null, + "toml_example": "bootstrap_node = \"pubkey1@example.com:30444,pubkey2@192.168.1.100:20444\"", + "required": null, + "units": null + }, + { + "name": "miner", + "description": "Flag indicating whether this node should activate its mining logic and attempt to\nproduce Stacks blocks. Setting this to `true` typically requires providing\nnecessary private keys (either [`NodeConfig::seed`] or [`MinerConfig::mining_key`]).\nIt also influences default behavior for settings like\n[`NodeConfig::require_affirmed_anchor_blocks`].", + "default_value": "`false`", + "notes": null, + "deprecated": null, + "toml_example": null, + "required": null, + "units": null + }, + { + "name": "microblock_frequency", + "description": "How often to attempt producing microblocks, in milliseconds.", + "default_value": "`30_000` (30 seconds)", + "notes": [ + "Only applies when [`NodeConfig::mine_microblocks`] is true and before Epoch 2.5." + ], + "deprecated": "This setting is ignored in Epoch 2.5+.", + "toml_example": null, + "required": null, + "units": "milliseconds" + } + ] + } + ], + "referenced_constants": { + "MinerConfig::mining_key": null, + "NodeConfig::miner": null, + "NodeConfig::mine_microblocks": null, + "NodeConfig::require_affirmed_anchor_blocks": null + } +} \ No newline at end of file diff --git a/contrib/tools/config-docs-generator/tests/fixtures/test_mappings.json b/contrib/tools/config-docs-generator/tests/fixtures/test_mappings.json new file mode 100644 index 0000000000..49826a9fc4 --- /dev/null +++ b/contrib/tools/config-docs-generator/tests/fixtures/test_mappings.json @@ -0,0 +1,3 @@ +{ + "NodeConfig": "[node]" +} \ No newline at end of file diff --git a/contrib/tools/config-docs-generator/tests/fixtures/test_template.md b/contrib/tools/config-docs-generator/tests/fixtures/test_template.md new file mode 100644 index 0000000000..e25d14bb94 --- /dev/null +++ b/contrib/tools/config-docs-generator/tests/fixtures/test_template.md @@ -0,0 +1,15 @@ +# Test Configuration Reference + +This is a test template for integration testing. + +## Table of Contents + +{{toc_content}} + +## Configuration Sections + +{{struct_sections}} + +## End of Document + +Generated with test template. \ No newline at end of file diff --git a/contrib/tools/config-docs-generator/tests/integration.rs b/contrib/tools/config-docs-generator/tests/integration.rs new file mode 100644 index 0000000000..b67b62d124 --- /dev/null +++ b/contrib/tools/config-docs-generator/tests/integration.rs @@ -0,0 +1,708 @@ +use std::fs; + +use assert_cmd::Command; +use serde_json::json; +use tempfile::TempDir; + +#[test] +fn test_extract_docs_missing_arguments() { + let mut cmd = Command::cargo_bin("extract-docs").unwrap(); + let output = cmd.output().unwrap(); + + assert!(!output.status.success()); + let stderr = String::from_utf8_lossy(&output.stderr); + assert!(stderr.contains("required arguments were not provided")); +} + +#[test] +fn test_extract_docs_help() { + let mut cmd = Command::cargo_bin("extract-docs").unwrap(); + cmd.arg("--help"); + let output = cmd.output().unwrap(); + + assert!(output.status.success()); + let stdout = String::from_utf8_lossy(&output.stdout); + assert!(stdout.contains("Extract documentation from Rust source code")); +} + +#[test] +fn test_extract_docs_invalid_package() { + let temp_dir = TempDir::new().unwrap(); + let output_file = temp_dir.path().join("output.json"); + + let mut cmd = Command::cargo_bin("extract-docs").unwrap(); + cmd.args([ + "--package", + "nonexistent-package", + "--output", + output_file.to_str().unwrap(), + "--structs", + "TestStruct", + ]); + + let output = cmd.output().unwrap(); + assert!(!output.status.success()); + let stderr = String::from_utf8_lossy(&output.stderr); + assert!(stderr.contains("cargo rustdoc failed")); +} + +#[test] +fn test_generate_markdown_missing_arguments() { + let mut cmd = Command::cargo_bin("generate-markdown").unwrap(); + let output = cmd.output().unwrap(); + + assert!(!output.status.success()); + let stderr = String::from_utf8_lossy(&output.stderr); + assert!(stderr.contains("required arguments were not provided")); +} + +#[test] +fn test_generate_markdown_help() { + let mut cmd = Command::cargo_bin("generate-markdown").unwrap(); + cmd.arg("--help"); + let output = cmd.output().unwrap(); + + assert!(output.status.success()); + let stdout = String::from_utf8_lossy(&output.stdout); + assert!(stdout.contains("Generate Markdown documentation")); +} + +#[test] +fn test_generate_markdown_missing_input_file() { + let temp_dir = TempDir::new().unwrap(); + let output_file = temp_dir.path().join("output.md"); + let template_file = temp_dir.path().join("template.md"); + let mappings_file = temp_dir.path().join("mappings.json"); + + // Create valid template and mappings files + fs::write( + &template_file, + "# Test\n{{toc_content}}\n{{struct_sections}}", + ) + .unwrap(); + fs::write(&mappings_file, "{}").unwrap(); + + let mut cmd = Command::cargo_bin("generate-markdown").unwrap(); + cmd.args([ + "--input", + "nonexistent.json", + "--output", + output_file.to_str().unwrap(), + "--template", + template_file.to_str().unwrap(), + "--section-name-mappings", + mappings_file.to_str().unwrap(), + ]); + + let output = cmd.output().unwrap(); + assert!(!output.status.success()); + let stderr = String::from_utf8_lossy(&output.stderr); + assert!(stderr.contains("Failed to read input JSON file")); +} + +#[test] +fn test_generate_markdown_invalid_input_json() { + let temp_dir = TempDir::new().unwrap(); + let input_file = temp_dir.path().join("input.json"); + let output_file = temp_dir.path().join("output.md"); + let template_file = temp_dir.path().join("template.md"); + let mappings_file = temp_dir.path().join("mappings.json"); + + // Create invalid JSON input + fs::write(&input_file, "invalid json").unwrap(); + fs::write( + &template_file, + "# Test\n{{toc_content}}\n{{struct_sections}}", + ) + .unwrap(); + fs::write(&mappings_file, "{}").unwrap(); + + let mut cmd = Command::cargo_bin("generate-markdown").unwrap(); + cmd.args([ + "--input", + input_file.to_str().unwrap(), + "--output", + output_file.to_str().unwrap(), + "--template", + template_file.to_str().unwrap(), + "--section-name-mappings", + mappings_file.to_str().unwrap(), + ]); + + let output = cmd.output().unwrap(); + assert!(!output.status.success()); + let stderr = String::from_utf8_lossy(&output.stderr); + assert!(stderr.contains("Failed to parse input JSON")); +} + +#[test] +fn test_generate_markdown_missing_template_file() { + let temp_dir = TempDir::new().unwrap(); + let input_file = temp_dir.path().join("input.json"); + let output_file = temp_dir.path().join("output.md"); + let mappings_file = temp_dir.path().join("mappings.json"); + + // Create valid input and mappings + let config_docs = json!({ + "structs": [], + "referenced_constants": {} + }); + fs::write( + &input_file, + serde_json::to_string_pretty(&config_docs).unwrap(), + ) + .unwrap(); + fs::write(&mappings_file, "{}").unwrap(); + + let mut cmd = Command::cargo_bin("generate-markdown").unwrap(); + cmd.args([ + "--input", + input_file.to_str().unwrap(), + "--output", + output_file.to_str().unwrap(), + "--template", + "nonexistent_template.md", + "--section-name-mappings", + mappings_file.to_str().unwrap(), + ]); + + let output = cmd.output().unwrap(); + assert!(!output.status.success()); + let stderr = String::from_utf8_lossy(&output.stderr); + assert!(stderr.contains("Failed to read template file")); +} + +#[test] +fn test_generate_markdown_invalid_mappings_json() { + let temp_dir = TempDir::new().unwrap(); + let input_file = temp_dir.path().join("input.json"); + let output_file = temp_dir.path().join("output.md"); + let template_file = temp_dir.path().join("template.md"); + let mappings_file = temp_dir.path().join("mappings.json"); + + // Create valid input and template, invalid mappings + let config_docs = json!({ + "structs": [], + "referenced_constants": {} + }); + fs::write( + &input_file, + serde_json::to_string_pretty(&config_docs).unwrap(), + ) + .unwrap(); + fs::write( + &template_file, + "# Test\n{{toc_content}}\n{{struct_sections}}", + ) + .unwrap(); + fs::write(&mappings_file, "invalid json").unwrap(); + + let mut cmd = Command::cargo_bin("generate-markdown").unwrap(); + cmd.args([ + "--input", + input_file.to_str().unwrap(), + "--output", + output_file.to_str().unwrap(), + "--template", + template_file.to_str().unwrap(), + "--section-name-mappings", + mappings_file.to_str().unwrap(), + ]); + + let output = cmd.output().unwrap(); + assert!(!output.status.success()); + let stderr = String::from_utf8_lossy(&output.stderr); + assert!(stderr.contains("Failed to parse section name mappings JSON")); +} + +#[test] +fn test_generate_markdown_successful_execution() { + let temp_dir = TempDir::new().unwrap(); + let input_file = temp_dir.path().join("input.json"); + let output_file = temp_dir.path().join("output.md"); + let template_file = temp_dir.path().join("template.md"); + let mappings_file = temp_dir.path().join("mappings.json"); + + // Create valid test data + let config_docs = json!({ + "structs": [{ + "name": "TestStruct", + "description": "A test configuration struct", + "fields": [{ + "name": "test_field", + "description": "A test field", + "default_value": "`42`", + "notes": null, + "deprecated": null, + "toml_example": null, + "required": null, + "units": null + }] + }], + "referenced_constants": {} + }); + + fs::write( + &input_file, + serde_json::to_string_pretty(&config_docs).unwrap(), + ) + .unwrap(); + fs::write( + &template_file, + "# Configuration Reference\n\n{{toc_content}}\n\n{{struct_sections}}", + ) + .unwrap(); + fs::write(&mappings_file, r#"{"TestStruct": "[test]"}"#).unwrap(); + + let mut cmd = Command::cargo_bin("generate-markdown").unwrap(); + cmd.args([ + "--input", + input_file.to_str().unwrap(), + "--output", + output_file.to_str().unwrap(), + "--template", + template_file.to_str().unwrap(), + "--section-name-mappings", + mappings_file.to_str().unwrap(), + ]); + + let output = cmd.output().unwrap(); + assert!(output.status.success()); + let stdout = String::from_utf8_lossy(&output.stdout); + assert!(stdout.contains("Successfully generated Markdown documentation")); + + // Verify output file was created and contains expected content + let output_content = fs::read_to_string(&output_file).unwrap(); + assert!(output_content.contains("Configuration Reference")); + assert!(output_content.contains("[test]")); + assert!(output_content.contains("test_field")); + assert!(output_content.contains("A test field")); +} + +#[test] +fn test_generate_markdown_file_write_permission_error() { + let temp_dir = TempDir::new().unwrap(); + let input_file = temp_dir.path().join("input.json"); + let template_file = temp_dir.path().join("template.md"); + let mappings_file = temp_dir.path().join("mappings.json"); + + // Create valid input files + let config_docs = json!({ + "structs": [], + "referenced_constants": {} + }); + fs::write( + &input_file, + serde_json::to_string_pretty(&config_docs).unwrap(), + ) + .unwrap(); + fs::write( + &template_file, + "# Test\n{{toc_content}}\n{{struct_sections}}", + ) + .unwrap(); + fs::write(&mappings_file, "{}").unwrap(); + + // Try to write to a directory that doesn't exist (should fail) + let invalid_output = "/nonexistent/path/output.md"; + + let mut cmd = Command::cargo_bin("generate-markdown").unwrap(); + cmd.args([ + "--input", + input_file.to_str().unwrap(), + "--output", + invalid_output, + "--template", + template_file.to_str().unwrap(), + "--section-name-mappings", + mappings_file.to_str().unwrap(), + ]); + + let output = cmd.output().unwrap(); + assert!(!output.status.success()); + let stderr = String::from_utf8_lossy(&output.stderr); + assert!(stderr.contains("Failed to write output file")); +} + +// New comprehensive integration tests using real fixture data + +#[test] +fn test_generate_markdown_with_real_fixture_data() { + let temp_dir = TempDir::new().unwrap(); + let output_file = temp_dir.path().join("output.md"); + + // Use the fixture files we created + let mut cmd = Command::cargo_bin("generate-markdown").unwrap(); + cmd.args([ + "--input", + "tests/fixtures/minimal_config.json", + "--output", + output_file.to_str().unwrap(), + "--template", + "tests/fixtures/test_template.md", + "--section-name-mappings", + "tests/fixtures/test_mappings.json", + ]); + + let output = cmd.output().unwrap(); + assert!(output.status.success()); + let stdout = String::from_utf8_lossy(&output.stdout); + assert!(stdout.contains("Successfully generated Markdown documentation")); + + // Verify output file was created and contains expected realistic content + let output_content = fs::read_to_string(&output_file).unwrap(); + assert!(output_content.contains("Test Configuration Reference")); + assert!(output_content.contains("[node]")); + assert!(output_content.contains("Configuration settings for a Stacks node")); + assert!(output_content.contains("seed")); + assert!(output_content.contains("rpc_bind")); + assert!(output_content.contains("MinerConfig::mining_key")); + assert!(output_content.contains("DEPRECATED")); + assert!(output_content.contains("Units")); + assert!(output_content.contains("milliseconds")); + assert!(output_content.contains("Example:")); + assert!(output_content.contains("bootstrap_node")); +} + +#[test] +fn test_generate_markdown_with_complex_field_features() { + let temp_dir = TempDir::new().unwrap(); + let input_file = temp_dir.path().join("input.json"); + let output_file = temp_dir.path().join("output.md"); + let template_file = temp_dir.path().join("template.md"); + let mappings_file = temp_dir.path().join("mappings.json"); + + // Create test data with all field features + let config_docs = json!({ + "structs": [{ + "name": "ComplexStruct", + "description": "A struct with all possible field features", + "fields": [ + { + "name": "basic_field", + "description": "A basic field with description", + "default_value": "`42`", + "notes": null, + "deprecated": null, + "toml_example": null, + "required": null, + "units": null + }, + { + "name": "field_with_notes", + "description": "A field with multiple notes", + "default_value": "`\"default\"`", + "notes": [ + "First note about this field", + "Second note with more details" + ], + "deprecated": null, + "toml_example": null, + "required": null, + "units": null + }, + { + "name": "deprecated_field", + "description": "A deprecated field", + "default_value": "`false`", + "notes": null, + "deprecated": "This field is deprecated since version 2.0", + "toml_example": null, + "required": null, + "units": null + }, + { + "name": "field_with_toml_example", + "description": "A field with TOML example", + "default_value": "`{}`", + "notes": null, + "deprecated": null, + "toml_example": "field_with_toml_example = { key = \"value\", number = 123 }", + "required": null, + "units": null + }, + { + "name": "required_field", + "description": "A required field", + "default_value": null, + "notes": null, + "deprecated": null, + "toml_example": null, + "required": true, + "units": null + }, + { + "name": "field_with_units", + "description": "A field with units", + "default_value": "`30_000`", + "notes": null, + "deprecated": null, + "toml_example": null, + "required": null, + "units": "milliseconds" + } + ] + }], + "referenced_constants": {} + }); + + fs::write( + &input_file, + serde_json::to_string_pretty(&config_docs).unwrap(), + ) + .unwrap(); + fs::write( + &template_file, + "# Complex Test\n\n{{toc_content}}\n\n{{struct_sections}}", + ) + .unwrap(); + fs::write(&mappings_file, r#"{}"#).unwrap(); + + let mut cmd = Command::cargo_bin("generate-markdown").unwrap(); + cmd.args([ + "--input", + input_file.to_str().unwrap(), + "--output", + output_file.to_str().unwrap(), + "--template", + template_file.to_str().unwrap(), + "--section-name-mappings", + mappings_file.to_str().unwrap(), + ]); + + let output = cmd.output().unwrap(); + assert!(output.status.success()); + let stdout = String::from_utf8_lossy(&output.stdout); + assert!(stdout.contains("Successfully generated Markdown documentation")); + + // Verify all field features are properly rendered + let output_content = fs::read_to_string(&output_file).unwrap(); + assert!(output_content.contains("Complex Test")); + assert!(output_content.contains("[complexstruct]")); + assert!(output_content.contains("basic_field")); + assert!(output_content.contains("field_with_notes")); + assert!(output_content.contains("First note about this field")); + assert!(output_content.contains("Second note with more details")); + assert!(output_content.contains("deprecated_field")); + assert!(output_content.contains("**⚠️ DEPRECATED:**")); + assert!(output_content.contains("deprecated since version 2.0")); + assert!(output_content.contains("field_with_toml_example")); + assert!(output_content.contains("required_field")); + assert!(output_content.contains("**Required**")); + assert!(output_content.contains("field_with_units")); + assert!(output_content.contains("milliseconds")); +} + +#[test] +fn test_generate_markdown_with_constant_references() { + let temp_dir = TempDir::new().unwrap(); + let input_file = temp_dir.path().join("input.json"); + let output_file = temp_dir.path().join("output.md"); + let template_file = temp_dir.path().join("template.md"); + let mappings_file = temp_dir.path().join("mappings.json"); + + // Create test data with constant references + let config_docs = json!({ + "structs": [{ + "name": "ConfigWithConstants", + "description": "A struct with constant references", + "fields": [{ + "name": "timeout", + "description": "Connection timeout using [`DEFAULT_TIMEOUT`] constant", + "default_value": "[`DEFAULT_TIMEOUT`]", + "notes": ["See [`MAX_RETRIES`] for retry logic"], + "deprecated": null, + "toml_example": null, + "required": null, + "units": "seconds" + }] + }], + "referenced_constants": { + "DEFAULT_TIMEOUT": "30", + "MAX_RETRIES": "3" + } + }); + + fs::write( + &input_file, + serde_json::to_string_pretty(&config_docs).unwrap(), + ) + .unwrap(); + fs::write( + &template_file, + "# Constants Test\n\n{{toc_content}}\n\n{{struct_sections}}", + ) + .unwrap(); + fs::write(&mappings_file, r#"{}"#).unwrap(); + + let mut cmd = Command::cargo_bin("generate-markdown").unwrap(); + cmd.args([ + "--input", + input_file.to_str().unwrap(), + "--output", + output_file.to_str().unwrap(), + "--template", + template_file.to_str().unwrap(), + "--section-name-mappings", + mappings_file.to_str().unwrap(), + ]); + + let output = cmd.output().unwrap(); + assert!(output.status.success()); + let stdout = String::from_utf8_lossy(&output.stdout); + assert!(stdout.contains("Successfully generated Markdown documentation")); + + // Verify constant references are properly processed + let output_content = fs::read_to_string(&output_file).unwrap(); + assert!(output_content.contains("Constants Test")); + assert!(output_content.contains("[configwithconstants]")); + assert!(output_content.contains("timeout")); + assert!(output_content.contains("30")); // DEFAULT_TIMEOUT resolved + assert!(output_content.contains("3")); // MAX_RETRIES resolved +} + +#[test] +fn test_generate_markdown_empty_struct_description() { + let temp_dir = TempDir::new().unwrap(); + let input_file = temp_dir.path().join("input.json"); + let output_file = temp_dir.path().join("output.md"); + let template_file = temp_dir.path().join("template.md"); + let mappings_file = temp_dir.path().join("mappings.json"); + + // Create test data with null struct description + let config_docs = json!({ + "structs": [{ + "name": "NoDescStruct", + "description": null, + "fields": [{ + "name": "field", + "description": "A field in a struct with no description", + "default_value": "`value`", + "notes": null, + "deprecated": null, + "toml_example": null, + "required": null, + "units": null + }] + }], + "referenced_constants": {} + }); + + fs::write( + &input_file, + serde_json::to_string_pretty(&config_docs).unwrap(), + ) + .unwrap(); + fs::write( + &template_file, + "# No Description Test\n\n{{toc_content}}\n\n{{struct_sections}}", + ) + .unwrap(); + fs::write(&mappings_file, r#"{}"#).unwrap(); + + let mut cmd = Command::cargo_bin("generate-markdown").unwrap(); + cmd.args([ + "--input", + input_file.to_str().unwrap(), + "--output", + output_file.to_str().unwrap(), + "--template", + template_file.to_str().unwrap(), + "--section-name-mappings", + mappings_file.to_str().unwrap(), + ]); + + let output = cmd.output().unwrap(); + assert!(output.status.success()); + let stdout = String::from_utf8_lossy(&output.stdout); + assert!(stdout.contains("Successfully generated Markdown documentation")); + + // Verify struct with null description is handled properly + let output_content = fs::read_to_string(&output_file).unwrap(); + assert!(output_content.contains("No Description Test")); + assert!(output_content.contains("[nodescstruct]")); + assert!(output_content.contains("field")); +} + +#[test] +fn test_generate_markdown_multiple_structs() { + let temp_dir = TempDir::new().unwrap(); + let input_file = temp_dir.path().join("input.json"); + let output_file = temp_dir.path().join("output.md"); + let template_file = temp_dir.path().join("template.md"); + let mappings_file = temp_dir.path().join("mappings.json"); + + // Create test data with multiple structs + let config_docs = json!({ + "structs": [ + { + "name": "FirstStruct", + "description": "The first configuration struct", + "fields": [{ + "name": "first_field", + "description": "Field in first struct", + "default_value": "`1`", + "notes": null, + "deprecated": null, + "toml_example": null, + "required": null, + "units": null + }] + }, + { + "name": "SecondStruct", + "description": "The second configuration struct", + "fields": [{ + "name": "second_field", + "description": "Field in second struct", + "default_value": "`2`", + "notes": null, + "deprecated": null, + "toml_example": null, + "required": null, + "units": null + }] + } + ], + "referenced_constants": {} + }); + + fs::write( + &input_file, + serde_json::to_string_pretty(&config_docs).unwrap(), + ) + .unwrap(); + fs::write( + &template_file, + "# Multiple Structs Test\n\n{{toc_content}}\n\n{{struct_sections}}", + ) + .unwrap(); + fs::write(&mappings_file, r#"{}"#).unwrap(); + + let mut cmd = Command::cargo_bin("generate-markdown").unwrap(); + cmd.args([ + "--input", + input_file.to_str().unwrap(), + "--output", + output_file.to_str().unwrap(), + "--template", + template_file.to_str().unwrap(), + "--section-name-mappings", + mappings_file.to_str().unwrap(), + ]); + + let output = cmd.output().unwrap(); + assert!(output.status.success()); + let stdout = String::from_utf8_lossy(&output.stdout); + assert!(stdout.contains("Successfully generated Markdown documentation")); + + // Verify both structs are properly rendered + let output_content = fs::read_to_string(&output_file).unwrap(); + assert!(output_content.contains("Multiple Structs Test")); + assert!(output_content.contains("[firststruct]")); + assert!(output_content.contains("[secondstruct]")); + assert!(output_content.contains("first_field")); + assert!(output_content.contains("second_field")); + assert!(output_content.contains("The first configuration struct")); + assert!(output_content.contains("The second configuration struct")); +}